diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index 2883a0db..68358f72 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -26,3 +26,5 @@ jobs:
         run: cargo check
       - name: Clippy
         run: cargo clippy
+      - name: Test
+        run: cargo test
diff --git a/Cargo.lock b/Cargo.lock
index e4949974..2b2eaa22 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -394,6 +394,19 @@ dependencies = [
  "typenum",
 ]
 
+[[package]]
+name = "dashmap"
+version = "5.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
+dependencies = [
+ "cfg-if",
+ "hashbrown 0.14.2",
+ "lock_api",
+ "once_cell",
+ "parking_lot_core",
+]
+
 [[package]]
 name = "data-url"
 version = "0.3.0"
@@ -571,6 +584,21 @@ dependencies = [
  "percent-encoding",
 ]
 
+[[package]]
+name = "futures"
+version = "0.3.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
 [[package]]
 name = "futures-channel"
 version = "0.3.29"
@@ -578,6 +606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb"
 dependencies = [
  "futures-core",
+ "futures-sink",
 ]
 
 [[package]]
@@ -586,6 +615,23 @@ version = "0.3.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c"
 
+[[package]]
+name = "futures-executor"
+version = "0.3.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
+
 [[package]]
 name = "futures-macro"
 version = "0.3.29"
@@ -615,9 +661,13 @@ version = "0.3.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104"
 dependencies = [
+ "futures-channel",
  "futures-core",
+ "futures-io",
  "futures-macro",
+ "futures-sink",
  "futures-task",
+ "memchr",
  "pin-project-lite",
  "pin-utils",
  "slab",
@@ -1399,6 +1449,7 @@ dependencies = [
  "serde",
  "serde_json",
  "serde_yaml",
+ "serial_test",
  "sha1",
  "sha2",
  "simple_asn1",
@@ -1614,6 +1665,31 @@ dependencies = [
  "unsafe-libyaml",
 ]
 
+[[package]]
+name = "serial_test"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d"
+dependencies = [
+ "dashmap",
+ "futures",
+ "lazy_static",
+ "log",
+ "parking_lot",
+ "serial_test_derive",
+]
+
+[[package]]
+name = "serial_test_derive"
+version = "3.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "sha1"
 version = "0.10.6"
diff --git a/Cargo.toml b/Cargo.toml
index 4bc05b48..2c53290c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -47,6 +47,7 @@ data-url = "0.3.0"
 prost = "0.12.1"
 log = "0.4.20"
 itertools = "0.12.1"
+serial_test = "3.0.0" [build-dependencies] prost-build = "0.12.1" diff --git a/src/config.rs b/src/config.rs index 8df0a6e6..152a4871 100644 --- a/src/config.rs +++ b/src/config.rs @@ -631,8 +631,10 @@ pub(crate) fn parse_additional_trust_bundle(value: &str) -> Result { #[cfg(test)] mod tests { use super::*; + use serial_test::serial; #[test] + #[serial] fn test_redact_config_file_raw_private_keys() { let raw_config = r#" use_key_rules: @@ -653,6 +655,7 @@ use_key_rules: } #[test] + #[serial] fn test_redact_config_file_raw_pull_secret() { let raw_config = r#" pull_secret: | @@ -670,6 +673,7 @@ pull_secret: | } #[test] + #[serial] fn test_dont_redact_config_file_raw() { let raw_config = r#" use_key_rules: @@ -687,6 +691,7 @@ use_key_rules: } #[test] + #[serial] fn test_pull_secret_not_serialized() { let mut config = RecertConfig::empty().unwrap(); diff --git a/src/ocp_postprocess/additional_trust_bundle/filesystem_rename.rs b/src/ocp_postprocess/additional_trust_bundle/filesystem_rename.rs index 2d46b61b..f12c1fb9 100644 --- a/src/ocp_postprocess/additional_trust_bundle/filesystem_rename.rs +++ b/src/ocp_postprocess/additional_trust_bundle/filesystem_rename.rs @@ -11,7 +11,6 @@ pub(crate) async fn fix_filesystem_ca_trust_anchors(additional_trust_bundle: &st file_utils::globvec(dir, "**/anchors/openshift-config-user-ca-bundle.crt")? .into_iter() .map(|file_path| { - let crt_file_path = file_path.clone(); let additional_trust_bundle = additional_trust_bundle.to_string(); tokio::spawn(async move { async move { diff --git a/vendor/dashmap/.cargo-checksum.json b/vendor/dashmap/.cargo-checksum.json new file mode 100644 index 00000000..45eec83d --- /dev/null +++ b/vendor/dashmap/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"c758b23fe682b4e447bc5350e1e838577ed6ec4ec8833607ff16f57f1d0495a7","LICENSE":"16692e8cee4aa06e3913787497eba2d47c42002014136f5da67be6ee640e28a3","README.md":"7f7b4edce5b4baabf639df517a66bfa2cd8ff759f09060c17fe6da54531b5819","rust-toolchain.toml":"8e14ad3424881d09f3b219e947b4b95f12726295abde287f91d10af8c5a7dd13","src/arbitrary.rs":"236abf377cbae8393961e0e4327ce2515d5aeeddfb1fabb1f2035c69dfea290c","src/iter.rs":"086a7be951241253b3ea60c64fb324aaa32742038461994e3946bdd8712ce452","src/iter_set.rs":"5327da951dc93d30b293aeb7a805666ee4b8e6364881348ab5d86b23c29a0e13","src/lib.rs":"6ec142f4ad577da1e399738a4bbcdee0baae5290d5b5274e6d295d92bcf113ba","src/lock.rs":"22b1013ee3cbd1598c26403552faf8e5b44fe98a8ff4c8812da15d57bdeee4b3","src/mapref/entry.rs":"c03777d2b863a98fea517ad97037741930c766e7cbd17aea2aab1fbf0e11c719","src/mapref/mod.rs":"15bd45cfc642a9e3e77bbfbf2d9fad9c5fac0ff6904214a3c501e262242ec8b0","src/mapref/multiple.rs":"17a87879229d78f75d71817d100aa0a210d764d6b66e4a05b17a3e092d17e306","src/mapref/one.rs":"0026aff85dd5411b5c181396b8b5b9a9cbc524720a8f409fd4aa62c1b5501146","src/rayon/map.rs":"68d541bb6c9372fe13e44f09d9fcba4f214f52f16649e326486f8976dc03cd99","src/rayon/read_only.rs":"99dc0083f6840f59b2090bc9c5f9767a44088832e63935cb8f6bc7eca8b86102","src/rayon/set.rs":"21fe2ca8c58c8ff1bc753f5e2eb6131afd598211eea375f2ac7190cd0f9abcba","src/read_only.rs":"012a6e44195125eb3c0fd028123b348f5294c7e083bf1c3c0bf8400201c35e27","src/serde.rs":"9854d44b05ebe400ef8ac8ea9c8f140e1c59870d74aaf3ccaf72e82c6dc40905","src/set.rs":"cf75d91db42d18afec314afe9a461cc6e86ddedd11ff9248ed7345780948e5e0","src/setref/mod.rs":"cc39e406a333dc6c04398f4b1336fb400e3a8360c387466d8a91f5d7f4ed40a7","src/setref/multiple.rs":"2270749e83f80dbb4761448f0629ecd02b0b4268f76834236d1167844e6d5e37","src/setref/one.rs":"697
38583f2a160f3907e540277ccd88e0036406b3ead66b3d4015ddd938d6937","src/t.rs":"9b40ebfba22369813bf159ed60fa70e742ae72bfaec6aee91a716cba91cb8f0d","src/try_result.rs":"81d1dd396f70b0aa2a6e1969851b706cbfdfb09c802ac9d33dcace882aa28dd1","src/util.rs":"7a8096a713cf04e60d6c9e38cd366314ed74472abab4bb8a3a9ed081a0e0301b"},"package":"978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"} \ No newline at end of file diff --git a/vendor/dashmap/Cargo.toml b/vendor/dashmap/Cargo.toml new file mode 100644 index 00000000..cb7480b5 --- /dev/null +++ b/vendor/dashmap/Cargo.toml @@ -0,0 +1,73 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.65" +name = "dashmap" +version = "5.5.3" +authors = ["Acrimon "] +description = "Blazing fast concurrent HashMap for Rust." +homepage = "https://github.com/xacrimon/dashmap" +documentation = "https://docs.rs/dashmap" +readme = "README.md" +keywords = [ + "atomic", + "concurrent", + "hashmap", +] +categories = [ + "concurrency", + "algorithms", + "data-structures", +] +license = "MIT" +repository = "https://github.com/xacrimon/dashmap" + +[package.metadata.docs.rs] +features = [ + "rayon", + "raw-api", + "serde", +] + +[dependencies.arbitrary] +version = "1.3.0" +optional = true + +[dependencies.cfg-if] +version = "1.0.0" + +[dependencies.hashbrown] +version = "0.14.0" +default-features = false + +[dependencies.lock_api] +version = "0.4.10" + +[dependencies.once_cell] +version = "1.18.0" + +[dependencies.parking_lot_core] +version = "0.9.8" + +[dependencies.rayon] +version = "1.7.0" +optional = true + +[dependencies.serde] +version = "1.0.188" +features = ["derive"] +optional = true + +[features] +inline = ["hashbrown/inline-more"] +raw-api = [] diff --git a/vendor/dashmap/LICENSE b/vendor/dashmap/LICENSE new file mode 100644 index 00000000..5a8b9ef8 --- /dev/null +++ b/vendor/dashmap/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Acrimon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
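dashmap (together with the futures crates added to Cargo.lock above) enters the dependency tree only transitively, via serial_test, the single direct dependency this change adds; its sources are vendored under vendor/dashmap as part of the same change. For context, a minimal sketch of what the `#[serial]` attribute added to the src/config.rs tests does. The test body and the `RECERT_EXAMPLE` environment variable are purely illustrative stand-ins for shared process-wide state, not anything taken from this repository:

```rust
use serial_test::serial;

// Tests marked #[serial] run one at a time instead of in parallel, which
// matters when they mutate process-wide state (environment variables are
// used here only as an illustration of such state).
#[test]
#[serial]
fn env_var_round_trip() {
    std::env::set_var("RECERT_EXAMPLE", "1"); // hypothetical variable name
    assert_eq!(std::env::var("RECERT_EXAMPLE").unwrap(), "1");
    std::env::remove_var("RECERT_EXAMPLE");
}
```

Tests without the attribute keep running in parallel; only the `#[serial]`-marked ones are serialized against each other.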
diff --git a/vendor/dashmap/README.md b/vendor/dashmap/README.md new file mode 100644 index 00000000..2eb721eb --- /dev/null +++ b/vendor/dashmap/README.md @@ -0,0 +1,67 @@ +# DashMap + +Blazingly fast concurrent map in Rust. + +DashMap is an implementation of a concurrent associative array/hashmap in Rust. + +DashMap tries to implement an easy to use API similar to `std::collections::HashMap` +with some slight changes to handle concurrency. + +DashMap tries to be very simple to use and to be a direct replacement for `RwLock>`. +To accomplish these goals, all methods take `&self` instead of modifying methods taking `&mut self`. +This allows you to put a DashMap in an `Arc` and share it between threads while still being able to modify it. + +DashMap puts great effort into performance and aims to be as fast as possible. +If you have any suggestions or tips do not hesitate to open an issue or a PR. + +[![version](https://img.shields.io/crates/v/dashmap)](https://crates.io/crates/dashmap) + +[![documentation](https://docs.rs/dashmap/badge.svg)](https://docs.rs/dashmap) + +[![downloads](https://img.shields.io/crates/d/dashmap)](https://crates.io/crates/dashmap) + +[![minimum rustc version](https://img.shields.io/badge/rustc-1.65-orange.svg)](https://crates.io/crates/dashmap) + +## Cargo features + +- `serde` - Enables serde support. + +- `raw-api` - Enables the unstable raw-shard api. + +- `rayon` - Enables rayon support. + +- `inline` - Enables `inline-more` feature from the `hashbrown` crate. Can lead to better performance, but with the cost of longer compile-time. + +- `arbitrary` - Enables support for the `arbitrary` crate. + +## Contributing + +DashMap gladly accepts contributions! +Do not hesitate to open issues or PR's. + +I will take a look as soon as I have time for it. + +That said I do not get paid (yet) to work on open-source. This means +that my time is limited and my work here comes after my personal life. + +## Performance + +A comprehensive benchmark suite including DashMap can be found [here](https://github.com/xacrimon/conc-map-bench). + +## Special thanks + +- [Jon Gjengset](https://github.com/jonhoo) + +- [Yato](https://github.com/RustyYato) + +- [Karl Bergström](https://github.com/kabergstrom) + +- [Dylan DPC](https://github.com/Dylan-DPC) + +- [Lokathor](https://github.com/Lokathor) + +- [namibj](https://github.com/namibj) + +## License + +This project is licensed under MIT. 
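The README above describes the core usage contract: every method takes `&self`, so a DashMap can be placed in an `Arc` and mutated from several threads without an outer `RwLock`. A minimal sketch of that pattern, with arbitrary key and value types:

```rust
use dashmap::DashMap;
use std::sync::Arc;
use std::thread;

fn main() {
    // No RwLock<HashMap<...>> wrapper needed: insertion goes through &self.
    let map = Arc::new(DashMap::new());

    let handles: Vec<_> = (0..4)
        .map(|i| {
            let map = Arc::clone(&map);
            thread::spawn(move || {
                map.insert(i, i * 10);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }

    assert_eq!(map.len(), 4);
    assert_eq!(*map.get(&2).unwrap(), 20);
}
```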
diff --git a/vendor/dashmap/rust-toolchain.toml b/vendor/dashmap/rust-toolchain.toml new file mode 100644 index 00000000..a32876aa --- /dev/null +++ b/vendor/dashmap/rust-toolchain.toml @@ -0,0 +1,4 @@ +[toolchain] +channel = "1.65" +components = ["rustfmt", "clippy"] +profile = "minimal" diff --git a/vendor/dashmap/src/arbitrary.rs b/vendor/dashmap/src/arbitrary.rs new file mode 100644 index 00000000..a760964b --- /dev/null +++ b/vendor/dashmap/src/arbitrary.rs @@ -0,0 +1,13 @@ +use arbitrary::{Arbitrary, Unstructured}; +use core::hash::BuildHasher; + +impl<'a, K, V, S> Arbitrary<'a> for crate::DashMap +where + K: Eq + std::hash::Hash + Arbitrary<'a>, + V: Arbitrary<'a>, + S: Default + BuildHasher + Clone, +{ + fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + u.arbitrary_iter()?.collect() + } +} diff --git a/vendor/dashmap/src/iter.rs b/vendor/dashmap/src/iter.rs new file mode 100644 index 00000000..ce50e739 --- /dev/null +++ b/vendor/dashmap/src/iter.rs @@ -0,0 +1,315 @@ +use super::mapref::multiple::{RefMulti, RefMutMulti}; +use super::util; +use crate::lock::{RwLockReadGuard, RwLockWriteGuard}; +use crate::t::Map; +use crate::util::SharedValue; +use crate::{DashMap, HashMap}; +use core::hash::{BuildHasher, Hash}; +use core::mem; +use hashbrown::hash_map; +use std::collections::hash_map::RandomState; +use std::sync::Arc; + +/// Iterator over a DashMap yielding key value pairs. +/// +/// # Examples +/// +/// ``` +/// use dashmap::DashMap; +/// +/// let map = DashMap::new(); +/// map.insert("hello", "world"); +/// map.insert("alex", "steve"); +/// let pairs: Vec<(&'static str, &'static str)> = map.into_iter().collect(); +/// assert_eq!(pairs.len(), 2); +/// ``` +pub struct OwningIter { + map: DashMap, + shard_i: usize, + current: Option>, +} + +impl OwningIter { + pub(crate) fn new(map: DashMap) -> Self { + Self { + map, + shard_i: 0, + current: None, + } + } +} + +type GuardOwningIter = hash_map::IntoIter>; + +impl Iterator for OwningIter { + type Item = (K, V); + + fn next(&mut self) -> Option { + loop { + if let Some(current) = self.current.as_mut() { + if let Some((k, v)) = current.next() { + return Some((k, v.into_inner())); + } + } + + if self.shard_i == self.map._shard_count() { + return None; + } + + //let guard = unsafe { self.map._yield_read_shard(self.shard_i) }; + let mut shard_wl = unsafe { self.map._yield_write_shard(self.shard_i) }; + + let hasher = self.map._hasher(); + + let map = mem::replace(&mut *shard_wl, HashMap::with_hasher(hasher)); + + drop(shard_wl); + + let iter = map.into_iter(); + + //unsafe { ptr::write(&mut self.current, Some((arcee, iter))); } + self.current = Some(iter); + + self.shard_i += 1; + } + } +} + +unsafe impl Send for OwningIter +where + K: Eq + Hash + Send, + V: Send, + S: BuildHasher + Clone + Send, +{ +} + +unsafe impl Sync for OwningIter +where + K: Eq + Hash + Sync, + V: Sync, + S: BuildHasher + Clone + Sync, +{ +} + +type GuardIter<'a, K, V, S> = ( + Arc>>, + hash_map::Iter<'a, K, SharedValue>, +); + +type GuardIterMut<'a, K, V, S> = ( + Arc>>, + hash_map::IterMut<'a, K, SharedValue>, +); + +/// Iterator over a DashMap yielding immutable references. 
+/// +/// # Examples +/// +/// ``` +/// use dashmap::DashMap; +/// +/// let map = DashMap::new(); +/// map.insert("hello", "world"); +/// assert_eq!(map.iter().count(), 1); +/// ``` +pub struct Iter<'a, K, V, S = RandomState, M = DashMap> { + map: &'a M, + shard_i: usize, + current: Option>, +} + +impl<'i, K: Clone + Hash + Eq, V: Clone, S: Clone + BuildHasher> Clone for Iter<'i, K, V, S> { + fn clone(&self) -> Self { + Iter::new(self.map) + } +} + +unsafe impl<'a, 'i, K, V, S, M> Send for Iter<'i, K, V, S, M> +where + K: 'a + Eq + Hash + Send, + V: 'a + Send, + S: 'a + BuildHasher + Clone, + M: Map<'a, K, V, S>, +{ +} + +unsafe impl<'a, 'i, K, V, S, M> Sync for Iter<'i, K, V, S, M> +where + K: 'a + Eq + Hash + Sync, + V: 'a + Sync, + S: 'a + BuildHasher + Clone, + M: Map<'a, K, V, S>, +{ +} + +impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> Iter<'a, K, V, S, M> { + pub(crate) fn new(map: &'a M) -> Self { + Self { + map, + shard_i: 0, + current: None, + } + } +} + +impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> Iterator + for Iter<'a, K, V, S, M> +{ + type Item = RefMulti<'a, K, V, S>; + + fn next(&mut self) -> Option { + loop { + if let Some(current) = self.current.as_mut() { + if let Some((k, v)) = current.1.next() { + let guard = current.0.clone(); + + return unsafe { Some(RefMulti::new(guard, k, v.get())) }; + } + } + + if self.shard_i == self.map._shard_count() { + return None; + } + + let guard = unsafe { self.map._yield_read_shard(self.shard_i) }; + + let sref: &HashMap = unsafe { util::change_lifetime_const(&*guard) }; + + let iter = sref.iter(); + + self.current = Some((Arc::new(guard), iter)); + + self.shard_i += 1; + } + } +} + +/// Iterator over a DashMap yielding mutable references. 
+/// +/// # Examples +/// +/// ``` +/// use dashmap::DashMap; +/// +/// let map = DashMap::new(); +/// map.insert("Johnny", 21); +/// map.iter_mut().for_each(|mut r| *r += 1); +/// assert_eq!(*map.get("Johnny").unwrap(), 22); +/// ``` +pub struct IterMut<'a, K, V, S = RandomState, M = DashMap> { + map: &'a M, + shard_i: usize, + current: Option>, +} + +unsafe impl<'a, 'i, K, V, S, M> Send for IterMut<'i, K, V, S, M> +where + K: 'a + Eq + Hash + Send, + V: 'a + Send, + S: 'a + BuildHasher + Clone, + M: Map<'a, K, V, S>, +{ +} + +unsafe impl<'a, 'i, K, V, S, M> Sync for IterMut<'i, K, V, S, M> +where + K: 'a + Eq + Hash + Sync, + V: 'a + Sync, + S: 'a + BuildHasher + Clone, + M: Map<'a, K, V, S>, +{ +} + +impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> + IterMut<'a, K, V, S, M> +{ + pub(crate) fn new(map: &'a M) -> Self { + Self { + map, + shard_i: 0, + current: None, + } + } +} + +impl<'a, K: Eq + Hash, V, S: 'a + BuildHasher + Clone, M: Map<'a, K, V, S>> Iterator + for IterMut<'a, K, V, S, M> +{ + type Item = RefMutMulti<'a, K, V, S>; + + fn next(&mut self) -> Option { + loop { + if let Some(current) = self.current.as_mut() { + if let Some((k, v)) = current.1.next() { + let guard = current.0.clone(); + + unsafe { + let k = util::change_lifetime_const(k); + + let v = &mut *v.as_ptr(); + + return Some(RefMutMulti::new(guard, k, v)); + } + } + } + + if self.shard_i == self.map._shard_count() { + return None; + } + + let mut guard = unsafe { self.map._yield_write_shard(self.shard_i) }; + + let sref: &mut HashMap = unsafe { util::change_lifetime_mut(&mut *guard) }; + + let iter = sref.iter_mut(); + + self.current = Some((Arc::new(guard), iter)); + + self.shard_i += 1; + } + } +} + +#[cfg(test)] +mod tests { + use crate::DashMap; + + #[test] + fn iter_mut_manual_count() { + let map = DashMap::new(); + + map.insert("Johnny", 21); + + assert_eq!(map.len(), 1); + + let mut c = 0; + + for shard in map.shards() { + c += shard.write().iter_mut().count(); + } + + assert_eq!(c, 1); + } + + #[test] + fn iter_mut_count() { + let map = DashMap::new(); + + map.insert("Johnny", 21); + + assert_eq!(map.len(), 1); + + assert_eq!(map.iter_mut().count(), 1); + } + + #[test] + fn iter_count() { + let map = DashMap::new(); + + map.insert("Johnny", 21); + + assert_eq!(map.len(), 1); + + assert_eq!(map.iter().count(), 1); + } +} diff --git a/vendor/dashmap/src/iter_set.rs b/vendor/dashmap/src/iter_set.rs new file mode 100644 index 00000000..619a5b50 --- /dev/null +++ b/vendor/dashmap/src/iter_set.rs @@ -0,0 +1,71 @@ +use crate::setref::multiple::RefMulti; +use crate::t::Map; +use core::hash::{BuildHasher, Hash}; + +pub struct OwningIter { + inner: crate::iter::OwningIter, +} + +impl OwningIter { + pub(crate) fn new(inner: crate::iter::OwningIter) -> Self { + Self { inner } + } +} + +impl Iterator for OwningIter { + type Item = K; + + fn next(&mut self) -> Option { + self.inner.next().map(|(k, _)| k) + } +} + +unsafe impl Send for OwningIter +where + K: Eq + Hash + Send, + S: BuildHasher + Clone + Send, +{ +} + +unsafe impl Sync for OwningIter +where + K: Eq + Hash + Sync, + S: BuildHasher + Clone + Sync, +{ +} + +pub struct Iter<'a, K, S, M> { + inner: crate::iter::Iter<'a, K, (), S, M>, +} + +unsafe impl<'a, 'i, K, S, M> Send for Iter<'i, K, S, M> +where + K: 'a + Eq + Hash + Send, + S: 'a + BuildHasher + Clone, + M: Map<'a, K, (), S>, +{ +} + +unsafe impl<'a, 'i, K, S, M> Sync for Iter<'i, K, S, M> +where + K: 'a + Eq + Hash + Sync, + S: 'a + BuildHasher + Clone, + M: Map<'a, K, (), S>, 
+{ +} + +impl<'a, K: Eq + Hash, S: 'a + BuildHasher + Clone, M: Map<'a, K, (), S>> Iter<'a, K, S, M> { + pub(crate) fn new(inner: crate::iter::Iter<'a, K, (), S, M>) -> Self { + Self { inner } + } +} + +impl<'a, K: Eq + Hash, S: 'a + BuildHasher + Clone, M: Map<'a, K, (), S>> Iterator + for Iter<'a, K, S, M> +{ + type Item = RefMulti<'a, K, S>; + + fn next(&mut self) -> Option { + self.inner.next().map(RefMulti::new) + } +} diff --git a/vendor/dashmap/src/lib.rs b/vendor/dashmap/src/lib.rs new file mode 100644 index 00000000..34e5d8ba --- /dev/null +++ b/vendor/dashmap/src/lib.rs @@ -0,0 +1,1465 @@ +#![allow(clippy::type_complexity)] + +#[cfg(feature = "arbitrary")] +mod arbitrary; +pub mod iter; +pub mod iter_set; +mod lock; +pub mod mapref; +mod read_only; +#[cfg(feature = "serde")] +mod serde; +mod set; +pub mod setref; +mod t; +pub mod try_result; +mod util; + +#[cfg(feature = "rayon")] +pub mod rayon { + pub mod map; + pub mod read_only; + pub mod set; +} + +#[cfg(not(feature = "raw-api"))] +use crate::lock::{RwLock, RwLockReadGuard, RwLockWriteGuard}; + +#[cfg(feature = "raw-api")] +pub use crate::lock::{RawRwLock, RwLock, RwLockReadGuard, RwLockWriteGuard}; + +use cfg_if::cfg_if; +use core::borrow::Borrow; +use core::fmt; +use core::hash::{BuildHasher, Hash, Hasher}; +use core::iter::FromIterator; +use core::ops::{BitAnd, BitOr, Shl, Shr, Sub}; +use iter::{Iter, IterMut, OwningIter}; +use mapref::entry::{Entry, OccupiedEntry, VacantEntry}; +use mapref::multiple::RefMulti; +use mapref::one::{Ref, RefMut}; +use once_cell::sync::OnceCell; +pub use read_only::ReadOnlyView; +pub use set::DashSet; +use std::collections::hash_map::RandomState; +pub use t::Map; +use try_result::TryResult; + +cfg_if! { + if #[cfg(feature = "raw-api")] { + pub use util::SharedValue; + } else { + use util::SharedValue; + } +} + +pub(crate) type HashMap = hashbrown::HashMap, S>; + +// Temporary reimplementation of [`std::collections::TryReserveError`] +// util [`std::collections::TryReserveError`] stabilises. +// We cannot easily create `std::collections` error type from `hashbrown` error type +// without access to `TryReserveError::kind` method. +#[non_exhaustive] +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct TryReserveError {} + +fn default_shard_amount() -> usize { + static DEFAULT_SHARD_AMOUNT: OnceCell = OnceCell::new(); + *DEFAULT_SHARD_AMOUNT.get_or_init(|| { + (std::thread::available_parallelism().map_or(1, usize::from) * 4).next_power_of_two() + }) +} + +fn ncb(shard_amount: usize) -> usize { + shard_amount.trailing_zeros() as usize +} + +/// DashMap is an implementation of a concurrent associative array/hashmap in Rust. +/// +/// DashMap tries to implement an easy to use API similar to `std::collections::HashMap` +/// with some slight changes to handle concurrency. +/// +/// DashMap tries to be very simple to use and to be a direct replacement for `RwLock>`. +/// To accomplish this, all methods take `&self` instead of modifying methods taking `&mut self`. +/// This allows you to put a DashMap in an `Arc` and share it between threads while being able to modify it. +/// +/// Documentation mentioning locking behaviour acts in the reference frame of the calling thread. +/// This means that it is safe to ignore it across multiple threads. 
+pub struct DashMap { + shift: usize, + shards: Box<[RwLock>]>, + hasher: S, +} + +impl Clone for DashMap { + fn clone(&self) -> Self { + let mut inner_shards = Vec::new(); + + for shard in self.shards.iter() { + let shard = shard.read(); + + inner_shards.push(RwLock::new((*shard).clone())); + } + + Self { + shift: self.shift, + shards: inner_shards.into_boxed_slice(), + hasher: self.hasher.clone(), + } + } +} + +impl Default for DashMap +where + K: Eq + Hash, + S: Default + BuildHasher + Clone, +{ + fn default() -> Self { + Self::with_hasher(Default::default()) + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a> DashMap { + /// Creates a new DashMap with a capacity of 0. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let reviews = DashMap::new(); + /// reviews.insert("Veloren", "What a fantastic game!"); + /// ``` + pub fn new() -> Self { + DashMap::with_hasher(RandomState::default()) + } + + /// Creates a new DashMap with a specified starting capacity. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let mappings = DashMap::with_capacity(2); + /// mappings.insert(2, 4); + /// mappings.insert(8, 16); + /// ``` + pub fn with_capacity(capacity: usize) -> Self { + DashMap::with_capacity_and_hasher(capacity, RandomState::default()) + } + + /// Creates a new DashMap with a specified shard amount + /// + /// shard_amount should greater than 0 and be a power of two. + /// If a shard_amount which is not a power of two is provided, the function will panic. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let mappings = DashMap::with_shard_amount(32); + /// mappings.insert(2, 4); + /// mappings.insert(8, 16); + /// ``` + pub fn with_shard_amount(shard_amount: usize) -> Self { + Self::with_capacity_and_hasher_and_shard_amount(0, RandomState::default(), shard_amount) + } + + /// Creates a new DashMap with a specified capacity and shard amount. + /// + /// shard_amount should greater than 0 and be a power of two. + /// If a shard_amount which is not a power of two is provided, the function will panic. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let mappings = DashMap::with_capacity_and_shard_amount(32, 32); + /// mappings.insert(2, 4); + /// mappings.insert(8, 16); + /// ``` + pub fn with_capacity_and_shard_amount(capacity: usize, shard_amount: usize) -> Self { + Self::with_capacity_and_hasher_and_shard_amount( + capacity, + RandomState::default(), + shard_amount, + ) + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> DashMap { + /// Wraps this `DashMap` into a read-only view. This view allows to obtain raw references to the stored values. + pub fn into_read_only(self) -> ReadOnlyView { + ReadOnlyView::new(self) + } + + /// Creates a new DashMap with a capacity of 0 and the provided hasher. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use std::collections::hash_map::RandomState; + /// + /// let s = RandomState::new(); + /// let reviews = DashMap::with_hasher(s); + /// reviews.insert("Veloren", "What a fantastic game!"); + /// ``` + pub fn with_hasher(hasher: S) -> Self { + Self::with_capacity_and_hasher(0, hasher) + } + + /// Creates a new DashMap with a specified starting capacity and hasher. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use std::collections::hash_map::RandomState; + /// + /// let s = RandomState::new(); + /// let mappings = DashMap::with_capacity_and_hasher(2, s); + /// mappings.insert(2, 4); + /// mappings.insert(8, 16); + /// ``` + pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { + Self::with_capacity_and_hasher_and_shard_amount(capacity, hasher, default_shard_amount()) + } + + /// Creates a new DashMap with a specified hasher and shard amount + /// + /// shard_amount should be greater than 0 and a power of two. + /// If a shard_amount which is not a power of two is provided, the function will panic. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use std::collections::hash_map::RandomState; + /// + /// let s = RandomState::new(); + /// let mappings = DashMap::with_hasher_and_shard_amount(s, 32); + /// mappings.insert(2, 4); + /// mappings.insert(8, 16); + /// ``` + pub fn with_hasher_and_shard_amount(hasher: S, shard_amount: usize) -> Self { + Self::with_capacity_and_hasher_and_shard_amount(0, hasher, shard_amount) + } + + /// Creates a new DashMap with a specified starting capacity, hasher and shard_amount. + /// + /// shard_amount should greater than 0 and be a power of two. + /// If a shard_amount which is not a power of two is provided, the function will panic. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use std::collections::hash_map::RandomState; + /// + /// let s = RandomState::new(); + /// let mappings = DashMap::with_capacity_and_hasher_and_shard_amount(2, s, 32); + /// mappings.insert(2, 4); + /// mappings.insert(8, 16); + /// ``` + pub fn with_capacity_and_hasher_and_shard_amount( + mut capacity: usize, + hasher: S, + shard_amount: usize, + ) -> Self { + assert!(shard_amount > 1); + assert!(shard_amount.is_power_of_two()); + + let shift = util::ptr_size_bits() - ncb(shard_amount); + + if capacity != 0 { + capacity = (capacity + (shard_amount - 1)) & !(shard_amount - 1); + } + + let cps = capacity / shard_amount; + + let shards = (0..shard_amount) + .map(|_| RwLock::new(HashMap::with_capacity_and_hasher(cps, hasher.clone()))) + .collect(); + + Self { + shift, + shards, + hasher, + } + } + + /// Hash a given item to produce a usize. + /// Uses the provided or default HashBuilder. + pub fn hash_usize(&self, item: &T) -> usize { + let mut hasher = self.hasher.build_hasher(); + + item.hash(&mut hasher); + + hasher.finish() as usize + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Allows you to peek at the inner shards that store your data. + /// You should probably not use this unless you know what you are doing. + /// + /// Requires the `raw-api` feature to be enabled. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map = DashMap::<(), ()>::new(); + /// println!("Amount of shards: {}", map.shards().len()); + /// ``` + pub fn shards(&self) -> &[RwLock>] { + &self.shards + } + + /// Provides mutable access to the inner shards that store your data. + /// You should probably not use this unless you know what you are doing. + /// + /// Requires the `raw-api` feature to be enabled. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use dashmap::SharedValue; + /// + /// let mut map = DashMap::::new(); + /// let shard_ind = map.determine_map(&42); + /// map.shards_mut()[shard_ind].get_mut().insert(42, SharedValue::new("forty two")); + /// assert_eq!(*map.get(&42).unwrap(), "forty two"); + /// ``` + pub fn shards_mut(&mut self) -> &mut [RwLock>] { + &mut self.shards + } + + /// Consumes this `DashMap` and returns the inner shards. + /// You should probably not use this unless you know what you are doing. + /// + /// Requires the `raw-api` feature to be enabled. + /// + /// See [`DashMap::shards()`] and [`DashMap::shards_mut()`] for more information. + pub fn into_shards(self) -> Box<[RwLock>]> { + self.shards + } + } else { + #[allow(dead_code)] + pub(crate) fn shards(&self) -> &[RwLock>] { + &self.shards + } + + #[allow(dead_code)] + pub(crate) fn shards_mut(&mut self) -> &mut [RwLock>] { + &mut self.shards + } + + #[allow(dead_code)] + pub(crate) fn into_shards(self) -> Box<[RwLock>]> { + self.shards + } + } + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Finds which shard a certain key is stored in. + /// You should probably not use this unless you know what you are doing. + /// Note that shard selection is dependent on the default or provided HashBuilder. + /// + /// Requires the `raw-api` feature to be enabled. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map = DashMap::new(); + /// map.insert("coca-cola", 1.4); + /// println!("coca-cola is stored in shard: {}", map.determine_map("coca-cola")); + /// ``` + pub fn determine_map(&self, key: &Q) -> usize + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + self.determine_shard(hash) + } + } + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Finds which shard a certain hash is stored in. + /// + /// Requires the `raw-api` feature to be enabled. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map: DashMap = DashMap::new(); + /// let key = "key"; + /// let hash = map.hash_usize(&key); + /// println!("hash is stored in shard: {}", map.determine_shard(hash)); + /// ``` + pub fn determine_shard(&self, hash: usize) -> usize { + // Leave the high 7 bits for the HashBrown SIMD tag. + (hash << 7) >> self.shift + } + } else { + + pub(crate) fn determine_shard(&self, hash: usize) -> usize { + // Leave the high 7 bits for the HashBrown SIMD tag. + (hash << 7) >> self.shift + } + } + } + + /// Returns a reference to the map's [`BuildHasher`]. + /// + /// # Examples + /// + /// ```rust + /// use dashmap::DashMap; + /// use std::collections::hash_map::RandomState; + /// + /// let hasher = RandomState::new(); + /// let map: DashMap = DashMap::new(); + /// let hasher: &RandomState = map.hasher(); + /// ``` + /// + /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html + pub fn hasher(&self) -> &S { + &self.hasher + } + + /// Inserts a key and a value into the map. Returns the old value associated with the key if there was one. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map = DashMap::new(); + /// map.insert("I am the key!", "And I am the value!"); + /// ``` + pub fn insert(&self, key: K, value: V) -> Option { + self._insert(key, value) + } + + /// Removes an entry from the map, returning the key and value if they existed in the map. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let soccer_team = DashMap::new(); + /// soccer_team.insert("Jack", "Goalie"); + /// assert_eq!(soccer_team.remove("Jack").unwrap().1, "Goalie"); + /// ``` + pub fn remove(&self, key: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._remove(key) + } + + /// Removes an entry from the map, returning the key and value + /// if the entry existed and the provided conditional function returned true. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let soccer_team = DashMap::new(); + /// soccer_team.insert("Sam", "Forward"); + /// soccer_team.remove_if("Sam", |_, position| position == &"Goalie"); + /// assert!(soccer_team.contains_key("Sam")); + /// ``` + /// ``` + /// use dashmap::DashMap; + /// + /// let soccer_team = DashMap::new(); + /// soccer_team.insert("Sam", "Forward"); + /// soccer_team.remove_if("Sam", |_, position| position == &"Forward"); + /// assert!(!soccer_team.contains_key("Sam")); + /// ``` + pub fn remove_if(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._remove_if(key, f) + } + + pub fn remove_if_mut(&self, key: &Q, f: impl FnOnce(&K, &mut V) -> bool) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._remove_if_mut(key, f) + } + + /// Creates an iterator over a DashMap yielding immutable references. + /// + /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let words = DashMap::new(); + /// words.insert("hello", "world"); + /// assert_eq!(words.iter().count(), 1); + /// ``` + pub fn iter(&'a self) -> Iter<'a, K, V, S, DashMap> { + self._iter() + } + + /// Iterator over a DashMap yielding mutable references. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map = DashMap::new(); + /// map.insert("Johnny", 21); + /// map.iter_mut().for_each(|mut r| *r += 1); + /// assert_eq!(*map.get("Johnny").unwrap(), 22); + /// ``` + pub fn iter_mut(&'a self) -> IterMut<'a, K, V, S, DashMap> { + self._iter_mut() + } + + /// Get an immutable reference to an entry in the map + /// + /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let youtubers = DashMap::new(); + /// youtubers.insert("Bosnian Bill", 457000); + /// assert_eq!(*youtubers.get("Bosnian Bill").unwrap(), 457000); + /// ``` + pub fn get(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._get(key) + } + + /// Get a mutable reference to an entry in the map + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let class = DashMap::new(); + /// class.insert("Albin", 15); + /// *class.get_mut("Albin").unwrap() -= 1; + /// assert_eq!(*class.get("Albin").unwrap(), 14); + /// ``` + pub fn get_mut(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._get_mut(key) + } + + /// Get an immutable reference to an entry in the map, if the shard is not locked. + /// If the shard is locked, the function will return [TryResult::Locked]. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use dashmap::try_result::TryResult; + /// + /// let map = DashMap::new(); + /// map.insert("Johnny", 21); + /// + /// assert_eq!(*map.try_get("Johnny").unwrap(), 21); + /// + /// let _result1_locking = map.get_mut("Johnny"); + /// + /// let result2 = map.try_get("Johnny"); + /// assert!(result2.is_locked()); + /// ``` + pub fn try_get(&'a self, key: &Q) -> TryResult> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._try_get(key) + } + + /// Get a mutable reference to an entry in the map, if the shard is not locked. + /// If the shard is locked, the function will return [TryResult::Locked]. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// use dashmap::try_result::TryResult; + /// + /// let map = DashMap::new(); + /// map.insert("Johnny", 21); + /// + /// *map.try_get_mut("Johnny").unwrap() += 1; + /// assert_eq!(*map.get("Johnny").unwrap(), 22); + /// + /// let _result1_locking = map.get("Johnny"); + /// + /// let result2 = map.try_get_mut("Johnny"); + /// assert!(result2.is_locked()); + /// ``` + pub fn try_get_mut(&'a self, key: &Q) -> TryResult> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._try_get_mut(key) + } + + /// Remove excess capacity to reduce memory usage. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + pub fn shrink_to_fit(&self) { + self._shrink_to_fit(); + } + + /// Retain elements that whose predicates return true + /// and discard elements whose predicates return false. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let people = DashMap::new(); + /// people.insert("Albin", 15); + /// people.insert("Jones", 22); + /// people.insert("Charlie", 27); + /// people.retain(|_, v| *v > 20); + /// assert_eq!(people.len(), 2); + /// ``` + pub fn retain(&self, f: impl FnMut(&K, &mut V) -> bool) { + self._retain(f); + } + + /// Fetches the total number of key-value pairs stored in the map. + /// + /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let people = DashMap::new(); + /// people.insert("Albin", 15); + /// people.insert("Jones", 22); + /// people.insert("Charlie", 27); + /// assert_eq!(people.len(), 3); + /// ``` + pub fn len(&self) -> usize { + self._len() + } + + /// Checks if the map is empty or not. + /// + /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map = DashMap::<(), ()>::new(); + /// assert!(map.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self._is_empty() + } + + /// Removes all key-value pairs in the map. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let stats = DashMap::new(); + /// stats.insert("Goals", 4); + /// assert!(!stats.is_empty()); + /// stats.clear(); + /// assert!(stats.is_empty()); + /// ``` + pub fn clear(&self) { + self._clear(); + } + + /// Returns how many key-value pairs the map can store without reallocating. + /// + /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. + pub fn capacity(&self) -> usize { + self._capacity() + } + + /// Modify a specific value according to a function. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let stats = DashMap::new(); + /// stats.insert("Goals", 4); + /// stats.alter("Goals", |_, v| v * 2); + /// assert_eq!(*stats.get("Goals").unwrap(), 8); + /// ``` + /// + /// # Panics + /// + /// If the given closure panics, then `alter` will abort the process + pub fn alter(&self, key: &Q, f: impl FnOnce(&K, V) -> V) + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._alter(key, f); + } + + /// Modify every value in the map according to a function. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let stats = DashMap::new(); + /// stats.insert("Wins", 4); + /// stats.insert("Losses", 2); + /// stats.alter_all(|_, v| v + 1); + /// assert_eq!(*stats.get("Wins").unwrap(), 5); + /// assert_eq!(*stats.get("Losses").unwrap(), 3); + /// ``` + /// + /// # Panics + /// + /// If the given closure panics, then `alter_all` will abort the process + pub fn alter_all(&self, f: impl FnMut(&K, V) -> V) { + self._alter_all(f); + } + + /// Scoped access into an item of the map according to a function. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let warehouse = DashMap::new(); + /// warehouse.insert(4267, ("Banana", 100)); + /// warehouse.insert(2359, ("Pear", 120)); + /// let fruit = warehouse.view(&4267, |_k, v| *v); + /// assert_eq!(fruit, Some(("Banana", 100))); + /// ``` + /// + /// # Panics + /// + /// If the given closure panics, then `view` will abort the process + pub fn view(&self, key: &Q, f: impl FnOnce(&K, &V) -> R) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._view(key, f) + } + + /// Checks if the map contains a specific key. + /// + /// **Locking behaviour:** May deadlock if called when holding a mutable reference into the map. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let team_sizes = DashMap::new(); + /// team_sizes.insert("Dakota Cherries", 23); + /// assert!(team_sizes.contains_key("Dakota Cherries")); + /// ``` + pub fn contains_key(&self, key: &Q) -> bool + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._contains_key(key) + } + + /// Advanced entry API that tries to mimic `std::collections::HashMap`. + /// See the documentation on `dashmap::mapref::entry` for more details. + /// + /// **Locking behaviour:** May deadlock if called when holding any sort of reference into the map. + pub fn entry(&'a self, key: K) -> Entry<'a, K, V, S> { + self._entry(key) + } + + /// Advanced entry API that tries to mimic `std::collections::HashMap`. + /// See the documentation on `dashmap::mapref::entry` for more details. + /// + /// Returns None if the shard is currently locked. + pub fn try_entry(&'a self, key: K) -> Option> { + self._try_entry(key) + } + + /// Advanced entry API that tries to mimic `std::collections::HashMap::try_reserve`. + /// Tries to reserve capacity for at least `shard * additional` + /// and may reserve more space to avoid frequent reallocations. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error is returned. + // TODO: return std::collections::TryReserveError once std::collections::TryReserveErrorKind stabilises. + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + for shard in self.shards.iter() { + shard + .write() + .try_reserve(additional) + .map_err(|_| TryReserveError {})?; + } + Ok(()) + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: 'a + BuildHasher + Clone> Map<'a, K, V, S> + for DashMap +{ + fn _shard_count(&self) -> usize { + self.shards.len() + } + + unsafe fn _get_read_shard(&'a self, i: usize) -> &'a HashMap { + debug_assert!(i < self.shards.len()); + + &*self.shards.get_unchecked(i).data_ptr() + } + + unsafe fn _yield_read_shard(&'a self, i: usize) -> RwLockReadGuard<'a, HashMap> { + debug_assert!(i < self.shards.len()); + + self.shards.get_unchecked(i).read() + } + + unsafe fn _yield_write_shard(&'a self, i: usize) -> RwLockWriteGuard<'a, HashMap> { + debug_assert!(i < self.shards.len()); + + self.shards.get_unchecked(i).write() + } + + unsafe fn _try_yield_read_shard( + &'a self, + i: usize, + ) -> Option>> { + debug_assert!(i < self.shards.len()); + + self.shards.get_unchecked(i).try_read() + } + + unsafe fn _try_yield_write_shard( + &'a self, + i: usize, + ) -> Option>> { + debug_assert!(i < self.shards.len()); + + self.shards.get_unchecked(i).try_write() + } + + fn _insert(&self, key: K, value: V) -> Option { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let mut shard = unsafe { self._yield_write_shard(idx) }; + + shard + .insert(key, SharedValue::new(value)) + .map(|v| v.into_inner()) + } + + fn _remove(&self, key: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let mut shard = unsafe { self._yield_write_shard(idx) }; + + shard.remove_entry(key).map(|(k, v)| (k, v.into_inner())) + } + + fn _remove_if(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let mut shard = unsafe { self._yield_write_shard(idx) }; + + if let Some((kptr, vptr)) = shard.get_key_value(key) 
{ + unsafe { + let kptr: *const K = kptr; + let vptr: *mut V = vptr.as_ptr(); + + if f(&*kptr, &mut *vptr) { + shard.remove_entry(key).map(|(k, v)| (k, v.into_inner())) + } else { + None + } + } + } else { + None + } + } + + fn _remove_if_mut(&self, key: &Q, f: impl FnOnce(&K, &mut V) -> bool) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let mut shard = unsafe { self._yield_write_shard(idx) }; + + if let Some((kptr, vptr)) = shard.get_key_value(key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *mut V = vptr.as_ptr(); + + if f(&*kptr, &mut *vptr) { + shard.remove_entry(key).map(|(k, v)| (k, v.into_inner())) + } else { + None + } + } + } else { + None + } + } + + fn _iter(&'a self) -> Iter<'a, K, V, S, DashMap> { + Iter::new(self) + } + + fn _iter_mut(&'a self) -> IterMut<'a, K, V, S, DashMap> { + IterMut::new(self) + } + + fn _get(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let shard = unsafe { self._yield_read_shard(idx) }; + + if let Some((kptr, vptr)) = shard.get_key_value(key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *const V = vptr.get(); + Some(Ref::new(shard, kptr, vptr)) + } + } else { + None + } + } + + fn _get_mut(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let shard = unsafe { self._yield_write_shard(idx) }; + + if let Some((kptr, vptr)) = shard.get_key_value(key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *mut V = vptr.as_ptr(); + Some(RefMut::new(shard, kptr, vptr)) + } + } else { + None + } + } + + fn _try_get(&'a self, key: &Q) -> TryResult> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let shard = match unsafe { self._try_yield_read_shard(idx) } { + Some(shard) => shard, + None => return TryResult::Locked, + }; + + if let Some((kptr, vptr)) = shard.get_key_value(key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *const V = vptr.get(); + TryResult::Present(Ref::new(shard, kptr, vptr)) + } + } else { + TryResult::Absent + } + } + + fn _try_get_mut(&'a self, key: &Q) -> TryResult> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let shard = match unsafe { self._try_yield_write_shard(idx) } { + Some(shard) => shard, + None => return TryResult::Locked, + }; + + if let Some((kptr, vptr)) = shard.get_key_value(key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *mut V = vptr.as_ptr(); + TryResult::Present(RefMut::new(shard, kptr, vptr)) + } + } else { + TryResult::Absent + } + } + + fn _shrink_to_fit(&self) { + self.shards.iter().for_each(|s| s.write().shrink_to_fit()); + } + + fn _retain(&self, mut f: impl FnMut(&K, &mut V) -> bool) { + self.shards + .iter() + .for_each(|s| s.write().retain(|k, v| f(k, v.get_mut()))); + } + + fn _len(&self) -> usize { + self.shards.iter().map(|s| s.read().len()).sum() + } + + fn _capacity(&self) -> usize { + self.shards.iter().map(|s| s.read().capacity()).sum() + } + + fn _alter(&self, key: &Q, f: impl FnOnce(&K, V) -> V) + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + if let Some(mut r) = self.get_mut(key) { + util::map_in_place_2(r.pair_mut(), f); + } + } + + fn _alter_all(&self, mut f: impl 
FnMut(&K, V) -> V) { + self.shards.iter().for_each(|s| { + s.write() + .iter_mut() + .for_each(|(k, v)| util::map_in_place_2((k, v.get_mut()), &mut f)); + }); + } + + fn _view(&self, key: &Q, f: impl FnOnce(&K, &V) -> R) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.get(key).map(|r| { + let (k, v) = r.pair(); + f(k, v) + }) + } + + fn _entry(&'a self, key: K) -> Entry<'a, K, V, S> { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let shard = unsafe { self._yield_write_shard(idx) }; + + if let Some((kptr, vptr)) = shard.get_key_value(&key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *mut V = vptr.as_ptr(); + Entry::Occupied(OccupiedEntry::new(shard, key, (kptr, vptr))) + } + } else { + unsafe { Entry::Vacant(VacantEntry::new(shard, key)) } + } + } + + fn _try_entry(&'a self, key: K) -> Option> { + let hash = self.hash_usize(&key); + + let idx = self.determine_shard(hash); + + let shard = match unsafe { self._try_yield_write_shard(idx) } { + Some(shard) => shard, + None => return None, + }; + + if let Some((kptr, vptr)) = shard.get_key_value(&key) { + unsafe { + let kptr: *const K = kptr; + let vptr: *mut V = vptr.as_ptr(); + + Some(Entry::Occupied(OccupiedEntry::new( + shard, + key, + (kptr, vptr), + ))) + } + } else { + unsafe { Some(Entry::Vacant(VacantEntry::new(shard, key))) } + } + } + + fn _hasher(&self) -> S { + self.hasher.clone() + } +} + +impl fmt::Debug + for DashMap +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut pmap = f.debug_map(); + + for r in self { + let (k, v) = r.pair(); + + pmap.entry(k, v); + } + + pmap.finish() + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> Shl<(K, V)> for &'a DashMap { + type Output = Option; + + fn shl(self, pair: (K, V)) -> Self::Output { + self.insert(pair.0, pair.1) + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> Shr<&Q> for &'a DashMap +where + K: Borrow, + Q: Hash + Eq + ?Sized, +{ + type Output = Ref<'a, K, V, S>; + + fn shr(self, key: &Q) -> Self::Output { + self.get(key).unwrap() + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> BitOr<&Q> for &'a DashMap +where + K: Borrow, + Q: Hash + Eq + ?Sized, +{ + type Output = RefMut<'a, K, V, S>; + + fn bitor(self, key: &Q) -> Self::Output { + self.get_mut(key).unwrap() + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> Sub<&Q> for &'a DashMap +where + K: Borrow, + Q: Hash + Eq + ?Sized, +{ + type Output = Option<(K, V)>; + + fn sub(self, key: &Q) -> Self::Output { + self.remove(key) + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone, Q> BitAnd<&Q> for &'a DashMap +where + K: Borrow, + Q: Hash + Eq + ?Sized, +{ + type Output = bool; + + fn bitand(self, key: &Q) -> Self::Output { + self.contains_key(key) + } +} + +impl IntoIterator for DashMap { + type Item = (K, V); + + type IntoIter = OwningIter; + + fn into_iter(self) -> Self::IntoIter { + OwningIter::new(self) + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher + Clone> IntoIterator for &'a DashMap { + type Item = RefMulti<'a, K, V, S>; + + type IntoIter = Iter<'a, K, V, S, DashMap>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl Extend<(K, V)> for DashMap { + fn extend>(&mut self, intoiter: I) { + for pair in intoiter.into_iter() { + self.insert(pair.0, pair.1); + } + } +} + +impl FromIterator<(K, V)> for DashMap { + fn from_iter>(intoiter: I) -> Self { + let mut map = DashMap::default(); + + map.extend(intoiter); + + map + } +} 
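The operator and conversion impls directly above add collection-style sugar on top of the basic API. A small sketch restricted to what those impls define (`<<` forwards to insert, `>>` to get, `-` to remove, `&` to contains_key, and FromIterator collects key/value pairs):

```rust
use dashmap::DashMap;

fn main() {
    // FromIterator: collect key/value pairs straight into a DashMap.
    let map: DashMap<&str, i32> = vec![("a", 1), ("b", 2)].into_iter().collect();

    // Shl (<<) inserts and returns the previous value, if any.
    let previous = &map << ("a", 10);
    assert_eq!(previous, Some(1));

    // Shr (>>) gets a read reference and panics if the key is missing.
    assert_eq!(*(&map >> "b"), 2);

    // Sub (-) removes the entry; BitAnd (&) checks membership.
    assert_eq!(&map - "b", Some(("b", 2)));
    assert!(!(&map & "b"));
}
```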
+ +#[cfg(test)] +mod tests { + use crate::DashMap; + use std::collections::hash_map::RandomState; + + #[test] + fn test_basic() { + let dm = DashMap::new(); + + dm.insert(0, 0); + + assert_eq!(dm.get(&0).unwrap().value(), &0); + } + + #[test] + fn test_default() { + let dm: DashMap = DashMap::default(); + + dm.insert(0, 0); + + assert_eq!(dm.get(&0).unwrap().value(), &0); + } + + #[test] + fn test_multiple_hashes() { + let dm: DashMap = DashMap::default(); + + for i in 0..100 { + dm.insert(0, i); + + dm.insert(i, i); + } + + for i in 1..100 { + let r = dm.get(&i).unwrap(); + + assert_eq!(i, *r.value()); + + assert_eq!(i, *r.key()); + } + + let r = dm.get(&0).unwrap(); + + assert_eq!(99, *r.value()); + } + + #[test] + fn test_more_complex_values() { + #[derive(Hash, PartialEq, Debug, Clone)] + + struct T0 { + s: String, + u: u8, + } + + let dm = DashMap::new(); + + let range = 0..10; + + for i in range { + let t = T0 { + s: i.to_string(), + u: i as u8, + }; + + dm.insert(i, t.clone()); + + assert_eq!(&t, dm.get(&i).unwrap().value()); + } + } + + #[test] + fn test_different_hashers_randomstate() { + let dm_hm_default: DashMap = + DashMap::with_hasher(RandomState::new()); + + for i in 0..10 { + dm_hm_default.insert(i, i); + + assert_eq!(i, *dm_hm_default.get(&i).unwrap().value()); + } + } + + #[test] + fn test_map_view() { + let dm = DashMap::new(); + + let vegetables: [String; 4] = [ + "Salad".to_string(), + "Beans".to_string(), + "Potato".to_string(), + "Tomato".to_string(), + ]; + + // Give it some values + dm.insert(0, "Banana".to_string()); + dm.insert(4, "Pear".to_string()); + dm.insert(9, "Potato".to_string()); + dm.insert(12, "Chicken".to_string()); + + let potato_vegetableness = dm.view(&9, |_, v| vegetables.contains(v)); + assert_eq!(potato_vegetableness, Some(true)); + + let chicken_vegetableness = dm.view(&12, |_, v| vegetables.contains(v)); + assert_eq!(chicken_vegetableness, Some(false)); + + let not_in_map = dm.view(&30, |_k, _v| false); + assert_eq!(not_in_map, None); + } + + #[test] + fn test_try_get() { + { + let map = DashMap::new(); + map.insert("Johnny", 21); + + assert_eq!(*map.try_get("Johnny").unwrap(), 21); + + let _result1_locking = map.get_mut("Johnny"); + + let result2 = map.try_get("Johnny"); + assert!(result2.is_locked()); + } + + { + let map = DashMap::new(); + map.insert("Johnny", 21); + + *map.try_get_mut("Johnny").unwrap() += 1; + assert_eq!(*map.get("Johnny").unwrap(), 22); + + let _result1_locking = map.get("Johnny"); + + let result2 = map.try_get_mut("Johnny"); + assert!(result2.is_locked()); + } + } + + #[test] + fn test_try_reserve() { + let mut map: DashMap = DashMap::new(); + // DashMap is empty and doesn't allocate memory + assert_eq!(map.capacity(), 0); + + map.try_reserve(10).unwrap(); + + // And now map can hold at least 10 elements + assert!(map.capacity() >= 10); + } + + #[test] + fn test_try_reserve_errors() { + let mut map: DashMap = DashMap::new(); + + match map.try_reserve(usize::MAX) { + Err(_) => {} + _ => panic!("should have raised CapacityOverflow error"), + } + } +} diff --git a/vendor/dashmap/src/lock.rs b/vendor/dashmap/src/lock.rs new file mode 100644 index 00000000..f2c98c76 --- /dev/null +++ b/vendor/dashmap/src/lock.rs @@ -0,0 +1,300 @@ +use core::sync::atomic::{AtomicUsize, Ordering}; +use parking_lot_core::{ParkToken, SpinWait, UnparkToken}; + +pub type RwLock = lock_api::RwLock; +pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, RawRwLock, T>; +pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, 
RawRwLock, T>; + +const READERS_PARKED: usize = 0b0001; +const WRITERS_PARKED: usize = 0b0010; +const ONE_READER: usize = 0b0100; +const ONE_WRITER: usize = !(READERS_PARKED | WRITERS_PARKED); + +pub struct RawRwLock { + state: AtomicUsize, +} + +unsafe impl lock_api::RawRwLock for RawRwLock { + #[allow(clippy::declare_interior_mutable_const)] + const INIT: Self = Self { + state: AtomicUsize::new(0), + }; + + type GuardMarker = lock_api::GuardNoSend; + + #[inline] + fn try_lock_exclusive(&self) -> bool { + self.state + .compare_exchange(0, ONE_WRITER, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + } + + #[inline] + fn lock_exclusive(&self) { + if self + .state + .compare_exchange_weak(0, ONE_WRITER, Ordering::Acquire, Ordering::Relaxed) + .is_err() + { + self.lock_exclusive_slow(); + } + } + + #[inline] + unsafe fn unlock_exclusive(&self) { + if self + .state + .compare_exchange(ONE_WRITER, 0, Ordering::Release, Ordering::Relaxed) + .is_err() + { + self.unlock_exclusive_slow(); + } + } + + #[inline] + fn try_lock_shared(&self) -> bool { + self.try_lock_shared_fast() || self.try_lock_shared_slow() + } + + #[inline] + fn lock_shared(&self) { + if !self.try_lock_shared_fast() { + self.lock_shared_slow(); + } + } + + #[inline] + unsafe fn unlock_shared(&self) { + let state = self.state.fetch_sub(ONE_READER, Ordering::Release); + + if state == (ONE_READER | WRITERS_PARKED) { + self.unlock_shared_slow(); + } + } +} + +unsafe impl lock_api::RawRwLockDowngrade for RawRwLock { + #[inline] + unsafe fn downgrade(&self) { + let state = self + .state + .fetch_and(ONE_READER | WRITERS_PARKED, Ordering::Release); + if state & READERS_PARKED != 0 { + parking_lot_core::unpark_all((self as *const _ as usize) + 1, UnparkToken(0)); + } + } +} + +impl RawRwLock { + #[cold] + fn lock_exclusive_slow(&self) { + let mut acquire_with = 0; + loop { + let mut spin = SpinWait::new(); + let mut state = self.state.load(Ordering::Relaxed); + + loop { + while state & ONE_WRITER == 0 { + match self.state.compare_exchange_weak( + state, + state | ONE_WRITER | acquire_with, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return, + Err(e) => state = e, + } + } + + if state & WRITERS_PARKED == 0 { + if spin.spin() { + state = self.state.load(Ordering::Relaxed); + continue; + } + + if let Err(e) = self.state.compare_exchange_weak( + state, + state | WRITERS_PARKED, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + state = e; + continue; + } + } + + let _ = unsafe { + parking_lot_core::park( + self as *const _ as usize, + || { + let state = self.state.load(Ordering::Relaxed); + (state & ONE_WRITER != 0) && (state & WRITERS_PARKED != 0) + }, + || {}, + |_, _| {}, + ParkToken(0), + None, + ) + }; + + acquire_with = WRITERS_PARKED; + break; + } + } + } + + #[cold] + fn unlock_exclusive_slow(&self) { + let state = self.state.load(Ordering::Relaxed); + assert_eq!(state & ONE_WRITER, ONE_WRITER); + + let mut parked = state & (READERS_PARKED | WRITERS_PARKED); + assert_ne!(parked, 0); + + if parked != (READERS_PARKED | WRITERS_PARKED) { + if let Err(new_state) = + self.state + .compare_exchange(state, 0, Ordering::Release, Ordering::Relaxed) + { + assert_eq!(new_state, ONE_WRITER | READERS_PARKED | WRITERS_PARKED); + parked = READERS_PARKED | WRITERS_PARKED; + } + } + + if parked == (READERS_PARKED | WRITERS_PARKED) { + self.state.store(WRITERS_PARKED, Ordering::Release); + parked = READERS_PARKED; + } + + if parked == READERS_PARKED { + return unsafe { + parking_lot_core::unpark_all((self as *const _ as usize) + 1, 
UnparkToken(0)); + }; + } + + assert_eq!(parked, WRITERS_PARKED); + unsafe { + parking_lot_core::unpark_one(self as *const _ as usize, |_| UnparkToken(0)); + } + } + + #[inline(always)] + fn try_lock_shared_fast(&self) -> bool { + let state = self.state.load(Ordering::Relaxed); + + if let Some(new_state) = state.checked_add(ONE_READER) { + if new_state & ONE_WRITER != ONE_WRITER { + return self + .state + .compare_exchange_weak(state, new_state, Ordering::Acquire, Ordering::Relaxed) + .is_ok(); + } + } + + false + } + + #[cold] + fn try_lock_shared_slow(&self) -> bool { + let mut state = self.state.load(Ordering::Relaxed); + + while let Some(new_state) = state.checked_add(ONE_READER) { + if new_state & ONE_WRITER == ONE_WRITER { + break; + } + + match self.state.compare_exchange_weak( + state, + new_state, + Ordering::Acquire, + Ordering::Relaxed, + ) { + Ok(_) => return true, + Err(e) => state = e, + } + } + + false + } + + #[cold] + fn lock_shared_slow(&self) { + loop { + let mut spin = SpinWait::new(); + let mut state = self.state.load(Ordering::Relaxed); + + loop { + let mut backoff = SpinWait::new(); + while let Some(new_state) = state.checked_add(ONE_READER) { + assert_ne!( + new_state & ONE_WRITER, + ONE_WRITER, + "reader count overflowed", + ); + + if self + .state + .compare_exchange_weak( + state, + new_state, + Ordering::Acquire, + Ordering::Relaxed, + ) + .is_ok() + { + return; + } + + backoff.spin_no_yield(); + state = self.state.load(Ordering::Relaxed); + } + + if state & READERS_PARKED == 0 { + if spin.spin() { + state = self.state.load(Ordering::Relaxed); + continue; + } + + if let Err(e) = self.state.compare_exchange_weak( + state, + state | READERS_PARKED, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + state = e; + continue; + } + } + + let _ = unsafe { + parking_lot_core::park( + (self as *const _ as usize) + 1, + || { + let state = self.state.load(Ordering::Relaxed); + (state & ONE_WRITER == ONE_WRITER) && (state & READERS_PARKED != 0) + }, + || {}, + |_, _| {}, + ParkToken(0), + None, + ) + }; + + break; + } + } + } + + #[cold] + fn unlock_shared_slow(&self) { + if self + .state + .compare_exchange(WRITERS_PARKED, 0, Ordering::Relaxed, Ordering::Relaxed) + .is_ok() + { + unsafe { + parking_lot_core::unpark_one(self as *const _ as usize, |_| UnparkToken(0)); + } + } + } +} diff --git a/vendor/dashmap/src/mapref/entry.rs b/vendor/dashmap/src/mapref/entry.rs new file mode 100644 index 00000000..e9e6b913 --- /dev/null +++ b/vendor/dashmap/src/mapref/entry.rs @@ -0,0 +1,278 @@ +use super::one::RefMut; +use crate::lock::RwLockWriteGuard; +use crate::util; +use crate::util::SharedValue; +use crate::HashMap; +use core::hash::{BuildHasher, Hash}; +use core::mem; +use core::ptr; +use std::collections::hash_map::RandomState; + +pub enum Entry<'a, K, V, S = RandomState> { + Occupied(OccupiedEntry<'a, K, V, S>), + Vacant(VacantEntry<'a, K, V, S>), +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> Entry<'a, K, V, S> { + /// Apply a function to the stored value if it exists. + pub fn and_modify(self, f: impl FnOnce(&mut V)) -> Self { + match self { + Entry::Occupied(mut entry) => { + f(entry.get_mut()); + + Entry::Occupied(entry) + } + + Entry::Vacant(entry) => Entry::Vacant(entry), + } + } + + /// Get the key of the entry. + pub fn key(&self) -> &K { + match *self { + Entry::Occupied(ref entry) => entry.key(), + Entry::Vacant(ref entry) => entry.key(), + } + } + + /// Into the key of the entry. 
+ pub fn into_key(self) -> K { + match self { + Entry::Occupied(entry) => entry.into_key(), + Entry::Vacant(entry) => entry.into_key(), + } + } + + /// Return a mutable reference to the element if it exists, + /// otherwise insert the default and return a mutable reference to that. + pub fn or_default(self) -> RefMut<'a, K, V, S> + where + V: Default, + { + match self { + Entry::Occupied(entry) => entry.into_ref(), + Entry::Vacant(entry) => entry.insert(V::default()), + } + } + + /// Return a mutable reference to the element if it exists, + /// otherwise a provided value and return a mutable reference to that. + pub fn or_insert(self, value: V) -> RefMut<'a, K, V, S> { + match self { + Entry::Occupied(entry) => entry.into_ref(), + Entry::Vacant(entry) => entry.insert(value), + } + } + + /// Return a mutable reference to the element if it exists, + /// otherwise insert the result of a provided function and return a mutable reference to that. + pub fn or_insert_with(self, value: impl FnOnce() -> V) -> RefMut<'a, K, V, S> { + match self { + Entry::Occupied(entry) => entry.into_ref(), + Entry::Vacant(entry) => entry.insert(value()), + } + } + + pub fn or_try_insert_with( + self, + value: impl FnOnce() -> Result, + ) -> Result, E> { + match self { + Entry::Occupied(entry) => Ok(entry.into_ref()), + Entry::Vacant(entry) => Ok(entry.insert(value()?)), + } + } + + /// Sets the value of the entry, and returns a reference to the inserted value. + pub fn insert(self, value: V) -> RefMut<'a, K, V, S> { + match self { + Entry::Occupied(mut entry) => { + entry.insert(value); + entry.into_ref() + } + Entry::Vacant(entry) => entry.insert(value), + } + } + + /// Sets the value of the entry, and returns an OccupiedEntry. + /// + /// If you are not interested in the occupied entry, + /// consider [`insert`] as it doesn't need to clone the key. + /// + /// [`insert`]: Entry::insert + pub fn insert_entry(self, value: V) -> OccupiedEntry<'a, K, V, S> + where + K: Clone, + { + match self { + Entry::Occupied(mut entry) => { + entry.insert(value); + entry + } + Entry::Vacant(entry) => entry.insert_entry(value), + } + } +} + +pub struct VacantEntry<'a, K, V, S = RandomState> { + shard: RwLockWriteGuard<'a, HashMap>, + key: K, +} + +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Send for VacantEntry<'a, K, V, S> {} +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Sync for VacantEntry<'a, K, V, S> {} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> VacantEntry<'a, K, V, S> { + pub(crate) unsafe fn new(shard: RwLockWriteGuard<'a, HashMap>, key: K) -> Self { + Self { shard, key } + } + + pub fn insert(mut self, value: V) -> RefMut<'a, K, V, S> { + unsafe { + let c: K = ptr::read(&self.key); + + self.shard.insert(self.key, SharedValue::new(value)); + + let (k, v) = self.shard.get_key_value(&c).unwrap(); + + let k = util::change_lifetime_const(k); + + let v = &mut *v.as_ptr(); + + let r = RefMut::new(self.shard, k, v); + + mem::forget(c); + + r + } + } + + /// Sets the value of the entry with the VacantEntry’s key, and returns an OccupiedEntry. 
+ pub fn insert_entry(mut self, value: V) -> OccupiedEntry<'a, K, V, S> + where + K: Clone, + { + unsafe { + self.shard.insert(self.key.clone(), SharedValue::new(value)); + + let (k, v) = self.shard.get_key_value(&self.key).unwrap(); + + let kptr: *const K = k; + let vptr: *mut V = v.as_ptr(); + OccupiedEntry::new(self.shard, self.key, (kptr, vptr)) + } + } + + pub fn into_key(self) -> K { + self.key + } + + pub fn key(&self) -> &K { + &self.key + } +} + +pub struct OccupiedEntry<'a, K, V, S = RandomState> { + shard: RwLockWriteGuard<'a, HashMap>, + elem: (*const K, *mut V), + key: K, +} + +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Send for OccupiedEntry<'a, K, V, S> {} +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Sync for OccupiedEntry<'a, K, V, S> {} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> OccupiedEntry<'a, K, V, S> { + pub(crate) unsafe fn new( + shard: RwLockWriteGuard<'a, HashMap>, + key: K, + elem: (*const K, *mut V), + ) -> Self { + Self { shard, elem, key } + } + + pub fn get(&self) -> &V { + unsafe { &*self.elem.1 } + } + + pub fn get_mut(&mut self) -> &mut V { + unsafe { &mut *self.elem.1 } + } + + pub fn insert(&mut self, value: V) -> V { + mem::replace(self.get_mut(), value) + } + + pub fn into_ref(self) -> RefMut<'a, K, V, S> { + unsafe { RefMut::new(self.shard, self.elem.0, self.elem.1) } + } + + pub fn into_key(self) -> K { + self.key + } + + pub fn key(&self) -> &K { + unsafe { &*self.elem.0 } + } + + pub fn remove(mut self) -> V { + let key = unsafe { &*self.elem.0 }; + self.shard.remove(key).unwrap().into_inner() + } + + pub fn remove_entry(mut self) -> (K, V) { + let key = unsafe { &*self.elem.0 }; + let (k, v) = self.shard.remove_entry(key).unwrap(); + (k, v.into_inner()) + } + + pub fn replace_entry(mut self, value: V) -> (K, V) { + let nk = self.key; + let key = unsafe { &*self.elem.0 }; + let (k, v) = self.shard.remove_entry(key).unwrap(); + self.shard.insert(nk, SharedValue::new(value)); + (k, v.into_inner()) + } +} + +#[cfg(test)] +mod tests { + use crate::DashMap; + + use super::*; + + #[test] + fn test_insert_entry_into_vacant() { + let map: DashMap = DashMap::new(); + + let entry = map.entry(1); + + assert!(matches!(entry, Entry::Vacant(_))); + + let entry = entry.insert_entry(2); + + assert_eq!(*entry.get(), 2); + + drop(entry); + + assert_eq!(*map.get(&1).unwrap(), 2); + } + + #[test] + fn test_insert_entry_into_occupied() { + let map: DashMap = DashMap::new(); + + map.insert(1, 1000); + + let entry = map.entry(1); + + assert!(matches!(&entry, Entry::Occupied(entry) if *entry.get() == 1000)); + + let entry = entry.insert_entry(2); + + assert_eq!(*entry.get(), 2); + + drop(entry); + + assert_eq!(*map.get(&1).unwrap(), 2); + } +} diff --git a/vendor/dashmap/src/mapref/mod.rs b/vendor/dashmap/src/mapref/mod.rs new file mode 100644 index 00000000..8897547e --- /dev/null +++ b/vendor/dashmap/src/mapref/mod.rs @@ -0,0 +1,3 @@ +pub mod entry; +pub mod multiple; +pub mod one; diff --git a/vendor/dashmap/src/mapref/multiple.rs b/vendor/dashmap/src/mapref/multiple.rs new file mode 100644 index 00000000..53a8a7e5 --- /dev/null +++ b/vendor/dashmap/src/mapref/multiple.rs @@ -0,0 +1,107 @@ +use crate::lock::{RwLockReadGuard, RwLockWriteGuard}; +use crate::HashMap; +use core::hash::BuildHasher; +use core::hash::Hash; +use core::ops::{Deref, DerefMut}; +use std::collections::hash_map::RandomState; +use std::sync::Arc; + +pub struct RefMulti<'a, K, V, S = RandomState> { + _guard: Arc>>, + k: *const K, + v: *const V, +} + +unsafe 
impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Send for RefMulti<'a, K, V, S> {} +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Sync for RefMulti<'a, K, V, S> {} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMulti<'a, K, V, S> { + pub(crate) unsafe fn new( + guard: Arc>>, + k: *const K, + v: *const V, + ) -> Self { + Self { + _guard: guard, + k, + v, + } + } + + pub fn key(&self) -> &K { + self.pair().0 + } + + pub fn value(&self) -> &V { + self.pair().1 + } + + pub fn pair(&self) -> (&K, &V) { + unsafe { (&*self.k, &*self.v) } + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for RefMulti<'a, K, V, S> { + type Target = V; + + fn deref(&self) -> &V { + self.value() + } +} + +pub struct RefMutMulti<'a, K, V, S = RandomState> { + _guard: Arc>>, + k: *const K, + v: *mut V, +} + +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Send for RefMutMulti<'a, K, V, S> {} +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Sync for RefMutMulti<'a, K, V, S> {} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMutMulti<'a, K, V, S> { + pub(crate) unsafe fn new( + guard: Arc>>, + k: *const K, + v: *mut V, + ) -> Self { + Self { + _guard: guard, + k, + v, + } + } + + pub fn key(&self) -> &K { + self.pair().0 + } + + pub fn value(&self) -> &V { + self.pair().1 + } + + pub fn value_mut(&mut self) -> &mut V { + self.pair_mut().1 + } + + pub fn pair(&self) -> (&K, &V) { + unsafe { (&*self.k, &*self.v) } + } + + pub fn pair_mut(&mut self) -> (&K, &mut V) { + unsafe { (&*self.k, &mut *self.v) } + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for RefMutMulti<'a, K, V, S> { + type Target = V; + + fn deref(&self) -> &V { + self.value() + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> DerefMut for RefMutMulti<'a, K, V, S> { + fn deref_mut(&mut self) -> &mut V { + self.value_mut() + } +} diff --git a/vendor/dashmap/src/mapref/one.rs b/vendor/dashmap/src/mapref/one.rs new file mode 100644 index 00000000..fd385309 --- /dev/null +++ b/vendor/dashmap/src/mapref/one.rs @@ -0,0 +1,335 @@ +use crate::lock::{RwLockReadGuard, RwLockWriteGuard}; +use crate::HashMap; +use core::hash::{BuildHasher, Hash}; +use core::ops::{Deref, DerefMut}; +use std::collections::hash_map::RandomState; +use std::fmt::{Debug, Formatter}; + +pub struct Ref<'a, K, V, S = RandomState> { + _guard: RwLockReadGuard<'a, HashMap>, + k: *const K, + v: *const V, +} + +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Send for Ref<'a, K, V, S> {} +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Sync for Ref<'a, K, V, S> {} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> Ref<'a, K, V, S> { + pub(crate) unsafe fn new( + guard: RwLockReadGuard<'a, HashMap>, + k: *const K, + v: *const V, + ) -> Self { + Self { + _guard: guard, + k, + v, + } + } + + pub fn key(&self) -> &K { + self.pair().0 + } + + pub fn value(&self) -> &V { + self.pair().1 + } + + pub fn pair(&self) -> (&K, &V) { + unsafe { (&*self.k, &*self.v) } + } + + pub fn map(self, f: F) -> MappedRef<'a, K, V, T, S> + where + F: FnOnce(&V) -> &T, + { + MappedRef { + _guard: self._guard, + k: self.k, + v: f(unsafe { &*self.v }), + } + } + + pub fn try_map(self, f: F) -> Result, Self> + where + F: FnOnce(&V) -> Option<&T>, + { + if let Some(v) = f(unsafe { &*self.v }) { + Ok(MappedRef { + _guard: self._guard, + k: self.k, + v, + }) + } else { + Err(self) + } + } +} + +impl<'a, K: Eq + Hash + Debug, V: Debug, S: BuildHasher> Debug for Ref<'a, K, V, S> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + 
f.debug_struct("Ref") + .field("k", &self.k) + .field("v", &self.v) + .finish() + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for Ref<'a, K, V, S> { + type Target = V; + + fn deref(&self) -> &V { + self.value() + } +} + +pub struct RefMut<'a, K, V, S = RandomState> { + guard: RwLockWriteGuard<'a, HashMap>, + k: *const K, + v: *mut V, +} + +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Send for RefMut<'a, K, V, S> {} +unsafe impl<'a, K: Eq + Hash + Sync, V: Sync, S: BuildHasher> Sync for RefMut<'a, K, V, S> {} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> RefMut<'a, K, V, S> { + pub(crate) unsafe fn new( + guard: RwLockWriteGuard<'a, HashMap>, + k: *const K, + v: *mut V, + ) -> Self { + Self { guard, k, v } + } + + pub fn key(&self) -> &K { + self.pair().0 + } + + pub fn value(&self) -> &V { + self.pair().1 + } + + pub fn value_mut(&mut self) -> &mut V { + self.pair_mut().1 + } + + pub fn pair(&self) -> (&K, &V) { + unsafe { (&*self.k, &*self.v) } + } + + pub fn pair_mut(&mut self) -> (&K, &mut V) { + unsafe { (&*self.k, &mut *self.v) } + } + + pub fn downgrade(self) -> Ref<'a, K, V, S> { + unsafe { Ref::new(RwLockWriteGuard::downgrade(self.guard), self.k, self.v) } + } + + pub fn map(self, f: F) -> MappedRefMut<'a, K, V, T, S> + where + F: FnOnce(&mut V) -> &mut T, + { + MappedRefMut { + _guard: self.guard, + k: self.k, + v: f(unsafe { &mut *self.v }), + } + } + + pub fn try_map(self, f: F) -> Result, Self> + where + F: FnOnce(&mut V) -> Option<&mut T>, + { + let v = match f(unsafe { &mut *(self.v as *mut _) }) { + Some(v) => v, + None => return Err(self), + }; + let guard = self.guard; + let k = self.k; + Ok(MappedRefMut { + _guard: guard, + k, + v, + }) + } +} + +impl<'a, K: Eq + Hash + Debug, V: Debug, S: BuildHasher> Debug for RefMut<'a, K, V, S> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RefMut") + .field("k", &self.k) + .field("v", &self.v) + .finish() + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> Deref for RefMut<'a, K, V, S> { + type Target = V; + + fn deref(&self) -> &V { + self.value() + } +} + +impl<'a, K: Eq + Hash, V, S: BuildHasher> DerefMut for RefMut<'a, K, V, S> { + fn deref_mut(&mut self) -> &mut V { + self.value_mut() + } +} + +pub struct MappedRef<'a, K, V, T, S = RandomState> { + _guard: RwLockReadGuard<'a, HashMap>, + k: *const K, + v: *const T, +} + +impl<'a, K: Eq + Hash, V, T, S: BuildHasher> MappedRef<'a, K, V, T, S> { + pub fn key(&self) -> &K { + self.pair().0 + } + + pub fn value(&self) -> &T { + self.pair().1 + } + + pub fn pair(&self) -> (&K, &T) { + unsafe { (&*self.k, &*self.v) } + } + + pub fn map(self, f: F) -> MappedRef<'a, K, V, T2, S> + where + F: FnOnce(&T) -> &T2, + { + MappedRef { + _guard: self._guard, + k: self.k, + v: f(unsafe { &*self.v }), + } + } + + pub fn try_map(self, f: F) -> Result, Self> + where + F: FnOnce(&T) -> Option<&T2>, + { + let v = match f(unsafe { &*self.v }) { + Some(v) => v, + None => return Err(self), + }; + let guard = self._guard; + Ok(MappedRef { + _guard: guard, + k: self.k, + v, + }) + } +} + +impl<'a, K: Eq + Hash + Debug, V, T: Debug, S: BuildHasher> Debug for MappedRef<'a, K, V, T, S> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MappedRef") + .field("k", &self.k) + .field("v", &self.v) + .finish() + } +} + +impl<'a, K: Eq + Hash, V, T, S: BuildHasher> Deref for MappedRef<'a, K, V, T, S> { + type Target = T; + + fn deref(&self) -> &T { + self.value() + } +} + +impl<'a, K: Eq + Hash, V, T: std::fmt::Display> 
std::fmt::Display for MappedRef<'a, K, V, T> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(self.value(), f) + } +} + +impl<'a, K: Eq + Hash, V, T: AsRef, TDeref: ?Sized> AsRef + for MappedRef<'a, K, V, T> +{ + fn as_ref(&self) -> &TDeref { + self.value().as_ref() + } +} + +pub struct MappedRefMut<'a, K, V, T, S = RandomState> { + _guard: RwLockWriteGuard<'a, HashMap>, + k: *const K, + v: *mut T, +} + +impl<'a, K: Eq + Hash, V, T, S: BuildHasher> MappedRefMut<'a, K, V, T, S> { + pub fn key(&self) -> &K { + self.pair().0 + } + + pub fn value(&self) -> &T { + self.pair().1 + } + + pub fn value_mut(&mut self) -> &mut T { + self.pair_mut().1 + } + + pub fn pair(&self) -> (&K, &T) { + unsafe { (&*self.k, &*self.v) } + } + + pub fn pair_mut(&mut self) -> (&K, &mut T) { + unsafe { (&*self.k, &mut *self.v) } + } + + pub fn map(self, f: F) -> MappedRefMut<'a, K, V, T2, S> + where + F: FnOnce(&mut T) -> &mut T2, + { + MappedRefMut { + _guard: self._guard, + k: self.k, + v: f(unsafe { &mut *self.v }), + } + } + + pub fn try_map(self, f: F) -> Result, Self> + where + F: FnOnce(&mut T) -> Option<&mut T2>, + { + let v = match f(unsafe { &mut *(self.v as *mut _) }) { + Some(v) => v, + None => return Err(self), + }; + let guard = self._guard; + let k = self.k; + Ok(MappedRefMut { + _guard: guard, + k, + v, + }) + } +} + +impl<'a, K: Eq + Hash + Debug, V, T: Debug, S: BuildHasher> Debug for MappedRefMut<'a, K, V, T, S> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("MappedRefMut") + .field("k", &self.k) + .field("v", &self.v) + .finish() + } +} + +impl<'a, K: Eq + Hash, V, T, S: BuildHasher> Deref for MappedRefMut<'a, K, V, T, S> { + type Target = T; + + fn deref(&self) -> &T { + self.value() + } +} + +impl<'a, K: Eq + Hash, V, T, S: BuildHasher> DerefMut for MappedRefMut<'a, K, V, T, S> { + fn deref_mut(&mut self) -> &mut T { + self.value_mut() + } +} diff --git a/vendor/dashmap/src/rayon/map.rs b/vendor/dashmap/src/rayon/map.rs new file mode 100644 index 00000000..ab45e617 --- /dev/null +++ b/vendor/dashmap/src/rayon/map.rs @@ -0,0 +1,221 @@ +use crate::lock::RwLock; +use crate::mapref::multiple::{RefMulti, RefMutMulti}; +use crate::util; +use crate::{DashMap, HashMap}; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; +use std::collections::hash_map::RandomState; +use std::sync::Arc; + +impl ParallelExtend<(K, V)> for DashMap +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + (&*self).par_extend(par_iter); + } +} + +// Since we don't actually need mutability, we can implement this on a +// reference, similar to `io::Write for &File`. 
+impl ParallelExtend<(K, V)> for &'_ DashMap +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + let &mut map = self; + par_iter.into_par_iter().for_each(move |(key, value)| { + map.insert(key, value); + }); + } +} + +impl FromParallelIterator<(K, V)> for DashMap +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + Default + BuildHasher, +{ + fn from_par_iter(par_iter: I) -> Self + where + I: IntoParallelIterator, + { + let map = Self::default(); + (&map).par_extend(par_iter); + map + } +} + +// Implementation note: while the shards will iterate in parallel, we flatten +// sequentially within each shard (`flat_map_iter`), because the standard +// `HashMap` only implements `ParallelIterator` by collecting to a `Vec` first. +// There is real parallel support in the `hashbrown/rayon` feature, but we don't +// always use that map. + +impl IntoParallelIterator for DashMap +where + K: Send + Eq + Hash, + V: Send, + S: Send + Clone + BuildHasher, +{ + type Iter = OwningIter; + type Item = (K, V); + + fn into_par_iter(self) -> Self::Iter { + OwningIter { + shards: self.shards, + } + } +} + +pub struct OwningIter { + pub(super) shards: Box<[RwLock>]>, +} + +impl ParallelIterator for OwningIter +where + K: Send + Eq + Hash, + V: Send, + S: Send + Clone + BuildHasher, +{ + type Item = (K, V); + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + Vec::from(self.shards) + .into_par_iter() + .flat_map_iter(|shard| { + shard + .into_inner() + .into_iter() + .map(|(k, v)| (k, v.into_inner())) + }) + .drive_unindexed(consumer) + } +} + +// This impl also enables `IntoParallelRefIterator::par_iter` +impl<'a, K, V, S> IntoParallelIterator for &'a DashMap +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + type Iter = Iter<'a, K, V, S>; + type Item = RefMulti<'a, K, V, S>; + + fn into_par_iter(self) -> Self::Iter { + Iter { + shards: &self.shards, + } + } +} + +pub struct Iter<'a, K, V, S = RandomState> { + pub(super) shards: &'a [RwLock>], +} + +impl<'a, K, V, S> ParallelIterator for Iter<'a, K, V, S> +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + type Item = RefMulti<'a, K, V, S>; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.shards + .into_par_iter() + .flat_map_iter(|shard| { + let guard = shard.read(); + let sref: &'a HashMap = unsafe { util::change_lifetime_const(&*guard) }; + + let guard = Arc::new(guard); + sref.iter().map(move |(k, v)| { + let guard = Arc::clone(&guard); + unsafe { RefMulti::new(guard, k, v.get()) } + }) + }) + .drive_unindexed(consumer) + } +} + +// This impl also enables `IntoParallelRefMutIterator::par_iter_mut` +impl<'a, K, V, S> IntoParallelIterator for &'a mut DashMap +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + type Iter = IterMut<'a, K, V, S>; + type Item = RefMutMulti<'a, K, V, S>; + + fn into_par_iter(self) -> Self::Iter { + IterMut { + shards: &self.shards, + } + } +} + +impl DashMap +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + // Unlike `IntoParallelRefMutIterator::par_iter_mut`, we only _need_ `&self`. 
+ pub fn par_iter_mut(&self) -> IterMut<'_, K, V, S> { + IterMut { + shards: &self.shards, + } + } +} + +pub struct IterMut<'a, K, V, S = RandomState> { + shards: &'a [RwLock>], +} + +impl<'a, K, V, S> ParallelIterator for IterMut<'a, K, V, S> +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + type Item = RefMutMulti<'a, K, V, S>; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.shards + .into_par_iter() + .flat_map_iter(|shard| { + let mut guard = shard.write(); + let sref: &'a mut HashMap = + unsafe { util::change_lifetime_mut(&mut *guard) }; + + let guard = Arc::new(guard); + sref.iter_mut().map(move |(k, v)| { + let guard = Arc::clone(&guard); + unsafe { RefMutMulti::new(guard, k, v.get_mut()) } + }) + }) + .drive_unindexed(consumer) + } +} diff --git a/vendor/dashmap/src/rayon/read_only.rs b/vendor/dashmap/src/rayon/read_only.rs new file mode 100644 index 00000000..a11448ce --- /dev/null +++ b/vendor/dashmap/src/rayon/read_only.rs @@ -0,0 +1,96 @@ +use crate::mapref::multiple::RefMulti; +use crate::rayon::map::Iter; +use crate::ReadOnlyView; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::IntoParallelIterator; + +impl IntoParallelIterator for ReadOnlyView +where + K: Send + Eq + Hash, + V: Send, + S: Send + Clone + BuildHasher, +{ + type Iter = super::map::OwningIter; + type Item = (K, V); + + fn into_par_iter(self) -> Self::Iter { + super::map::OwningIter { + shards: self.map.shards, + } + } +} + +// This impl also enables `IntoParallelRefIterator::par_iter` +impl<'a, K, V, S> IntoParallelIterator for &'a ReadOnlyView +where + K: Send + Sync + Eq + Hash, + V: Send + Sync, + S: Send + Sync + Clone + BuildHasher, +{ + type Iter = Iter<'a, K, V, S>; + type Item = RefMulti<'a, K, V, S>; + + fn into_par_iter(self) -> Self::Iter { + Iter { + shards: &self.map.shards, + } + } +} + +#[cfg(test)] +mod tests { + use crate::DashMap; + use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; + + fn construct_sample_map() -> DashMap { + let map = DashMap::new(); + + map.insert(1, "one".to_string()); + + map.insert(10, "ten".to_string()); + + map.insert(27, "twenty seven".to_string()); + + map.insert(45, "forty five".to_string()); + + map + } + + #[test] + fn test_par_iter() { + let map = construct_sample_map(); + + let view = map.clone().into_read_only(); + + view.par_iter().for_each(|entry| { + let key = *entry.key(); + + assert!(view.contains_key(&key)); + + let map_entry = map.get(&key).unwrap(); + + assert_eq!(view.get(&key).unwrap(), map_entry.value()); + + let key_value: (&i32, &String) = view.get_key_value(&key).unwrap(); + + assert_eq!(key_value.0, map_entry.key()); + + assert_eq!(key_value.1, map_entry.value()); + }); + } + + #[test] + fn test_into_par_iter() { + let map = construct_sample_map(); + + let view = map.clone().into_read_only(); + + view.into_par_iter().for_each(|(key, value)| { + let map_entry = map.get(&key).unwrap(); + + assert_eq!(&key, map_entry.key()); + + assert_eq!(&value, map_entry.value()); + }); + } +} diff --git a/vendor/dashmap/src/rayon/set.rs b/vendor/dashmap/src/rayon/set.rs new file mode 100644 index 00000000..11c06ccb --- /dev/null +++ b/vendor/dashmap/src/rayon/set.rs @@ -0,0 +1,121 @@ +use crate::setref::multiple::RefMulti; +use crate::DashSet; +use core::hash::{BuildHasher, Hash}; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator}; 
+use std::collections::hash_map::RandomState; + +impl ParallelExtend for DashSet +where + K: Send + Sync + Eq + Hash, + S: Send + Sync + Clone + BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + (&*self).par_extend(par_iter); + } +} + +// Since we don't actually need mutability, we can implement this on a +// reference, similar to `io::Write for &File`. +impl ParallelExtend for &'_ DashSet +where + K: Send + Sync + Eq + Hash, + S: Send + Sync + Clone + BuildHasher, +{ + fn par_extend(&mut self, par_iter: I) + where + I: IntoParallelIterator, + { + let &mut set = self; + par_iter.into_par_iter().for_each(move |key| { + set.insert(key); + }); + } +} + +impl FromParallelIterator for DashSet +where + K: Send + Sync + Eq + Hash, + S: Send + Sync + Clone + Default + BuildHasher, +{ + fn from_par_iter(par_iter: I) -> Self + where + I: IntoParallelIterator, + { + let set = Self::default(); + (&set).par_extend(par_iter); + set + } +} + +impl IntoParallelIterator for DashSet +where + K: Send + Eq + Hash, + S: Send + Clone + BuildHasher, +{ + type Iter = OwningIter; + type Item = K; + + fn into_par_iter(self) -> Self::Iter { + OwningIter { + inner: self.inner.into_par_iter(), + } + } +} + +pub struct OwningIter { + inner: super::map::OwningIter, +} + +impl ParallelIterator for OwningIter +where + K: Send + Eq + Hash, + S: Send + Clone + BuildHasher, +{ + type Item = K; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.map(|(k, _)| k).drive_unindexed(consumer) + } +} + +// This impl also enables `IntoParallelRefIterator::par_iter` +impl<'a, K, S> IntoParallelIterator for &'a DashSet +where + K: Send + Sync + Eq + Hash, + S: Send + Sync + Clone + BuildHasher, +{ + type Iter = Iter<'a, K, S>; + type Item = RefMulti<'a, K, S>; + + fn into_par_iter(self) -> Self::Iter { + Iter { + inner: (&self.inner).into_par_iter(), + } + } +} + +pub struct Iter<'a, K, S = RandomState> { + inner: super::map::Iter<'a, K, (), S>, +} + +impl<'a, K, S> ParallelIterator for Iter<'a, K, S> +where + K: Send + Sync + Eq + Hash, + S: Send + Sync + Clone + BuildHasher, +{ + type Item = RefMulti<'a, K, S>; + + fn drive_unindexed(self, consumer: C) -> C::Result + where + C: UnindexedConsumer, + { + self.inner.map(RefMulti::new).drive_unindexed(consumer) + } +} diff --git a/vendor/dashmap/src/read_only.rs b/vendor/dashmap/src/read_only.rs new file mode 100644 index 00000000..42ee4433 --- /dev/null +++ b/vendor/dashmap/src/read_only.rs @@ -0,0 +1,271 @@ +use crate::lock::RwLock; +use crate::t::Map; +use crate::{DashMap, HashMap}; +use cfg_if::cfg_if; +use core::borrow::Borrow; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use std::collections::hash_map::RandomState; + +/// A read-only view into a `DashMap`. Allows to obtain raw references to the stored values. +pub struct ReadOnlyView { + pub(crate) map: DashMap, +} + +impl Clone for ReadOnlyView { + fn clone(&self) -> Self { + Self { + map: self.map.clone(), + } + } +} + +impl fmt::Debug + for ReadOnlyView +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.map.fmt(f) + } +} + +impl ReadOnlyView { + pub(crate) fn new(map: DashMap) -> Self { + Self { map } + } + + /// Consumes this `ReadOnlyView`, returning the underlying `DashMap`. + pub fn into_inner(self) -> DashMap { + self.map + } +} + +impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> ReadOnlyView { + /// Returns the number of elements in the map. 
+ pub fn len(&self) -> usize { + self.map.len() + } + + /// Returns `true` if the map contains no elements. + pub fn is_empty(&self) -> bool { + self.map.is_empty() + } + + /// Returns the number of elements the map can hold without reallocating. + pub fn capacity(&self) -> usize { + self.map.capacity() + } + + /// Returns `true` if the map contains a value for the specified key. + pub fn contains_key(&'a self, key: &Q) -> bool + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.map.hash_usize(&key); + + let idx = self.map.determine_shard(hash); + + let shard = unsafe { self.map._get_read_shard(idx) }; + + shard.contains_key(key) + } + + /// Returns a reference to the value corresponding to the key. + pub fn get(&'a self, key: &Q) -> Option<&'a V> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.map.hash_usize(&key); + + let idx = self.map.determine_shard(hash); + + let shard = unsafe { self.map._get_read_shard(idx) }; + + shard.get(key).map(|v| v.get()) + } + + /// Returns the key-value pair corresponding to the supplied key. + pub fn get_key_value(&'a self, key: &Q) -> Option<(&'a K, &'a V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let hash = self.map.hash_usize(&key); + + let idx = self.map.determine_shard(hash); + + let shard = unsafe { self.map._get_read_shard(idx) }; + + shard.get_key_value(key).map(|(k, v)| (k, v.get())) + } + + fn shard_read_iter(&'a self) -> impl Iterator> + 'a { + (0..self.map._shard_count()) + .map(move |shard_i| unsafe { self.map._get_read_shard(shard_i) }) + } + + /// An iterator visiting all key-value pairs in arbitrary order. The iterator element type is `(&'a K, &'a V)`. + pub fn iter(&'a self) -> impl Iterator + 'a { + self.shard_read_iter() + .flat_map(|shard| shard.iter()) + .map(|(k, v)| (k, v.get())) + } + + /// An iterator visiting all keys in arbitrary order. The iterator element type is `&'a K`. + pub fn keys(&'a self) -> impl Iterator + 'a { + self.shard_read_iter().flat_map(|shard| shard.keys()) + } + + /// An iterator visiting all values in arbitrary order. The iterator element type is `&'a V`. + pub fn values(&'a self) -> impl Iterator + 'a { + self.shard_read_iter() + .flat_map(|shard| shard.values()) + .map(|v| v.get()) + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Allows you to peek at the inner shards that store your data. + /// You should probably not use this unless you know what you are doing. + /// + /// Requires the `raw-api` feature to be enabled. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashMap; + /// + /// let map = DashMap::<(), ()>::new().into_read_only(); + /// println!("Amount of shards: {}", map.shards().len()); + /// ``` + pub fn shards(&self) -> &[RwLock>] { + &self.map.shards + } + } else { + #[allow(dead_code)] + pub(crate) fn shards(&self) -> &[RwLock>] { + &self.map.shards + } + } + } +} + +#[cfg(test)] + +mod tests { + + use crate::DashMap; + + fn construct_sample_map() -> DashMap { + let map = DashMap::new(); + + map.insert(1, "one".to_string()); + + map.insert(10, "ten".to_string()); + + map.insert(27, "twenty seven".to_string()); + + map.insert(45, "forty five".to_string()); + + map + } + + #[test] + + fn test_properties() { + let map = construct_sample_map(); + + let view = map.clone().into_read_only(); + + assert_eq!(view.is_empty(), map.is_empty()); + + assert_eq!(view.len(), map.len()); + + assert_eq!(view.capacity(), map.capacity()); + + let new_map = view.into_inner(); + + assert_eq!(new_map.is_empty(), map.is_empty()); + + assert_eq!(new_map.len(), map.len()); + + assert_eq!(new_map.capacity(), map.capacity()); + } + + #[test] + + fn test_get() { + let map = construct_sample_map(); + + let view = map.clone().into_read_only(); + + for key in map.iter().map(|entry| *entry.key()) { + assert!(view.contains_key(&key)); + + let map_entry = map.get(&key).unwrap(); + + assert_eq!(view.get(&key).unwrap(), map_entry.value()); + + let key_value: (&i32, &String) = view.get_key_value(&key).unwrap(); + + assert_eq!(key_value.0, map_entry.key()); + + assert_eq!(key_value.1, map_entry.value()); + } + } + + #[test] + + fn test_iters() { + let map = construct_sample_map(); + + let view = map.clone().into_read_only(); + + let mut visited_items = Vec::new(); + + for (key, value) in view.iter() { + map.contains_key(key); + + let map_entry = map.get(key).unwrap(); + + assert_eq!(key, map_entry.key()); + + assert_eq!(value, map_entry.value()); + + visited_items.push((key, value)); + } + + let mut visited_keys = Vec::new(); + + for key in view.keys() { + map.contains_key(key); + + let map_entry = map.get(key).unwrap(); + + assert_eq!(key, map_entry.key()); + + assert_eq!(view.get(key).unwrap(), map_entry.value()); + + visited_keys.push(key); + } + + let mut visited_values = Vec::new(); + + for value in view.values() { + visited_values.push(value); + } + + for entry in map.iter() { + let key = entry.key(); + + let value = entry.value(); + + assert!(visited_keys.contains(&key)); + + assert!(visited_values.contains(&value)); + + assert!(visited_items.contains(&(key, value))); + } + } +} diff --git a/vendor/dashmap/src/serde.rs b/vendor/dashmap/src/serde.rs new file mode 100644 index 00000000..6abeb069 --- /dev/null +++ b/vendor/dashmap/src/serde.rs @@ -0,0 +1,215 @@ +use crate::{mapref, setref, DashMap, DashSet}; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::marker::PhantomData; +use serde::de::{Deserialize, MapAccess, SeqAccess, Visitor}; +use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; +use serde::Deserializer; + +pub struct DashMapVisitor { + marker: PhantomData DashMap>, +} + +impl DashMapVisitor +where + K: Eq + Hash, + S: BuildHasher + Clone, +{ + fn new() -> Self { + DashMapVisitor { + marker: PhantomData, + } + } +} + +impl<'de, K, V, S> Visitor<'de> for DashMapVisitor +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Clone + Default, +{ + type Value = DashMap; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + 
formatter.write_str("a DashMap") + } + + fn visit_map(self, mut access: M) -> Result + where + M: MapAccess<'de>, + { + let map = + DashMap::with_capacity_and_hasher(access.size_hint().unwrap_or(0), Default::default()); + + while let Some((key, value)) = access.next_entry()? { + map.insert(key, value); + } + + Ok(map) + } +} + +impl<'de, K, V, S> Deserialize<'de> for DashMap +where + K: Deserialize<'de> + Eq + Hash, + V: Deserialize<'de>, + S: BuildHasher + Clone + Default, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_map(DashMapVisitor::::new()) + } +} + +impl Serialize for DashMap +where + K: Serialize + Eq + Hash, + V: Serialize, + H: BuildHasher + Clone, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(self.len()))?; + + for ref_multi in self.iter() { + map.serialize_entry(ref_multi.key(), ref_multi.value())?; + } + + map.end() + } +} + +pub struct DashSetVisitor { + marker: PhantomData DashSet>, +} + +impl DashSetVisitor +where + K: Eq + Hash, + S: BuildHasher + Clone, +{ + fn new() -> Self { + DashSetVisitor { + marker: PhantomData, + } + } +} + +impl<'de, K, S> Visitor<'de> for DashSetVisitor +where + K: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Clone + Default, +{ + type Value = DashSet; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a DashSet") + } + + fn visit_seq(self, mut access: M) -> Result + where + M: SeqAccess<'de>, + { + let map = + DashSet::with_capacity_and_hasher(access.size_hint().unwrap_or(0), Default::default()); + + while let Some(key) = access.next_element()? { + map.insert(key); + } + + Ok(map) + } +} + +impl<'de, K, S> Deserialize<'de> for DashSet +where + K: Deserialize<'de> + Eq + Hash, + S: BuildHasher + Clone + Default, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + deserializer.deserialize_seq(DashSetVisitor::::new()) + } +} + +impl Serialize for DashSet +where + K: Serialize + Eq + Hash, + H: BuildHasher + Clone, +{ + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut seq = serializer.serialize_seq(Some(self.len()))?; + + for ref_multi in self.iter() { + seq.serialize_element(ref_multi.key())?; + } + + seq.end() + } +} + +macro_rules! serialize_impl { + () => { + fn serialize(&self, serializer: Ser) -> Result + where + Ser: serde::Serializer, + { + std::ops::Deref::deref(self).serialize(serializer) + } + }; +} + +// Map +impl<'a, K: Eq + Hash, V: Serialize, S: BuildHasher> Serialize + for mapref::multiple::RefMulti<'a, K, V, S> +{ + serialize_impl! {} +} + +impl<'a, K: Eq + Hash, V: Serialize, S: BuildHasher> Serialize + for mapref::multiple::RefMutMulti<'a, K, V, S> +{ + serialize_impl! {} +} + +impl<'a, K: Eq + Hash, V: Serialize, S: BuildHasher> Serialize for mapref::one::Ref<'a, K, V, S> { + serialize_impl! {} +} + +impl<'a, K: Eq + Hash, V: Serialize, S: BuildHasher> Serialize + for mapref::one::RefMut<'a, K, V, S> +{ + serialize_impl! {} +} + +impl<'a, K: Eq + Hash, V, T: Serialize, S: BuildHasher> Serialize + for mapref::one::MappedRef<'a, K, V, T, S> +{ + serialize_impl! {} +} + +impl<'a, K: Eq + Hash, V, T: Serialize, S: BuildHasher> Serialize + for mapref::one::MappedRefMut<'a, K, V, T, S> +{ + serialize_impl! {} +} + +// Set +impl<'a, V: Hash + Eq + Serialize, S: BuildHasher> Serialize + for setref::multiple::RefMulti<'a, V, S> +{ + serialize_impl! 
{} +} + +impl<'a, V: Hash + Eq + Serialize, S: BuildHasher> Serialize for setref::one::Ref<'a, V, S> { + serialize_impl! {} +} diff --git a/vendor/dashmap/src/set.rs b/vendor/dashmap/src/set.rs new file mode 100644 index 00000000..1a561770 --- /dev/null +++ b/vendor/dashmap/src/set.rs @@ -0,0 +1,458 @@ +use crate::iter_set::{Iter, OwningIter}; +#[cfg(feature = "raw-api")] +use crate::lock::RwLock; +use crate::setref::one::Ref; +use crate::DashMap; +#[cfg(feature = "raw-api")] +use crate::HashMap; +use cfg_if::cfg_if; +use core::borrow::Borrow; +use core::fmt; +use core::hash::{BuildHasher, Hash}; +use core::iter::FromIterator; +use std::collections::hash_map::RandomState; + +/// DashSet is a thin wrapper around [`DashMap`] using `()` as the value type. It uses +/// methods and types which are more convenient to work with on a set. +/// +/// [`DashMap`]: struct.DashMap.html +pub struct DashSet { + pub(crate) inner: DashMap, +} + +impl fmt::Debug for DashSet { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } +} + +impl Clone for DashSet { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } + + fn clone_from(&mut self, source: &Self) { + self.inner.clone_from(&source.inner) + } +} + +impl Default for DashSet +where + K: Eq + Hash, + S: Default + BuildHasher + Clone, +{ + fn default() -> Self { + Self::with_hasher(Default::default()) + } +} + +impl<'a, K: 'a + Eq + Hash> DashSet { + /// Creates a new DashSet with a capacity of 0. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let games = DashSet::new(); + /// games.insert("Veloren"); + /// ``` + pub fn new() -> Self { + Self::with_hasher(RandomState::default()) + } + + /// Creates a new DashMap with a specified starting capacity. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let numbers = DashSet::with_capacity(2); + /// numbers.insert(2); + /// numbers.insert(8); + /// ``` + pub fn with_capacity(capacity: usize) -> Self { + Self::with_capacity_and_hasher(capacity, RandomState::default()) + } +} + +impl<'a, K: 'a + Eq + Hash, S: BuildHasher + Clone> DashSet { + /// Creates a new DashMap with a capacity of 0 and the provided hasher. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// use std::collections::hash_map::RandomState; + /// + /// let s = RandomState::new(); + /// let games = DashSet::with_hasher(s); + /// games.insert("Veloren"); + /// ``` + pub fn with_hasher(hasher: S) -> Self { + Self::with_capacity_and_hasher(0, hasher) + } + + /// Creates a new DashMap with a specified starting capacity and hasher. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// use std::collections::hash_map::RandomState; + /// + /// let s = RandomState::new(); + /// let numbers = DashSet::with_capacity_and_hasher(2, s); + /// numbers.insert(2); + /// numbers.insert(8); + /// ``` + pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self { + Self { + inner: DashMap::with_capacity_and_hasher(capacity, hasher), + } + } + + /// Hash a given item to produce a usize. + /// Uses the provided or default HashBuilder. + pub fn hash_usize(&self, item: &T) -> usize { + self.inner.hash_usize(item) + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Allows you to peek at the inner shards that store your data. + /// You should probably not use this unless you know what you are doing. + /// + /// Requires the `raw-api` feature to be enabled. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let set = DashSet::<()>::new(); + /// println!("Amount of shards: {}", set.shards().len()); + /// ``` + pub fn shards(&self) -> &[RwLock>] { + self.inner.shards() + } + } + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Finds which shard a certain key is stored in. + /// You should probably not use this unless you know what you are doing. + /// Note that shard selection is dependent on the default or provided HashBuilder. + /// + /// Requires the `raw-api` feature to be enabled. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let set = DashSet::new(); + /// set.insert("coca-cola"); + /// println!("coca-cola is stored in shard: {}", set.determine_map("coca-cola")); + /// ``` + pub fn determine_map(&self, key: &Q) -> usize + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner.determine_map(key) + } + } + } + + cfg_if! { + if #[cfg(feature = "raw-api")] { + /// Finds which shard a certain hash is stored in. + /// + /// Requires the `raw-api` feature to be enabled. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let set: DashSet = DashSet::new(); + /// let key = "key"; + /// let hash = set.hash_usize(&key); + /// println!("hash is stored in shard: {}", set.determine_shard(hash)); + /// ``` + pub fn determine_shard(&self, hash: usize) -> usize { + self.inner.determine_shard(hash) + } + } + } + + /// Inserts a key into the set. Returns true if the key was not already in the set. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let set = DashSet::new(); + /// set.insert("I am the key!"); + /// ``` + pub fn insert(&self, key: K) -> bool { + self.inner.insert(key, ()).is_none() + } + + /// Removes an entry from the map, returning the key if it existed in the map. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let soccer_team = DashSet::new(); + /// soccer_team.insert("Jack"); + /// assert_eq!(soccer_team.remove("Jack").unwrap(), "Jack"); + /// ``` + pub fn remove(&self, key: &Q) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner.remove(key).map(|(k, _)| k) + } + + /// Removes an entry from the set, returning the key + /// if the entry existed and the provided conditional function returned true. + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let soccer_team = DashSet::new(); + /// soccer_team.insert("Sam"); + /// soccer_team.remove_if("Sam", |player| player.starts_with("Ja")); + /// assert!(soccer_team.contains("Sam")); + /// ``` + /// ``` + /// use dashmap::DashSet; + /// + /// let soccer_team = DashSet::new(); + /// soccer_team.insert("Sam"); + /// soccer_team.remove_if("Jacob", |player| player.starts_with("Ja")); + /// assert!(!soccer_team.contains("Jacob")); + /// ``` + pub fn remove_if(&self, key: &Q, f: impl FnOnce(&K) -> bool) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + // TODO: Don't create another closure around f + self.inner.remove_if(key, |k, _| f(k)).map(|(k, _)| k) + } + + /// Creates an iterator over a DashMap yielding immutable references. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let words = DashSet::new(); + /// words.insert("hello"); + /// assert_eq!(words.iter().count(), 1); + /// ``` + pub fn iter(&'a self) -> Iter<'a, K, S, DashMap> { + let iter = self.inner.iter(); + + Iter::new(iter) + } + + /// Get a reference to an entry in the set + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let youtubers = DashSet::new(); + /// youtubers.insert("Bosnian Bill"); + /// assert_eq!(*youtubers.get("Bosnian Bill").unwrap(), "Bosnian Bill"); + /// ``` + pub fn get(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner.get(key).map(Ref::new) + } + + /// Remove excess capacity to reduce memory usage. + pub fn shrink_to_fit(&self) { + self.inner.shrink_to_fit() + } + + /// Retain elements that whose predicates return true + /// and discard elements whose predicates return false. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let people = DashSet::new(); + /// people.insert("Albin"); + /// people.insert("Jones"); + /// people.insert("Charlie"); + /// people.retain(|name| name.contains('i')); + /// assert_eq!(people.len(), 2); + /// ``` + pub fn retain(&self, mut f: impl FnMut(&K) -> bool) { + self.inner.retain(|k, _| f(k)) + } + + /// Fetches the total number of keys stored in the set. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let people = DashSet::new(); + /// people.insert("Albin"); + /// people.insert("Jones"); + /// people.insert("Charlie"); + /// assert_eq!(people.len(), 3); + /// ``` + pub fn len(&self) -> usize { + self.inner.len() + } + + /// Checks if the set is empty or not. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let map = DashSet::<()>::new(); + /// assert!(map.is_empty()); + /// ``` + pub fn is_empty(&self) -> bool { + self.inner.is_empty() + } + + /// Removes all keys in the set. + /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let people = DashSet::new(); + /// people.insert("Albin"); + /// assert!(!people.is_empty()); + /// people.clear(); + /// assert!(people.is_empty()); + /// ``` + pub fn clear(&self) { + self.inner.clear() + } + + /// Returns how many keys the set can store without reallocating. + pub fn capacity(&self) -> usize { + self.inner.capacity() + } + + /// Checks if the set contains a specific key. 
+ /// + /// # Examples + /// + /// ``` + /// use dashmap::DashSet; + /// + /// let people = DashSet::new(); + /// people.insert("Dakota Cherries"); + /// assert!(people.contains("Dakota Cherries")); + /// ``` + pub fn contains(&self, key: &Q) -> bool + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner.contains_key(key) + } +} + +impl IntoIterator for DashSet { + type Item = K; + + type IntoIter = OwningIter; + + fn into_iter(self) -> Self::IntoIter { + OwningIter::new(self.inner.into_iter()) + } +} + +impl Extend for DashSet { + fn extend>(&mut self, iter: T) { + let iter = iter.into_iter().map(|k| (k, ())); + + self.inner.extend(iter) + } +} + +impl FromIterator for DashSet { + fn from_iter>(iter: I) -> Self { + let mut set = DashSet::default(); + + set.extend(iter); + + set + } +} + +#[cfg(test)] +mod tests { + use crate::DashSet; + + #[test] + fn test_basic() { + let set = DashSet::new(); + + set.insert(0); + + assert_eq!(set.get(&0).as_deref(), Some(&0)); + } + + #[test] + fn test_default() { + let set: DashSet = DashSet::default(); + + set.insert(0); + + assert_eq!(set.get(&0).as_deref(), Some(&0)); + } + + #[test] + fn test_multiple_hashes() { + let set = DashSet::::default(); + + for i in 0..100 { + assert!(set.insert(i)); + } + + for i in 0..100 { + assert!(!set.insert(i)); + } + + for i in 0..100 { + assert_eq!(Some(i), set.remove(&i)); + } + + for i in 0..100 { + assert_eq!(None, set.remove(&i)); + } + } +} diff --git a/vendor/dashmap/src/setref/mod.rs b/vendor/dashmap/src/setref/mod.rs new file mode 100644 index 00000000..95ae8f47 --- /dev/null +++ b/vendor/dashmap/src/setref/mod.rs @@ -0,0 +1,2 @@ +pub mod multiple; +pub mod one; diff --git a/vendor/dashmap/src/setref/multiple.rs b/vendor/dashmap/src/setref/multiple.rs new file mode 100644 index 00000000..21e7ed4a --- /dev/null +++ b/vendor/dashmap/src/setref/multiple.rs @@ -0,0 +1,25 @@ +use crate::mapref; +use core::hash::{BuildHasher, Hash}; +use core::ops::Deref; +use std::collections::hash_map::RandomState; +pub struct RefMulti<'a, K, S = RandomState> { + inner: mapref::multiple::RefMulti<'a, K, (), S>, +} + +impl<'a, K: Eq + Hash, S: BuildHasher> RefMulti<'a, K, S> { + pub(crate) fn new(inner: mapref::multiple::RefMulti<'a, K, (), S>) -> Self { + Self { inner } + } + + pub fn key(&self) -> &K { + self.inner.key() + } +} + +impl<'a, K: Eq + Hash, S: BuildHasher> Deref for RefMulti<'a, K, S> { + type Target = K; + + fn deref(&self) -> &K { + self.key() + } +} diff --git a/vendor/dashmap/src/setref/one.rs b/vendor/dashmap/src/setref/one.rs new file mode 100644 index 00000000..0787187d --- /dev/null +++ b/vendor/dashmap/src/setref/one.rs @@ -0,0 +1,25 @@ +use crate::mapref; +use core::hash::{BuildHasher, Hash}; +use core::ops::Deref; +use std::collections::hash_map::RandomState; +pub struct Ref<'a, K, S = RandomState> { + inner: mapref::one::Ref<'a, K, (), S>, +} + +impl<'a, K: Eq + Hash, S: BuildHasher> Ref<'a, K, S> { + pub(crate) fn new(inner: mapref::one::Ref<'a, K, (), S>) -> Self { + Self { inner } + } + + pub fn key(&self) -> &K { + self.inner.key() + } +} + +impl<'a, K: Eq + Hash, S: BuildHasher> Deref for Ref<'a, K, S> { + type Target = K; + + fn deref(&self) -> &K { + self.key() + } +} diff --git a/vendor/dashmap/src/t.rs b/vendor/dashmap/src/t.rs new file mode 100644 index 00000000..5e1fedcc --- /dev/null +++ b/vendor/dashmap/src/t.rs @@ -0,0 +1,134 @@ +//! Central map trait to ease modifications and extensions down the road. 
+ +use crate::iter::{Iter, IterMut}; +use crate::lock::{RwLockReadGuard, RwLockWriteGuard}; +use crate::mapref::entry::Entry; +use crate::mapref::one::{Ref, RefMut}; +use crate::try_result::TryResult; +use crate::HashMap; +use core::borrow::Borrow; +use core::hash::{BuildHasher, Hash}; + +/// Implementation detail that is exposed due to generic constraints in public types. +pub trait Map<'a, K: 'a + Eq + Hash, V: 'a, S: 'a + Clone + BuildHasher> { + fn _shard_count(&self) -> usize; + + /// # Safety + /// + /// The index must not be out of bounds. + unsafe fn _get_read_shard(&'a self, i: usize) -> &'a HashMap; + + /// # Safety + /// + /// The index must not be out of bounds. + unsafe fn _yield_read_shard(&'a self, i: usize) -> RwLockReadGuard<'a, HashMap>; + + /// # Safety + /// + /// The index must not be out of bounds. + unsafe fn _yield_write_shard(&'a self, i: usize) -> RwLockWriteGuard<'a, HashMap>; + + /// # Safety + /// + /// The index must not be out of bounds. + unsafe fn _try_yield_read_shard( + &'a self, + i: usize, + ) -> Option>>; + + /// # Safety + /// + /// The index must not be out of bounds. + unsafe fn _try_yield_write_shard( + &'a self, + i: usize, + ) -> Option>>; + + fn _insert(&self, key: K, value: V) -> Option; + + fn _remove(&self, key: &Q) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _remove_if(&self, key: &Q, f: impl FnOnce(&K, &V) -> bool) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _remove_if_mut(&self, key: &Q, f: impl FnOnce(&K, &mut V) -> bool) -> Option<(K, V)> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _iter(&'a self) -> Iter<'a, K, V, S, Self> + where + Self: Sized; + + fn _iter_mut(&'a self) -> IterMut<'a, K, V, S, Self> + where + Self: Sized; + + fn _get(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _get_mut(&'a self, key: &Q) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _try_get(&'a self, key: &Q) -> TryResult> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _try_get_mut(&'a self, key: &Q) -> TryResult> + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _shrink_to_fit(&self); + + fn _retain(&self, f: impl FnMut(&K, &mut V) -> bool); + + fn _len(&self) -> usize; + + fn _capacity(&self) -> usize; + + fn _alter(&self, key: &Q, f: impl FnOnce(&K, V) -> V) + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _alter_all(&self, f: impl FnMut(&K, V) -> V); + + fn _view(&self, key: &Q, f: impl FnOnce(&K, &V) -> R) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized; + + fn _entry(&'a self, key: K) -> Entry<'a, K, V, S>; + + fn _try_entry(&'a self, key: K) -> Option>; + + fn _hasher(&self) -> S; + + // provided + fn _clear(&self) { + self._retain(|_, _| false) + } + + fn _contains_key(&'a self, key: &Q) -> bool + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self._get(key).is_some() + } + + fn _is_empty(&self) -> bool { + self._len() == 0 + } +} diff --git a/vendor/dashmap/src/try_result.rs b/vendor/dashmap/src/try_result.rs new file mode 100644 index 00000000..aa93d3b2 --- /dev/null +++ b/vendor/dashmap/src/try_result.rs @@ -0,0 +1,46 @@ +/// Represents the result of a non-blocking read from a [DashMap](crate::DashMap). +#[derive(Debug)] +pub enum TryResult { + /// The value was present in the map, and the lock for the shard was successfully obtained. + Present(R), + /// The shard wasn't locked, and the value wasn't present in the map. + Absent, + /// The shard was locked. 
+ Locked, +} + +impl TryResult { + /// Returns `true` if the value was present in the map, and the lock for the shard was successfully obtained. + pub fn is_present(&self) -> bool { + matches!(self, TryResult::Present(_)) + } + + /// Returns `true` if the shard wasn't locked, and the value wasn't present in the map. + pub fn is_absent(&self) -> bool { + matches!(self, TryResult::Absent) + } + + /// Returns `true` if the shard was locked. + pub fn is_locked(&self) -> bool { + matches!(self, TryResult::Locked) + } + + /// If `self` is [Present](TryResult::Present), returns the reference to the value in the map. + /// Panics if `self` is not [Present](TryResult::Present). + pub fn unwrap(self) -> R { + match self { + TryResult::Present(r) => r, + TryResult::Locked => panic!("Called unwrap() on TryResult::Locked"), + TryResult::Absent => panic!("Called unwrap() on TryResult::Absent"), + } + } + + /// If `self` is [Present](TryResult::Present), returns the reference to the value in the map. + /// If `self` is not [Present](TryResult::Present), returns `None`. + pub fn try_unwrap(self) -> Option { + match self { + TryResult::Present(r) => Some(r), + _ => None, + } + } +} diff --git a/vendor/dashmap/src/util.rs b/vendor/dashmap/src/util.rs new file mode 100644 index 00000000..d84e37db --- /dev/null +++ b/vendor/dashmap/src/util.rs @@ -0,0 +1,102 @@ +//! This module is full of hackery and dark magic. +//! Either spend a day fixing it and quietly submit a PR or don't mention it to anybody. +use core::cell::UnsafeCell; +use core::{mem, ptr}; + +pub const fn ptr_size_bits() -> usize { + mem::size_of::() * 8 +} + +pub fn map_in_place_2 T>((k, v): (U, &mut T), f: F) { + unsafe { + // # Safety + // + // If the closure panics, we must abort otherwise we could double drop `T` + let _promote_panic_to_abort = AbortOnPanic; + + ptr::write(v, f(k, ptr::read(v))); + } +} + +/// # Safety +/// +/// Requires that you ensure the reference does not become invalid. +/// The object has to outlive the reference. +pub unsafe fn change_lifetime_const<'a, 'b, T>(x: &'a T) -> &'b T { + &*(x as *const T) +} + +/// # Safety +/// +/// Requires that you ensure the reference does not become invalid. +/// The object has to outlive the reference. +pub unsafe fn change_lifetime_mut<'a, 'b, T>(x: &'a mut T) -> &'b mut T { + &mut *(x as *mut T) +} + +/// A simple wrapper around `T` +/// +/// This is to prevent UB when using `HashMap::get_key_value`, because +/// `HashMap` doesn't expose an api to get the key and value, where +/// the value is a `&mut T`. 
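As a hedged sketch of how `TryResult` is typically consumed (via `DashMap::try_get`, the non-blocking read mentioned in its doc comment), a caller would match on the three variants defined above. The map contents here are purely illustrative.

```rust
use dashmap::try_result::TryResult;
use dashmap::DashMap;

fn main() {
    let map: DashMap<&str, u32> = DashMap::new();
    map.insert("answer", 42);

    // `try_get` never blocks on a locked shard; it reports the lock state instead.
    match map.try_get("answer") {
        TryResult::Present(value) => assert_eq!(*value, 42),
        TryResult::Absent => println!("key not present"),
        TryResult::Locked => println!("shard currently locked, retry later"),
    }
}
```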
+/// +/// See [#10](https://github.com/xacrimon/dashmap/issues/10) for details +/// +/// This type is meant to be an implementation detail, but must be exposed due to the `Dashmap::shards` +#[repr(transparent)] +pub struct SharedValue { + value: UnsafeCell, +} + +impl Clone for SharedValue { + fn clone(&self) -> Self { + let inner = self.get().clone(); + + Self { + value: UnsafeCell::new(inner), + } + } +} + +unsafe impl Send for SharedValue {} + +unsafe impl Sync for SharedValue {} + +impl SharedValue { + /// Create a new `SharedValue` + pub const fn new(value: T) -> Self { + Self { + value: UnsafeCell::new(value), + } + } + + /// Get a shared reference to `T` + pub fn get(&self) -> &T { + unsafe { &*self.value.get() } + } + + /// Get an unique reference to `T` + pub fn get_mut(&mut self) -> &mut T { + unsafe { &mut *self.value.get() } + } + + /// Unwraps the value + pub fn into_inner(self) -> T { + self.value.into_inner() + } + + /// Get a mutable raw pointer to the underlying value + pub(crate) fn as_ptr(&self) -> *mut T { + self.value.get() + } +} + +struct AbortOnPanic; + +impl Drop for AbortOnPanic { + fn drop(&mut self) { + if std::thread::panicking() { + std::process::abort() + } + } +} diff --git a/vendor/futures-executor/.cargo-checksum.json b/vendor/futures-executor/.cargo-checksum.json new file mode 100644 index 00000000..f5143118 --- /dev/null +++ b/vendor/futures-executor/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"b2af1091a3cf05b9a19d7177c17fee6b3e50b205b2423690df9a40e518c089bb","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"05ba6a5490962c4df45b78e9ad928a29dd5c3fad749284d5b812ca7e765feb6d","benches/thread_notify.rs":"e601968527bee85766f32d2d11de5ed8f6b4bd5a29989b5c369a52bd3cd3d024","src/enter.rs":"e3e890a8fa649e76cd2ce915abb11b67d15f3c5ae5e8e374142e0363917b2406","src/lib.rs":"08a25594c789cb4ce1c8929a9ddd745e67fee1db373e011a7ebe135933522614","src/local_pool.rs":"78177af55564fdfcfdc9f3974afe7d9d0682a7e4654761d83a8fc02abb34a7dc","src/thread_pool.rs":"e52f8527bc37c511513d77d183b44e3991a7b324aaed5d17bee0d092cf448a5b","src/unpark_mutex.rs":"e186464d9bdec22a6d1e1d900ed03a1154e6b0d422ede9bd3b768657cdbb6113","tests/local_pool.rs":"9639c9a290e23faab3913c6fec190853f890defaed6ffe67de177eca5d88932a"},"package":"0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc"} \ No newline at end of file diff --git a/vendor/futures-executor/Cargo.toml b/vendor/futures-executor/Cargo.toml new file mode 100644 index 00000000..f11f5783 --- /dev/null +++ b/vendor/futures-executor/Cargo.toml @@ -0,0 +1,60 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.56" +name = "futures-executor" +version = "0.3.29" +description = """ +Executors for asynchronous tasks based on the futures-rs library. 
+""" +homepage = "https://rust-lang.github.io/futures-rs" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/futures-rs" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies.futures-core] +version = "0.3.29" +default-features = false + +[dependencies.futures-task] +version = "0.3.29" +default-features = false + +[dependencies.futures-util] +version = "0.3.29" +default-features = false + +[dependencies.num_cpus] +version = "1.8.0" +optional = true + +[dev-dependencies] + +[features] +default = ["std"] +std = [ + "futures-core/std", + "futures-task/std", + "futures-util/std", +] +thread-pool = [ + "std", + "num_cpus", +] diff --git a/vendor/futures-executor/LICENSE-APACHE b/vendor/futures-executor/LICENSE-APACHE new file mode 100644 index 00000000..9eb0b097 --- /dev/null +++ b/vendor/futures-executor/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/futures-executor/LICENSE-MIT b/vendor/futures-executor/LICENSE-MIT new file mode 100644 index 00000000..8ad082ec --- /dev/null +++ b/vendor/futures-executor/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/futures-executor/README.md b/vendor/futures-executor/README.md new file mode 100644 index 00000000..724ff5bb --- /dev/null +++ b/vendor/futures-executor/README.md @@ -0,0 +1,23 @@ +# futures-executor + +Executors for asynchronous tasks based on the futures-rs library. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +futures-executor = "0.3" +``` + +The current `futures-executor` requires Rust 1.56 or later. + +## License + +Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or +[MIT license](LICENSE-MIT) at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. 
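To make the README's usage section concrete, a minimal sketch of driving a future with this crate's `block_on` (also re-exported as `futures::executor::block_on`) could look like the following; the async block is illustrative.

```rust
use futures_executor::block_on;

fn main() {
    // Run a future to completion on the current thread.
    let sum = block_on(async { 2 + 2 });
    assert_eq!(sum, 4);
}
```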
diff --git a/vendor/futures-executor/benches/thread_notify.rs b/vendor/futures-executor/benches/thread_notify.rs new file mode 100644 index 00000000..88d0447c --- /dev/null +++ b/vendor/futures-executor/benches/thread_notify.rs @@ -0,0 +1,109 @@ +#![feature(test)] + +extern crate test; +use crate::test::Bencher; + +use futures::executor::block_on; +use futures::future::Future; +use futures::task::{Context, Poll, Waker}; +use std::pin::Pin; + +#[bench] +fn thread_yield_single_thread_one_wait(b: &mut Bencher) { + const NUM: usize = 10_000; + + struct Yield { + rem: usize, + } + + impl Future for Yield { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.rem == 0 { + Poll::Ready(()) + } else { + self.rem -= 1; + cx.waker().wake_by_ref(); + Poll::Pending + } + } + } + + b.iter(|| { + let y = Yield { rem: NUM }; + block_on(y); + }); +} + +#[bench] +fn thread_yield_single_thread_many_wait(b: &mut Bencher) { + const NUM: usize = 10_000; + + struct Yield { + rem: usize, + } + + impl Future for Yield { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.rem == 0 { + Poll::Ready(()) + } else { + self.rem -= 1; + cx.waker().wake_by_ref(); + Poll::Pending + } + } + } + + b.iter(|| { + for _ in 0..NUM { + let y = Yield { rem: 1 }; + block_on(y); + } + }); +} + +#[bench] +fn thread_yield_multi_thread(b: &mut Bencher) { + use std::sync::mpsc; + use std::thread; + + const NUM: usize = 1_000; + + let (tx, rx) = mpsc::sync_channel::(10_000); + + struct Yield { + rem: usize, + tx: mpsc::SyncSender, + } + impl Unpin for Yield {} + + impl Future for Yield { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.rem == 0 { + Poll::Ready(()) + } else { + self.rem -= 1; + self.tx.send(cx.waker().clone()).unwrap(); + Poll::Pending + } + } + } + + thread::spawn(move || { + while let Ok(task) = rx.recv() { + task.wake(); + } + }); + + b.iter(move || { + let y = Yield { rem: NUM, tx: tx.clone() }; + + block_on(y); + }); +} diff --git a/vendor/futures-executor/src/enter.rs b/vendor/futures-executor/src/enter.rs new file mode 100644 index 00000000..cb58c30b --- /dev/null +++ b/vendor/futures-executor/src/enter.rs @@ -0,0 +1,80 @@ +use std::cell::Cell; +use std::fmt; + +thread_local!(static ENTERED: Cell = Cell::new(false)); + +/// Represents an executor context. +/// +/// For more details, see [`enter` documentation](enter()). +pub struct Enter { + _priv: (), +} + +/// An error returned by `enter` if an execution scope has already been +/// entered. +pub struct EnterError { + _priv: (), +} + +impl fmt::Debug for EnterError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("EnterError").finish() + } +} + +impl fmt::Display for EnterError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "an execution scope has already been entered") + } +} + +impl std::error::Error for EnterError {} + +/// Marks the current thread as being within the dynamic extent of an +/// executor. +/// +/// Executor implementations should call this function before beginning to +/// execute a task, and drop the returned [`Enter`](Enter) value after +/// completing task execution: +/// +/// ``` +/// use futures::executor::enter; +/// +/// let enter = enter().expect("..."); +/// /* run task */ +/// drop(enter); +/// ``` +/// +/// Doing so ensures that executors aren't +/// accidentally invoked in a nested fashion. 
+/// +/// # Error +/// +/// Returns an error if the current thread is already marked, in which case the +/// caller should panic with a tailored error message. +pub fn enter() -> Result { + ENTERED.with(|c| { + if c.get() { + Err(EnterError { _priv: () }) + } else { + c.set(true); + + Ok(Enter { _priv: () }) + } + }) +} + +impl fmt::Debug for Enter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Enter").finish() + } +} + +impl Drop for Enter { + fn drop(&mut self) { + ENTERED.with(|c| { + assert!(c.get()); + c.set(false); + }); + } +} diff --git a/vendor/futures-executor/src/lib.rs b/vendor/futures-executor/src/lib.rs new file mode 100644 index 00000000..b1af8754 --- /dev/null +++ b/vendor/futures-executor/src/lib.rs @@ -0,0 +1,76 @@ +//! Built-in executors and related tools. +//! +//! All asynchronous computation occurs within an executor, which is +//! capable of spawning futures as tasks. This module provides several +//! built-in executors, as well as tools for building your own. +//! +//! All items are only available when the `std` feature of this +//! library is activated, and it is activated by default. +//! +//! # Using a thread pool (M:N task scheduling) +//! +//! Most of the time tasks should be executed on a [thread pool](ThreadPool). +//! A small set of worker threads can handle a very large set of spawned tasks +//! (which are much lighter weight than threads). Tasks spawned onto the pool +//! with the [`spawn_ok`](ThreadPool::spawn_ok) function will run ambiently on +//! the created threads. +//! +//! # Spawning additional tasks +//! +//! Tasks can be spawned onto a spawner by calling its [`spawn_obj`] method +//! directly. In the case of `!Send` futures, [`spawn_local_obj`] can be used +//! instead. +//! +//! # Single-threaded execution +//! +//! In addition to thread pools, it's possible to run a task (and the tasks +//! it spawns) entirely within a single thread via the [`LocalPool`] executor. +//! Aside from cutting down on synchronization costs, this executor also makes +//! it possible to spawn non-`Send` tasks, via [`spawn_local_obj`]. The +//! [`LocalPool`] is best suited for running I/O-bound tasks that do relatively +//! little work between I/O operations. +//! +//! There is also a convenience function [`block_on`] for simply running a +//! future to completion on the current thread. +//! +//! [`spawn_obj`]: https://docs.rs/futures/0.3/futures/task/trait.Spawn.html#tymethod.spawn_obj +//! 
[`spawn_local_obj`]: https://docs.rs/futures/0.3/futures/task/trait.LocalSpawn.html#tymethod.spawn_local_obj + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + single_use_lifetimes, + unreachable_pub +)] +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_assignments, unused_variables) + ) +))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +#[cfg(feature = "std")] +mod local_pool; +#[cfg(feature = "std")] +pub use crate::local_pool::{block_on, block_on_stream, BlockingStream, LocalPool, LocalSpawner}; + +#[cfg(feature = "thread-pool")] +#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] +#[cfg(feature = "std")] +mod thread_pool; +#[cfg(feature = "thread-pool")] +#[cfg(feature = "std")] +mod unpark_mutex; +#[cfg(feature = "thread-pool")] +#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] +#[cfg(feature = "std")] +pub use crate::thread_pool::{ThreadPool, ThreadPoolBuilder}; + +#[cfg(feature = "std")] +mod enter; +#[cfg(feature = "std")] +pub use crate::enter::{enter, Enter, EnterError}; diff --git a/vendor/futures-executor/src/local_pool.rs b/vendor/futures-executor/src/local_pool.rs new file mode 100644 index 00000000..8a9bc2fc --- /dev/null +++ b/vendor/futures-executor/src/local_pool.rs @@ -0,0 +1,402 @@ +use crate::enter; +use futures_core::future::Future; +use futures_core::stream::Stream; +use futures_core::task::{Context, Poll}; +use futures_task::{waker_ref, ArcWake}; +use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError}; +use futures_util::pin_mut; +use futures_util::stream::FuturesUnordered; +use futures_util::stream::StreamExt; +use std::cell::RefCell; +use std::ops::{Deref, DerefMut}; +use std::rc::{Rc, Weak}; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; +use std::thread::{self, Thread}; + +/// A single-threaded task pool for polling futures to completion. +/// +/// This executor allows you to multiplex any number of tasks onto a single +/// thread. It's appropriate to poll strictly I/O-bound futures that do very +/// little work in between I/O actions. +/// +/// To get a handle to the pool that implements +/// [`Spawn`](futures_task::Spawn), use the +/// [`spawner()`](LocalPool::spawner) method. Because the executor is +/// single-threaded, it supports a special form of task spawning for non-`Send` +/// futures, via [`spawn_local_obj`](futures_task::LocalSpawn::spawn_local_obj). +#[derive(Debug)] +pub struct LocalPool { + pool: FuturesUnordered>, + incoming: Rc, +} + +/// A handle to a [`LocalPool`](LocalPool) that implements +/// [`Spawn`](futures_task::Spawn). +#[derive(Clone, Debug)] +pub struct LocalSpawner { + incoming: Weak, +} + +type Incoming = RefCell>>; + +pub(crate) struct ThreadNotify { + /// The (single) executor thread. + thread: Thread, + /// A flag to ensure a wakeup (i.e. `unpark()`) is not "forgotten" + /// before the next `park()`, which may otherwise happen if the code + /// being executed as part of the future(s) being polled makes use of + /// park / unpark calls of its own, i.e. we cannot assume that no other + /// code uses park / unpark on the executing `thread`. + unparked: AtomicBool, +} + +thread_local! 
{ + static CURRENT_THREAD_NOTIFY: Arc = Arc::new(ThreadNotify { + thread: thread::current(), + unparked: AtomicBool::new(false), + }); +} + +impl ArcWake for ThreadNotify { + fn wake_by_ref(arc_self: &Arc) { + // Make sure the wakeup is remembered until the next `park()`. + let unparked = arc_self.unparked.swap(true, Ordering::Release); + if !unparked { + // If the thread has not been unparked yet, it must be done + // now. If it was actually parked, it will run again, + // otherwise the token made available by `unpark` + // may be consumed before reaching `park()`, but `unparked` + // ensures it is not forgotten. + arc_self.thread.unpark(); + } + } +} + +// Set up and run a basic single-threaded spawner loop, invoking `f` on each +// turn. +fn run_executor) -> Poll>(mut f: F) -> T { + let _enter = enter().expect( + "cannot execute `LocalPool` executor from within \ + another executor", + ); + + CURRENT_THREAD_NOTIFY.with(|thread_notify| { + let waker = waker_ref(thread_notify); + let mut cx = Context::from_waker(&waker); + loop { + if let Poll::Ready(t) = f(&mut cx) { + return t; + } + + // Wait for a wakeup. + while !thread_notify.unparked.swap(false, Ordering::Acquire) { + // No wakeup occurred. It may occur now, right before parking, + // but in that case the token made available by `unpark()` + // is guaranteed to still be available and `park()` is a no-op. + thread::park(); + } + } + }) +} + +/// Check for a wakeup, but don't consume it. +fn woken() -> bool { + CURRENT_THREAD_NOTIFY.with(|thread_notify| thread_notify.unparked.load(Ordering::Acquire)) +} + +impl LocalPool { + /// Create a new, empty pool of tasks. + pub fn new() -> Self { + Self { pool: FuturesUnordered::new(), incoming: Default::default() } + } + + /// Get a clonable handle to the pool as a [`Spawn`]. + pub fn spawner(&self) -> LocalSpawner { + LocalSpawner { incoming: Rc::downgrade(&self.incoming) } + } + + /// Run all tasks in the pool to completion. + /// + /// ``` + /// use futures::executor::LocalPool; + /// + /// let mut pool = LocalPool::new(); + /// + /// // ... spawn some initial tasks using `spawn.spawn()` or `spawn.spawn_local()` + /// + /// // run *all* tasks in the pool to completion, including any newly-spawned ones. + /// pool.run(); + /// ``` + /// + /// The function will block the calling thread until *all* tasks in the pool + /// are complete, including any spawned while running existing tasks. + pub fn run(&mut self) { + run_executor(|cx| self.poll_pool(cx)) + } + + /// Runs all the tasks in the pool until the given future completes. + /// + /// ``` + /// use futures::executor::LocalPool; + /// + /// let mut pool = LocalPool::new(); + /// # let my_app = async {}; + /// + /// // run tasks in the pool until `my_app` completes + /// pool.run_until(my_app); + /// ``` + /// + /// The function will block the calling thread *only* until the future `f` + /// completes; there may still be incomplete tasks in the pool, which will + /// be inert after the call completes, but can continue with further use of + /// one of the pool's run or poll methods. While the function is running, + /// however, all tasks in the pool will try to make progress. 
+ pub fn run_until(&mut self, future: F) -> F::Output { + pin_mut!(future); + + run_executor(|cx| { + { + // if our main task is done, so are we + let result = future.as_mut().poll(cx); + if let Poll::Ready(output) = result { + return Poll::Ready(output); + } + } + + let _ = self.poll_pool(cx); + Poll::Pending + }) + } + + /// Runs all tasks and returns after completing one future or until no more progress + /// can be made. Returns `true` if one future was completed, `false` otherwise. + /// + /// ``` + /// use futures::executor::LocalPool; + /// use futures::task::LocalSpawnExt; + /// use futures::future::{ready, pending}; + /// + /// let mut pool = LocalPool::new(); + /// let spawner = pool.spawner(); + /// + /// spawner.spawn_local(ready(())).unwrap(); + /// spawner.spawn_local(ready(())).unwrap(); + /// spawner.spawn_local(pending()).unwrap(); + /// + /// // Run the two ready tasks and return true for them. + /// pool.try_run_one(); // returns true after completing one of the ready futures + /// pool.try_run_one(); // returns true after completing the other ready future + /// + /// // the remaining task can not be completed + /// assert!(!pool.try_run_one()); // returns false + /// ``` + /// + /// This function will not block the calling thread and will return the moment + /// that there are no tasks left for which progress can be made or after exactly one + /// task was completed; Remaining incomplete tasks in the pool can continue with + /// further use of one of the pool's run or poll methods. + /// Though only one task will be completed, progress may be made on multiple tasks. + pub fn try_run_one(&mut self) -> bool { + run_executor(|cx| { + loop { + self.drain_incoming(); + + match self.pool.poll_next_unpin(cx) { + // Success! + Poll::Ready(Some(())) => return Poll::Ready(true), + // The pool was empty. + Poll::Ready(None) => return Poll::Ready(false), + Poll::Pending => (), + } + + if !self.incoming.borrow().is_empty() { + // New tasks were spawned; try again. + continue; + } else if woken() { + // The pool yielded to us, but there's more progress to be made. + return Poll::Pending; + } else { + return Poll::Ready(false); + } + } + }) + } + + /// Runs all tasks in the pool and returns if no more progress can be made + /// on any task. + /// + /// ``` + /// use futures::executor::LocalPool; + /// use futures::task::LocalSpawnExt; + /// use futures::future::{ready, pending}; + /// + /// let mut pool = LocalPool::new(); + /// let spawner = pool.spawner(); + /// + /// spawner.spawn_local(ready(())).unwrap(); + /// spawner.spawn_local(ready(())).unwrap(); + /// spawner.spawn_local(pending()).unwrap(); + /// + /// // Runs the two ready task and returns. + /// // The empty task remains in the pool. + /// pool.run_until_stalled(); + /// ``` + /// + /// This function will not block the calling thread and will return the moment + /// that there are no tasks left for which progress can be made; + /// remaining incomplete tasks in the pool can continue with further use of one + /// of the pool's run or poll methods. While the function is running, all tasks + /// in the pool will try to make progress. + pub fn run_until_stalled(&mut self) { + run_executor(|cx| match self.poll_pool(cx) { + // The pool is empty. + Poll::Ready(()) => Poll::Ready(()), + Poll::Pending => { + if woken() { + Poll::Pending + } else { + // We're stalled for now. + Poll::Ready(()) + } + } + }); + } + + /// Poll `self.pool`, re-filling it with any newly-spawned tasks. 
+ /// Repeat until either the pool is empty, or it returns `Pending`. + /// + /// Returns `Ready` if the pool was empty, and `Pending` otherwise. + /// + /// NOTE: the pool may call `wake`, so `Pending` doesn't necessarily + /// mean that the pool can't make progress. + fn poll_pool(&mut self, cx: &mut Context<'_>) -> Poll<()> { + loop { + self.drain_incoming(); + + let pool_ret = self.pool.poll_next_unpin(cx); + + // We queued up some new tasks; add them and poll again. + if !self.incoming.borrow().is_empty() { + continue; + } + + match pool_ret { + Poll::Ready(Some(())) => continue, + Poll::Ready(None) => return Poll::Ready(()), + Poll::Pending => return Poll::Pending, + } + } + } + + /// Empty the incoming queue of newly-spawned tasks. + fn drain_incoming(&mut self) { + let mut incoming = self.incoming.borrow_mut(); + for task in incoming.drain(..) { + self.pool.push(task) + } + } +} + +impl Default for LocalPool { + fn default() -> Self { + Self::new() + } +} + +/// Run a future to completion on the current thread. +/// +/// This function will block the caller until the given future has completed. +/// +/// Use a [`LocalPool`](LocalPool) if you need finer-grained control over +/// spawned tasks. +pub fn block_on(f: F) -> F::Output { + pin_mut!(f); + run_executor(|cx| f.as_mut().poll(cx)) +} + +/// Turn a stream into a blocking iterator. +/// +/// When `next` is called on the resulting `BlockingStream`, the caller +/// will be blocked until the next element of the `Stream` becomes available. +pub fn block_on_stream(stream: S) -> BlockingStream { + BlockingStream { stream } +} + +/// An iterator which blocks on values from a stream until they become available. +#[derive(Debug)] +pub struct BlockingStream { + stream: S, +} + +impl Deref for BlockingStream { + type Target = S; + fn deref(&self) -> &Self::Target { + &self.stream + } +} + +impl DerefMut for BlockingStream { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.stream + } +} + +impl BlockingStream { + /// Convert this `BlockingStream` into the inner `Stream` type. 
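Since `block_on_stream` above carries no inline example, here is a hedged sketch of how it turns a stream into a blocking iterator, using `stream::iter` from `futures-util` purely for illustration.

```rust
use futures_executor::block_on_stream;
use futures_util::stream;

fn main() {
    // Each call to `next` blocks the current thread until an item is ready.
    let items: Vec<u32> = block_on_stream(stream::iter(vec![1, 2, 3])).collect();
    assert_eq!(items, vec![1, 2, 3]);
}
```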
+ pub fn into_inner(self) -> S { + self.stream + } +} + +impl Iterator for BlockingStream { + type Item = S::Item; + + fn next(&mut self) -> Option { + LocalPool::new().run_until(self.stream.next()) + } + + fn size_hint(&self) -> (usize, Option) { + self.stream.size_hint() + } +} + +impl Spawn for LocalSpawner { + fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { + if let Some(incoming) = self.incoming.upgrade() { + incoming.borrow_mut().push(future.into()); + Ok(()) + } else { + Err(SpawnError::shutdown()) + } + } + + fn status(&self) -> Result<(), SpawnError> { + if self.incoming.upgrade().is_some() { + Ok(()) + } else { + Err(SpawnError::shutdown()) + } + } +} + +impl LocalSpawn for LocalSpawner { + fn spawn_local_obj(&self, future: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> { + if let Some(incoming) = self.incoming.upgrade() { + incoming.borrow_mut().push(future); + Ok(()) + } else { + Err(SpawnError::shutdown()) + } + } + + fn status_local(&self) -> Result<(), SpawnError> { + if self.incoming.upgrade().is_some() { + Ok(()) + } else { + Err(SpawnError::shutdown()) + } + } +} diff --git a/vendor/futures-executor/src/thread_pool.rs b/vendor/futures-executor/src/thread_pool.rs new file mode 100644 index 00000000..53710089 --- /dev/null +++ b/vendor/futures-executor/src/thread_pool.rs @@ -0,0 +1,380 @@ +use crate::enter; +use crate::unpark_mutex::UnparkMutex; +use futures_core::future::Future; +use futures_core::task::{Context, Poll}; +use futures_task::{waker_ref, ArcWake}; +use futures_task::{FutureObj, Spawn, SpawnError}; +use futures_util::future::FutureExt; +use std::cmp; +use std::fmt; +use std::io; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::mpsc; +use std::sync::{Arc, Mutex}; +use std::thread; + +/// A general-purpose thread pool for scheduling tasks that poll futures to +/// completion. +/// +/// The thread pool multiplexes any number of tasks onto a fixed number of +/// worker threads. +/// +/// This type is a clonable handle to the threadpool itself. +/// Cloning it will only create a new reference, not a new threadpool. +/// +/// This type is only available when the `thread-pool` feature of this +/// library is activated. +#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] +pub struct ThreadPool { + state: Arc, +} + +/// Thread pool configuration object. +/// +/// This type is only available when the `thread-pool` feature of this +/// library is activated. +#[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] +pub struct ThreadPoolBuilder { + pool_size: usize, + stack_size: usize, + name_prefix: Option, + after_start: Option>, + before_stop: Option>, +} + +trait AssertSendSync: Send + Sync {} +impl AssertSendSync for ThreadPool {} + +struct PoolState { + tx: Mutex>, + rx: Mutex>, + cnt: AtomicUsize, + size: usize, +} + +impl fmt::Debug for ThreadPool { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ThreadPool").field("size", &self.state.size).finish() + } +} + +impl fmt::Debug for ThreadPoolBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("ThreadPoolBuilder") + .field("pool_size", &self.pool_size) + .field("name_prefix", &self.name_prefix) + .finish() + } +} + +enum Message { + Run(Task), + Close, +} + +impl ThreadPool { + /// Creates a new thread pool with the default configuration. + /// + /// See documentation for the methods in + /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default + /// configuration. 
+ pub fn new() -> Result { + ThreadPoolBuilder::new().create() + } + + /// Create a default thread pool configuration, which can then be customized. + /// + /// See documentation for the methods in + /// [`ThreadPoolBuilder`](ThreadPoolBuilder) for details on the default + /// configuration. + pub fn builder() -> ThreadPoolBuilder { + ThreadPoolBuilder::new() + } + + /// Spawns a future that will be run to completion. + /// + /// > **Note**: This method is similar to `Spawn::spawn_obj`, except that + /// > it is guaranteed to always succeed. + pub fn spawn_obj_ok(&self, future: FutureObj<'static, ()>) { + let task = Task { + future, + wake_handle: Arc::new(WakeHandle { exec: self.clone(), mutex: UnparkMutex::new() }), + exec: self.clone(), + }; + self.state.send(Message::Run(task)); + } + + /// Spawns a task that polls the given future with output `()` to + /// completion. + /// + /// ``` + /// # { + /// use futures::executor::ThreadPool; + /// + /// let pool = ThreadPool::new().unwrap(); + /// + /// let future = async { /* ... */ }; + /// pool.spawn_ok(future); + /// # } + /// # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + /// ``` + /// + /// > **Note**: This method is similar to `SpawnExt::spawn`, except that + /// > it is guaranteed to always succeed. + pub fn spawn_ok(&self, future: Fut) + where + Fut: Future + Send + 'static, + { + self.spawn_obj_ok(FutureObj::new(Box::new(future))) + } +} + +impl Spawn for ThreadPool { + fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { + self.spawn_obj_ok(future); + Ok(()) + } +} + +impl PoolState { + fn send(&self, msg: Message) { + self.tx.lock().unwrap().send(msg).unwrap(); + } + + fn work( + &self, + idx: usize, + after_start: Option>, + before_stop: Option>, + ) { + let _scope = enter().unwrap(); + if let Some(after_start) = after_start { + after_start(idx); + } + loop { + let msg = self.rx.lock().unwrap().recv().unwrap(); + match msg { + Message::Run(task) => task.run(), + Message::Close => break, + } + } + if let Some(before_stop) = before_stop { + before_stop(idx); + } + } +} + +impl Clone for ThreadPool { + fn clone(&self) -> Self { + self.state.cnt.fetch_add(1, Ordering::Relaxed); + Self { state: self.state.clone() } + } +} + +impl Drop for ThreadPool { + fn drop(&mut self) { + if self.state.cnt.fetch_sub(1, Ordering::Relaxed) == 1 { + for _ in 0..self.state.size { + self.state.send(Message::Close); + } + } + } +} + +impl ThreadPoolBuilder { + /// Create a default thread pool configuration. + /// + /// See the other methods on this type for details on the defaults. + pub fn new() -> Self { + Self { + pool_size: cmp::max(1, num_cpus::get()), + stack_size: 0, + name_prefix: None, + after_start: None, + before_stop: None, + } + } + + /// Set size of a future ThreadPool + /// + /// The size of a thread pool is the number of worker threads spawned. By + /// default, this is equal to the number of CPU cores. + /// + /// # Panics + /// + /// Panics if `pool_size == 0`. + pub fn pool_size(&mut self, size: usize) -> &mut Self { + assert!(size > 0); + self.pool_size = size; + self + } + + /// Set stack size of threads in the pool, in bytes. + /// + /// By default, worker threads use Rust's standard stack size. + pub fn stack_size(&mut self, stack_size: usize) -> &mut Self { + self.stack_size = stack_size; + self + } + + /// Set thread name prefix of a future ThreadPool. 
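Tying the builder methods together, a hedged end-to-end sketch of configuring the pool and collecting a result over a oneshot channel might look like this. The pool size and name prefix are illustrative values, and the `thread-pool` feature must be enabled.

```rust
use futures_channel::oneshot;
use futures_executor::{block_on, ThreadPoolBuilder};

fn main() {
    // Two worker threads with a recognizable name prefix (illustrative settings).
    let pool = ThreadPoolBuilder::new()
        .pool_size(2)
        .name_prefix("example-pool-")
        .create()
        .expect("failed to build thread pool");

    let (tx, rx) = oneshot::channel();
    pool.spawn_ok(async move {
        let _ = tx.send(2 + 2);
    });

    // Block the current thread until the spawned task reports back.
    assert_eq!(block_on(rx).unwrap(), 4);
}
```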
+ /// + /// Thread name prefix is used for generating thread names. For example, if prefix is + /// `my-pool-`, then threads in the pool will get names like `my-pool-1` etc. + /// + /// By default, worker threads are assigned Rust's standard thread name. + pub fn name_prefix>(&mut self, name_prefix: S) -> &mut Self { + self.name_prefix = Some(name_prefix.into()); + self + } + + /// Execute the closure `f` immediately after each worker thread is started, + /// but before running any tasks on it. + /// + /// This hook is intended for bookkeeping and monitoring. + /// The closure `f` will be dropped after the `builder` is dropped + /// and all worker threads in the pool have executed it. + /// + /// The closure provided will receive an index corresponding to the worker + /// thread it's running on. + pub fn after_start(&mut self, f: F) -> &mut Self + where + F: Fn(usize) + Send + Sync + 'static, + { + self.after_start = Some(Arc::new(f)); + self + } + + /// Execute closure `f` just prior to shutting down each worker thread. + /// + /// This hook is intended for bookkeeping and monitoring. + /// The closure `f` will be dropped after the `builder` is dropped + /// and all threads in the pool have executed it. + /// + /// The closure provided will receive an index corresponding to the worker + /// thread it's running on. + pub fn before_stop(&mut self, f: F) -> &mut Self + where + F: Fn(usize) + Send + Sync + 'static, + { + self.before_stop = Some(Arc::new(f)); + self + } + + /// Create a [`ThreadPool`](ThreadPool) with the given configuration. + pub fn create(&mut self) -> Result { + let (tx, rx) = mpsc::channel(); + let pool = ThreadPool { + state: Arc::new(PoolState { + tx: Mutex::new(tx), + rx: Mutex::new(rx), + cnt: AtomicUsize::new(1), + size: self.pool_size, + }), + }; + + for counter in 0..self.pool_size { + let state = pool.state.clone(); + let after_start = self.after_start.clone(); + let before_stop = self.before_stop.clone(); + let mut thread_builder = thread::Builder::new(); + if let Some(ref name_prefix) = self.name_prefix { + thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter)); + } + if self.stack_size > 0 { + thread_builder = thread_builder.stack_size(self.stack_size); + } + thread_builder.spawn(move || state.work(counter, after_start, before_stop))?; + } + Ok(pool) + } +} + +impl Default for ThreadPoolBuilder { + fn default() -> Self { + Self::new() + } +} + +/// A task responsible for polling a future to completion. +struct Task { + future: FutureObj<'static, ()>, + exec: ThreadPool, + wake_handle: Arc, +} + +struct WakeHandle { + mutex: UnparkMutex, + exec: ThreadPool, +} + +impl Task { + /// Actually run the task (invoking `poll` on the future) on the current + /// thread. + fn run(self) { + let Self { mut future, wake_handle, mut exec } = self; + let waker = waker_ref(&wake_handle); + let mut cx = Context::from_waker(&waker); + + // Safety: The ownership of this `Task` object is evidence that + // we are in the `POLLING`/`REPOLL` state for the mutex. 
+ unsafe { + wake_handle.mutex.start_poll(); + + loop { + let res = future.poll_unpin(&mut cx); + match res { + Poll::Pending => {} + Poll::Ready(()) => return wake_handle.mutex.complete(), + } + let task = Self { future, wake_handle: wake_handle.clone(), exec }; + match wake_handle.mutex.wait(task) { + Ok(()) => return, // we've waited + Err(task) => { + // someone's notified us + future = task.future; + exec = task.exec; + } + } + } + } + } +} + +impl fmt::Debug for Task { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Task").field("contents", &"...").finish() + } +} + +impl ArcWake for WakeHandle { + fn wake_by_ref(arc_self: &Arc) { + if let Ok(task) = arc_self.mutex.notify() { + arc_self.exec.state.send(Message::Run(task)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::sync::mpsc; + + #[test] + fn test_drop_after_start() { + { + let (tx, rx) = mpsc::sync_channel(2); + let _cpu_pool = ThreadPoolBuilder::new() + .pool_size(2) + .after_start(move |_| tx.send(1).unwrap()) + .create() + .unwrap(); + + // After ThreadPoolBuilder is deconstructed, the tx should be dropped + // so that we can use rx as an iterator. + let count = rx.into_iter().count(); + assert_eq!(count, 2); + } + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 + } +} diff --git a/vendor/futures-executor/src/unpark_mutex.rs b/vendor/futures-executor/src/unpark_mutex.rs new file mode 100644 index 00000000..ac5112cf --- /dev/null +++ b/vendor/futures-executor/src/unpark_mutex.rs @@ -0,0 +1,137 @@ +use std::cell::UnsafeCell; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering::SeqCst; + +/// A "lock" around data `D`, which employs a *helping* strategy. +/// +/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being +/// invoked on only a single thread at a time (2) `poll` being invoked at least +/// once after each `unpark` (unless the future has completed). +pub(crate) struct UnparkMutex { + // The state of task execution (state machine described below) + status: AtomicUsize, + + // The actual task data, accessible only in the POLLING state + inner: UnsafeCell>, +} + +// `UnparkMutex` functions in many ways like a `Mutex`, except that on +// acquisition failure, the current lock holder performs the desired work -- +// re-polling. +// +// As such, these impls mirror those for `Mutex`. In particular, a reference +// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which +// must therefore be `Send`. +unsafe impl Send for UnparkMutex {} +unsafe impl Sync for UnparkMutex {} + +// There are four possible task states, listed below with their possible +// transitions: + +// The task is blocked, waiting on an event +const WAITING: usize = 0; // --> POLLING + +// The task is actively being polled by a thread; arrival of additional events +// of interest should move it to the REPOLL state +const POLLING: usize = 1; // --> WAITING, REPOLL, or COMPLETE + +// The task is actively being polled, but will need to be re-polled upon +// completion to ensure that all events were observed. 
+const REPOLL: usize = 2; // --> POLLING + +// The task has finished executing (either successfully or with an error/panic) +const COMPLETE: usize = 3; // No transitions out + +impl UnparkMutex { + pub(crate) fn new() -> Self { + Self { status: AtomicUsize::new(WAITING), inner: UnsafeCell::new(None) } + } + + /// Attempt to "notify" the mutex that a poll should occur. + /// + /// An `Ok` result indicates that the `POLLING` state has been entered, and + /// the caller can proceed to poll the future. An `Err` result indicates + /// that polling is not necessary (because the task is finished or the + /// polling has been delegated). + pub(crate) fn notify(&self) -> Result { + let mut status = self.status.load(SeqCst); + loop { + match status { + // The task is idle, so try to run it immediately. + WAITING => { + match self.status.compare_exchange(WAITING, POLLING, SeqCst, SeqCst) { + Ok(_) => { + let data = unsafe { + // SAFETY: we've ensured mutual exclusion via + // the status protocol; we are the only thread + // that has transitioned to the POLLING state, + // and we won't transition back to QUEUED until + // the lock is "released" by this thread. See + // the protocol diagram above. + (*self.inner.get()).take().unwrap() + }; + return Ok(data); + } + Err(cur) => status = cur, + } + } + + // The task is being polled, so we need to record that it should + // be *repolled* when complete. + POLLING => match self.status.compare_exchange(POLLING, REPOLL, SeqCst, SeqCst) { + Ok(_) => return Err(()), + Err(cur) => status = cur, + }, + + // The task is already scheduled for polling, or is complete, so + // we've got nothing to do. + _ => return Err(()), + } + } + } + + /// Alert the mutex that polling is about to begin, clearing any accumulated + /// re-poll requests. + /// + /// # Safety + /// + /// Callable only from the `POLLING`/`REPOLL` states, i.e. between + /// successful calls to `notify` and `wait`/`complete`. + pub(crate) unsafe fn start_poll(&self) { + self.status.store(POLLING, SeqCst); + } + + /// Alert the mutex that polling completed with `Pending`. + /// + /// # Safety + /// + /// Callable only from the `POLLING`/`REPOLL` states, i.e. between + /// successful calls to `notify` and `wait`/`complete`. + pub(crate) unsafe fn wait(&self, data: D) -> Result<(), D> { + *self.inner.get() = Some(data); + + match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) { + // no unparks came in while we were running + Ok(_) => Ok(()), + + // guaranteed to be in REPOLL state; just clobber the + // state and run again. + Err(status) => { + assert_eq!(status, REPOLL); + self.status.store(POLLING, SeqCst); + Err((*self.inner.get()).take().unwrap()) + } + } + } + + /// Alert the mutex that the task has completed execution and should not be + /// notified again. + /// + /// # Safety + /// + /// Callable only from the `POLLING`/`REPOLL` states, i.e. between + /// successful calls to `notify` and `wait`/`complete`. 
+ pub(crate) unsafe fn complete(&self) { + self.status.store(COMPLETE, SeqCst); + } +} diff --git a/vendor/futures-executor/tests/local_pool.rs b/vendor/futures-executor/tests/local_pool.rs new file mode 100644 index 00000000..72ce74b7 --- /dev/null +++ b/vendor/futures-executor/tests/local_pool.rs @@ -0,0 +1,496 @@ +use futures::channel::oneshot; +use futures::executor::LocalPool; +use futures::future::{self, lazy, poll_fn, Future}; +use futures::task::{Context, LocalSpawn, LocalSpawnExt, Poll, Spawn, SpawnExt, Waker}; +use std::cell::{Cell, RefCell}; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::thread; +use std::time::Duration; + +struct Pending(Rc<()>); + +impl Future for Pending { + type Output = (); + + fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> { + Poll::Pending + } +} + +fn pending() -> Pending { + Pending(Rc::new(())) +} + +#[test] +fn run_until_single_future() { + let mut cnt = 0; + + { + let mut pool = LocalPool::new(); + let fut = lazy(|_| { + cnt += 1; + }); + pool.run_until(fut); + } + + assert_eq!(cnt, 1); +} + +#[test] +fn run_until_ignores_spawned() { + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap(); + pool.run_until(lazy(|_| ())); +} + +#[test] +fn run_until_executes_spawned() { + let (tx, rx) = oneshot::channel(); + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + spawn + .spawn_local_obj( + Box::pin(lazy(move |_| { + tx.send(()).unwrap(); + })) + .into(), + ) + .unwrap(); + pool.run_until(rx).unwrap(); +} + +#[test] +fn run_returns_if_empty() { + let mut pool = LocalPool::new(); + pool.run(); + pool.run(); +} + +#[test] +fn run_executes_spawned() { + let cnt = Rc::new(Cell::new(0)); + let cnt2 = cnt.clone(); + + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + let spawn2 = pool.spawner(); + + spawn + .spawn_local_obj( + Box::pin(lazy(move |_| { + spawn2 + .spawn_local_obj( + Box::pin(lazy(move |_| { + cnt2.set(cnt2.get() + 1); + })) + .into(), + ) + .unwrap(); + })) + .into(), + ) + .unwrap(); + + pool.run(); + + assert_eq!(cnt.get(), 1); +} + +#[test] +fn run_spawn_many() { + const ITER: usize = 200; + + let cnt = Rc::new(Cell::new(0)); + + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + for _ in 0..ITER { + let cnt = cnt.clone(); + spawn + .spawn_local_obj( + Box::pin(lazy(move |_| { + cnt.set(cnt.get() + 1); + })) + .into(), + ) + .unwrap(); + } + + pool.run(); + + assert_eq!(cnt.get(), ITER); +} + +#[test] +fn try_run_one_returns_if_empty() { + let mut pool = LocalPool::new(); + assert!(!pool.try_run_one()); +} + +#[test] +fn try_run_one_executes_one_ready() { + const ITER: usize = 200; + + let cnt = Rc::new(Cell::new(0)); + + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + for _ in 0..ITER { + spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap(); + + let cnt = cnt.clone(); + spawn + .spawn_local_obj( + Box::pin(lazy(move |_| { + cnt.set(cnt.get() + 1); + })) + .into(), + ) + .unwrap(); + + spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap(); + } + + for i in 0..ITER { + assert_eq!(cnt.get(), i); + assert!(pool.try_run_one()); + assert_eq!(cnt.get(), i + 1); + } + assert!(!pool.try_run_one()); +} + +#[test] +fn try_run_one_returns_on_no_progress() { + const ITER: usize = 10; + + let cnt = Rc::new(Cell::new(0)); + + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + let waker: Rc>> = 
Rc::new(Cell::new(None)); + { + let cnt = cnt.clone(); + let waker = waker.clone(); + spawn + .spawn_local_obj( + Box::pin(poll_fn(move |ctx| { + cnt.set(cnt.get() + 1); + waker.set(Some(ctx.waker().clone())); + if cnt.get() == ITER { + Poll::Ready(()) + } else { + Poll::Pending + } + })) + .into(), + ) + .unwrap(); + } + + for i in 0..ITER - 1 { + assert_eq!(cnt.get(), i); + assert!(!pool.try_run_one()); + assert_eq!(cnt.get(), i + 1); + let w = waker.take(); + assert!(w.is_some()); + w.unwrap().wake(); + } + assert!(pool.try_run_one()); + assert_eq!(cnt.get(), ITER); +} + +#[test] +fn try_run_one_runs_sub_futures() { + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + let cnt = Rc::new(Cell::new(0)); + + let inner_spawner = spawn.clone(); + let cnt1 = cnt.clone(); + spawn + .spawn_local_obj( + Box::pin(poll_fn(move |_| { + cnt1.set(cnt1.get() + 1); + + let cnt2 = cnt1.clone(); + inner_spawner + .spawn_local_obj(Box::pin(lazy(move |_| cnt2.set(cnt2.get() + 1))).into()) + .unwrap(); + + Poll::Pending + })) + .into(), + ) + .unwrap(); + + pool.try_run_one(); + assert_eq!(cnt.get(), 2); +} + +#[test] +fn run_until_stalled_returns_if_empty() { + let mut pool = LocalPool::new(); + pool.run_until_stalled(); + pool.run_until_stalled(); +} + +#[test] +fn run_until_stalled_returns_multiple_times() { + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + let cnt = Rc::new(Cell::new(0)); + + let cnt1 = cnt.clone(); + spawn.spawn_local_obj(Box::pin(lazy(move |_| cnt1.set(cnt1.get() + 1))).into()).unwrap(); + pool.run_until_stalled(); + assert_eq!(cnt.get(), 1); + + let cnt2 = cnt.clone(); + spawn.spawn_local_obj(Box::pin(lazy(move |_| cnt2.set(cnt2.get() + 1))).into()).unwrap(); + pool.run_until_stalled(); + assert_eq!(cnt.get(), 2); +} + +#[test] +fn run_until_stalled_runs_spawned_sub_futures() { + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + let cnt = Rc::new(Cell::new(0)); + + let inner_spawner = spawn.clone(); + let cnt1 = cnt.clone(); + spawn + .spawn_local_obj( + Box::pin(poll_fn(move |_| { + cnt1.set(cnt1.get() + 1); + + let cnt2 = cnt1.clone(); + inner_spawner + .spawn_local_obj(Box::pin(lazy(move |_| cnt2.set(cnt2.get() + 1))).into()) + .unwrap(); + + Poll::Pending + })) + .into(), + ) + .unwrap(); + + pool.run_until_stalled(); + assert_eq!(cnt.get(), 2); +} + +#[test] +fn run_until_stalled_executes_all_ready() { + const ITER: usize = if cfg!(miri) { 50 } else { 200 }; + const PER_ITER: usize = 3; + + let cnt = Rc::new(Cell::new(0)); + + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + for i in 0..ITER { + for _ in 0..PER_ITER { + spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap(); + + let cnt = cnt.clone(); + spawn + .spawn_local_obj( + Box::pin(lazy(move |_| { + cnt.set(cnt.get() + 1); + })) + .into(), + ) + .unwrap(); + + // also add some pending tasks to test if they are ignored + spawn.spawn_local_obj(Box::pin(pending()).into()).unwrap(); + } + assert_eq!(cnt.get(), i * PER_ITER); + pool.run_until_stalled(); + assert_eq!(cnt.get(), (i + 1) * PER_ITER); + } +} + +#[test] +#[should_panic] +fn nesting_run() { + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + spawn + .spawn_obj( + Box::pin(lazy(|_| { + let mut pool = LocalPool::new(); + pool.run(); + })) + .into(), + ) + .unwrap(); + + pool.run(); +} + +#[test] +#[should_panic] +fn nesting_run_run_until_stalled() { + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + spawn + .spawn_obj( + Box::pin(lazy(|_| { + let mut pool = 
LocalPool::new(); + pool.run_until_stalled(); + })) + .into(), + ) + .unwrap(); + + pool.run(); +} + +#[test] +fn tasks_are_scheduled_fairly() { + let state = Rc::new(RefCell::new([0, 0])); + + struct Spin { + state: Rc>, + idx: usize, + } + + impl Future for Spin { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> { + let mut state = self.state.borrow_mut(); + + if self.idx == 0 { + let diff = state[0] - state[1]; + + assert!(diff.abs() <= 1); + + if state[0] >= 50 { + return Poll::Ready(()); + } + } + + state[self.idx] += 1; + + if state[self.idx] >= 100 { + return Poll::Ready(()); + } + + cx.waker().wake_by_ref(); + Poll::Pending + } + } + + let mut pool = LocalPool::new(); + let spawn = pool.spawner(); + + spawn.spawn_local_obj(Box::pin(Spin { state: state.clone(), idx: 0 }).into()).unwrap(); + + spawn.spawn_local_obj(Box::pin(Spin { state, idx: 1 }).into()).unwrap(); + + pool.run(); +} + +// Tests that the use of park/unpark in user-code has no +// effect on the expected behavior of the executor. +#[test] +fn park_unpark_independence() { + let mut done = false; + + let future = future::poll_fn(move |cx| { + if done { + return Poll::Ready(()); + } + done = true; + cx.waker().clone().wake(); // (*) + // some user-code that temporarily parks the thread + let test = thread::current(); + let latch = Arc::new(AtomicBool::new(false)); + let signal = latch.clone(); + thread::spawn(move || { + thread::sleep(Duration::from_millis(10)); + signal.store(true, Ordering::SeqCst); + test.unpark() + }); + while !latch.load(Ordering::Relaxed) { + thread::park(); + } + Poll::Pending // Expect to be called again due to (*). + }); + + futures::executor::block_on(future) +} + +struct SelfWaking { + wakeups_remaining: Rc>, +} + +impl Future for SelfWaking { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if *self.wakeups_remaining.borrow() != 0 { + *self.wakeups_remaining.borrow_mut() -= 1; + cx.waker().wake_by_ref(); + } + + Poll::Pending + } +} + +/// Regression test for https://github.com/rust-lang/futures-rs/pull/2593 +/// +/// The issue was that self-waking futures could cause `run_until_stalled` +/// to exit early, even when progress could still be made. +#[test] +fn self_waking_run_until_stalled() { + let wakeups_remaining = Rc::new(RefCell::new(10)); + + let mut pool = LocalPool::new(); + let spawner = pool.spawner(); + for _ in 0..3 { + let wakeups_remaining = Rc::clone(&wakeups_remaining); + spawner.spawn_local(SelfWaking { wakeups_remaining }).unwrap(); + } + + // This should keep polling until there are no more wakeups. + pool.run_until_stalled(); + + assert_eq!(*wakeups_remaining.borrow(), 0); +} + +/// Regression test for https://github.com/rust-lang/futures-rs/pull/2593 +/// +/// The issue was that self-waking futures could cause `try_run_one` +/// to exit early, even when progress could still be made. +#[test] +fn self_waking_try_run_one() { + let wakeups_remaining = Rc::new(RefCell::new(10)); + + let mut pool = LocalPool::new(); + let spawner = pool.spawner(); + for _ in 0..3 { + let wakeups_remaining = Rc::clone(&wakeups_remaining); + spawner.spawn_local(SelfWaking { wakeups_remaining }).unwrap(); + } + + spawner.spawn(future::ready(())).unwrap(); + + // The `ready` future should complete. + assert!(pool.try_run_one()); + + // The self-waking futures are each polled once. 
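+ // Each of the three futures decrements the shared counter (initially 10) once,
+ // so 10 - 3 = 7 wakeups remain.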
+ assert_eq!(*wakeups_remaining.borrow(), 7); +} diff --git a/vendor/futures-io/.cargo-checksum.json b/vendor/futures-io/.cargo-checksum.json new file mode 100644 index 00000000..e6b4865c --- /dev/null +++ b/vendor/futures-io/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"a2f46c222bebd5fae127414ffb34535af67be92b58bafcc39a16523ffc33ee83","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"575430be5c47352d85f36b44dcc2c2851a6a19e2384593415c4af22c6654cee7","src/lib.rs":"526e9700c28250b7512f122952257d57adc38eb001af92ef25bdb48a8c453175"},"package":"a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"} \ No newline at end of file diff --git a/vendor/futures-io/Cargo.toml b/vendor/futures-io/Cargo.toml new file mode 100644 index 00000000..d08683c6 --- /dev/null +++ b/vendor/futures-io/Cargo.toml @@ -0,0 +1,37 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.36" +name = "futures-io" +version = "0.3.30" +description = """ +The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library. +""" +homepage = "https://rust-lang.github.io/futures-rs" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/futures-rs" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies] + +[features] +default = ["std"] +std = [] +unstable = [] diff --git a/vendor/futures-io/LICENSE-APACHE b/vendor/futures-io/LICENSE-APACHE new file mode 100644 index 00000000..9eb0b097 --- /dev/null +++ b/vendor/futures-io/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/futures-io/LICENSE-MIT b/vendor/futures-io/LICENSE-MIT new file mode 100644 index 00000000..8ad082ec --- /dev/null +++ b/vendor/futures-io/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/futures-io/README.md b/vendor/futures-io/README.md new file mode 100644 index 00000000..da6eec28 --- /dev/null +++ b/vendor/futures-io/README.md @@ -0,0 +1,23 @@ +# futures-io + +The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +futures-io = "0.3" +``` + +The current `futures-io` requires Rust 1.36 or later. + +## License + +Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or +[MIT license](LICENSE-MIT) at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. diff --git a/vendor/futures-io/src/lib.rs b/vendor/futures-io/src/lib.rs new file mode 100644 index 00000000..e91eb784 --- /dev/null +++ b/vendor/futures-io/src/lib.rs @@ -0,0 +1,558 @@ +//! Asynchronous I/O +//! +//! This crate contains the `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and +//! `AsyncBufRead` traits, the asynchronous analogs to +//! `std::io::{Read, Write, Seek, BufRead}`. The primary difference is +//! that these traits integrate with the asynchronous task system. +//! +//! All items of this library are only available when the `std` feature of this +//! library is activated, and it is activated by default. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_debug_implementations, missing_docs, rust_2018_idioms, unreachable_pub)] +// It cannot be included in the published code because this lints have false positives in the minimum required version. +#![cfg_attr(test, warn(single_use_lifetimes))] +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_assignments, unused_variables) + ) +))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +#[cfg(feature = "std")] +mod if_std { + use std::io; + use std::ops::DerefMut; + use std::pin::Pin; + use std::task::{Context, Poll}; + + // Re-export some types from `std::io` so that users don't have to deal + // with conflicts when `use`ing `futures::io` and `std::io`. + #[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411 + #[doc(no_inline)] + pub use io::{Error, ErrorKind, IoSlice, IoSliceMut, Result, SeekFrom}; + + /// Read bytes asynchronously. + /// + /// This trait is analogous to the `std::io::Read` trait, but integrates + /// with the asynchronous task system. In particular, the `poll_read` + /// method, unlike `Read::read`, will automatically queue the current task + /// for wakeup and return if data is not yet available, rather than blocking + /// the calling thread. + pub trait AsyncRead { + /// Attempt to read from the `AsyncRead` into `buf`. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_read))`. 
+ /// + /// If no data is available for reading, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes + /// readable or is closed. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll>; + + /// Attempt to read from the `AsyncRead` into `bufs` using vectored + /// IO operations. + /// + /// This method is similar to `poll_read`, but allows data to be read + /// into multiple buffers using a single operation. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_read))`. + /// + /// If no data is available for reading, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes + /// readable or is closed. + /// By default, this method delegates to using `poll_read` on the first + /// nonempty buffer in `bufs`, or an empty one if none exists. Objects which + /// support vectored IO should override this method. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + fn poll_read_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + for b in bufs { + if !b.is_empty() { + return self.poll_read(cx, b); + } + } + + self.poll_read(cx, &mut []) + } + } + + /// Write bytes asynchronously. + /// + /// This trait is analogous to the `std::io::Write` trait, but integrates + /// with the asynchronous task system. In particular, the `poll_write` + /// method, unlike `Write::write`, will automatically queue the current task + /// for wakeup and return if the writer cannot take more data, rather than blocking + /// the calling thread. + pub trait AsyncWrite { + /// Attempt to write bytes from `buf` into the object. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. + /// + /// If the object is not ready for writing, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes + /// writable or is closed. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + /// + /// `poll_write` must try to make progress by flushing the underlying object if + /// that is the only way the underlying object can become writable again. + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll>; + + /// Attempt to write bytes from `bufs` into the object using vectored + /// IO operations. + /// + /// This method is similar to `poll_write`, but allows data from multiple buffers to be written + /// using a single operation. + /// + /// On success, returns `Poll::Ready(Ok(num_bytes_written))`. 
+ /// + /// If the object is not ready for writing, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes + /// writable or is closed. + /// + /// By default, this method delegates to using `poll_write` on the first + /// nonempty buffer in `bufs`, or an empty one if none exists. Objects which + /// support vectored IO should override this method. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + for b in bufs { + if !b.is_empty() { + return self.poll_write(cx, b); + } + } + + self.poll_write(cx, &[]) + } + + /// Attempt to flush the object, ensuring that any buffered data reach + /// their destination. + /// + /// On success, returns `Poll::Ready(Ok(()))`. + /// + /// If flushing cannot immediately complete, this method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object can make + /// progress towards flushing. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + /// + /// It only makes sense to do anything here if you actually buffer data. + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + + /// Attempt to close the object. + /// + /// On success, returns `Poll::Ready(Ok(()))`. + /// + /// If closing cannot immediately complete, this function returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object can make + /// progress towards closing. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + } + + /// Seek bytes asynchronously. + /// + /// This trait is analogous to the `std::io::Seek` trait, but integrates + /// with the asynchronous task system. In particular, the `poll_seek` + /// method, unlike `Seek::seek`, will automatically queue the current task + /// for wakeup and return if data is not yet available, rather than blocking + /// the calling thread. + pub trait AsyncSeek { + /// Attempt to seek to an offset, in bytes, in a stream. + /// + /// A seek beyond the end of a stream is allowed, but behavior is defined + /// by the implementation. + /// + /// If the seek operation completed successfully, + /// this method returns the new position from the start of the stream. + /// That position can be used later with [`SeekFrom::Start`]. + /// + /// # Errors + /// + /// Seeking to a negative offset is considered an error. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. 
Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + fn poll_seek( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll>; + } + + /// Read bytes asynchronously. + /// + /// This trait is analogous to the `std::io::BufRead` trait, but integrates + /// with the asynchronous task system. In particular, the `poll_fill_buf` + /// method, unlike `BufRead::fill_buf`, will automatically queue the current task + /// for wakeup and return if data is not yet available, rather than blocking + /// the calling thread. + pub trait AsyncBufRead: AsyncRead { + /// Attempt to return the contents of the internal buffer, filling it with more data + /// from the inner reader if it is empty. + /// + /// On success, returns `Poll::Ready(Ok(buf))`. + /// + /// If no data is available for reading, the method returns + /// `Poll::Pending` and arranges for the current task (via + /// `cx.waker().wake_by_ref()`) to receive a notification when the object becomes + /// readable or is closed. + /// + /// This function is a lower-level call. It needs to be paired with the + /// [`consume`] method to function properly. When calling this + /// method, none of the contents will be "read" in the sense that later + /// calling [`poll_read`] may return the same contents. As such, [`consume`] must + /// be called with the number of bytes that are consumed from this buffer to + /// ensure that the bytes are never returned twice. + /// + /// [`poll_read`]: AsyncRead::poll_read + /// [`consume`]: AsyncBufRead::consume + /// + /// An empty buffer returned indicates that the stream has reached EOF. + /// + /// # Implementation + /// + /// This function may not return errors of kind `WouldBlock` or + /// `Interrupted`. Implementations must convert `WouldBlock` into + /// `Poll::Pending` and either internally retry or convert + /// `Interrupted` into another error kind. + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>; + + /// Tells this buffer that `amt` bytes have been consumed from the buffer, + /// so they should no longer be returned in calls to [`poll_read`]. + /// + /// This function is a lower-level call. It needs to be paired with the + /// [`poll_fill_buf`] method to function properly. This function does + /// not perform any I/O, it simply informs this object that some amount of + /// its buffer, returned from [`poll_fill_buf`], has been consumed and should + /// no longer be returned. As such, this function may do odd things if + /// [`poll_fill_buf`] isn't called before calling it. + /// + /// The `amt` must be `<=` the number of bytes in the buffer returned by + /// [`poll_fill_buf`]. + /// + /// [`poll_read`]: AsyncRead::poll_read + /// [`poll_fill_buf`]: AsyncBufRead::poll_fill_buf + fn consume(self: Pin<&mut Self>, amt: usize); + } + + macro_rules! deref_async_read { + () => { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut **self).poll_read(cx, buf) + } + + fn poll_read_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + Pin::new(&mut **self).poll_read_vectored(cx, bufs) + } + }; + } + + impl AsyncRead for Box { + deref_async_read!(); + } + + impl AsyncRead for &mut T { + deref_async_read!(); + } + + impl

<P> AsyncRead for Pin<P>
+ where + P: DerefMut + Unpin, + P::Target: AsyncRead, + { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + self.get_mut().as_mut().poll_read(cx, buf) + } + + fn poll_read_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + self.get_mut().as_mut().poll_read_vectored(cx, bufs) + } + } + + macro_rules! delegate_async_read_to_stdio { + () => { + fn poll_read( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + Poll::Ready(io::Read::read(&mut *self, buf)) + } + + fn poll_read_vectored( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + bufs: &mut [IoSliceMut<'_>], + ) -> Poll> { + Poll::Ready(io::Read::read_vectored(&mut *self, bufs)) + } + }; + } + + impl AsyncRead for &[u8] { + delegate_async_read_to_stdio!(); + } + + macro_rules! deref_async_write { + () => { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut **self).poll_write(cx, buf) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut **self).poll_write_vectored(cx, bufs) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_flush(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self).poll_close(cx) + } + }; + } + + impl AsyncWrite for Box { + deref_async_write!(); + } + + impl AsyncWrite for &mut T { + deref_async_write!(); + } + + impl

<P> AsyncWrite for Pin<P>
+ where + P: DerefMut + Unpin, + P::Target: AsyncWrite, + { + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + self.get_mut().as_mut().poll_write(cx, buf) + } + + fn poll_write_vectored( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + self.get_mut().as_mut().poll_write_vectored(cx, bufs) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().as_mut().poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().as_mut().poll_close(cx) + } + } + + macro_rules! delegate_async_write_to_stdio { + () => { + fn poll_write( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + Poll::Ready(io::Write::write(&mut *self, buf)) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + bufs: &[IoSlice<'_>], + ) -> Poll> { + Poll::Ready(io::Write::write_vectored(&mut *self, bufs)) + } + + fn poll_flush(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(io::Write::flush(&mut *self)) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.poll_flush(cx) + } + }; + } + + impl AsyncWrite for Vec { + delegate_async_write_to_stdio!(); + } + + macro_rules! deref_async_seek { + () => { + fn poll_seek( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll> { + Pin::new(&mut **self).poll_seek(cx, pos) + } + }; + } + + impl AsyncSeek for Box { + deref_async_seek!(); + } + + impl AsyncSeek for &mut T { + deref_async_seek!(); + } + + impl

<P> AsyncSeek for Pin<P>
+ where + P: DerefMut + Unpin, + P::Target: AsyncSeek, + { + fn poll_seek( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll> { + self.get_mut().as_mut().poll_seek(cx, pos) + } + } + + macro_rules! deref_async_buf_read { + () => { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut **self.get_mut()).poll_fill_buf(cx) + } + + fn consume(mut self: Pin<&mut Self>, amt: usize) { + Pin::new(&mut **self).consume(amt) + } + }; + } + + impl AsyncBufRead for Box { + deref_async_buf_read!(); + } + + impl AsyncBufRead for &mut T { + deref_async_buf_read!(); + } + + impl

<P> AsyncBufRead for Pin<P>
+ where + P: DerefMut + Unpin, + P::Target: AsyncBufRead, + { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.get_mut().as_mut().poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.get_mut().as_mut().consume(amt) + } + } + + macro_rules! delegate_async_buf_read_to_stdio { + () => { + fn poll_fill_buf(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(io::BufRead::fill_buf(self.get_mut())) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + io::BufRead::consume(self.get_mut(), amt) + } + }; + } + + impl AsyncBufRead for &[u8] { + delegate_async_buf_read_to_stdio!(); + } +} + +#[cfg(feature = "std")] +pub use self::if_std::*; diff --git a/vendor/futures/.cargo-checksum.json b/vendor/futures/.cargo-checksum.json new file mode 100644 index 00000000..d50aae8b --- /dev/null +++ b/vendor/futures/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"9162f9effcecfa23c06e5908fb47f0e818cff072b39b8c5bb22867144297d63e","LICENSE-APACHE":"275c491d6d1160553c32fd6127061d7f9606c3ea25abfad6ca3f6ed088785427","LICENSE-MIT":"6652c868f35dfe5e8ef636810a4e576b9d663f3a17fb0f5613ad73583e1b88fd","README.md":"842d0b8a539ab13ba2b9863cd8fb27da4fc7e9def1aefeb21db5aa04269b1e34","src/lib.rs":"bb07f533ba89a36c0385b57de17d51bf23ccab9e13e00ec812a74f376df15930","tests/_require_features.rs":"5ad24019430b498addfc1fd853e955c7b646d78d0727a8ca29f586c9aab45cff","tests/async_await_macros.rs":"87863f5b73217d727a4789d69229ab5dd85252b8e76a1aca0220feb98a0922af","tests/auto_traits.rs":"bff7c984267a9ec98d5549c971623e7d658fc6f26fd78cfcb2630838b73e46d0","tests/bilock.rs":"bd0bf617352528f686b3fbb1847f4da9f6fe351e456e0bdce888bc738311fa83","tests/compat.rs":"1449926cc046d2ae9f86a263efd9353ca8e174ea546c083b360136c5a2aef1d1","tests/eager_drop.rs":"dc25d067207c06bbe094752d70bf161e206f00e162ffa3219583c8b4eb0816a1","tests/eventual.rs":"9050809e5196d0870a3ee2a268a5b4b398739b01617e1e317a673ac0660974cf","tests/future_abortable.rs":"4c81607472a85c5d87a5fe8a510a24cf1e8793fedf7f6cd6741ba1efd66615cd","tests/future_basic_combinators.rs":"4508c1250b85a4f749b7261bbd0ba728d3970e7ba277e84a006e76cf068fb54f","tests/future_fuse.rs":"bb63141f1486e755d0cdea1d93e302ad864a2186aa5287f909a0b3a922e82065","tests/future_inspect.rs":"9c03ceb770ce04fe9fd88a3489362642a0e34ae86a7b4958703e89e8b7a1ecf4","tests/future_join.rs":"f59d7b948df7019e52f902ca7aef17f89ad26582bd1902d520ba99f6f61ba508","tests/future_join_all.rs":"6adacfca4d33a769dbe72fd04c54b49580ecd7a9994a185cfe97dd7a2b55c298","tests/future_obj.rs":"a6aae88a194dc7d3bb961c20db78f180a01796cf7ea4bf106da98c40d89ed36d","tests/future_select_all.rs":"4cefc84d6b7ae2cf0007912cd0325fff6b926a4c26310e7b14a21868de61616f","tests/future_select_ok.rs":"1cabd03268641e1ac42b880344528bad73e3aeb6d6a8a141e652f339dd40184b","tests/future_shared.rs":"4f2cba1e74dacc4fc6b92eef04700df832533efe4fe6a392e3fd0f655b5b8450","tests/future_try_flatten_stream.rs":"aa4542b5d88f62522b736fac4567613081df45ad3eb54b0b659cdadc9409c4db","tests/future_try_join_all.rs":"cca2c5a3b42fe4bf9705301cd1450b30a3822736c5c09793eee06b28ce686a19","tests/io_buf_reader.rs":"1d60479224d5aa9378d4aed6246362b08a823ee7c9977f6a5e44fce7c40116be","tests/io_buf_writer.rs":"8f7a78ab2955d2beb69d0881321d4191235540aef6448e875e7f76a2ffc55b89","tests/io_cursor.rs":"cba5a7b968b9f816ac33316ce1e4da67cb320aa5a21332c0f9a45694fa445dd7","tests/io_line_writer.rs":"5b1140de776a721a677911496daa4e7956cc52cc08838d593ab300a93e0d7984","tests/io_lines.rs":"72a310c885591793ed724d0aa2158ac2c9d1af22de417044d96b714f78317586","t
ests/io_read.rs":"e0a8fa9b27e042f03c9fe14e8f0f329a67e24afad1ce40b906a1ab4d2abef23a","tests/io_read_exact.rs":"42049cd67589992dc09764ffb3836c475115b26dee441fd4cc7e847b2d166667","tests/io_read_line.rs":"f360c30c32fc8c73b371281e86c3f1095da7ef23b702debb30d335046dc77dac","tests/io_read_to_end.rs":"ea3e961e39a0b92930bded05e8ba26e4902461ab53818843d40fae8065b1a803","tests/io_read_to_string.rs":"824921601ac49f15b9a0b349c900f9cc9081cf2646e6a86f443166f841f1320e","tests/io_read_until.rs":"36d9a98149b2410894121ccba49e5134e3209826b2225acfc787016cea2bc92a","tests/io_window.rs":"0d18334b1eb35f5e93099e19c0cab22abe5971d8531176b81345fc89d07692a8","tests/io_write.rs":"701032ff3d5a6e6a3d8cb4e373d1c93e4708f2e5ee0a6742fa626f27b6094b4d","tests/lock_mutex.rs":"eb47121b842096353165b1444bf679a2df0103b181f811b40042f5c3f1d00c73","tests/macro_comma_support.rs":"627024ccadfe95194469d5bae2cc29b897b0118a664d7222408a2e234a10e939","tests/object_safety.rs":"9d047190387ed8334113687003c23407c80c858411f5ec7d5c505500f9639dfc","tests/oneshot.rs":"2109a8b3b524f4b36be9fb100f9b8c0d38bbd38d51716adcafdb65994b4a81d6","tests/ready_queue.rs":"6380025061025c27cb3b521df9520f169c7aa8e1802b881d539023bb4651744a","tests/recurse.rs":"b01b3d73b69ad90a767d297f974dac435817c39e12556fa6a3e6c725dd84f706","tests/sink.rs":"d9b2ddcbbb6af9e36d057db97dbba233547be645a7e4901b2842a4671f9f0212","tests/sink_fanout.rs":"67ab58422040308353955311f75222e55378e4cc34557c7b34140bd20c259132","tests/stream.rs":"6cb49d74e63a6264b2862d1020517251262010441a9f3e1eb0b3a83c908b705b","tests/stream_abortable.rs":"60052b83b5eeb2395b77bc213f35098d2d5880529f0d83884582a8bbff78b139","tests/stream_buffer_unordered.rs":"143ee19056b9ee9e480903cf4a1b00da7d4e528c5804569bf8c40869e6ac6eed","tests/stream_catch_unwind.rs":"5cdaaf70436c49d3a7107bdc5547ddb8757c3d2057635aded70e485d0cb9cbfc","tests/stream_futures_ordered.rs":"f9083bd8cfa86620c51abffc390564432022b5c8d15a7cba15dd5cb53ae99dd6","tests/stream_futures_unordered.rs":"c888112d760db856e4d9191a2a6a3aa4a757d65e47a12fcd16fc5be7bf0b3e78","tests/stream_into_async_read.rs":"00ecb18289ebc8f46ea0cf43e0dce0631d7698bd1303a7bcd84d0addc9d8b645","tests/stream_peekable.rs":"c0addb0c510e13183ba3d6102633b75a9223651ae80a64542e913c712fe69a30","tests/stream_select_all.rs":"3a9045754939da5b30305e78f0571d79a03aaa77030c6ccf82225f076e9843c9","tests/stream_select_next_some.rs":"871edcee3ffc16c697251b29c9ba500aa4e3e503aa738748d7392e3462c82dce","tests/stream_split.rs":"074e9c9b51b6f7ea83d77347b5a0c8d414ca32b90445fec9b85f7f4cd2a6049f","tests/stream_try_stream.rs":"62ac8242ce3311930d352ad96573208d0df9ea5bc4e37d6edafabc7b3291ef88","tests/stream_unfold.rs":"7c6fbd10c782828793cbe1eb347ec776d99b185dad498e886f7161da76f76880","tests/task_arc_wake.rs":"5a49d074d1d5d9d5ec383dcd9a3868f636c1d7e34662e2573e467948db126206","tests/task_atomic_waker.rs":"8e85b4bc1360788646a52633dfe896d852773d6b482f81626cf534b97b7d937a","tests/test_macro.rs":"a46a946169c342c576936b60909165a50b94350501280ed9bba89d365af69287","tests/try_join.rs":"65f282f8351bd9a74642f2465c7aaf72ee7097002920989f156d60271652549e","tests_disabled/all.rs":"ddcd8fefb0d4a4a91a78328e7e652c35f93dc3669639d76fa0f56452b51abc23","tests_disabled/stream.rs":"8a615a472a35053d12b269d40fe244dfb3ba66fb78d217323aa2be177d5a111e"},"package":"da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335"} \ No newline at end of file diff --git a/vendor/futures/Cargo.toml b/vendor/futures/Cargo.toml new file mode 100644 index 00000000..f766861c --- /dev/null +++ b/vendor/futures/Cargo.toml @@ -0,0 +1,147 @@ +# THIS FILE IS 
AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +rust-version = "1.56" +name = "futures" +version = "0.3.29" +description = """ +An implementation of futures and streams featuring zero allocations, +composability, and iterator-like interfaces. +""" +homepage = "https://rust-lang.github.io/futures-rs" +readme = "README.md" +keywords = [ + "futures", + "async", + "future", +] +categories = ["asynchronous"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/futures-rs" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[package.metadata.playground] +features = [ + "std", + "async-await", + "compat", + "io-compat", + "executor", + "thread-pool", +] + +[dependencies.futures-channel] +version = "0.3.29" +features = ["sink"] +default-features = false + +[dependencies.futures-core] +version = "0.3.29" +default-features = false + +[dependencies.futures-executor] +version = "0.3.29" +optional = true +default-features = false + +[dependencies.futures-io] +version = "0.3.29" +default-features = false + +[dependencies.futures-sink] +version = "0.3.29" +default-features = false + +[dependencies.futures-task] +version = "0.3.29" +default-features = false + +[dependencies.futures-util] +version = "0.3.29" +features = ["sink"] +default-features = false + +[dev-dependencies.assert_matches] +version = "1.3.0" + +[dev-dependencies.pin-project] +version = "1.0.11" + +[dev-dependencies.pin-utils] +version = "0.1.0" + +[dev-dependencies.static_assertions] +version = "1" + +[dev-dependencies.tokio] +version = "0.1.11" + +[features] +alloc = [ + "futures-core/alloc", + "futures-task/alloc", + "futures-sink/alloc", + "futures-channel/alloc", + "futures-util/alloc", +] +async-await = [ + "futures-util/async-await", + "futures-util/async-await-macro", +] +bilock = ["futures-util/bilock"] +cfg-target-has-atomic = [] +compat = [ + "std", + "futures-util/compat", +] +default = [ + "std", + "async-await", + "executor", +] +executor = [ + "std", + "futures-executor/std", +] +io-compat = [ + "compat", + "futures-util/io-compat", +] +std = [ + "alloc", + "futures-core/std", + "futures-task/std", + "futures-io/std", + "futures-sink/std", + "futures-util/std", + "futures-util/io", + "futures-util/channel", +] +thread-pool = [ + "executor", + "futures-executor/thread-pool", +] +unstable = [ + "futures-core/unstable", + "futures-task/unstable", + "futures-channel/unstable", + "futures-io/unstable", + "futures-util/unstable", +] +write-all-vectored = ["futures-util/write-all-vectored"] diff --git a/vendor/futures/LICENSE-APACHE b/vendor/futures/LICENSE-APACHE new file mode 100644 index 00000000..9eb0b097 --- /dev/null +++ b/vendor/futures/LICENSE-APACHE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/futures/LICENSE-MIT b/vendor/futures/LICENSE-MIT new file mode 100644 index 00000000..8ad082ec --- /dev/null +++ b/vendor/futures/LICENSE-MIT @@ -0,0 +1,26 @@ +Copyright (c) 2016 Alex Crichton +Copyright (c) 2017 The Tokio Authors + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/futures/README.md b/vendor/futures/README.md new file mode 100644 index 00000000..355d6078 --- /dev/null +++ b/vendor/futures/README.md @@ -0,0 +1,61 @@ +

+ futures-rs
+ Zero-cost asynchronous programming in Rust
+ (README header badges: Build Status, crates.io; links: Documentation | Website)
+ +`futures-rs` is a library providing the foundations for asynchronous programming in Rust. +It includes key trait definitions like `Stream`, as well as utilities like `join!`, +`select!`, and various futures combinator methods which enable expressive asynchronous +control flow. + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +futures = "0.3" +``` + +The current `futures` requires Rust 1.56 or later. + +### Feature `std` + +Futures-rs works without the standard library, such as in bare metal environments. +However, it has a significantly reduced API surface. To use futures-rs in +a `#[no_std]` environment, use: + +```toml +[dependencies] +futures = { version = "0.3", default-features = false } +``` + +## License + +Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or +[MIT license](LICENSE-MIT) at your option. + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall +be dual licensed as above, without any additional terms or conditions. diff --git a/vendor/futures/src/lib.rs b/vendor/futures/src/lib.rs new file mode 100644 index 00000000..b972f517 --- /dev/null +++ b/vendor/futures/src/lib.rs @@ -0,0 +1,260 @@ +//! Abstractions for asynchronous programming. +//! +//! This crate provides a number of core abstractions for writing asynchronous +//! code: +//! +//! - [Futures](crate::future) are single eventual values produced by +//! asynchronous computations. Some programming languages (e.g. JavaScript) +//! call this concept "promise". +//! - [Streams](crate::stream) represent a series of values +//! produced asynchronously. +//! - [Sinks](crate::sink) provide support for asynchronous writing of +//! data. +//! - [Executors](crate::executor) are responsible for running asynchronous +//! tasks. +//! +//! The crate also contains abstractions for [asynchronous I/O](crate::io) and +//! [cross-task communication](crate::channel). +//! +//! Underlying all of this is the *task system*, which is a form of lightweight +//! threading. Large asynchronous computations are built up using futures, +//! streams and sinks, and then spawned as independent tasks that are run to +//! completion, but *do not block* the thread running them. +//! +//! The following example describes how the task system context is built and used +//! within macros and keywords such as async and await!. +//! +//! ```rust +//! # use futures::channel::mpsc; +//! # use futures::executor; ///standard executors to provide a context for futures and streams +//! # use futures::executor::ThreadPool; +//! # use futures::StreamExt; +//! # +//! fn main() { +//! # { +//! let pool = ThreadPool::new().expect("Failed to build pool"); +//! let (tx, rx) = mpsc::unbounded::(); +//! +//! // Create a future by an async block, where async is responsible for an +//! // implementation of Future. At this point no executor has been provided +//! // to this future, so it will not be running. +//! let fut_values = async { +//! // Create another async block, again where the Future implementation +//! // is generated by async. Since this is inside of a parent async block, +//! // it will be provided with the executor of the parent block when the parent +//! // block is executed. +//! // +//! // This executor chaining is done by Future::poll whose second argument +//! // is a std::task::Context. This represents our executor, and the Future +//! 
// implemented by this async block can be polled using the parent async +//! // block's executor. +//! let fut_tx_result = async move { +//! (0..100).for_each(|v| { +//! tx.unbounded_send(v).expect("Failed to send"); +//! }) +//! }; +//! +//! // Use the provided thread pool to spawn the generated future +//! // responsible for transmission +//! pool.spawn_ok(fut_tx_result); +//! +//! let fut_values = rx +//! .map(|v| v * 2) +//! .collect(); +//! +//! // Use the executor provided to this async block to wait for the +//! // future to complete. +//! fut_values.await +//! }; +//! +//! // Actually execute the above future, which will invoke Future::poll and +//! // subsequently chain appropriate Future::poll and methods needing executors +//! // to drive all futures. Eventually fut_values will be driven to completion. +//! let values: Vec = executor::block_on(fut_values); +//! +//! println!("Values={:?}", values); +//! # } +//! # std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +//! } +//! ``` +//! +//! The majority of examples and code snippets in this crate assume that they are +//! inside an async block as written above. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn( + missing_debug_implementations, + missing_docs, + rust_2018_idioms, + single_use_lifetimes, + unreachable_pub +)] +#![doc(test( + no_crate_inject, + attr( + deny(warnings, rust_2018_idioms, single_use_lifetimes), + allow(dead_code, unused_assignments, unused_variables) + ) +))] +#![cfg_attr(docsrs, feature(doc_cfg))] + +#[cfg(all(feature = "bilock", not(feature = "unstable")))] +compile_error!("The `bilock` feature requires the `unstable` feature as an explicit opt-in to unstable features"); + +#[doc(no_inline)] +pub use futures_core::future::{Future, TryFuture}; +#[doc(no_inline)] +pub use futures_util::future::{FutureExt, TryFutureExt}; + +#[doc(no_inline)] +pub use futures_core::stream::{Stream, TryStream}; +#[doc(no_inline)] +pub use futures_util::stream::{StreamExt, TryStreamExt}; + +#[doc(no_inline)] +pub use futures_sink::Sink; +#[doc(no_inline)] +pub use futures_util::sink::SinkExt; + +#[cfg(feature = "std")] +#[doc(no_inline)] +pub use futures_io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite}; +#[cfg(feature = "std")] +#[doc(no_inline)] +pub use futures_util::{AsyncBufReadExt, AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; + +// Macro reexports +pub use futures_core::ready; // Readiness propagation +pub use futures_util::pin_mut; +#[cfg(feature = "std")] +#[cfg(feature = "async-await")] +pub use futures_util::select; +#[cfg(feature = "async-await")] +pub use futures_util::{join, pending, poll, select_biased, try_join}; // Async-await + +// Module reexports +#[doc(inline)] +pub use futures_util::{future, never, sink, stream, task}; + +#[cfg(feature = "std")] +#[cfg(feature = "async-await")] +pub use futures_util::stream_select; + +#[cfg(feature = "alloc")] +#[doc(inline)] +pub use futures_channel as channel; +#[cfg(feature = "alloc")] +#[doc(inline)] +pub use futures_util::lock; + +#[cfg(feature = "std")] +#[doc(inline)] +pub use futures_util::io; + +#[cfg(feature = "executor")] +#[cfg_attr(docsrs, doc(cfg(feature = "executor")))] +pub mod executor { + //! Built-in executors and related tools. + //! + //! All asynchronous computation occurs within an executor, which is + //! capable of spawning futures as tasks. This module provides several + //! built-in executors, as well as tools for building your own. + //! + //! 
+ //! This module is only available when the `executor` feature of this + //! library is activated. + //! + //! # Using a thread pool (M:N task scheduling) + //! + //! Most of the time tasks should be executed on a [thread pool](ThreadPool). + //! A small set of worker threads can handle a very large set of spawned tasks + //! (which are much lighter weight than threads). Tasks spawned onto the pool + //! with the [`spawn_ok`](ThreadPool::spawn_ok) function will run ambiently on + //! the created threads. + //! + //! # Spawning additional tasks + //! + //! Tasks can be spawned onto a spawner by calling its [`spawn_obj`] method + //! directly. In the case of `!Send` futures, [`spawn_local_obj`] can be used + //! instead. + //! + //! # Single-threaded execution + //! + //! In addition to thread pools, it's possible to run a task (and the tasks + //! it spawns) entirely within a single thread via the [`LocalPool`] executor. + //! Aside from cutting down on synchronization costs, this executor also makes + //! it possible to spawn non-`Send` tasks, via [`spawn_local_obj`]. The + //! [`LocalPool`] is best suited for running I/O-bound tasks that do relatively + //! little work between I/O operations. + //! + //! There is also a convenience function [`block_on`] for simply running a + //! future to completion on the current thread. + //! + //! [`spawn_obj`]: https://docs.rs/futures/0.3/futures/task/trait.Spawn.html#tymethod.spawn_obj + //! [`spawn_local_obj`]: https://docs.rs/futures/0.3/futures/task/trait.LocalSpawn.html#tymethod.spawn_local_obj + + pub use futures_executor::{ + block_on, block_on_stream, enter, BlockingStream, Enter, EnterError, LocalPool, + LocalSpawner, + }; + + #[cfg(feature = "thread-pool")] + #[cfg_attr(docsrs, doc(cfg(feature = "thread-pool")))] + pub use futures_executor::{ThreadPool, ThreadPoolBuilder}; +} + +#[cfg(feature = "compat")] +#[cfg_attr(docsrs, doc(cfg(feature = "compat")))] +pub mod compat { + //! Interop between `futures` 0.1 and 0.3. + //! + //! This module is only available when the `compat` feature of this + //! library is activated. + + pub use futures_util::compat::{ + Compat, Compat01As03, Compat01As03Sink, CompatSink, Executor01As03, Executor01CompatExt, + Executor01Future, Future01CompatExt, Sink01CompatExt, Stream01CompatExt, + }; + + #[cfg(feature = "io-compat")] + #[cfg_attr(docsrs, doc(cfg(feature = "io-compat")))] + pub use futures_util::compat::{AsyncRead01CompatExt, AsyncWrite01CompatExt}; +} + +pub mod prelude { + //! A "prelude" for crates using the `futures` crate. + //! + //! This prelude is similar to the standard library's prelude in that you'll + //! almost always want to import its entire contents, but unlike the + //! standard library's prelude you'll have to do so manually: + //! + //! ``` + //! # #[allow(unused_imports)] + //! use futures::prelude::*; + //! ``` + //! + //! The prelude may grow over time as additional items see ubiquitous use. 
+ + pub use crate::future::{self, Future, TryFuture}; + pub use crate::sink::{self, Sink}; + pub use crate::stream::{self, Stream, TryStream}; + + #[doc(no_inline)] + #[allow(unreachable_pub)] + pub use crate::future::{FutureExt as _, TryFutureExt as _}; + #[doc(no_inline)] + pub use crate::sink::SinkExt as _; + #[doc(no_inline)] + #[allow(unreachable_pub)] + pub use crate::stream::{StreamExt as _, TryStreamExt as _}; + + #[cfg(feature = "std")] + pub use crate::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite}; + + #[cfg(feature = "std")] + #[doc(no_inline)] + #[allow(unreachable_pub)] + pub use crate::io::{ + AsyncBufReadExt as _, AsyncReadExt as _, AsyncSeekExt as _, AsyncWriteExt as _, + }; +} diff --git a/vendor/futures/tests/_require_features.rs b/vendor/futures/tests/_require_features.rs new file mode 100644 index 00000000..8046cc99 --- /dev/null +++ b/vendor/futures/tests/_require_features.rs @@ -0,0 +1,13 @@ +#[cfg(not(all( + feature = "std", + feature = "alloc", + feature = "async-await", + feature = "compat", + feature = "io-compat", + feature = "executor", + feature = "thread-pool", +)))] +compile_error!( + "`futures` tests must have all stable features activated: \ + use `--all-features` or `--features default,thread-pool,io-compat`" +); diff --git a/vendor/futures/tests/async_await_macros.rs b/vendor/futures/tests/async_await_macros.rs new file mode 100644 index 00000000..82a617f2 --- /dev/null +++ b/vendor/futures/tests/async_await_macros.rs @@ -0,0 +1,393 @@ +use futures::channel::{mpsc, oneshot}; +use futures::executor::block_on; +use futures::future::{self, poll_fn, FutureExt}; +use futures::sink::SinkExt; +use futures::stream::StreamExt; +use futures::task::{Context, Poll}; +use futures::{ + join, pending, pin_mut, poll, select, select_biased, stream, stream_select, try_join, +}; +use std::mem; + +#[test] +fn poll_and_pending() { + let pending_once = async { pending!() }; + block_on(async { + pin_mut!(pending_once); + assert_eq!(Poll::Pending, poll!(&mut pending_once)); + assert_eq!(Poll::Ready(()), poll!(&mut pending_once)); + }); +} + +#[test] +fn join() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = oneshot::channel::(); + + let fut = async { + let res = join!(rx1, rx2); + assert_eq!((Ok(1), Ok(2)), res); + }; + + block_on(async { + pin_mut!(fut); + assert_eq!(Poll::Pending, poll!(&mut fut)); + tx1.send(1).unwrap(); + assert_eq!(Poll::Pending, poll!(&mut fut)); + tx2.send(2).unwrap(); + assert_eq!(Poll::Ready(()), poll!(&mut fut)); + }); +} + +#[test] +fn select() { + let (tx1, rx1) = oneshot::channel::(); + let (_tx2, rx2) = oneshot::channel::(); + tx1.send(1).unwrap(); + let mut ran = false; + block_on(async { + select! { + res = rx1.fuse() => { + assert_eq!(Ok(1), res); + ran = true; + }, + _ = rx2.fuse() => unreachable!(), + } + }); + assert!(ran); +} + +#[test] +fn select_biased() { + let (tx1, rx1) = oneshot::channel::(); + let (_tx2, rx2) = oneshot::channel::(); + tx1.send(1).unwrap(); + let mut ran = false; + block_on(async { + select_biased! { + res = rx1.fuse() => { + assert_eq!(Ok(1), res); + ran = true; + }, + _ = rx2.fuse() => unreachable!(), + } + }); + assert!(ran); +} + +#[test] +fn select_streams() { + let (mut tx1, rx1) = mpsc::channel::(1); + let (mut tx2, rx2) = mpsc::channel::(1); + let mut rx1 = rx1.fuse(); + let mut rx2 = rx2.fuse(); + let mut ran = false; + let mut total = 0; + block_on(async { + let mut tx1_opt; + let mut tx2_opt; + select! 
{ + _ = rx1.next() => panic!(), + _ = rx2.next() => panic!(), + default => { + tx1.send(2).await.unwrap(); + tx2.send(3).await.unwrap(); + tx1_opt = Some(tx1); + tx2_opt = Some(tx2); + } + complete => panic!(), + } + loop { + select! { + // runs first and again after default + x = rx1.next() => if let Some(x) = x { total += x; }, + // runs second and again after default + x = rx2.next() => if let Some(x) = x { total += x; }, + // runs third + default => { + assert_eq!(total, 5); + ran = true; + drop(tx1_opt.take().unwrap()); + drop(tx2_opt.take().unwrap()); + }, + // runs last + complete => break, + }; + } + }); + assert!(ran); +} + +#[test] +fn select_can_move_uncompleted_futures() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = oneshot::channel::(); + tx1.send(1).unwrap(); + tx2.send(2).unwrap(); + let mut ran = false; + let mut rx1 = rx1.fuse(); + let mut rx2 = rx2.fuse(); + block_on(async { + select! { + res = rx1 => { + assert_eq!(Ok(1), res); + assert_eq!(Ok(2), rx2.await); + ran = true; + }, + res = rx2 => { + assert_eq!(Ok(2), res); + assert_eq!(Ok(1), rx1.await); + ran = true; + }, + } + }); + assert!(ran); +} + +#[test] +fn select_nested() { + let mut outer_fut = future::ready(1); + let mut inner_fut = future::ready(2); + let res = block_on(async { + select! { + x = outer_fut => { + select! { + y = inner_fut => x + y, + } + } + } + }); + assert_eq!(res, 3); +} + +#[cfg_attr(not(target_pointer_width = "64"), ignore)] +#[test] +fn select_size() { + let fut = async { + let mut ready = future::ready(0i32); + select! { + _ = ready => {}, + } + }; + assert_eq!(mem::size_of_val(&fut), 24); + + let fut = async { + let mut ready1 = future::ready(0i32); + let mut ready2 = future::ready(0i32); + select! { + _ = ready1 => {}, + _ = ready2 => {}, + } + }; + assert_eq!(mem::size_of_val(&fut), 40); +} + +#[test] +fn select_on_non_unpin_expressions() { + // The returned Future is !Unpin + let make_non_unpin_fut = || async { 5 }; + + let res = block_on(async { + let select_res; + select! { + value_1 = make_non_unpin_fut().fuse() => select_res = value_1, + value_2 = make_non_unpin_fut().fuse() => select_res = value_2, + }; + select_res + }); + assert_eq!(res, 5); +} + +#[test] +fn select_on_non_unpin_expressions_with_default() { + // The returned Future is !Unpin + let make_non_unpin_fut = || async { 5 }; + + let res = block_on(async { + let select_res; + select! { + value_1 = make_non_unpin_fut().fuse() => select_res = value_1, + value_2 = make_non_unpin_fut().fuse() => select_res = value_2, + default => select_res = 7, + }; + select_res + }); + assert_eq!(res, 5); +} + +#[cfg_attr(not(target_pointer_width = "64"), ignore)] +#[test] +fn select_on_non_unpin_size() { + // The returned Future is !Unpin + let make_non_unpin_fut = || async { 5 }; + + let fut = async { + let select_res; + select! { + value_1 = make_non_unpin_fut().fuse() => select_res = value_1, + value_2 = make_non_unpin_fut().fuse() => select_res = value_2, + }; + select_res + }; + + assert_eq!(32, mem::size_of_val(&fut)); +} + +#[test] +fn select_can_be_used_as_expression() { + block_on(async { + let res = select! { + x = future::ready(7) => x, + y = future::ready(3) => y + 1, + }; + assert!(res == 7 || res == 4); + }); +} + +#[test] +fn select_with_default_can_be_used_as_expression() { + fn poll_always_pending(_cx: &mut Context<'_>) -> Poll { + Poll::Pending + } + + block_on(async { + let res = select! 
{ + x = poll_fn(poll_always_pending::).fuse() => x, + y = poll_fn(poll_always_pending::).fuse() => y + 1, + default => 99, + }; + assert_eq!(res, 99); + }); +} + +#[test] +fn select_with_complete_can_be_used_as_expression() { + block_on(async { + let res = select! { + x = future::pending::() => x, + y = future::pending::() => y + 1, + default => 99, + complete => 237, + }; + assert_eq!(res, 237); + }); +} + +#[test] +#[allow(unused_assignments)] +fn select_on_mutable_borrowing_future_with_same_borrow_in_block() { + async fn require_mutable(_: &mut i32) {} + async fn async_noop() {} + + block_on(async { + let mut value = 234; + select! { + _ = require_mutable(&mut value).fuse() => { }, + _ = async_noop().fuse() => { + value += 5; + }, + } + }); +} + +#[test] +#[allow(unused_assignments)] +fn select_on_mutable_borrowing_future_with_same_borrow_in_block_and_default() { + async fn require_mutable(_: &mut i32) {} + async fn async_noop() {} + + block_on(async { + let mut value = 234; + select! { + _ = require_mutable(&mut value).fuse() => { }, + _ = async_noop().fuse() => { + value += 5; + }, + default => { + value += 27; + }, + } + }); +} + +#[test] +#[allow(unused_assignments)] +fn stream_select() { + // stream_select! macro + block_on(async { + let endless_ints = |i| stream::iter(vec![i].into_iter().cycle()); + + let mut endless_ones = stream_select!(endless_ints(1i32), stream::pending()); + assert_eq!(endless_ones.next().await, Some(1)); + assert_eq!(endless_ones.next().await, Some(1)); + + let mut finite_list = + stream_select!(stream::iter(vec![1].into_iter()), stream::iter(vec![1].into_iter())); + assert_eq!(finite_list.next().await, Some(1)); + assert_eq!(finite_list.next().await, Some(1)); + assert_eq!(finite_list.next().await, None); + + let endless_mixed = stream_select!(endless_ints(1i32), endless_ints(2), endless_ints(3)); + // Take 1000, and assert a somewhat even distribution of values. + // The fairness is randomized, but over 1000 samples we should be pretty close to even. + // This test may be a bit flaky. Feel free to adjust the margins as you see fit. 
+ let mut count = 0; + let results = endless_mixed + .take_while(move |_| { + count += 1; + let ret = count < 1000; + async move { ret } + }) + .collect::>() + .await; + assert!(results.iter().filter(|x| **x == 1).count() >= 299); + assert!(results.iter().filter(|x| **x == 2).count() >= 299); + assert!(results.iter().filter(|x| **x == 3).count() >= 299); + }); +} + +#[cfg_attr(not(target_pointer_width = "64"), ignore)] +#[test] +fn join_size() { + let fut = async { + let ready = future::ready(0i32); + join!(ready) + }; + assert_eq!(mem::size_of_val(&fut), 24); + + let fut = async { + let ready1 = future::ready(0i32); + let ready2 = future::ready(0i32); + join!(ready1, ready2) + }; + assert_eq!(mem::size_of_val(&fut), 40); +} + +#[cfg_attr(not(target_pointer_width = "64"), ignore)] +#[test] +fn try_join_size() { + let fut = async { + let ready = future::ready(Ok::(0)); + try_join!(ready) + }; + assert_eq!(mem::size_of_val(&fut), 24); + + let fut = async { + let ready1 = future::ready(Ok::(0)); + let ready2 = future::ready(Ok::(0)); + try_join!(ready1, ready2) + }; + assert_eq!(mem::size_of_val(&fut), 48); +} + +#[allow(clippy::let_underscore_future)] +#[test] +fn join_doesnt_require_unpin() { + let _ = async { join!(async {}, async {}) }; +} + +#[allow(clippy::let_underscore_future)] +#[test] +fn try_join_doesnt_require_unpin() { + let _ = async { try_join!(async { Ok::<(), ()>(()) }, async { Ok::<(), ()>(()) },) }; +} diff --git a/vendor/futures/tests/auto_traits.rs b/vendor/futures/tests/auto_traits.rs new file mode 100644 index 00000000..004fda1e --- /dev/null +++ b/vendor/futures/tests/auto_traits.rs @@ -0,0 +1,1898 @@ +#![cfg(feature = "compat")] + +//! Assert Send/Sync/Unpin for all public types. + +use futures::{ + future::Future, + sink::Sink, + stream::Stream, + task::{Context, Poll}, +}; +use static_assertions::{assert_impl_all as assert_impl, assert_not_impl_all as assert_not_impl}; +use std::marker::PhantomPinned; +use std::{marker::PhantomData, pin::Pin}; + +pub type LocalFuture = Pin>>; +pub type LocalTryFuture = LocalFuture>; +pub type SendFuture = Pin + Send>>; +pub type SendTryFuture = SendFuture>; +pub type SyncFuture = Pin + Sync>>; +pub type SyncTryFuture = SyncFuture>; +pub type SendSyncFuture = Pin + Send + Sync>>; +pub type SendSyncTryFuture = SendSyncFuture>; +pub type UnpinFuture = LocalFuture; +pub type UnpinTryFuture = UnpinFuture>; +pub struct PinnedFuture(PhantomPinned, PhantomData); +impl Future for PinnedFuture { + type Output = T; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll { + unimplemented!() + } +} +pub type PinnedTryFuture = PinnedFuture>; + +pub type LocalStream = Pin>>; +pub type LocalTryStream = LocalStream>; +pub type SendStream = Pin + Send>>; +pub type SendTryStream = SendStream>; +pub type SyncStream = Pin + Sync>>; +pub type SyncTryStream = SyncStream>; +pub type SendSyncStream = Pin + Send + Sync>>; +pub type SendSyncTryStream = SendSyncStream>; +pub type UnpinStream = LocalStream; +pub type UnpinTryStream = UnpinStream>; +pub struct PinnedStream(PhantomPinned, PhantomData); +impl Stream for PinnedStream { + type Item = T; + fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + unimplemented!() + } +} +pub type PinnedTryStream = PinnedStream>; + +pub type LocalSink = Pin>>; +pub type SendSink = Pin + Send>>; +pub type SyncSink = Pin + Sync>>; +pub type UnpinSink = LocalSink; +pub struct PinnedSink(PhantomPinned, PhantomData<(T, E)>); +impl Sink for PinnedSink { + type Error = E; + fn poll_ready(self: Pin<&mut 
Self>, _: &mut Context<'_>) -> Poll> { + unimplemented!() + } + fn start_send(self: Pin<&mut Self>, _: T) -> Result<(), Self::Error> { + unimplemented!() + } + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + unimplemented!() + } + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + unimplemented!() + } +} + +/// Assert Send/Sync/Unpin for all public types in `futures::channel`. +pub mod channel { + use super::*; + use futures::channel::*; + + assert_impl!(mpsc::Receiver<()>: Send); + assert_not_impl!(mpsc::Receiver<*const ()>: Send); + assert_impl!(mpsc::Receiver<()>: Sync); + assert_not_impl!(mpsc::Receiver<*const ()>: Sync); + assert_impl!(mpsc::Receiver: Unpin); + + assert_impl!(mpsc::SendError: Send); + assert_impl!(mpsc::SendError: Sync); + assert_impl!(mpsc::SendError: Unpin); + + assert_impl!(mpsc::Sender<()>: Send); + assert_not_impl!(mpsc::Sender<*const ()>: Send); + assert_impl!(mpsc::Sender<()>: Sync); + assert_not_impl!(mpsc::Sender<*const ()>: Sync); + assert_impl!(mpsc::Sender: Unpin); + + assert_impl!(mpsc::TryRecvError: Send); + assert_impl!(mpsc::TryRecvError: Sync); + assert_impl!(mpsc::TryRecvError: Unpin); + + assert_impl!(mpsc::TrySendError<()>: Send); + assert_not_impl!(mpsc::TrySendError<*const ()>: Send); + assert_impl!(mpsc::TrySendError<()>: Sync); + assert_not_impl!(mpsc::TrySendError<*const ()>: Sync); + assert_impl!(mpsc::TrySendError<()>: Unpin); + assert_not_impl!(mpsc::TrySendError: Unpin); + + assert_impl!(mpsc::UnboundedReceiver<()>: Send); + assert_not_impl!(mpsc::UnboundedReceiver<*const ()>: Send); + assert_impl!(mpsc::UnboundedReceiver<()>: Sync); + assert_not_impl!(mpsc::UnboundedReceiver<*const ()>: Sync); + assert_impl!(mpsc::UnboundedReceiver: Unpin); + + assert_impl!(mpsc::UnboundedReceiver<()>: Send); + assert_not_impl!(mpsc::UnboundedReceiver<*const ()>: Send); + assert_impl!(mpsc::UnboundedReceiver<()>: Sync); + assert_not_impl!(mpsc::UnboundedReceiver<*const ()>: Sync); + assert_impl!(mpsc::UnboundedReceiver: Unpin); + + assert_impl!(oneshot::Canceled: Send); + assert_impl!(oneshot::Canceled: Sync); + assert_impl!(oneshot::Canceled: Unpin); + + assert_impl!(oneshot::Cancellation<()>: Send); + assert_not_impl!(oneshot::Cancellation<*const ()>: Send); + assert_impl!(oneshot::Cancellation<()>: Sync); + assert_not_impl!(oneshot::Cancellation<*const ()>: Sync); + assert_impl!(oneshot::Cancellation: Unpin); + + assert_impl!(oneshot::Receiver<()>: Send); + assert_not_impl!(oneshot::Receiver<*const ()>: Send); + assert_impl!(oneshot::Receiver<()>: Sync); + assert_not_impl!(oneshot::Receiver<*const ()>: Sync); + assert_impl!(oneshot::Receiver: Unpin); + + assert_impl!(oneshot::Sender<()>: Send); + assert_not_impl!(oneshot::Sender<*const ()>: Send); + assert_impl!(oneshot::Sender<()>: Sync); + assert_not_impl!(oneshot::Sender<*const ()>: Sync); + assert_impl!(oneshot::Sender: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::compat`. 
+pub mod compat { + use super::*; + use futures::compat::*; + + assert_impl!(Compat<()>: Send); + assert_not_impl!(Compat<*const ()>: Send); + assert_impl!(Compat<()>: Sync); + assert_not_impl!(Compat<*const ()>: Sync); + assert_impl!(Compat<()>: Unpin); + assert_not_impl!(Compat: Unpin); + + assert_impl!(Compat01As03<()>: Send); + assert_not_impl!(Compat01As03<*const ()>: Send); + assert_not_impl!(Compat01As03<()>: Sync); + assert_impl!(Compat01As03: Unpin); + + assert_impl!(Compat01As03Sink<(), ()>: Send); + assert_not_impl!(Compat01As03Sink<(), *const ()>: Send); + assert_not_impl!(Compat01As03Sink<*const (), ()>: Send); + assert_not_impl!(Compat01As03Sink<(), ()>: Sync); + assert_impl!(Compat01As03Sink: Unpin); + + assert_impl!(CompatSink<(), *const ()>: Send); + assert_not_impl!(CompatSink<*const (), ()>: Send); + assert_impl!(CompatSink<(), *const ()>: Sync); + assert_not_impl!(CompatSink<*const (), ()>: Sync); + assert_impl!(CompatSink<(), PhantomPinned>: Unpin); + assert_not_impl!(CompatSink: Unpin); + + assert_impl!(Executor01As03<()>: Send); + assert_not_impl!(Executor01As03<*const ()>: Send); + assert_impl!(Executor01As03<()>: Sync); + assert_not_impl!(Executor01As03<*const ()>: Sync); + assert_impl!(Executor01As03<()>: Unpin); + assert_not_impl!(Executor01As03: Unpin); + + assert_impl!(Executor01Future: Send); + assert_not_impl!(Executor01Future: Sync); + assert_impl!(Executor01Future: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::executor`. +pub mod executor { + use super::*; + use futures::executor::*; + + assert_impl!(BlockingStream: Send); + assert_not_impl!(BlockingStream: Send); + assert_impl!(BlockingStream: Sync); + assert_not_impl!(BlockingStream: Sync); + assert_impl!(BlockingStream: Unpin); + // BlockingStream requires `S: Unpin` + // assert_not_impl!(BlockingStream: Unpin); + + assert_impl!(Enter: Send); + assert_impl!(Enter: Sync); + assert_impl!(Enter: Unpin); + + assert_impl!(EnterError: Send); + assert_impl!(EnterError: Sync); + assert_impl!(EnterError: Unpin); + + assert_not_impl!(LocalPool: Send); + assert_not_impl!(LocalPool: Sync); + assert_impl!(LocalPool: Unpin); + + assert_not_impl!(LocalSpawner: Send); + assert_not_impl!(LocalSpawner: Sync); + assert_impl!(LocalSpawner: Unpin); + + assert_impl!(ThreadPool: Send); + assert_impl!(ThreadPool: Sync); + assert_impl!(ThreadPool: Unpin); + + assert_impl!(ThreadPoolBuilder: Send); + assert_impl!(ThreadPoolBuilder: Sync); + assert_impl!(ThreadPoolBuilder: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::future`. 
+pub mod future { + use super::*; + use futures::future::*; + + assert_impl!(AbortHandle: Send); + assert_impl!(AbortHandle: Sync); + assert_impl!(AbortHandle: Unpin); + + assert_impl!(AbortRegistration: Send); + assert_impl!(AbortRegistration: Sync); + assert_impl!(AbortRegistration: Unpin); + + assert_impl!(Abortable: Send); + assert_not_impl!(Abortable: Send); + assert_impl!(Abortable: Sync); + assert_not_impl!(Abortable: Sync); + assert_impl!(Abortable: Unpin); + assert_not_impl!(Abortable: Unpin); + + assert_impl!(Aborted: Send); + assert_impl!(Aborted: Sync); + assert_impl!(Aborted: Unpin); + + assert_impl!(AndThen: Send); + assert_not_impl!(AndThen: Send); + assert_not_impl!(AndThen: Send); + assert_not_impl!(AndThen: Send); + assert_impl!(AndThen: Sync); + assert_not_impl!(AndThen: Sync); + assert_not_impl!(AndThen: Sync); + assert_not_impl!(AndThen: Sync); + assert_impl!(AndThen: Unpin); + assert_not_impl!(AndThen: Unpin); + assert_not_impl!(AndThen: Unpin); + + assert_impl!(CatchUnwind: Send); + assert_not_impl!(CatchUnwind: Send); + assert_impl!(CatchUnwind: Sync); + assert_not_impl!(CatchUnwind: Sync); + assert_impl!(CatchUnwind: Unpin); + assert_not_impl!(CatchUnwind: Unpin); + + assert_impl!(ErrInto: Send); + assert_not_impl!(ErrInto: Send); + assert_impl!(ErrInto: Sync); + assert_not_impl!(ErrInto: Sync); + assert_impl!(ErrInto: Unpin); + assert_not_impl!(ErrInto: Unpin); + + assert_impl!(Flatten>: Send); + assert_not_impl!(Flatten: Send); + assert_not_impl!(Flatten: Send); + assert_impl!(Flatten>: Sync); + assert_not_impl!(Flatten: Sync); + assert_not_impl!(Flatten: Sync); + assert_impl!(Flatten>: Unpin); + assert_not_impl!(Flatten: Unpin); + assert_not_impl!(Flatten: Unpin); + + assert_impl!(FlattenSink: Send); + assert_not_impl!(FlattenSink: Send); + assert_not_impl!(FlattenSink: Send); + assert_impl!(FlattenSink: Sync); + assert_not_impl!(FlattenSink: Sync); + assert_not_impl!(FlattenSink: Sync); + assert_impl!(FlattenSink: Unpin); + assert_not_impl!(FlattenSink: Unpin); + assert_not_impl!(FlattenSink: Unpin); + + assert_impl!(FlattenStream>: Send); + assert_not_impl!(FlattenStream: Send); + assert_not_impl!(FlattenStream: Send); + assert_impl!(FlattenStream>: Sync); + assert_not_impl!(FlattenStream: Sync); + assert_not_impl!(FlattenStream: Sync); + assert_impl!(FlattenStream>: Unpin); + assert_not_impl!(FlattenStream: Unpin); + assert_not_impl!(FlattenStream: Unpin); + + assert_impl!(Fuse: Send); + assert_not_impl!(Fuse: Send); + assert_impl!(Fuse: Sync); + assert_not_impl!(Fuse: Sync); + assert_impl!(Fuse: Unpin); + assert_not_impl!(Fuse: Unpin); + + assert_impl!(FutureObj<*const ()>: Send); + assert_not_impl!(FutureObj<()>: Sync); + assert_impl!(FutureObj: Unpin); + + assert_impl!(Inspect: Send); + assert_not_impl!(Inspect: Send); + assert_not_impl!(Inspect: Send); + assert_impl!(Inspect: Sync); + assert_not_impl!(Inspect: Sync); + assert_not_impl!(Inspect: Sync); + assert_impl!(Inspect: Unpin); + assert_not_impl!(Inspect: Unpin); + + assert_impl!(InspectErr: Send); + assert_not_impl!(InspectErr: Send); + assert_not_impl!(InspectErr: Send); + assert_impl!(InspectErr: Sync); + assert_not_impl!(InspectErr: Sync); + assert_not_impl!(InspectErr: Sync); + assert_impl!(InspectErr: Unpin); + assert_not_impl!(InspectErr: Unpin); + + assert_impl!(InspectOk: Send); + assert_not_impl!(InspectOk: Send); + assert_not_impl!(InspectOk: Send); + assert_impl!(InspectOk: Sync); + assert_not_impl!(InspectOk: Sync); + assert_not_impl!(InspectOk: Sync); + assert_impl!(InspectOk: Unpin); + 
assert_not_impl!(InspectOk: Unpin); + + assert_impl!(IntoFuture: Send); + assert_not_impl!(IntoFuture: Send); + assert_impl!(IntoFuture: Sync); + assert_not_impl!(IntoFuture: Sync); + assert_impl!(IntoFuture: Unpin); + assert_not_impl!(IntoFuture: Unpin); + + assert_impl!(IntoStream: Send); + assert_not_impl!(IntoStream: Send); + assert_impl!(IntoStream: Sync); + assert_not_impl!(IntoStream: Sync); + assert_impl!(IntoStream: Unpin); + assert_not_impl!(IntoStream: Unpin); + + assert_impl!(Join, SendFuture<()>>: Send); + assert_not_impl!(Join, SendFuture>: Send); + assert_not_impl!(Join>: Send); + assert_not_impl!(Join: Send); + assert_not_impl!(Join: Send); + assert_impl!(Join, SyncFuture<()>>: Sync); + assert_not_impl!(Join, SyncFuture>: Sync); + assert_not_impl!(Join>: Sync); + assert_not_impl!(Join: Sync); + assert_not_impl!(Join: Sync); + assert_impl!(Join: Unpin); + assert_not_impl!(Join: Unpin); + assert_not_impl!(Join: Unpin); + + // Join3, Join4, Join5 are the same as Join + + assert_impl!(JoinAll>: Send); + assert_not_impl!(JoinAll: Send); + assert_not_impl!(JoinAll: Send); + assert_impl!(JoinAll>: Sync); + assert_not_impl!(JoinAll>: Sync); + assert_not_impl!(JoinAll>: Sync); + assert_not_impl!(JoinAll: Sync); + assert_impl!(JoinAll: Unpin); + + assert_impl!(Lazy<()>: Send); + assert_not_impl!(Lazy<*const ()>: Send); + assert_impl!(Lazy<()>: Sync); + assert_not_impl!(Lazy<*const ()>: Sync); + assert_impl!(Lazy: Unpin); + + assert_not_impl!(LocalFutureObj<()>: Send); + assert_not_impl!(LocalFutureObj<()>: Sync); + assert_impl!(LocalFutureObj: Unpin); + + assert_impl!(Map: Send); + assert_not_impl!(Map: Send); + assert_not_impl!(Map: Send); + assert_impl!(Map: Sync); + assert_not_impl!(Map: Sync); + assert_not_impl!(Map: Sync); + assert_impl!(Map: Unpin); + assert_not_impl!(Map: Unpin); + + assert_impl!(MapErr: Send); + assert_not_impl!(MapErr: Send); + assert_not_impl!(MapErr: Send); + assert_impl!(MapErr: Sync); + assert_not_impl!(MapErr: Sync); + assert_not_impl!(MapErr: Sync); + assert_impl!(MapErr: Unpin); + assert_not_impl!(MapErr: Unpin); + + assert_impl!(MapInto: Send); + assert_not_impl!(MapInto: Send); + assert_impl!(MapInto: Sync); + assert_not_impl!(MapInto: Sync); + assert_impl!(MapInto: Unpin); + assert_not_impl!(MapInto: Unpin); + + assert_impl!(MapOk: Send); + assert_not_impl!(MapOk: Send); + assert_not_impl!(MapOk: Send); + assert_impl!(MapOk: Sync); + assert_not_impl!(MapOk: Sync); + assert_not_impl!(MapOk: Sync); + assert_impl!(MapOk: Unpin); + assert_not_impl!(MapOk: Unpin); + + assert_impl!(MapOkOrElse: Send); + assert_not_impl!(MapOkOrElse: Send); + assert_not_impl!(MapOkOrElse: Send); + assert_not_impl!(MapOkOrElse: Send); + assert_impl!(MapOkOrElse: Sync); + assert_not_impl!(MapOkOrElse: Sync); + assert_not_impl!(MapOkOrElse: Sync); + assert_not_impl!(MapOkOrElse: Sync); + assert_impl!(MapOkOrElse: Unpin); + assert_not_impl!(MapOkOrElse: Unpin); + + assert_impl!(NeverError: Send); + assert_not_impl!(NeverError: Send); + assert_impl!(NeverError: Sync); + assert_not_impl!(NeverError: Sync); + assert_impl!(NeverError: Unpin); + assert_not_impl!(NeverError: Unpin); + + assert_impl!(OkInto: Send); + assert_not_impl!(OkInto: Send); + assert_impl!(OkInto: Sync); + assert_not_impl!(OkInto: Sync); + assert_impl!(OkInto: Unpin); + assert_not_impl!(OkInto: Unpin); + + assert_impl!(OptionFuture: Send); + assert_not_impl!(OptionFuture: Send); + assert_impl!(OptionFuture: Sync); + assert_not_impl!(OptionFuture: Sync); + assert_impl!(OptionFuture: Unpin); + 
assert_not_impl!(OptionFuture: Unpin); + + assert_impl!(OrElse: Send); + assert_not_impl!(OrElse: Send); + assert_not_impl!(OrElse: Send); + assert_not_impl!(OrElse: Send); + assert_impl!(OrElse: Sync); + assert_not_impl!(OrElse: Sync); + assert_not_impl!(OrElse: Sync); + assert_not_impl!(OrElse: Sync); + assert_impl!(OrElse: Unpin); + assert_not_impl!(OrElse: Unpin); + assert_not_impl!(OrElse: Unpin); + + assert_impl!(Pending<()>: Send); + assert_not_impl!(Pending<*const ()>: Send); + assert_impl!(Pending<()>: Sync); + assert_not_impl!(Pending<*const ()>: Sync); + assert_impl!(Pending: Unpin); + + assert_impl!(PollFn<()>: Send); + assert_not_impl!(PollFn<*const ()>: Send); + assert_impl!(PollFn<()>: Sync); + assert_not_impl!(PollFn<*const ()>: Sync); + assert_impl!(PollFn: Unpin); + + assert_impl!(PollImmediate: Send); + assert_not_impl!(PollImmediate>: Send); + assert_impl!(PollImmediate: Sync); + assert_not_impl!(PollImmediate>: Sync); + assert_impl!(PollImmediate: Unpin); + assert_not_impl!(PollImmediate: Unpin); + + assert_impl!(Ready<()>: Send); + assert_not_impl!(Ready<*const ()>: Send); + assert_impl!(Ready<()>: Sync); + assert_not_impl!(Ready<*const ()>: Sync); + assert_impl!(Ready: Unpin); + + assert_impl!(Remote>: Send); + assert_not_impl!(Remote: Send); + assert_not_impl!(Remote: Send); + assert_impl!(Remote>: Sync); + assert_not_impl!(Remote: Sync); + assert_not_impl!(Remote: Sync); + assert_impl!(Remote: Unpin); + assert_not_impl!(Remote: Unpin); + + assert_impl!(RemoteHandle<()>: Send); + assert_not_impl!(RemoteHandle<*const ()>: Send); + assert_impl!(RemoteHandle<()>: Sync); + assert_not_impl!(RemoteHandle<*const ()>: Sync); + assert_impl!(RemoteHandle: Unpin); + + assert_impl!(Select: Send); + assert_not_impl!(Select: Send); + assert_not_impl!(Select: Send); + assert_impl!(Select: Sync); + assert_not_impl!(Select: Sync); + assert_not_impl!(Select: Sync); + assert_impl!(Select: Unpin); + assert_not_impl!(Select: Unpin); + assert_not_impl!(Select: Unpin); + + assert_impl!(SelectAll: Send); + assert_not_impl!(SelectAll: Send); + assert_impl!(SelectAll: Sync); + assert_not_impl!(SelectAll: Sync); + assert_impl!(SelectAll: Unpin); + assert_not_impl!(SelectAll: Unpin); + + assert_impl!(SelectOk: Send); + assert_not_impl!(SelectOk: Send); + assert_impl!(SelectOk: Sync); + assert_not_impl!(SelectOk: Sync); + assert_impl!(SelectOk: Unpin); + assert_not_impl!(SelectOk: Unpin); + + assert_impl!(Shared>: Send); + assert_not_impl!(Shared: Send); + assert_not_impl!(Shared: Send); + assert_not_impl!(Shared>: Sync); + assert_impl!(Shared: Unpin); + + assert_impl!(Then: Send); + assert_not_impl!(Then: Send); + assert_not_impl!(Then: Send); + assert_not_impl!(Then: Send); + assert_impl!(Then: Sync); + assert_not_impl!(Then: Sync); + assert_not_impl!(Then: Sync); + assert_not_impl!(Then: Sync); + assert_impl!(Then: Unpin); + assert_not_impl!(Then: Unpin); + assert_not_impl!(Then: Unpin); + + assert_impl!(TryFlatten, ()>: Send); + assert_not_impl!(TryFlatten: Send); + assert_not_impl!(TryFlatten: Send); + assert_impl!(TryFlatten, ()>: Sync); + assert_not_impl!(TryFlatten: Sync); + assert_not_impl!(TryFlatten: Sync); + assert_impl!(TryFlatten, ()>: Unpin); + assert_not_impl!(TryFlatten: Unpin); + assert_not_impl!(TryFlatten: Unpin); + + assert_impl!(TryFlattenStream>: Send); + assert_not_impl!(TryFlattenStream: Send); + assert_not_impl!(TryFlattenStream: Send); + assert_impl!(TryFlattenStream>: Sync); + assert_not_impl!(TryFlattenStream: Sync); + assert_not_impl!(TryFlattenStream: Sync); + 
assert_impl!(TryFlattenStream>: Unpin); + assert_not_impl!(TryFlattenStream: Unpin); + assert_not_impl!(TryFlattenStream: Unpin); + + assert_impl!(TryJoin, SendTryFuture<()>>: Send); + assert_not_impl!(TryJoin, SendTryFuture>: Send); + assert_not_impl!(TryJoin>: Send); + assert_not_impl!(TryJoin: Send); + assert_not_impl!(TryJoin: Send); + assert_impl!(TryJoin, SyncTryFuture<()>>: Sync); + assert_not_impl!(TryJoin, SyncTryFuture>: Sync); + assert_not_impl!(TryJoin>: Sync); + assert_not_impl!(TryJoin: Sync); + assert_not_impl!(TryJoin: Sync); + assert_impl!(TryJoin: Unpin); + assert_not_impl!(TryJoin: Unpin); + assert_not_impl!(TryJoin: Unpin); + + // TryJoin3, TryJoin4, TryJoin5 are the same as TryJoin + + assert_impl!(TryJoinAll>: Send); + assert_not_impl!(TryJoinAll: Send); + assert_not_impl!(TryJoinAll: Send); + assert_impl!(TryJoinAll>: Sync); + assert_not_impl!(TryJoinAll>: Sync); + assert_not_impl!(TryJoinAll>: Sync); + assert_not_impl!(TryJoinAll: Sync); + assert_impl!(TryJoinAll: Unpin); + + assert_impl!(TrySelect: Send); + assert_not_impl!(TrySelect: Send); + assert_not_impl!(TrySelect: Send); + assert_impl!(TrySelect: Sync); + assert_not_impl!(TrySelect: Sync); + assert_not_impl!(TrySelect: Sync); + assert_impl!(TrySelect: Unpin); + assert_not_impl!(TrySelect: Unpin); + assert_not_impl!(TrySelect: Unpin); + + assert_impl!(UnitError: Send); + assert_not_impl!(UnitError: Send); + assert_impl!(UnitError: Sync); + assert_not_impl!(UnitError: Sync); + assert_impl!(UnitError: Unpin); + assert_not_impl!(UnitError: Unpin); + + assert_impl!(UnwrapOrElse: Send); + assert_not_impl!(UnwrapOrElse: Send); + assert_not_impl!(UnwrapOrElse: Send); + assert_impl!(UnwrapOrElse: Sync); + assert_not_impl!(UnwrapOrElse: Sync); + assert_not_impl!(UnwrapOrElse: Sync); + assert_impl!(UnwrapOrElse: Unpin); + assert_not_impl!(UnwrapOrElse: Unpin); + + assert_impl!(WeakShared>: Send); + assert_not_impl!(WeakShared: Send); + assert_not_impl!(WeakShared: Send); + assert_not_impl!(WeakShared>: Sync); + assert_impl!(WeakShared: Unpin); + + assert_impl!(Either: Send); + assert_not_impl!(Either: Send); + assert_not_impl!(Either: Send); + assert_impl!(Either: Sync); + assert_not_impl!(Either: Sync); + assert_not_impl!(Either: Sync); + assert_impl!(Either: Unpin); + assert_not_impl!(Either: Unpin); + assert_not_impl!(Either: Unpin); + + assert_impl!(MaybeDone>: Send); + assert_not_impl!(MaybeDone: Send); + assert_not_impl!(MaybeDone: Send); + assert_impl!(MaybeDone>: Sync); + assert_not_impl!(MaybeDone: Sync); + assert_not_impl!(MaybeDone: Sync); + assert_impl!(MaybeDone: Unpin); + assert_not_impl!(MaybeDone: Unpin); + + assert_impl!(TryMaybeDone>: Send); + assert_not_impl!(TryMaybeDone: Send); + assert_not_impl!(TryMaybeDone: Send); + assert_impl!(TryMaybeDone>: Sync); + assert_not_impl!(TryMaybeDone: Sync); + assert_not_impl!(TryMaybeDone: Sync); + assert_impl!(TryMaybeDone: Unpin); + assert_not_impl!(TryMaybeDone: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::io`. 
+pub mod io { + use super::*; + use futures::io::{Sink, *}; + + assert_impl!(AllowStdIo<()>: Send); + assert_not_impl!(AllowStdIo<*const ()>: Send); + assert_impl!(AllowStdIo<()>: Sync); + assert_not_impl!(AllowStdIo<*const ()>: Sync); + assert_impl!(AllowStdIo: Unpin); + + assert_impl!(BufReader<()>: Send); + assert_not_impl!(BufReader<*const ()>: Send); + assert_impl!(BufReader<()>: Sync); + assert_not_impl!(BufReader<*const ()>: Sync); + assert_impl!(BufReader<()>: Unpin); + assert_not_impl!(BufReader: Unpin); + + assert_impl!(BufWriter<()>: Send); + assert_not_impl!(BufWriter<*const ()>: Send); + assert_impl!(BufWriter<()>: Sync); + assert_not_impl!(BufWriter<*const ()>: Sync); + assert_impl!(BufWriter<()>: Unpin); + assert_not_impl!(BufWriter: Unpin); + + assert_impl!(Chain<(), ()>: Send); + assert_not_impl!(Chain<(), *const ()>: Send); + assert_not_impl!(Chain<*const (), ()>: Send); + assert_impl!(Chain<(), ()>: Sync); + assert_not_impl!(Chain<(), *const ()>: Sync); + assert_not_impl!(Chain<*const (), ()>: Sync); + assert_impl!(Chain<(), ()>: Unpin); + assert_not_impl!(Chain<(), PhantomPinned>: Unpin); + assert_not_impl!(Chain: Unpin); + + assert_impl!(Close<'_, ()>: Send); + assert_not_impl!(Close<'_, *const ()>: Send); + assert_impl!(Close<'_, ()>: Sync); + assert_not_impl!(Close<'_, *const ()>: Sync); + assert_impl!(Close<'_, ()>: Unpin); + assert_not_impl!(Close<'_, PhantomPinned>: Unpin); + + assert_impl!(Copy<(), ()>: Send); + assert_not_impl!(Copy<(), *const ()>: Send); + assert_not_impl!(Copy<*const (), ()>: Send); + assert_impl!(Copy<(), ()>: Sync); + assert_not_impl!(Copy<(), *const ()>: Sync); + assert_not_impl!(Copy<*const (), ()>: Sync); + assert_impl!(Copy<(), PhantomPinned>: Unpin); + assert_not_impl!(Copy: Unpin); + + assert_impl!(CopyBuf<(), ()>: Send); + assert_not_impl!(CopyBuf<(), *const ()>: Send); + assert_not_impl!(CopyBuf<*const (), ()>: Send); + assert_impl!(CopyBuf<(), ()>: Sync); + assert_not_impl!(CopyBuf<(), *const ()>: Sync); + assert_not_impl!(CopyBuf<*const (), ()>: Sync); + assert_impl!(CopyBuf<(), PhantomPinned>: Unpin); + assert_not_impl!(CopyBuf: Unpin); + + assert_impl!(Cursor<()>: Send); + assert_not_impl!(Cursor<*const ()>: Send); + assert_impl!(Cursor<()>: Sync); + assert_not_impl!(Cursor<*const ()>: Sync); + assert_impl!(Cursor<()>: Unpin); + assert_not_impl!(Cursor: Unpin); + + assert_impl!(Empty: Send); + assert_impl!(Empty: Sync); + assert_impl!(Empty: Unpin); + + assert_impl!(FillBuf<'_, ()>: Send); + assert_not_impl!(FillBuf<'_, *const ()>: Send); + assert_impl!(FillBuf<'_, ()>: Sync); + assert_not_impl!(FillBuf<'_, *const ()>: Sync); + assert_impl!(FillBuf<'_, PhantomPinned>: Unpin); + + assert_impl!(Flush<'_, ()>: Send); + assert_not_impl!(Flush<'_, *const ()>: Send); + assert_impl!(Flush<'_, ()>: Sync); + assert_not_impl!(Flush<'_, *const ()>: Sync); + assert_impl!(Flush<'_, ()>: Unpin); + assert_not_impl!(Flush<'_, PhantomPinned>: Unpin); + + assert_impl!(IntoSink<(), ()>: Send); + assert_not_impl!(IntoSink<(), *const ()>: Send); + assert_not_impl!(IntoSink<*const (), ()>: Send); + assert_impl!(IntoSink<(), ()>: Sync); + assert_not_impl!(IntoSink<(), *const ()>: Sync); + assert_not_impl!(IntoSink<*const (), ()>: Sync); + assert_impl!(IntoSink<(), PhantomPinned>: Unpin); + assert_not_impl!(IntoSink: Unpin); + + assert_impl!(Lines<()>: Send); + assert_not_impl!(Lines<*const ()>: Send); + assert_impl!(Lines<()>: Sync); + assert_not_impl!(Lines<*const ()>: Sync); + assert_impl!(Lines<()>: Unpin); + assert_not_impl!(Lines: Unpin); + + 
assert_impl!(Read<'_, ()>: Send); + assert_not_impl!(Read<'_, *const ()>: Send); + assert_impl!(Read<'_, ()>: Sync); + assert_not_impl!(Read<'_, *const ()>: Sync); + assert_impl!(Read<'_, ()>: Unpin); + assert_not_impl!(Read<'_, PhantomPinned>: Unpin); + + assert_impl!(ReadExact<'_, ()>: Send); + assert_not_impl!(ReadExact<'_, *const ()>: Send); + assert_impl!(ReadExact<'_, ()>: Sync); + assert_not_impl!(ReadExact<'_, *const ()>: Sync); + assert_impl!(ReadExact<'_, ()>: Unpin); + assert_not_impl!(ReadExact<'_, PhantomPinned>: Unpin); + + assert_impl!(ReadHalf<()>: Send); + assert_not_impl!(ReadHalf<*const ()>: Send); + assert_impl!(ReadHalf<()>: Sync); + assert_not_impl!(ReadHalf<*const ()>: Sync); + assert_impl!(ReadHalf: Unpin); + + assert_impl!(ReadLine<'_, ()>: Send); + assert_not_impl!(ReadLine<'_, *const ()>: Send); + assert_impl!(ReadLine<'_, ()>: Sync); + assert_not_impl!(ReadLine<'_, *const ()>: Sync); + assert_impl!(ReadLine<'_, ()>: Unpin); + assert_not_impl!(ReadLine<'_, PhantomPinned>: Unpin); + + assert_impl!(ReadToEnd<'_, ()>: Send); + assert_not_impl!(ReadToEnd<'_, *const ()>: Send); + assert_impl!(ReadToEnd<'_, ()>: Sync); + assert_not_impl!(ReadToEnd<'_, *const ()>: Sync); + assert_impl!(ReadToEnd<'_, ()>: Unpin); + assert_not_impl!(ReadToEnd<'_, PhantomPinned>: Unpin); + + assert_impl!(ReadToString<'_, ()>: Send); + assert_not_impl!(ReadToString<'_, *const ()>: Send); + assert_impl!(ReadToString<'_, ()>: Sync); + assert_not_impl!(ReadToString<'_, *const ()>: Sync); + assert_impl!(ReadToString<'_, ()>: Unpin); + assert_not_impl!(ReadToString<'_, PhantomPinned>: Unpin); + + assert_impl!(ReadUntil<'_, ()>: Send); + assert_not_impl!(ReadUntil<'_, *const ()>: Send); + assert_impl!(ReadUntil<'_, ()>: Sync); + assert_not_impl!(ReadUntil<'_, *const ()>: Sync); + assert_impl!(ReadUntil<'_, ()>: Unpin); + assert_not_impl!(ReadUntil<'_, PhantomPinned>: Unpin); + + assert_impl!(ReadVectored<'_, ()>: Send); + assert_not_impl!(ReadVectored<'_, *const ()>: Send); + assert_impl!(ReadVectored<'_, ()>: Sync); + assert_not_impl!(ReadVectored<'_, *const ()>: Sync); + assert_impl!(ReadVectored<'_, ()>: Unpin); + assert_not_impl!(ReadVectored<'_, PhantomPinned>: Unpin); + + assert_impl!(Repeat: Send); + assert_impl!(Repeat: Sync); + assert_impl!(Repeat: Unpin); + + assert_impl!(ReuniteError<()>: Send); + assert_not_impl!(ReuniteError<*const ()>: Send); + assert_impl!(ReuniteError<()>: Sync); + assert_not_impl!(ReuniteError<*const ()>: Sync); + assert_impl!(ReuniteError: Unpin); + + assert_impl!(Seek<'_, ()>: Send); + assert_not_impl!(Seek<'_, *const ()>: Send); + assert_impl!(Seek<'_, ()>: Sync); + assert_not_impl!(Seek<'_, *const ()>: Sync); + assert_impl!(Seek<'_, ()>: Unpin); + assert_not_impl!(Seek<'_, PhantomPinned>: Unpin); + + assert_impl!(SeeKRelative<'_, ()>: Send); + assert_not_impl!(SeeKRelative<'_, *const ()>: Send); + assert_impl!(SeeKRelative<'_, ()>: Sync); + assert_not_impl!(SeeKRelative<'_, *const ()>: Sync); + assert_impl!(SeeKRelative<'_, PhantomPinned>: Unpin); + + assert_impl!(Sink: Send); + assert_impl!(Sink: Sync); + assert_impl!(Sink: Unpin); + + assert_impl!(Take<()>: Send); + assert_not_impl!(Take<*const ()>: Send); + assert_impl!(Take<()>: Sync); + assert_not_impl!(Take<*const ()>: Sync); + assert_impl!(Take<()>: Unpin); + assert_not_impl!(Take: Unpin); + + assert_impl!(Window<()>: Send); + assert_not_impl!(Window<*const ()>: Send); + assert_impl!(Window<()>: Sync); + assert_not_impl!(Window<*const ()>: Sync); + assert_impl!(Window<()>: Unpin); + 
assert_not_impl!(Window: Unpin); + + assert_impl!(Write<'_, ()>: Send); + assert_not_impl!(Write<'_, *const ()>: Send); + assert_impl!(Write<'_, ()>: Sync); + assert_not_impl!(Write<'_, *const ()>: Sync); + assert_impl!(Write<'_, ()>: Unpin); + assert_not_impl!(Write<'_, PhantomPinned>: Unpin); + + assert_impl!(WriteAll<'_, ()>: Send); + assert_not_impl!(WriteAll<'_, *const ()>: Send); + assert_impl!(WriteAll<'_, ()>: Sync); + assert_not_impl!(WriteAll<'_, *const ()>: Sync); + assert_impl!(WriteAll<'_, ()>: Unpin); + assert_not_impl!(WriteAll<'_, PhantomPinned>: Unpin); + + #[cfg(feature = "write-all-vectored")] + assert_impl!(WriteAllVectored<'_, ()>: Send); + #[cfg(feature = "write-all-vectored")] + assert_not_impl!(WriteAllVectored<'_, *const ()>: Send); + #[cfg(feature = "write-all-vectored")] + assert_impl!(WriteAllVectored<'_, ()>: Sync); + #[cfg(feature = "write-all-vectored")] + assert_not_impl!(WriteAllVectored<'_, *const ()>: Sync); + #[cfg(feature = "write-all-vectored")] + assert_impl!(WriteAllVectored<'_, ()>: Unpin); + // WriteAllVectored requires `W: Unpin` + // #[cfg(feature = "write-all-vectored")] + // assert_not_impl!(WriteAllVectored<'_, PhantomPinned>: Unpin); + + assert_impl!(WriteHalf<()>: Send); + assert_not_impl!(WriteHalf<*const ()>: Send); + assert_impl!(WriteHalf<()>: Sync); + assert_not_impl!(WriteHalf<*const ()>: Sync); + assert_impl!(WriteHalf: Unpin); + + assert_impl!(WriteVectored<'_, ()>: Send); + assert_not_impl!(WriteVectored<'_, *const ()>: Send); + assert_impl!(WriteVectored<'_, ()>: Sync); + assert_not_impl!(WriteVectored<'_, *const ()>: Sync); + assert_impl!(WriteVectored<'_, ()>: Unpin); + assert_not_impl!(WriteVectored<'_, PhantomPinned>: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::lock`. 
+pub mod lock { + use super::*; + use futures::lock::*; + + #[cfg(feature = "bilock")] + assert_impl!(BiLock<()>: Send); + #[cfg(feature = "bilock")] + assert_not_impl!(BiLock<*const ()>: Send); + #[cfg(feature = "bilock")] + assert_impl!(BiLock<()>: Sync); + #[cfg(feature = "bilock")] + assert_not_impl!(BiLock<*const ()>: Sync); + #[cfg(feature = "bilock")] + assert_impl!(BiLock: Unpin); + + #[cfg(feature = "bilock")] + assert_impl!(BiLockAcquire<'_, ()>: Send); + #[cfg(feature = "bilock")] + assert_not_impl!(BiLockAcquire<'_, *const ()>: Send); + #[cfg(feature = "bilock")] + assert_impl!(BiLockAcquire<'_, ()>: Sync); + #[cfg(feature = "bilock")] + assert_not_impl!(BiLockAcquire<'_, *const ()>: Sync); + #[cfg(feature = "bilock")] + assert_impl!(BiLockAcquire<'_, PhantomPinned>: Unpin); + + #[cfg(feature = "bilock")] + assert_impl!(BiLockGuard<'_, ()>: Send); + #[cfg(feature = "bilock")] + assert_not_impl!(BiLockGuard<'_, *const ()>: Send); + #[cfg(feature = "bilock")] + assert_impl!(BiLockGuard<'_, ()>: Sync); + #[cfg(feature = "bilock")] + assert_not_impl!(BiLockGuard<'_, *const ()>: Sync); + #[cfg(feature = "bilock")] + assert_impl!(BiLockGuard<'_, PhantomPinned>: Unpin); + + assert_impl!(MappedMutexGuard<'_, (), ()>: Send); + assert_not_impl!(MappedMutexGuard<'_, (), *const ()>: Send); + assert_not_impl!(MappedMutexGuard<'_, *const (), ()>: Send); + assert_impl!(MappedMutexGuard<'_, (), ()>: Sync); + assert_not_impl!(MappedMutexGuard<'_, (), *const ()>: Sync); + assert_not_impl!(MappedMutexGuard<'_, *const (), ()>: Sync); + assert_impl!(MappedMutexGuard<'_, PhantomPinned, PhantomPinned>: Unpin); + + assert_impl!(Mutex<()>: Send); + assert_not_impl!(Mutex<*const ()>: Send); + assert_impl!(Mutex<()>: Sync); + assert_not_impl!(Mutex<*const ()>: Sync); + assert_impl!(Mutex<()>: Unpin); + assert_not_impl!(Mutex: Unpin); + + assert_impl!(MutexGuard<'_, ()>: Send); + assert_not_impl!(MutexGuard<'_, *const ()>: Send); + assert_impl!(MutexGuard<'_, ()>: Sync); + assert_not_impl!(MutexGuard<'_, *const ()>: Sync); + assert_impl!(MutexGuard<'_, PhantomPinned>: Unpin); + + assert_impl!(MutexLockFuture<'_, ()>: Send); + assert_not_impl!(MutexLockFuture<'_, *const ()>: Send); + assert_impl!(MutexLockFuture<'_, *const ()>: Sync); + assert_impl!(MutexLockFuture<'_, PhantomPinned>: Unpin); + + #[cfg(feature = "bilock")] + assert_impl!(ReuniteError<()>: Send); + #[cfg(feature = "bilock")] + assert_not_impl!(ReuniteError<*const ()>: Send); + #[cfg(feature = "bilock")] + assert_impl!(ReuniteError<()>: Sync); + #[cfg(feature = "bilock")] + assert_not_impl!(ReuniteError<*const ()>: Sync); + #[cfg(feature = "bilock")] + assert_impl!(ReuniteError: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::sink`. 
+pub mod sink { + use super::*; + use futures::sink::{self, *}; + use std::marker::Send; + + assert_impl!(Buffer<(), ()>: Send); + assert_not_impl!(Buffer<(), *const ()>: Send); + assert_not_impl!(Buffer<*const (), ()>: Send); + assert_impl!(Buffer<(), ()>: Sync); + assert_not_impl!(Buffer<(), *const ()>: Sync); + assert_not_impl!(Buffer<*const (), ()>: Sync); + assert_impl!(Buffer<(), PhantomPinned>: Unpin); + assert_not_impl!(Buffer: Unpin); + + assert_impl!(Close<'_, (), *const ()>: Send); + assert_not_impl!(Close<'_, *const (), ()>: Send); + assert_impl!(Close<'_, (), *const ()>: Sync); + assert_not_impl!(Close<'_, *const (), ()>: Sync); + assert_impl!(Close<'_, (), PhantomPinned>: Unpin); + assert_not_impl!(Close<'_, PhantomPinned, ()>: Unpin); + + assert_impl!(Drain<()>: Send); + assert_not_impl!(Drain<*const ()>: Send); + assert_impl!(Drain<()>: Sync); + assert_not_impl!(Drain<*const ()>: Sync); + assert_impl!(Drain: Unpin); + + assert_impl!(Fanout<(), ()>: Send); + assert_not_impl!(Fanout<(), *const ()>: Send); + assert_not_impl!(Fanout<*const (), ()>: Send); + assert_impl!(Fanout<(), ()>: Sync); + assert_not_impl!(Fanout<(), *const ()>: Sync); + assert_not_impl!(Fanout<*const (), ()>: Sync); + assert_impl!(Fanout<(), ()>: Unpin); + assert_not_impl!(Fanout<(), PhantomPinned>: Unpin); + assert_not_impl!(Fanout: Unpin); + + assert_impl!(Feed<'_, (), ()>: Send); + assert_not_impl!(Feed<'_, (), *const ()>: Send); + assert_not_impl!(Feed<'_, *const (), ()>: Send); + assert_impl!(Feed<'_, (), ()>: Sync); + assert_not_impl!(Feed<'_, (), *const ()>: Sync); + assert_not_impl!(Feed<'_, *const (), ()>: Sync); + assert_impl!(Feed<'_, (), PhantomPinned>: Unpin); + assert_not_impl!(Feed<'_, PhantomPinned, ()>: Unpin); + + assert_impl!(Flush<'_, (), *const ()>: Send); + assert_not_impl!(Flush<'_, *const (), ()>: Send); + assert_impl!(Flush<'_, (), *const ()>: Sync); + assert_not_impl!(Flush<'_, *const (), ()>: Sync); + assert_impl!(Flush<'_, (), PhantomPinned>: Unpin); + assert_not_impl!(Flush<'_, PhantomPinned, ()>: Unpin); + + assert_impl!(sink::Send<'_, (), ()>: Send); + assert_not_impl!(sink::Send<'_, (), *const ()>: Send); + assert_not_impl!(sink::Send<'_, *const (), ()>: Send); + assert_impl!(sink::Send<'_, (), ()>: Sync); + assert_not_impl!(sink::Send<'_, (), *const ()>: Sync); + assert_not_impl!(sink::Send<'_, *const (), ()>: Sync); + assert_impl!(sink::Send<'_, (), PhantomPinned>: Unpin); + assert_not_impl!(sink::Send<'_, PhantomPinned, ()>: Unpin); + + assert_impl!(SendAll<'_, (), SendTryStream<()>>: Send); + assert_not_impl!(SendAll<'_, (), SendTryStream>: Send); + assert_not_impl!(SendAll<'_, (), LocalTryStream>: Send); + assert_not_impl!(SendAll<'_, *const (), SendTryStream<()>>: Send); + assert_impl!(SendAll<'_, (), SyncTryStream<()>>: Sync); + assert_not_impl!(SendAll<'_, (), SyncTryStream>: Sync); + assert_not_impl!(SendAll<'_, (), LocalTryStream>: Sync); + assert_not_impl!(SendAll<'_, *const (), SyncTryStream<()>>: Sync); + assert_impl!(SendAll<'_, (), UnpinTryStream>: Unpin); + assert_not_impl!(SendAll<'_, PhantomPinned, UnpinTryStream>: Unpin); + assert_not_impl!(SendAll<'_, (), PinnedTryStream>: Unpin); + + assert_impl!(SinkErrInto: Send); + assert_not_impl!(SinkErrInto, (), ()>: Send); + assert_impl!(SinkErrInto: Sync); + assert_not_impl!(SinkErrInto, (), ()>: Sync); + assert_impl!(SinkErrInto: Unpin); + assert_not_impl!(SinkErrInto, (), ()>: Unpin); + + assert_impl!(SinkMapErr: Send); + assert_not_impl!(SinkMapErr: Send); + assert_not_impl!(SinkMapErr, ()>: Send); + 
assert_impl!(SinkMapErr: Sync); + assert_not_impl!(SinkMapErr: Sync); + assert_not_impl!(SinkMapErr, ()>: Sync); + assert_impl!(SinkMapErr: Unpin); + assert_not_impl!(SinkMapErr, ()>: Unpin); + + assert_impl!(Unfold<(), (), ()>: Send); + assert_not_impl!(Unfold<*const (), (), ()>: Send); + assert_not_impl!(Unfold<(), *const (), ()>: Send); + assert_not_impl!(Unfold<(), (), *const ()>: Send); + assert_impl!(Unfold<(), (), ()>: Sync); + assert_not_impl!(Unfold<*const (), (), ()>: Sync); + assert_not_impl!(Unfold<(), *const (), ()>: Sync); + assert_not_impl!(Unfold<(), (), *const ()>: Sync); + assert_impl!(Unfold: Unpin); + assert_not_impl!(Unfold, (), PhantomPinned>: Unpin); + + assert_impl!(With<(), *const (), *const (), (), ()>: Send); + assert_not_impl!(With<*const (), (), (), (), ()>: Send); + assert_not_impl!(With<(), (), (), *const (), ()>: Send); + assert_not_impl!(With<(), (), (), (), *const ()>: Send); + assert_impl!(With<(), *const (), *const (), (), ()>: Sync); + assert_not_impl!(With<*const (), (), (), (), ()>: Sync); + assert_not_impl!(With<(), (), (), *const (), ()>: Sync); + assert_not_impl!(With<(), (), (), (), *const ()>: Sync); + assert_impl!(With<(), PhantomPinned, PhantomPinned, (), PhantomPinned>: Unpin); + assert_not_impl!(With: Unpin); + assert_not_impl!(With<(), (), (), PhantomPinned, ()>: Unpin); + + assert_impl!(WithFlatMap<(), (), *const (), (), ()>: Send); + assert_not_impl!(WithFlatMap<*const (), (), (), (), ()>: Send); + assert_not_impl!(WithFlatMap<(), *const (), (), (), ()>: Send); + assert_not_impl!(WithFlatMap<(), (), (), *const (), ()>: Send); + assert_not_impl!(WithFlatMap<(), (), (), (), *const ()>: Send); + assert_impl!(WithFlatMap<(), (), *const (), (), ()>: Sync); + assert_not_impl!(WithFlatMap<*const (), (), (), (), ()>: Sync); + assert_not_impl!(WithFlatMap<(), *const (), (), (), ()>: Sync); + assert_not_impl!(WithFlatMap<(), (), (), *const (), ()>: Sync); + assert_not_impl!(WithFlatMap<(), (), (), (), *const ()>: Sync); + assert_impl!(WithFlatMap<(), PhantomPinned, PhantomPinned, (), PhantomPinned>: Unpin); + assert_not_impl!(WithFlatMap: Unpin); + assert_not_impl!(WithFlatMap<(), (), (), PhantomPinned, ()>: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::stream`. 
+pub mod stream { + use super::*; + use futures::{io, stream::*}; + + assert_impl!(AndThen<(), (), ()>: Send); + assert_not_impl!(AndThen<*const (), (), ()>: Send); + assert_not_impl!(AndThen<(), *const (), ()>: Send); + assert_not_impl!(AndThen<(), (), *const ()>: Send); + assert_impl!(AndThen<(), (), ()>: Sync); + assert_not_impl!(AndThen<*const (), (), ()>: Sync); + assert_not_impl!(AndThen<(), *const (), ()>: Sync); + assert_not_impl!(AndThen<(), (), *const ()>: Sync); + assert_impl!(AndThen<(), (), PhantomPinned>: Unpin); + assert_not_impl!(AndThen: Unpin); + assert_not_impl!(AndThen<(), PhantomPinned, ()>: Unpin); + + assert_impl!(BufferUnordered>: Send); + assert_not_impl!(BufferUnordered: Send); + assert_not_impl!(BufferUnordered: Send); + assert_impl!(BufferUnordered>: Sync); + assert_not_impl!(BufferUnordered: Sync); + assert_not_impl!(BufferUnordered: Sync); + assert_impl!(BufferUnordered: Unpin); + assert_not_impl!(BufferUnordered: Unpin); + + assert_impl!(Buffered>>: Send); + assert_not_impl!(Buffered>: Send); + assert_not_impl!(Buffered>: Send); + assert_not_impl!(Buffered>>: Send); + assert_impl!(Buffered>>: Sync); + assert_not_impl!(Buffered>>: Sync); + assert_not_impl!(Buffered>>: Sync); + assert_impl!(Buffered>: Unpin); + assert_not_impl!(Buffered>: Unpin); + + assert_impl!(CatchUnwind: Send); + assert_not_impl!(CatchUnwind: Send); + assert_impl!(CatchUnwind: Sync); + assert_not_impl!(CatchUnwind: Sync); + assert_impl!(CatchUnwind: Unpin); + assert_not_impl!(CatchUnwind: Unpin); + + assert_impl!(Chain<(), ()>: Send); + assert_not_impl!(Chain<(), *const ()>: Send); + assert_not_impl!(Chain<*const (), ()>: Send); + assert_impl!(Chain<(), ()>: Sync); + assert_not_impl!(Chain<(), *const ()>: Sync); + assert_not_impl!(Chain<*const (), ()>: Sync); + assert_impl!(Chain<(), ()>: Unpin); + assert_not_impl!(Chain<(), PhantomPinned>: Unpin); + assert_not_impl!(Chain: Unpin); + + assert_impl!(Chunks>: Send); + assert_not_impl!(Chunks: Send); + assert_not_impl!(Chunks: Send); + assert_impl!(Chunks>: Sync); + assert_not_impl!(Chunks: Sync); + assert_not_impl!(Chunks: Sync); + assert_impl!(Chunks: Unpin); + assert_not_impl!(Chunks: Unpin); + + assert_impl!(Collect<(), ()>: Send); + assert_not_impl!(Collect<*const (), ()>: Send); + assert_not_impl!(Collect<(), *const ()>: Send); + assert_impl!(Collect<(), ()>: Sync); + assert_not_impl!(Collect<*const (), ()>: Sync); + assert_not_impl!(Collect<(), *const ()>: Sync); + assert_impl!(Collect<(), PhantomPinned>: Unpin); + assert_not_impl!(Collect: Unpin); + + assert_impl!(Concat>: Send); + assert_not_impl!(Concat: Send); + assert_not_impl!(Concat: Send); + assert_impl!(Concat>: Sync); + assert_not_impl!(Concat: Sync); + assert_not_impl!(Concat: Sync); + assert_impl!(Concat: Unpin); + assert_not_impl!(Concat: Unpin); + + assert_impl!(Cycle<()>: Send); + assert_not_impl!(Cycle<*const ()>: Send); + assert_impl!(Cycle<()>: Sync); + assert_not_impl!(Cycle<*const ()>: Sync); + assert_impl!(Cycle<()>: Unpin); + assert_not_impl!(Cycle: Unpin); + + assert_impl!(Empty<()>: Send); + assert_not_impl!(Empty<*const ()>: Send); + assert_impl!(Empty<()>: Sync); + assert_not_impl!(Empty<*const ()>: Sync); + assert_impl!(Empty: Unpin); + + assert_impl!(Enumerate<()>: Send); + assert_not_impl!(Enumerate<*const ()>: Send); + assert_impl!(Enumerate<()>: Sync); + assert_not_impl!(Enumerate<*const ()>: Sync); + assert_impl!(Enumerate<()>: Unpin); + assert_not_impl!(Enumerate: Unpin); + + assert_impl!(ErrInto<(), *const ()>: Send); + assert_not_impl!(ErrInto<*const 
(), ()>: Send); + assert_impl!(ErrInto<(), *const ()>: Sync); + assert_not_impl!(ErrInto<*const (), ()>: Sync); + assert_impl!(ErrInto<(), PhantomPinned>: Unpin); + assert_not_impl!(ErrInto: Unpin); + + assert_impl!(Filter, (), ()>: Send); + assert_not_impl!(Filter, (), ()>: Send); + assert_not_impl!(Filter: Send); + assert_not_impl!(Filter, *const (), ()>: Send); + assert_not_impl!(Filter, (), *const ()>: Send); + assert_impl!(Filter, (), ()>: Sync); + assert_not_impl!(Filter, (), ()>: Sync); + assert_not_impl!(Filter: Sync); + assert_not_impl!(Filter, *const (), ()>: Sync); + assert_not_impl!(Filter, (), *const ()>: Sync); + assert_impl!(Filter: Unpin); + assert_not_impl!(Filter: Unpin); + assert_not_impl!(Filter: Unpin); + + assert_impl!(FilterMap<(), (), ()>: Send); + assert_not_impl!(FilterMap<*const (), (), ()>: Send); + assert_not_impl!(FilterMap<(), *const (), ()>: Send); + assert_not_impl!(FilterMap<(), (), *const ()>: Send); + assert_impl!(FilterMap<(), (), ()>: Sync); + assert_not_impl!(FilterMap<*const (), (), ()>: Sync); + assert_not_impl!(FilterMap<(), *const (), ()>: Sync); + assert_not_impl!(FilterMap<(), (), *const ()>: Sync); + assert_impl!(FilterMap<(), (), PhantomPinned>: Unpin); + assert_not_impl!(FilterMap: Unpin); + assert_not_impl!(FilterMap<(), PhantomPinned, ()>: Unpin); + + assert_impl!(FlatMap<(), (), ()>: Send); + assert_not_impl!(FlatMap<*const (), (), ()>: Send); + assert_not_impl!(FlatMap<(), *const (), ()>: Send); + assert_not_impl!(FlatMap<(), (), *const ()>: Send); + assert_impl!(FlatMap<(), (), ()>: Sync); + assert_not_impl!(FlatMap<*const (), (), ()>: Sync); + assert_not_impl!(FlatMap<(), *const (), ()>: Sync); + assert_not_impl!(FlatMap<(), (), *const ()>: Sync); + assert_impl!(FlatMap<(), (), PhantomPinned>: Unpin); + assert_not_impl!(FlatMap: Unpin); + assert_not_impl!(FlatMap<(), PhantomPinned, ()>: Unpin); + + assert_impl!(Flatten>: Send); + assert_not_impl!(Flatten: Send); + assert_not_impl!(Flatten: Send); + assert_impl!(Flatten>: Sync); + assert_not_impl!(Flatten>: Sync); + assert_not_impl!(Flatten>: Sync); + assert_impl!(Flatten>: Unpin); + assert_not_impl!(Flatten: Unpin); + assert_not_impl!(Flatten: Unpin); + + assert_impl!(Fold<(), (), (), ()>: Send); + assert_not_impl!(Fold<*const (), (), (), ()>: Send); + assert_not_impl!(Fold<(), *const (), (), ()>: Send); + assert_not_impl!(Fold<(), (), *const (), ()>: Send); + assert_not_impl!(Fold<(), (), (), *const ()>: Send); + assert_impl!(Fold<(), (), (), ()>: Sync); + assert_not_impl!(Fold<*const (), (), (), ()>: Sync); + assert_not_impl!(Fold<(), *const (), (), ()>: Sync); + assert_not_impl!(Fold<(), (), *const (), ()>: Sync); + assert_not_impl!(Fold<(), (), (), *const ()>: Sync); + assert_impl!(Fold<(), (), PhantomPinned, PhantomPinned>: Unpin); + assert_not_impl!(Fold: Unpin); + assert_not_impl!(Fold<(), PhantomPinned, (), ()>: Unpin); + + assert_impl!(ForEach<(), (), ()>: Send); + assert_not_impl!(ForEach<*const (), (), ()>: Send); + assert_not_impl!(ForEach<(), *const (), ()>: Send); + assert_not_impl!(ForEach<(), (), *const ()>: Send); + assert_impl!(ForEach<(), (), ()>: Sync); + assert_not_impl!(ForEach<*const (), (), ()>: Sync); + assert_not_impl!(ForEach<(), *const (), ()>: Sync); + assert_not_impl!(ForEach<(), (), *const ()>: Sync); + assert_impl!(ForEach<(), (), PhantomPinned>: Unpin); + assert_not_impl!(ForEach: Unpin); + assert_not_impl!(ForEach<(), PhantomPinned, ()>: Unpin); + + assert_impl!(ForEachConcurrent<(), (), ()>: Send); + assert_not_impl!(ForEachConcurrent<*const (), (), 
()>: Send); + assert_not_impl!(ForEachConcurrent<(), *const (), ()>: Send); + assert_not_impl!(ForEachConcurrent<(), (), *const ()>: Send); + assert_impl!(ForEachConcurrent<(), (), ()>: Sync); + assert_not_impl!(ForEachConcurrent<*const (), (), ()>: Sync); + assert_not_impl!(ForEachConcurrent<(), *const (), ()>: Sync); + assert_not_impl!(ForEachConcurrent<(), (), *const ()>: Sync); + assert_impl!(ForEachConcurrent<(), PhantomPinned, PhantomPinned>: Unpin); + assert_not_impl!(ForEachConcurrent: Unpin); + + assert_impl!(Forward, ()>: Send); + assert_not_impl!(Forward: Send); + assert_not_impl!(Forward, *const ()>: Send); + assert_not_impl!(Forward: Send); + assert_impl!(Forward, ()>: Sync); + assert_not_impl!(Forward: Sync); + assert_not_impl!(Forward, *const ()>: Sync); + assert_not_impl!(Forward: Sync); + assert_impl!(Forward: Unpin); + assert_not_impl!(Forward: Unpin); + assert_not_impl!(Forward: Unpin); + + assert_impl!(Fuse<()>: Send); + assert_not_impl!(Fuse<*const ()>: Send); + assert_impl!(Fuse<()>: Sync); + assert_not_impl!(Fuse<*const ()>: Sync); + assert_impl!(Fuse<()>: Unpin); + assert_not_impl!(Fuse: Unpin); + + assert_impl!(FuturesOrdered>: Send); + assert_not_impl!(FuturesOrdered: Send); + assert_not_impl!(FuturesOrdered: Send); + assert_impl!(FuturesOrdered>: Sync); + assert_not_impl!(FuturesOrdered>: Sync); + assert_not_impl!(FuturesOrdered>: Sync); + assert_not_impl!(FuturesOrdered: Sync); + assert_impl!(FuturesOrdered: Unpin); + + assert_impl!(FuturesUnordered<()>: Send); + assert_not_impl!(FuturesUnordered<*const ()>: Send); + assert_impl!(FuturesUnordered<()>: Sync); + assert_not_impl!(FuturesUnordered<*const ()>: Sync); + assert_impl!(FuturesUnordered: Unpin); + + assert_impl!(Inspect<(), ()>: Send); + assert_not_impl!(Inspect<*const (), ()>: Send); + assert_not_impl!(Inspect<(), *const ()>: Send); + assert_impl!(Inspect<(), ()>: Sync); + assert_not_impl!(Inspect<*const (), ()>: Sync); + assert_not_impl!(Inspect<(), *const ()>: Sync); + assert_impl!(Inspect<(), PhantomPinned>: Unpin); + assert_not_impl!(Inspect: Unpin); + + assert_impl!(InspectErr<(), ()>: Send); + assert_not_impl!(InspectErr<*const (), ()>: Send); + assert_not_impl!(InspectErr<(), *const ()>: Send); + assert_impl!(InspectErr<(), ()>: Sync); + assert_not_impl!(InspectErr<*const (), ()>: Sync); + assert_not_impl!(InspectErr<(), *const ()>: Sync); + assert_impl!(InspectErr<(), PhantomPinned>: Unpin); + assert_not_impl!(InspectErr: Unpin); + + assert_impl!(InspectOk<(), ()>: Send); + assert_not_impl!(InspectOk<*const (), ()>: Send); + assert_not_impl!(InspectOk<(), *const ()>: Send); + assert_impl!(InspectOk<(), ()>: Sync); + assert_not_impl!(InspectOk<*const (), ()>: Sync); + assert_not_impl!(InspectOk<(), *const ()>: Sync); + assert_impl!(InspectOk<(), PhantomPinned>: Unpin); + assert_not_impl!(InspectOk: Unpin); + + assert_impl!(IntoAsyncRead, io::Error>>: Send); + assert_not_impl!(IntoAsyncRead, io::Error>>: Send); + assert_impl!(IntoAsyncRead, io::Error>>: Sync); + assert_not_impl!(IntoAsyncRead, io::Error>>: Sync); + assert_impl!(IntoAsyncRead, io::Error>>: Unpin); + // IntoAsyncRead requires `St: Unpin` + // assert_not_impl!(IntoAsyncRead, io::Error>>: Unpin); + + assert_impl!(IntoStream<()>: Send); + assert_not_impl!(IntoStream<*const ()>: Send); + assert_impl!(IntoStream<()>: Sync); + assert_not_impl!(IntoStream<*const ()>: Sync); + assert_impl!(IntoStream<()>: Unpin); + assert_not_impl!(IntoStream: Unpin); + + assert_impl!(Iter<()>: Send); + assert_not_impl!(Iter<*const ()>: Send); + 
assert_impl!(Iter<()>: Sync); + assert_not_impl!(Iter<*const ()>: Sync); + assert_impl!(Iter: Unpin); + + assert_impl!(Map<(), ()>: Send); + assert_not_impl!(Map<*const (), ()>: Send); + assert_not_impl!(Map<(), *const ()>: Send); + assert_impl!(Map<(), ()>: Sync); + assert_not_impl!(Map<*const (), ()>: Sync); + assert_not_impl!(Map<(), *const ()>: Sync); + assert_impl!(Map<(), PhantomPinned>: Unpin); + assert_not_impl!(Map: Unpin); + + assert_impl!(MapErr<(), ()>: Send); + assert_not_impl!(MapErr<*const (), ()>: Send); + assert_not_impl!(MapErr<(), *const ()>: Send); + assert_impl!(MapErr<(), ()>: Sync); + assert_not_impl!(MapErr<*const (), ()>: Sync); + assert_not_impl!(MapErr<(), *const ()>: Sync); + assert_impl!(MapErr<(), PhantomPinned>: Unpin); + assert_not_impl!(MapErr: Unpin); + + assert_impl!(MapOk<(), ()>: Send); + assert_not_impl!(MapOk<*const (), ()>: Send); + assert_not_impl!(MapOk<(), *const ()>: Send); + assert_impl!(MapOk<(), ()>: Sync); + assert_not_impl!(MapOk<*const (), ()>: Sync); + assert_not_impl!(MapOk<(), *const ()>: Sync); + assert_impl!(MapOk<(), PhantomPinned>: Unpin); + assert_not_impl!(MapOk: Unpin); + + assert_impl!(Next<'_, ()>: Send); + assert_not_impl!(Next<'_, *const ()>: Send); + assert_impl!(Next<'_, ()>: Sync); + assert_not_impl!(Next<'_, *const ()>: Sync); + assert_impl!(Next<'_, ()>: Unpin); + assert_not_impl!(Next<'_, PhantomPinned>: Unpin); + + assert_impl!(NextIf<'_, SendStream<()>, ()>: Send); + assert_not_impl!(NextIf<'_, SendStream<()>, *const ()>: Send); + assert_not_impl!(NextIf<'_, SendStream, ()>: Send); + assert_not_impl!(NextIf<'_, LocalStream<()>, ()>: Send); + assert_impl!(NextIf<'_, SyncStream<()>, ()>: Sync); + assert_not_impl!(NextIf<'_, SyncStream<()>, *const ()>: Sync); + assert_not_impl!(NextIf<'_, SyncStream, ()>: Sync); + assert_not_impl!(NextIf<'_, LocalStream<()>, ()>: Send); + assert_impl!(NextIf<'_, PinnedStream, PhantomPinned>: Unpin); + + assert_impl!(NextIfEq<'_, SendStream<()>, ()>: Send); + assert_not_impl!(NextIfEq<'_, SendStream<()>, *const ()>: Send); + assert_not_impl!(NextIfEq<'_, SendStream, ()>: Send); + assert_not_impl!(NextIfEq<'_, LocalStream<()>, ()>: Send); + assert_impl!(NextIfEq<'_, SyncStream<()>, ()>: Sync); + assert_not_impl!(NextIfEq<'_, SyncStream<()>, *const ()>: Sync); + assert_not_impl!(NextIfEq<'_, SyncStream, ()>: Sync); + assert_not_impl!(NextIfEq<'_, LocalStream<()>, ()>: Send); + assert_impl!(NextIfEq<'_, PinnedStream, PhantomPinned>: Unpin); + + assert_impl!(Once<()>: Send); + assert_not_impl!(Once<*const ()>: Send); + assert_impl!(Once<()>: Sync); + assert_not_impl!(Once<*const ()>: Sync); + assert_impl!(Once<()>: Unpin); + assert_not_impl!(Once: Unpin); + + assert_impl!(OrElse<(), (), ()>: Send); + assert_not_impl!(OrElse<*const (), (), ()>: Send); + assert_not_impl!(OrElse<(), *const (), ()>: Send); + assert_not_impl!(OrElse<(), (), *const ()>: Send); + assert_impl!(OrElse<(), (), ()>: Sync); + assert_not_impl!(OrElse<*const (), (), ()>: Sync); + assert_not_impl!(OrElse<(), *const (), ()>: Sync); + assert_not_impl!(OrElse<(), (), *const ()>: Sync); + assert_impl!(OrElse<(), (), PhantomPinned>: Unpin); + assert_not_impl!(OrElse: Unpin); + assert_not_impl!(OrElse<(), PhantomPinned, ()>: Unpin); + + assert_impl!(Peek<'_, SendStream<()>>: Send); + assert_not_impl!(Peek<'_, SendStream>: Send); + assert_not_impl!(Peek<'_, LocalStream<()>>: Send); + assert_impl!(Peek<'_, SyncStream<()>>: Sync); + assert_not_impl!(Peek<'_, SyncStream>: Sync); + assert_not_impl!(Peek<'_, LocalStream<()>>: Sync); + 
assert_impl!(Peek<'_, PinnedStream>: Unpin); + + assert_impl!(PeekMut<'_, SendStream<()>>: Send); + assert_not_impl!(PeekMut<'_, SendStream>: Send); + assert_not_impl!(PeekMut<'_, LocalStream<()>>: Send); + assert_impl!(PeekMut<'_, SyncStream<()>>: Sync); + assert_not_impl!(PeekMut<'_, SyncStream>: Sync); + assert_not_impl!(PeekMut<'_, LocalStream<()>>: Sync); + assert_impl!(PeekMut<'_, PinnedStream>: Unpin); + + assert_impl!(Peekable>: Send); + assert_not_impl!(Peekable: Send); + assert_not_impl!(Peekable: Send); + assert_impl!(Peekable>: Sync); + assert_not_impl!(Peekable: Sync); + assert_not_impl!(Peekable: Sync); + assert_impl!(Peekable: Unpin); + assert_not_impl!(Peekable: Unpin); + + assert_impl!(Pending<()>: Send); + assert_not_impl!(Pending<*const ()>: Send); + assert_impl!(Pending<()>: Sync); + assert_not_impl!(Pending<*const ()>: Sync); + assert_impl!(Pending: Unpin); + + assert_impl!(PollFn<()>: Send); + assert_not_impl!(PollFn<*const ()>: Send); + assert_impl!(PollFn<()>: Sync); + assert_not_impl!(PollFn<*const ()>: Sync); + assert_impl!(PollFn: Unpin); + + assert_impl!(PollImmediate: Send); + assert_not_impl!(PollImmediate>: Send); + assert_impl!(PollImmediate: Sync); + assert_not_impl!(PollImmediate>: Sync); + assert_impl!(PollImmediate: Unpin); + assert_not_impl!(PollImmediate: Unpin); + + assert_impl!(ReadyChunks>: Send); + assert_impl!(ReadyChunks: Send); + assert_not_impl!(ReadyChunks: Send); + assert_impl!(ReadyChunks>: Sync); + assert_impl!(ReadyChunks: Sync); + assert_not_impl!(ReadyChunks: Sync); + assert_impl!(ReadyChunks: Unpin); + assert_not_impl!(ReadyChunks: Unpin); + + assert_impl!(Repeat<()>: Send); + assert_not_impl!(Repeat<*const ()>: Send); + assert_impl!(Repeat<()>: Sync); + assert_not_impl!(Repeat<*const ()>: Sync); + assert_impl!(Repeat: Unpin); + + assert_impl!(RepeatWith<()>: Send); + assert_not_impl!(RepeatWith<*const ()>: Send); + assert_impl!(RepeatWith<()>: Sync); + assert_not_impl!(RepeatWith<*const ()>: Sync); + // RepeatWith requires `F: FnMut() -> A` + assert_impl!(RepeatWith ()>: Unpin); + // assert_impl!(RepeatWith: Unpin); + + assert_impl!(ReuniteError<(), ()>: Send); + assert_not_impl!(ReuniteError<*const (), ()>: Send); + assert_not_impl!(ReuniteError<(), *const ()>: Send); + assert_impl!(ReuniteError<(), ()>: Sync); + assert_not_impl!(ReuniteError<*const (), ()>: Sync); + assert_not_impl!(ReuniteError<(), *const ()>: Sync); + assert_impl!(ReuniteError: Unpin); + + assert_impl!(Scan: Send); + assert_not_impl!(Scan, (), (), ()>: Send); + assert_not_impl!(Scan, *const (), (), ()>: Send); + assert_not_impl!(Scan, (), *const (), ()>: Send); + assert_not_impl!(Scan, (), (), *const ()>: Send); + assert_impl!(Scan: Sync); + assert_not_impl!(Scan, (), (), ()>: Sync); + assert_not_impl!(Scan, *const (), (), ()>: Sync); + assert_not_impl!(Scan, (), *const (), ()>: Sync); + assert_not_impl!(Scan, (), (), *const ()>: Sync); + assert_impl!(Scan: Unpin); + assert_not_impl!(Scan: Unpin); + assert_not_impl!(Scan: Unpin); + + assert_impl!(Select<(), ()>: Send); + assert_not_impl!(Select<*const (), ()>: Send); + assert_not_impl!(Select<(), *const ()>: Send); + assert_impl!(Select<(), ()>: Sync); + assert_not_impl!(Select<*const (), ()>: Sync); + assert_not_impl!(Select<(), *const ()>: Sync); + assert_impl!(Select<(), ()>: Unpin); + assert_not_impl!(Select: Unpin); + assert_not_impl!(Select<(), PhantomPinned>: Unpin); + + assert_impl!(SelectAll<()>: Send); + assert_not_impl!(SelectAll<*const ()>: Send); + assert_impl!(SelectAll<()>: Sync); + 
assert_not_impl!(SelectAll<*const ()>: Sync); + assert_impl!(SelectAll: Unpin); + + assert_impl!(SelectNextSome<'_, ()>: Send); + assert_not_impl!(SelectNextSome<'_, *const ()>: Send); + assert_impl!(SelectNextSome<'_, ()>: Sync); + assert_not_impl!(SelectNextSome<'_, *const ()>: Sync); + assert_impl!(SelectNextSome<'_, PhantomPinned>: Unpin); + + assert_impl!(Skip<()>: Send); + assert_not_impl!(Skip<*const ()>: Send); + assert_impl!(Skip<()>: Sync); + assert_not_impl!(Skip<*const ()>: Sync); + assert_impl!(Skip<()>: Unpin); + assert_not_impl!(Skip: Unpin); + + assert_impl!(SkipWhile, (), ()>: Send); + assert_not_impl!(SkipWhile, (), ()>: Send); + assert_not_impl!(SkipWhile: Send); + assert_not_impl!(SkipWhile, *const (), ()>: Send); + assert_not_impl!(SkipWhile, (), *const ()>: Send); + assert_impl!(SkipWhile, (), ()>: Sync); + assert_not_impl!(SkipWhile, (), ()>: Sync); + assert_not_impl!(SkipWhile: Sync); + assert_not_impl!(SkipWhile, *const (), ()>: Sync); + assert_not_impl!(SkipWhile, (), *const ()>: Sync); + assert_impl!(SkipWhile: Unpin); + assert_not_impl!(SkipWhile: Unpin); + assert_not_impl!(SkipWhile: Unpin); + + assert_impl!(SplitSink<(), ()>: Send); + assert_not_impl!(SplitSink<*const (), ()>: Send); + assert_not_impl!(SplitSink<(), *const ()>: Send); + assert_impl!(SplitSink<(), ()>: Sync); + assert_not_impl!(SplitSink<*const (), ()>: Sync); + assert_not_impl!(SplitSink<(), *const ()>: Sync); + assert_impl!(SplitSink: Unpin); + + assert_impl!(SplitStream<()>: Send); + assert_not_impl!(SplitStream<*const ()>: Send); + assert_impl!(SplitStream<()>: Sync); + assert_not_impl!(SplitStream<*const ()>: Sync); + assert_impl!(SplitStream: Unpin); + + assert_impl!(StreamFuture<()>: Send); + assert_not_impl!(StreamFuture<*const ()>: Send); + assert_impl!(StreamFuture<()>: Sync); + assert_not_impl!(StreamFuture<*const ()>: Sync); + assert_impl!(StreamFuture<()>: Unpin); + assert_not_impl!(StreamFuture: Unpin); + + assert_impl!(Take<()>: Send); + assert_not_impl!(Take<*const ()>: Send); + assert_impl!(Take<()>: Sync); + assert_not_impl!(Take<*const ()>: Sync); + assert_impl!(Take<()>: Unpin); + assert_not_impl!(Take: Unpin); + + assert_impl!(TakeUntil>: Send); + assert_not_impl!(TakeUntil: Send); + assert_not_impl!(TakeUntil>: Send); + assert_not_impl!(TakeUntil>: Send); + assert_impl!(TakeUntil>: Sync); + assert_not_impl!(TakeUntil: Sync); + assert_not_impl!(TakeUntil>: Sync); + assert_not_impl!(TakeUntil>: Sync); + assert_impl!(TakeUntil: Unpin); + assert_not_impl!(TakeUntil: Unpin); + assert_not_impl!(TakeUntil: Unpin); + + assert_impl!(TakeWhile, (), ()>: Send); + assert_not_impl!(TakeWhile, (), ()>: Send); + assert_not_impl!(TakeWhile: Send); + assert_not_impl!(TakeWhile, *const (), ()>: Send); + assert_not_impl!(TakeWhile, (), *const ()>: Send); + assert_impl!(TakeWhile, (), ()>: Sync); + assert_not_impl!(TakeWhile, (), ()>: Sync); + assert_not_impl!(TakeWhile: Sync); + assert_not_impl!(TakeWhile, *const (), ()>: Sync); + assert_not_impl!(TakeWhile, (), *const ()>: Sync); + assert_impl!(TakeWhile: Unpin); + assert_not_impl!(TakeWhile: Unpin); + assert_not_impl!(TakeWhile: Unpin); + + assert_impl!(Then: Send); + assert_not_impl!(Then, (), ()>: Send); + assert_not_impl!(Then, *const (), ()>: Send); + assert_not_impl!(Then, (), *const ()>: Send); + assert_impl!(Then: Sync); + assert_not_impl!(Then, (), ()>: Sync); + assert_not_impl!(Then, *const (), ()>: Sync); + assert_not_impl!(Then, (), *const ()>: Sync); + assert_impl!(Then: Unpin); + assert_not_impl!(Then: Unpin); + 
assert_not_impl!(Then: Unpin); + + assert_impl!(TryBufferUnordered>: Send); + assert_not_impl!(TryBufferUnordered: Send); + assert_not_impl!(TryBufferUnordered: Send); + assert_impl!(TryBufferUnordered>: Sync); + assert_not_impl!(TryBufferUnordered: Sync); + assert_not_impl!(TryBufferUnordered: Sync); + assert_impl!(TryBufferUnordered: Unpin); + assert_not_impl!(TryBufferUnordered: Unpin); + + assert_impl!(TryBuffered>>: Send); + assert_not_impl!(TryBuffered>>: Send); + assert_not_impl!(TryBuffered>>: Send); + assert_not_impl!(TryBuffered>>: Send); + assert_not_impl!(TryBuffered>>: Send); + assert_impl!(TryBuffered>>: Sync); + assert_not_impl!(TryBuffered>>: Sync); + assert_not_impl!(TryBuffered>>: Sync); + assert_not_impl!(TryBuffered>>: Sync); + assert_not_impl!(TryBuffered>>: Sync); + assert_not_impl!(TryBuffered>>: Sync); + assert_impl!(TryBuffered>: Unpin); + assert_not_impl!(TryBuffered>: Unpin); + + assert_impl!(TryCollect<(), ()>: Send); + assert_not_impl!(TryCollect<*const (), ()>: Send); + assert_not_impl!(TryCollect<(), *const ()>: Send); + assert_impl!(TryCollect<(), ()>: Sync); + assert_not_impl!(TryCollect<*const (), ()>: Sync); + assert_not_impl!(TryCollect<(), *const ()>: Sync); + assert_impl!(TryCollect<(), PhantomPinned>: Unpin); + assert_not_impl!(TryCollect: Unpin); + + assert_impl!(TryConcat>: Send); + assert_not_impl!(TryConcat: Send); + assert_not_impl!(TryConcat: Send); + assert_impl!(TryConcat>: Sync); + assert_not_impl!(TryConcat: Sync); + assert_not_impl!(TryConcat: Sync); + assert_impl!(TryConcat: Unpin); + assert_not_impl!(TryConcat: Unpin); + + assert_impl!(TryFilter, (), ()>: Send); + assert_not_impl!(TryFilter, (), ()>: Send); + assert_not_impl!(TryFilter: Send); + assert_not_impl!(TryFilter, *const (), ()>: Send); + assert_not_impl!(TryFilter, (), *const ()>: Send); + assert_impl!(TryFilter, (), ()>: Sync); + assert_not_impl!(TryFilter, (), ()>: Sync); + assert_not_impl!(TryFilter: Sync); + assert_not_impl!(TryFilter, *const (), ()>: Sync); + assert_not_impl!(TryFilter, (), *const ()>: Sync); + assert_impl!(TryFilter: Unpin); + assert_not_impl!(TryFilter: Unpin); + assert_not_impl!(TryFilter: Unpin); + + assert_impl!(TryFilterMap<(), (), ()>: Send); + assert_not_impl!(TryFilterMap<*const (), (), ()>: Send); + assert_not_impl!(TryFilterMap<(), *const (), ()>: Send); + assert_not_impl!(TryFilterMap<(), (), *const ()>: Send); + assert_impl!(TryFilterMap<(), (), ()>: Sync); + assert_not_impl!(TryFilterMap<*const (), (), ()>: Sync); + assert_not_impl!(TryFilterMap<(), *const (), ()>: Sync); + assert_not_impl!(TryFilterMap<(), (), *const ()>: Sync); + assert_impl!(TryFilterMap<(), (), PhantomPinned>: Unpin); + assert_not_impl!(TryFilterMap: Unpin); + assert_not_impl!(TryFilterMap<(), PhantomPinned, ()>: Unpin); + + assert_impl!(TryFlatten>: Send); + assert_not_impl!(TryFlatten: Send); + assert_not_impl!(TryFlatten: Send); + assert_impl!(TryFlatten>: Sync); + assert_not_impl!(TryFlatten>: Sync); + assert_not_impl!(TryFlatten>: Sync); + assert_impl!(TryFlatten>: Unpin); + assert_not_impl!(TryFlatten: Unpin); + assert_not_impl!(TryFlatten: Unpin); + + assert_impl!(TryFold<(), (), (), ()>: Send); + assert_not_impl!(TryFold<*const (), (), (), ()>: Send); + assert_not_impl!(TryFold<(), *const (), (), ()>: Send); + assert_not_impl!(TryFold<(), (), *const (), ()>: Send); + assert_not_impl!(TryFold<(), (), (), *const ()>: Send); + assert_impl!(TryFold<(), (), (), ()>: Sync); + assert_not_impl!(TryFold<*const (), (), (), ()>: Sync); + assert_not_impl!(TryFold<(), *const 
(), (), ()>: Sync); + assert_not_impl!(TryFold<(), (), *const (), ()>: Sync); + assert_not_impl!(TryFold<(), (), (), *const ()>: Sync); + assert_impl!(TryFold<(), (), PhantomPinned, PhantomPinned>: Unpin); + assert_not_impl!(TryFold: Unpin); + assert_not_impl!(TryFold<(), PhantomPinned, (), ()>: Unpin); + + assert_impl!(TryForEach<(), (), ()>: Send); + assert_not_impl!(TryForEach<*const (), (), ()>: Send); + assert_not_impl!(TryForEach<(), *const (), ()>: Send); + assert_not_impl!(TryForEach<(), (), *const ()>: Send); + assert_impl!(TryForEach<(), (), ()>: Sync); + assert_not_impl!(TryForEach<*const (), (), ()>: Sync); + assert_not_impl!(TryForEach<(), *const (), ()>: Sync); + assert_not_impl!(TryForEach<(), (), *const ()>: Sync); + assert_impl!(TryForEach<(), (), PhantomPinned>: Unpin); + assert_not_impl!(TryForEach: Unpin); + assert_not_impl!(TryForEach<(), PhantomPinned, ()>: Unpin); + + assert_impl!(TryForEachConcurrent<(), (), ()>: Send); + assert_not_impl!(TryForEachConcurrent<*const (), (), ()>: Send); + assert_not_impl!(TryForEachConcurrent<(), *const (), ()>: Send); + assert_not_impl!(TryForEachConcurrent<(), (), *const ()>: Send); + assert_impl!(TryForEachConcurrent<(), (), ()>: Sync); + assert_not_impl!(TryForEachConcurrent<*const (), (), ()>: Sync); + assert_not_impl!(TryForEachConcurrent<(), *const (), ()>: Sync); + assert_not_impl!(TryForEachConcurrent<(), (), *const ()>: Sync); + assert_impl!(TryForEachConcurrent<(), PhantomPinned, PhantomPinned>: Unpin); + assert_not_impl!(TryForEachConcurrent: Unpin); + + assert_impl!(TryNext<'_, ()>: Send); + assert_not_impl!(TryNext<'_, *const ()>: Send); + assert_impl!(TryNext<'_, ()>: Sync); + assert_not_impl!(TryNext<'_, *const ()>: Sync); + assert_impl!(TryNext<'_, ()>: Unpin); + assert_not_impl!(TryNext<'_, PhantomPinned>: Unpin); + + assert_impl!(TrySkipWhile, (), ()>: Send); + assert_not_impl!(TrySkipWhile, (), ()>: Send); + assert_not_impl!(TrySkipWhile: Send); + assert_not_impl!(TrySkipWhile, *const (), ()>: Send); + assert_not_impl!(TrySkipWhile, (), *const ()>: Send); + assert_impl!(TrySkipWhile, (), ()>: Sync); + assert_not_impl!(TrySkipWhile, (), ()>: Sync); + assert_not_impl!(TrySkipWhile: Sync); + assert_not_impl!(TrySkipWhile, *const (), ()>: Sync); + assert_not_impl!(TrySkipWhile, (), *const ()>: Sync); + assert_impl!(TrySkipWhile: Unpin); + assert_not_impl!(TrySkipWhile: Unpin); + assert_not_impl!(TrySkipWhile: Unpin); + + assert_impl!(TryTakeWhile, (), ()>: Send); + assert_not_impl!(TryTakeWhile, (), ()>: Send); + assert_not_impl!(TryTakeWhile: Send); + assert_not_impl!(TryTakeWhile, *const (), ()>: Send); + assert_not_impl!(TryTakeWhile, (), *const ()>: Send); + assert_impl!(TryTakeWhile, (), ()>: Sync); + assert_not_impl!(TryTakeWhile, (), ()>: Sync); + assert_not_impl!(TryTakeWhile: Sync); + assert_not_impl!(TryTakeWhile, *const (), ()>: Sync); + assert_not_impl!(TryTakeWhile, (), *const ()>: Sync); + assert_impl!(TryTakeWhile: Unpin); + assert_not_impl!(TryTakeWhile: Unpin); + assert_not_impl!(TryTakeWhile: Unpin); + + assert_impl!(TryUnfold<(), (), ()>: Send); + assert_not_impl!(TryUnfold<*const (), (), ()>: Send); + assert_not_impl!(TryUnfold<(), *const (), ()>: Send); + assert_not_impl!(TryUnfold<(), (), *const ()>: Send); + assert_impl!(TryUnfold<(), (), ()>: Sync); + assert_not_impl!(TryUnfold<*const (), (), ()>: Sync); + assert_not_impl!(TryUnfold<(), *const (), ()>: Sync); + assert_not_impl!(TryUnfold<(), (), *const ()>: Sync); + assert_impl!(TryUnfold: Unpin); + assert_not_impl!(TryUnfold<(), (), 
PhantomPinned>: Unpin); + + assert_impl!(Unfold<(), (), ()>: Send); + assert_not_impl!(Unfold<*const (), (), ()>: Send); + assert_not_impl!(Unfold<(), *const (), ()>: Send); + assert_not_impl!(Unfold<(), (), *const ()>: Send); + assert_impl!(Unfold<(), (), ()>: Sync); + assert_not_impl!(Unfold<*const (), (), ()>: Sync); + assert_not_impl!(Unfold<(), *const (), ()>: Sync); + assert_not_impl!(Unfold<(), (), *const ()>: Sync); + assert_impl!(Unfold: Unpin); + assert_not_impl!(Unfold<(), (), PhantomPinned>: Unpin); + + assert_impl!(Unzip<(), (), ()>: Send); + assert_not_impl!(Unzip<*const (), (), ()>: Send); + assert_not_impl!(Unzip<(), *const (), ()>: Send); + assert_not_impl!(Unzip<(), (), *const ()>: Send); + assert_impl!(Unzip<(), (), ()>: Sync); + assert_not_impl!(Unzip<*const (), (), ()>: Sync); + assert_not_impl!(Unzip<(), *const (), ()>: Sync); + assert_not_impl!(Unzip<(), (), *const ()>: Sync); + assert_impl!(Unzip<(), PhantomPinned, PhantomPinned>: Unpin); + assert_not_impl!(Unzip: Unpin); + + assert_impl!(Zip, SendStream<()>>: Send); + assert_not_impl!(Zip>: Send); + assert_not_impl!(Zip, SendStream>: Send); + assert_not_impl!(Zip>: Send); + assert_not_impl!(Zip, LocalStream>: Send); + assert_impl!(Zip, SyncStream<()>>: Sync); + assert_not_impl!(Zip>: Sync); + assert_not_impl!(Zip, SyncStream>: Sync); + assert_not_impl!(Zip>: Sync); + assert_not_impl!(Zip, LocalStream>: Sync); + assert_impl!(Zip: Unpin); + assert_not_impl!(Zip: Unpin); + assert_not_impl!(Zip: Unpin); + + assert_impl!(futures_unordered::Iter<()>: Send); + assert_not_impl!(futures_unordered::Iter<*const ()>: Send); + assert_impl!(futures_unordered::Iter<()>: Sync); + assert_not_impl!(futures_unordered::Iter<*const ()>: Sync); + assert_impl!(futures_unordered::Iter<()>: Unpin); + // The definition of futures_unordered::Iter has `Fut: Unpin` bounds. + // assert_not_impl!(futures_unordered::Iter: Unpin); + + assert_impl!(futures_unordered::IterMut<()>: Send); + assert_not_impl!(futures_unordered::IterMut<*const ()>: Send); + assert_impl!(futures_unordered::IterMut<()>: Sync); + assert_not_impl!(futures_unordered::IterMut<*const ()>: Sync); + assert_impl!(futures_unordered::IterMut<()>: Unpin); + // The definition of futures_unordered::IterMut has `Fut: Unpin` bounds. + // assert_not_impl!(futures_unordered::IterMut: Unpin); + + assert_impl!(futures_unordered::IterPinMut<()>: Send); + assert_not_impl!(futures_unordered::IterPinMut<*const ()>: Send); + assert_impl!(futures_unordered::IterPinMut<()>: Sync); + assert_not_impl!(futures_unordered::IterPinMut<*const ()>: Sync); + assert_impl!(futures_unordered::IterPinMut: Unpin); + + assert_impl!(futures_unordered::IterPinRef<()>: Send); + assert_not_impl!(futures_unordered::IterPinRef<*const ()>: Send); + assert_impl!(futures_unordered::IterPinRef<()>: Sync); + assert_not_impl!(futures_unordered::IterPinRef<*const ()>: Sync); + assert_impl!(futures_unordered::IterPinRef: Unpin); + + assert_impl!(futures_unordered::IntoIter<()>: Send); + assert_not_impl!(futures_unordered::IntoIter<*const ()>: Send); + assert_impl!(futures_unordered::IntoIter<()>: Sync); + assert_not_impl!(futures_unordered::IntoIter<*const ()>: Sync); + // The definition of futures_unordered::IntoIter has `Fut: Unpin` bounds. + // assert_not_impl!(futures_unordered::IntoIter: Unpin); +} + +/// Assert Send/Sync/Unpin for all public types in `futures::task`. 
+pub mod task { + use super::*; + use futures::task::*; + + assert_impl!(AtomicWaker: Send); + assert_impl!(AtomicWaker: Sync); + assert_impl!(AtomicWaker: Unpin); + + assert_impl!(FutureObj<*const ()>: Send); + assert_not_impl!(FutureObj<()>: Sync); + assert_impl!(FutureObj<PhantomPinned>: Unpin); + + assert_not_impl!(LocalFutureObj<()>: Send); + assert_not_impl!(LocalFutureObj<()>: Sync); + assert_impl!(LocalFutureObj<PhantomPinned>: Unpin); + + assert_impl!(SpawnError: Send); + assert_impl!(SpawnError: Sync); + assert_impl!(SpawnError: Unpin); + + assert_impl!(WakerRef<'_>: Send); + assert_impl!(WakerRef<'_>: Sync); + assert_impl!(WakerRef<'_>: Unpin); +} diff --git a/vendor/futures/tests/bilock.rs b/vendor/futures/tests/bilock.rs new file mode 100644 index 00000000..b1034878 --- /dev/null +++ b/vendor/futures/tests/bilock.rs @@ -0,0 +1,104 @@ +#![cfg(feature = "bilock")] + +use futures::executor::block_on; +use futures::future; +use futures::stream; +use futures::task::{Context, Poll}; +use futures::Future; +use futures::StreamExt; +use futures_test::task::noop_context; +use futures_util::lock::BiLock; +use std::pin::Pin; +use std::thread; + +#[test] +fn smoke() { + let future = future::lazy(|cx| { + let (a, b) = BiLock::new(1); + + { + let mut lock = match a.poll_lock(cx) { + Poll::Ready(l) => l, + Poll::Pending => panic!("poll not ready"), + }; + assert_eq!(*lock, 1); + *lock = 2; + + assert!(b.poll_lock(cx).is_pending()); + assert!(a.poll_lock(cx).is_pending()); + } + + assert!(b.poll_lock(cx).is_ready()); + assert!(a.poll_lock(cx).is_ready()); + + { + let lock = match b.poll_lock(cx) { + Poll::Ready(l) => l, + Poll::Pending => panic!("poll not ready"), + }; + assert_eq!(*lock, 2); + } + + assert_eq!(a.reunite(b).expect("bilock/smoke: reunite error"), 2); + + Ok::<(), ()>(()) + }); + + assert_eq!(block_on(future), Ok(())); +} + +#[test] +fn concurrent() { + const N: usize = 10000; + let mut cx = noop_context(); + let (a, b) = BiLock::new(0); + + let a = Increment { a: Some(a), remaining: N }; + let b = stream::iter(0..N).fold(b, |b, _n| async { + let mut g = b.lock().await; + *g += 1; + drop(g); + b + }); + + let t1 = thread::spawn(move || block_on(a)); + let b = block_on(b); + let a = t1.join().unwrap(); + + match a.poll_lock(&mut cx) { + Poll::Ready(l) => assert_eq!(*l, 2 * N), + Poll::Pending => panic!("poll not ready"), + } + match b.poll_lock(&mut cx) { + Poll::Ready(l) => assert_eq!(*l, 2 * N), + Poll::Pending => panic!("poll not ready"), + } + + assert_eq!(a.reunite(b).expect("bilock/concurrent: reunite error"), 2 * N); + + struct Increment { + remaining: usize, + a: Option<BiLock<usize>>, + } + + impl Future for Increment { + type Output = BiLock<usize>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + loop { + if self.remaining == 0 { + return self.a.take().unwrap().into(); + } + + let a = self.a.as_mut().unwrap(); + let mut a = match a.poll_lock(cx) { + Poll::Ready(l) => l, + Poll::Pending => return Poll::Pending, + }; + *a += 1; + drop(a); + self.remaining -= 1; + } + } + } +} diff --git a/vendor/futures/tests/compat.rs b/vendor/futures/tests/compat.rs new file mode 100644 index 00000000..ac04a95e --- /dev/null +++ b/vendor/futures/tests/compat.rs @@ -0,0 +1,16 @@ +#![cfg(feature = "compat")] +#![cfg(not(miri))] // Miri does not support epoll + +use futures::compat::Future01CompatExt; +use futures::prelude::*; +use std::time::Instant; +use tokio::runtime::Runtime; +use tokio::timer::Delay; + +#[test] +fn can_use_01_futures_in_a_03_future_running_on_a_01_executor() { + let f = async { 
Delay::new(Instant::now()).compat().await }; + + let mut runtime = Runtime::new().unwrap(); + runtime.block_on(f.boxed().compat()).unwrap(); +} diff --git a/vendor/futures/tests/eager_drop.rs b/vendor/futures/tests/eager_drop.rs new file mode 100644 index 00000000..99250777 --- /dev/null +++ b/vendor/futures/tests/eager_drop.rs @@ -0,0 +1,121 @@ +use futures::channel::oneshot; +use futures::future::{self, Future, FutureExt, TryFutureExt}; +use futures::task::{Context, Poll}; +use futures_test::future::FutureTestExt; +use pin_project::pin_project; +use std::pin::Pin; +use std::sync::mpsc; + +#[test] +fn map_ok() { + // The closure given to `map_ok` should have been dropped by the time `map` + // runs. + let (tx1, rx1) = mpsc::channel::<()>(); + let (tx2, rx2) = mpsc::channel::<()>(); + + future::ready::>(Err(1)) + .map_ok(move |_| { + let _tx1 = tx1; + panic!("should not run"); + }) + .map(move |_| { + assert!(rx1.recv().is_err()); + tx2.send(()).unwrap() + }) + .run_in_background(); + + rx2.recv().unwrap(); +} + +#[test] +fn map_err() { + // The closure given to `map_err` should have been dropped by the time `map` + // runs. + let (tx1, rx1) = mpsc::channel::<()>(); + let (tx2, rx2) = mpsc::channel::<()>(); + + future::ready::>(Ok(1)) + .map_err(move |_| { + let _tx1 = tx1; + panic!("should not run"); + }) + .map(move |_| { + assert!(rx1.recv().is_err()); + tx2.send(()).unwrap() + }) + .run_in_background(); + + rx2.recv().unwrap(); +} + +#[pin_project] +struct FutureData { + _data: T, + #[pin] + future: F, +} + +impl Future for FutureData { + type Output = F::Output; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.project().future.poll(cx) + } +} + +#[test] +fn then_drops_eagerly() { + let (tx0, rx0) = oneshot::channel::<()>(); + let (tx1, rx1) = mpsc::channel::<()>(); + let (tx2, rx2) = mpsc::channel::<()>(); + + FutureData { _data: tx1, future: rx0.unwrap_or_else(|_| panic!()) } + .then(move |_| { + assert!(rx1.recv().is_err()); // tx1 should have been dropped + tx2.send(()).unwrap(); + future::ready(()) + }) + .run_in_background(); + + assert_eq!(Err(mpsc::TryRecvError::Empty), rx2.try_recv()); + tx0.send(()).unwrap(); + rx2.recv().unwrap(); +} + +#[test] +fn and_then_drops_eagerly() { + let (tx0, rx0) = oneshot::channel::>(); + let (tx1, rx1) = mpsc::channel::<()>(); + let (tx2, rx2) = mpsc::channel::<()>(); + + FutureData { _data: tx1, future: rx0.unwrap_or_else(|_| panic!()) } + .and_then(move |_| { + assert!(rx1.recv().is_err()); // tx1 should have been dropped + tx2.send(()).unwrap(); + future::ready(Ok(())) + }) + .run_in_background(); + + assert_eq!(Err(mpsc::TryRecvError::Empty), rx2.try_recv()); + tx0.send(Ok(())).unwrap(); + rx2.recv().unwrap(); +} + +#[test] +fn or_else_drops_eagerly() { + let (tx0, rx0) = oneshot::channel::>(); + let (tx1, rx1) = mpsc::channel::<()>(); + let (tx2, rx2) = mpsc::channel::<()>(); + + FutureData { _data: tx1, future: rx0.unwrap_or_else(|_| panic!()) } + .or_else(move |_| { + assert!(rx1.recv().is_err()); // tx1 should have been dropped + tx2.send(()).unwrap(); + future::ready::>(Ok(())) + }) + .run_in_background(); + + assert_eq!(Err(mpsc::TryRecvError::Empty), rx2.try_recv()); + tx0.send(Err(())).unwrap(); + rx2.recv().unwrap(); +} diff --git a/vendor/futures/tests/eventual.rs b/vendor/futures/tests/eventual.rs new file mode 100644 index 00000000..57a49b24 --- /dev/null +++ b/vendor/futures/tests/eventual.rs @@ -0,0 +1,179 @@ +use futures::channel::oneshot; +use futures::executor::ThreadPool; +use 
futures::future::{self, ok, Future, FutureExt, TryFutureExt}; +use futures::task::SpawnExt; +use std::sync::mpsc; +use std::thread; + +fn run(future: F) { + let tp = ThreadPool::new().unwrap(); + tp.spawn(future.map(drop)).unwrap(); +} + +#[test] +fn join1() { + let (tx, rx) = mpsc::channel(); + run(future::try_join(ok::(1), ok(2)).map_ok(move |v| tx.send(v).unwrap())); + assert_eq!(rx.recv(), Ok((1, 2))); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn join2() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_join(p1, p2).map_ok(move |v| tx.send(v).unwrap())); + assert!(rx.try_recv().is_err()); + c1.send(1).unwrap(); + assert!(rx.try_recv().is_err()); + c2.send(2).unwrap(); + assert_eq!(rx.recv(), Ok((1, 2))); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn join3() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_join(p1, p2).map_err(move |_v| tx.send(1).unwrap())); + assert!(rx.try_recv().is_err()); + drop(c1); + assert_eq!(rx.recv(), Ok(1)); + assert!(rx.recv().is_err()); + drop(c2); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn join4() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_join(p1, p2).map_err(move |v| tx.send(v).unwrap())); + assert!(rx.try_recv().is_err()); + drop(c1); + assert!(rx.recv().is_ok()); + drop(c2); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn join5() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + let (c3, p3) = oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_join(future::try_join(p1, p2), p3).map_ok(move |v| tx.send(v).unwrap())); + assert!(rx.try_recv().is_err()); + c1.send(1).unwrap(); + assert!(rx.try_recv().is_err()); + c2.send(2).unwrap(); + assert!(rx.try_recv().is_err()); + c3.send(3).unwrap(); + assert_eq!(rx.recv(), Ok(((1, 2), 3))); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn select1() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_select(p1, p2).map_ok(move |v| tx.send(v).unwrap())); + assert!(rx.try_recv().is_err()); + c1.send(1).unwrap(); + let (v, p2) = rx.recv().unwrap().into_inner(); + assert_eq!(v, 1); + assert!(rx.recv().is_err()); + + let (tx, rx) = mpsc::channel(); + run(p2.map_ok(move |v| tx.send(v).unwrap())); + c2.send(2).unwrap(); + assert_eq!(rx.recv(), Ok(2)); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn select2() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = 
oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_select(p1, p2).map_err(move |v| tx.send((1, v.into_inner().1)).unwrap())); + assert!(rx.try_recv().is_err()); + drop(c1); + let (v, p2) = rx.recv().unwrap(); + assert_eq!(v, 1); + assert!(rx.recv().is_err()); + + let (tx, rx) = mpsc::channel(); + run(p2.map_ok(move |v| tx.send(v).unwrap())); + c2.send(2).unwrap(); + assert_eq!(rx.recv(), Ok(2)); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn select3() { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + let (tx, rx) = mpsc::channel(); + run(future::try_select(p1, p2).map_err(move |v| tx.send((1, v.into_inner().1)).unwrap())); + assert!(rx.try_recv().is_err()); + drop(c1); + let (v, p2) = rx.recv().unwrap(); + assert_eq!(v, 1); + assert!(rx.recv().is_err()); + + let (tx, rx) = mpsc::channel(); + run(p2.map_err(move |_v| tx.send(2).unwrap())); + drop(c2); + assert_eq!(rx.recv(), Ok(2)); + assert!(rx.recv().is_err()); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} + +#[test] +fn select4() { + const N: usize = if cfg!(miri) { 100 } else { 10000 }; + + let (tx, rx) = mpsc::channel::>(); + + let t = thread::spawn(move || { + for c in rx { + c.send(1).unwrap(); + } + }); + + let (tx2, rx2) = mpsc::channel(); + for _ in 0..N { + let (c1, p1) = oneshot::channel::(); + let (c2, p2) = oneshot::channel::(); + + let tx3 = tx2.clone(); + run(future::try_select(p1, p2).map_ok(move |_| tx3.send(()).unwrap())); + tx.send(c1).unwrap(); + rx2.recv().unwrap(); + drop(c2); + } + drop(tx); + + t.join().unwrap(); + + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} diff --git a/vendor/futures/tests/future_abortable.rs b/vendor/futures/tests/future_abortable.rs new file mode 100644 index 00000000..e119f0b7 --- /dev/null +++ b/vendor/futures/tests/future_abortable.rs @@ -0,0 +1,44 @@ +use futures::channel::oneshot; +use futures::executor::block_on; +use futures::future::{abortable, Aborted, FutureExt}; +use futures::task::{Context, Poll}; +use futures_test::task::new_count_waker; + +#[test] +fn abortable_works() { + let (_tx, a_rx) = oneshot::channel::<()>(); + let (abortable_rx, abort_handle) = abortable(a_rx); + + abort_handle.abort(); + assert!(abortable_rx.is_aborted()); + assert_eq!(Err(Aborted), block_on(abortable_rx)); +} + +#[test] +fn abortable_awakens() { + let (_tx, a_rx) = oneshot::channel::<()>(); + let (mut abortable_rx, abort_handle) = abortable(a_rx); + + let (waker, counter) = new_count_waker(); + let mut cx = Context::from_waker(&waker); + + assert_eq!(counter, 0); + assert_eq!(Poll::Pending, abortable_rx.poll_unpin(&mut cx)); + assert_eq!(counter, 0); + + abort_handle.abort(); + assert_eq!(counter, 1); + assert!(abortable_rx.is_aborted()); + assert_eq!(Poll::Ready(Err(Aborted)), abortable_rx.poll_unpin(&mut cx)); +} + +#[test] +fn abortable_resolves() { + let (tx, a_rx) = oneshot::channel::<()>(); + let (abortable_rx, _abort_handle) = abortable(a_rx); + + tx.send(()).unwrap(); + + assert!(!abortable_rx.is_aborted()); + assert_eq!(Ok(Ok(())), block_on(abortable_rx)); +} diff --git a/vendor/futures/tests/future_basic_combinators.rs b/vendor/futures/tests/future_basic_combinators.rs new file mode 
100644 index 00000000..372ab48b --- /dev/null +++ b/vendor/futures/tests/future_basic_combinators.rs @@ -0,0 +1,104 @@ +use futures::future::{self, FutureExt, TryFutureExt}; +use futures_test::future::FutureTestExt; +use std::sync::mpsc; + +#[test] +fn basic_future_combinators() { + let (tx1, rx) = mpsc::channel(); + let tx2 = tx1.clone(); + let tx3 = tx1.clone(); + + let fut = future::ready(1) + .then(move |x| { + tx1.send(x).unwrap(); // Send 1 + tx1.send(2).unwrap(); // Send 2 + future::ready(3) + }) + .map(move |x| { + tx2.send(x).unwrap(); // Send 3 + tx2.send(4).unwrap(); // Send 4 + 5 + }) + .map(move |x| { + tx3.send(x).unwrap(); // Send 5 + }); + + assert!(rx.try_recv().is_err()); // Not started yet + fut.run_in_background(); // Start it + for i in 1..=5 { + assert_eq!(rx.recv(), Ok(i)); + } // Check it + assert!(rx.recv().is_err()); // Should be done +} + +#[test] +fn basic_try_future_combinators() { + let (tx1, rx) = mpsc::channel(); + let tx2 = tx1.clone(); + let tx3 = tx1.clone(); + let tx4 = tx1.clone(); + let tx5 = tx1.clone(); + let tx6 = tx1.clone(); + let tx7 = tx1.clone(); + let tx8 = tx1.clone(); + let tx9 = tx1.clone(); + let tx10 = tx1.clone(); + + let fut = future::ready(Ok(1)) + .and_then(move |x: i32| { + tx1.send(x).unwrap(); // Send 1 + tx1.send(2).unwrap(); // Send 2 + future::ready(Ok(3)) + }) + .or_else(move |x: i32| { + tx2.send(x).unwrap(); // Should not run + tx2.send(-1).unwrap(); + future::ready(Ok(-1)) + }) + .map_ok(move |x: i32| { + tx3.send(x).unwrap(); // Send 3 + tx3.send(4).unwrap(); // Send 4 + 5 + }) + .map_err(move |x: i32| { + tx4.send(x).unwrap(); // Should not run + tx4.send(-1).unwrap(); + -1 + }) + .map(move |x: Result| { + tx5.send(x.unwrap()).unwrap(); // Send 5 + tx5.send(6).unwrap(); // Send 6 + Err(7) // Now return errors! 
+ }) + .and_then(move |x: i32| { + tx6.send(x).unwrap(); // Should not run + tx6.send(-1).unwrap(); + future::ready(Err(-1)) + }) + .or_else(move |x: i32| { + tx7.send(x).unwrap(); // Send 7 + tx7.send(8).unwrap(); // Send 8 + future::ready(Err(9)) + }) + .map_ok(move |x: i32| { + tx8.send(x).unwrap(); // Should not run + tx8.send(-1).unwrap(); + -1 + }) + .map_err(move |x: i32| { + tx9.send(x).unwrap(); // Send 9 + tx9.send(10).unwrap(); // Send 10 + 11 + }) + .map(move |x: Result<i32, i32>| { + tx10.send(x.err().unwrap()).unwrap(); // Send 11 + tx10.send(12).unwrap(); // Send 12 + }); + + assert!(rx.try_recv().is_err()); // Not started yet + fut.run_in_background(); // Start it + for i in 1..=12 { + assert_eq!(rx.recv(), Ok(i)); + } // Check it + assert!(rx.recv().is_err()); // Should be done +} diff --git a/vendor/futures/tests/future_fuse.rs b/vendor/futures/tests/future_fuse.rs new file mode 100644 index 00000000..83f2c1ce --- /dev/null +++ b/vendor/futures/tests/future_fuse.rs @@ -0,0 +1,12 @@ +use futures::future::{self, FutureExt}; +use futures::task::Context; +use futures_test::task::panic_waker; + +#[test] +fn fuse() { + let mut future = future::ready::<i32>(2).fuse(); + let waker = panic_waker(); + let mut cx = Context::from_waker(&waker); + assert!(future.poll_unpin(&mut cx).is_ready()); + assert!(future.poll_unpin(&mut cx).is_pending()); +} diff --git a/vendor/futures/tests/future_inspect.rs b/vendor/futures/tests/future_inspect.rs new file mode 100644 index 00000000..eacd1f78 --- /dev/null +++ b/vendor/futures/tests/future_inspect.rs @@ -0,0 +1,16 @@ +use futures::executor::block_on; +use futures::future::{self, FutureExt}; + +#[test] +fn smoke() { + let mut counter = 0; + + { + let work = future::ready::<i32>(40).inspect(|val| { + counter += *val; + }); + assert_eq!(block_on(work), 40); + } + + assert_eq!(counter, 40); +} diff --git a/vendor/futures/tests/future_join.rs b/vendor/futures/tests/future_join.rs new file mode 100644 index 00000000..f5df9d77 --- /dev/null +++ b/vendor/futures/tests/future_join.rs @@ -0,0 +1,32 @@ +use futures::executor::block_on; +use futures::future::Future; +use std::task::Poll; + +/// This test verifies (through miri) that self-referencing +/// futures are not invalidated when joining them. +#[test] +fn futures_join_macro_self_referential() { + block_on(async { futures::join!(yield_now(), trouble()) }); +} + +async fn trouble() { + let lucky_number = 42; + let problematic_variable = &lucky_number; + + yield_now().await; + + // problematic dereference + let _ = { *problematic_variable }; +} + +fn yield_now() -> impl Future<Output = ()> { + let mut yielded = false; + std::future::poll_fn(move |cx| { + if core::mem::replace(&mut yielded, true) { + Poll::Ready(()) + } else { + cx.waker().wake_by_ref(); + Poll::Pending + } + }) +} diff --git a/vendor/futures/tests/future_join_all.rs b/vendor/futures/tests/future_join_all.rs new file mode 100644 index 00000000..44486e1c --- /dev/null +++ b/vendor/futures/tests/future_join_all.rs @@ -0,0 +1,41 @@ +use futures::executor::block_on; +use futures::future::{join_all, ready, Future, JoinAll}; +use futures::pin_mut; +use std::fmt::Debug; + +#[track_caller] +fn assert_done<T>(actual_fut: impl Future<Output = T>, expected: T) +where + T: PartialEq + Debug, +{ + pin_mut!(actual_fut); + let output = block_on(actual_fut); + assert_eq!(output, expected); +} + +#[test] +fn collect_collects() { + assert_done(join_all(vec![ready(1), ready(2)]), vec![1, 2]); + assert_done(join_all(vec![ready(1)]), vec![1]); + // REVIEW: should this be implemented? 
+ // assert_done(join_all(Vec::::new()), vec![]); + + // TODO: needs more tests +} + +#[test] +fn join_all_iter_lifetime() { + // In futures-rs version 0.1, this function would fail to typecheck due to an overly + // conservative type parameterization of `JoinAll`. + fn sizes(bufs: Vec<&[u8]>) -> impl Future> { + let iter = bufs.into_iter().map(|b| ready::(b.len())); + join_all(iter) + } + + assert_done(sizes(vec![&[1, 2, 3], &[], &[0]]), vec![3_usize, 0, 1]); +} + +#[test] +fn join_all_from_iter() { + assert_done(vec![ready(1), ready(2)].into_iter().collect::>(), vec![1, 2]) +} diff --git a/vendor/futures/tests/future_obj.rs b/vendor/futures/tests/future_obj.rs new file mode 100644 index 00000000..0e525346 --- /dev/null +++ b/vendor/futures/tests/future_obj.rs @@ -0,0 +1,33 @@ +use futures::future::{Future, FutureExt, FutureObj}; +use futures::task::{Context, Poll}; +use std::pin::Pin; + +#[test] +fn dropping_does_not_segfault() { + FutureObj::new(async { String::new() }.boxed()); +} + +#[test] +fn dropping_drops_the_future() { + let mut times_dropped = 0; + + struct Inc<'a>(&'a mut u32); + + impl Future for Inc<'_> { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<()> { + unimplemented!() + } + } + + impl Drop for Inc<'_> { + fn drop(&mut self) { + *self.0 += 1; + } + } + + FutureObj::new(Inc(&mut times_dropped).boxed()); + + assert_eq!(times_dropped, 1); +} diff --git a/vendor/futures/tests/future_select_all.rs b/vendor/futures/tests/future_select_all.rs new file mode 100644 index 00000000..299b4790 --- /dev/null +++ b/vendor/futures/tests/future_select_all.rs @@ -0,0 +1,25 @@ +use futures::executor::block_on; +use futures::future::{ready, select_all}; +use std::collections::HashSet; + +#[test] +fn smoke() { + let v = vec![ready(1), ready(2), ready(3)]; + + let mut c = vec![1, 2, 3].into_iter().collect::>(); + + let (i, idx, v) = block_on(select_all(v)); + assert!(c.remove(&i)); + assert_eq!(idx, 0); + + let (i, idx, v) = block_on(select_all(v)); + assert!(c.remove(&i)); + assert_eq!(idx, 0); + + let (i, idx, v) = block_on(select_all(v)); + assert!(c.remove(&i)); + assert_eq!(idx, 0); + + assert!(c.is_empty()); + assert!(v.is_empty()); +} diff --git a/vendor/futures/tests/future_select_ok.rs b/vendor/futures/tests/future_select_ok.rs new file mode 100644 index 00000000..8aec0036 --- /dev/null +++ b/vendor/futures/tests/future_select_ok.rs @@ -0,0 +1,30 @@ +use futures::executor::block_on; +use futures::future::{err, ok, select_ok}; + +#[test] +fn ignore_err() { + let v = vec![err(1), err(2), ok(3), ok(4)]; + + let (i, v) = block_on(select_ok(v)).ok().unwrap(); + assert_eq!(i, 3); + + assert_eq!(v.len(), 1); + + let (i, v) = block_on(select_ok(v)).ok().unwrap(); + assert_eq!(i, 4); + + assert!(v.is_empty()); +} + +#[test] +fn last_err() { + let v = vec![ok(1), err(2), err(3)]; + + let (i, v) = block_on(select_ok(v)).ok().unwrap(); + assert_eq!(i, 1); + + assert_eq!(v.len(), 2); + + let i = block_on(select_ok(v)).err().unwrap(); + assert_eq!(i, 3); +} diff --git a/vendor/futures/tests/future_shared.rs b/vendor/futures/tests/future_shared.rs new file mode 100644 index 00000000..bd69c1d7 --- /dev/null +++ b/vendor/futures/tests/future_shared.rs @@ -0,0 +1,273 @@ +use futures::channel::oneshot; +use futures::executor::{block_on, LocalPool}; +use futures::future::{self, FutureExt, LocalFutureObj, TryFutureExt}; +use futures::task::LocalSpawn; +use std::cell::{Cell, RefCell}; +use std::panic::AssertUnwindSafe; +use std::rc::Rc; +use std::task::Poll; +use 
std::thread; + +struct CountClone(Rc>); + +impl Clone for CountClone { + fn clone(&self) -> Self { + self.0.set(self.0.get() + 1); + Self(self.0.clone()) + } +} + +fn send_shared_oneshot_and_wait_on_multiple_threads(threads_number: u32) { + let (tx, rx) = oneshot::channel::(); + let f = rx.shared(); + let join_handles = (0..threads_number) + .map(|_| { + let cloned_future = f.clone(); + thread::spawn(move || { + assert_eq!(block_on(cloned_future).unwrap(), 6); + }) + }) + .collect::>(); + + tx.send(6).unwrap(); + + assert_eq!(block_on(f).unwrap(), 6); + for join_handle in join_handles { + join_handle.join().unwrap(); + } +} + +#[test] +fn one_thread() { + send_shared_oneshot_and_wait_on_multiple_threads(1); +} + +#[test] +fn two_threads() { + send_shared_oneshot_and_wait_on_multiple_threads(2); +} + +#[test] +fn many_threads() { + send_shared_oneshot_and_wait_on_multiple_threads(1000); +} + +#[test] +fn drop_on_one_task_ok() { + let (tx, rx) = oneshot::channel::(); + let f1 = rx.shared(); + let f2 = f1.clone(); + + let (tx2, rx2) = oneshot::channel::(); + + let t1 = thread::spawn(|| { + let f = future::try_select(f1.map_err(|_| ()), rx2.map_err(|_| ())); + drop(block_on(f)); + }); + + let (tx3, rx3) = oneshot::channel::(); + + let t2 = thread::spawn(|| { + let _ = block_on(f2.map_ok(|x| tx3.send(x).unwrap()).map_err(|_| ())); + }); + + tx2.send(11).unwrap(); // cancel `f1` + t1.join().unwrap(); + + tx.send(42).unwrap(); // Should cause `f2` and then `rx3` to get resolved. + let result = block_on(rx3).unwrap(); + assert_eq!(result, 42); + t2.join().unwrap(); +} + +#[test] +fn drop_in_poll() { + let slot1 = Rc::new(RefCell::new(None)); + let slot2 = slot1.clone(); + + let future1 = future::lazy(move |_| { + slot2.replace(None); // Drop future + 1 + }) + .shared(); + + let future2 = LocalFutureObj::new(Box::new(future1.clone())); + slot1.replace(Some(future2)); + + assert_eq!(block_on(future1), 1); +} + +#[test] +fn peek() { + let mut local_pool = LocalPool::new(); + let spawn = &mut local_pool.spawner(); + + let (tx0, rx0) = oneshot::channel::(); + let f1 = rx0.shared(); + let f2 = f1.clone(); + + // Repeated calls on the original or clone do not change the outcome. + for _ in 0..2 { + assert!(f1.peek().is_none()); + assert!(f2.peek().is_none()); + } + + // Completing the underlying future has no effect, because the value has not been `poll`ed in. + tx0.send(42).unwrap(); + for _ in 0..2 { + assert!(f1.peek().is_none()); + assert!(f2.peek().is_none()); + } + + // Once the Shared has been polled, the value is peekable on the clone. + spawn.spawn_local_obj(LocalFutureObj::new(Box::new(f1.map(|_| ())))).unwrap(); + local_pool.run(); + for _ in 0..2 { + assert_eq!(*f2.peek().unwrap(), Ok(42)); + } +} + +#[test] +fn downgrade() { + let (tx, rx) = oneshot::channel::(); + let shared = rx.shared(); + // Since there are outstanding `Shared`s, we can get a `WeakShared`. + let weak = shared.downgrade().unwrap(); + // It should upgrade fine right now. + let mut shared2 = weak.upgrade().unwrap(); + + tx.send(42).unwrap(); + assert_eq!(block_on(shared).unwrap(), 42); + + // We should still be able to get a new `WeakShared` and upgrade it + // because `shared2` is outstanding. + assert!(shared2.downgrade().is_some()); + assert!(weak.upgrade().is_some()); + + assert_eq!(block_on(&mut shared2).unwrap(), 42); + // Now that all `Shared`s have been exhausted, we should not be able + // to get a new `WeakShared` or upgrade an existing one. 
+ assert!(weak.upgrade().is_none()); + assert!(shared2.downgrade().is_none()); +} + +#[test] +fn ptr_eq() { + use future::FusedFuture; + use std::collections::hash_map::DefaultHasher; + use std::hash::Hasher; + + let (tx, rx) = oneshot::channel::(); + let shared = rx.shared(); + let mut shared2 = shared.clone(); + let mut hasher = DefaultHasher::new(); + let mut hasher2 = DefaultHasher::new(); + + // Because these two futures share the same underlying future, + // `ptr_eq` should return true. + assert!(shared.ptr_eq(&shared2)); + // Equivalence relations are symmetric + assert!(shared2.ptr_eq(&shared)); + + // If `ptr_eq` returns true, they should hash to the same value. + shared.ptr_hash(&mut hasher); + shared2.ptr_hash(&mut hasher2); + assert_eq!(hasher.finish(), hasher2.finish()); + + tx.send(42).unwrap(); + assert_eq!(block_on(&mut shared2).unwrap(), 42); + + // Now that `shared2` has completed, `ptr_eq` should return false. + assert!(shared2.is_terminated()); + assert!(!shared.ptr_eq(&shared2)); + + // `ptr_eq` should continue to work for the other `Shared`. + let shared3 = shared.clone(); + let mut hasher3 = DefaultHasher::new(); + assert!(shared.ptr_eq(&shared3)); + + shared3.ptr_hash(&mut hasher3); + assert_eq!(hasher.finish(), hasher3.finish()); + + let (_tx, rx) = oneshot::channel::(); + let shared4 = rx.shared(); + + // And `ptr_eq` should return false for two futures that don't share + // the underlying future. + assert!(!shared.ptr_eq(&shared4)); +} + +#[test] +fn dont_clone_in_single_owner_shared_future() { + let counter = CountClone(Rc::new(Cell::new(0))); + let (tx, rx) = oneshot::channel(); + + let rx = rx.shared(); + + tx.send(counter).ok().unwrap(); + + assert_eq!(block_on(rx).unwrap().0.get(), 0); +} + +#[test] +fn dont_do_unnecessary_clones_on_output() { + let counter = CountClone(Rc::new(Cell::new(0))); + let (tx, rx) = oneshot::channel(); + + let rx = rx.shared(); + + tx.send(counter).ok().unwrap(); + + assert_eq!(block_on(rx.clone()).unwrap().0.get(), 1); + assert_eq!(block_on(rx.clone()).unwrap().0.get(), 2); + assert_eq!(block_on(rx).unwrap().0.get(), 2); +} + +#[test] +fn shared_future_that_wakes_itself_until_pending_is_returned() { + let proceed = Cell::new(false); + let fut = futures::future::poll_fn(|cx| { + if proceed.get() { + Poll::Ready(()) + } else { + cx.waker().wake_by_ref(); + Poll::Pending + } + }) + .shared(); + + // The join future can only complete if the second future gets a chance to run after the first + // has returned pending + assert_eq!(block_on(futures::future::join(fut, async { proceed.set(true) })), ((), ())); +} + +#[test] +#[should_panic(expected = "inner future panicked during poll")] +fn panic_while_poll() { + let fut = futures::future::poll_fn::(|_cx| panic!("test")).shared(); + + let fut_captured = fut.clone(); + std::panic::catch_unwind(AssertUnwindSafe(|| { + block_on(fut_captured); + })) + .unwrap_err(); + + block_on(fut); +} + +#[test] +#[should_panic(expected = "test_marker")] +fn poll_while_panic() { + struct S; + + impl Drop for S { + fn drop(&mut self) { + let fut = futures::future::ready(1).shared(); + assert_eq!(block_on(fut.clone()), 1); + assert_eq!(block_on(fut), 1); + } + } + + let _s = S {}; + panic!("test_marker"); +} diff --git a/vendor/futures/tests/future_try_flatten_stream.rs b/vendor/futures/tests/future_try_flatten_stream.rs new file mode 100644 index 00000000..82ae1baf --- /dev/null +++ b/vendor/futures/tests/future_try_flatten_stream.rs @@ -0,0 +1,83 @@ +use futures::executor::block_on_stream; +use 
futures::future::{err, ok, TryFutureExt}; +use futures::sink::Sink; +use futures::stream::Stream; +use futures::stream::{self, StreamExt}; +use futures::task::{Context, Poll}; +use std::marker::PhantomData; +use std::pin::Pin; + +#[test] +fn successful_future() { + let stream_items = vec![17, 19]; + let future_of_a_stream = ok::<_, bool>(stream::iter(stream_items).map(Ok)); + + let stream = future_of_a_stream.try_flatten_stream(); + + let mut iter = block_on_stream(stream); + assert_eq!(Ok(17), iter.next().unwrap()); + assert_eq!(Ok(19), iter.next().unwrap()); + assert_eq!(None, iter.next()); +} + +#[test] +fn failed_future() { + struct PanickingStream { + _marker: PhantomData<(T, E)>, + } + + impl Stream for PanickingStream { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + panic!() + } + } + + let future_of_a_stream = err::, _>(10); + let stream = future_of_a_stream.try_flatten_stream(); + let mut iter = block_on_stream(stream); + assert_eq!(Err(10), iter.next().unwrap()); + assert_eq!(None, iter.next()); +} + +#[test] +fn assert_impls() { + struct StreamSink(PhantomData<(T, E, Item)>); + + impl Stream for StreamSink { + type Item = Result; + fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + panic!() + } + } + + impl Sink for StreamSink { + type Error = E; + fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + panic!() + } + fn start_send(self: Pin<&mut Self>, _: Item) -> Result<(), Self::Error> { + panic!() + } + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + panic!() + } + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + panic!() + } + } + + fn assert_stream(_: &S) {} + fn assert_sink, Item>(_: &S) {} + fn assert_stream_sink, Item>(_: &S) {} + + let s = ok(StreamSink::<(), (), ()>(PhantomData)).try_flatten_stream(); + assert_stream(&s); + assert_sink(&s); + assert_stream_sink(&s); + let s = ok(StreamSink::<(), (), ()>(PhantomData)).flatten_sink(); + assert_stream(&s); + assert_sink(&s); + assert_stream_sink(&s); +} diff --git a/vendor/futures/tests/future_try_join_all.rs b/vendor/futures/tests/future_try_join_all.rs new file mode 100644 index 00000000..9a824872 --- /dev/null +++ b/vendor/futures/tests/future_try_join_all.rs @@ -0,0 +1,46 @@ +use futures::executor::block_on; +use futures::pin_mut; +use futures_util::future::{err, ok, try_join_all, TryJoinAll}; +use std::fmt::Debug; +use std::future::Future; + +#[track_caller] +fn assert_done(actual_fut: impl Future, expected: T) +where + T: PartialEq + Debug, +{ + pin_mut!(actual_fut); + let output = block_on(actual_fut); + assert_eq!(output, expected); +} + +#[test] +fn collect_collects() { + assert_done(try_join_all(vec![ok(1), ok(2)]), Ok::<_, usize>(vec![1, 2])); + assert_done(try_join_all(vec![ok(1), err(2)]), Err(2)); + assert_done(try_join_all(vec![ok(1)]), Ok::<_, usize>(vec![1])); + // REVIEW: should this be implemented? + // assert_done(try_join_all(Vec::::new()), Ok(vec![])); + + // TODO: needs more tests +} + +#[test] +fn try_join_all_iter_lifetime() { + // In futures-rs version 0.1, this function would fail to typecheck due to an overly + // conservative type parameterization of `TryJoinAll`. 
+ fn sizes(bufs: Vec<&[u8]>) -> impl Future, ()>> { + let iter = bufs.into_iter().map(|b| ok::(b.len())); + try_join_all(iter) + } + + assert_done(sizes(vec![&[1, 2, 3], &[], &[0]]), Ok(vec![3_usize, 0, 1])); +} + +#[test] +fn try_join_all_from_iter() { + assert_done( + vec![ok(1), ok(2)].into_iter().collect::>(), + Ok::<_, usize>(vec![1, 2]), + ) +} diff --git a/vendor/futures/tests/io_buf_reader.rs b/vendor/futures/tests/io_buf_reader.rs new file mode 100644 index 00000000..717297cc --- /dev/null +++ b/vendor/futures/tests/io_buf_reader.rs @@ -0,0 +1,432 @@ +use futures::executor::block_on; +use futures::future::{Future, FutureExt}; +use futures::io::{ + AllowStdIo, AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, + BufReader, SeekFrom, +}; +use futures::pin_mut; +use futures::task::{Context, Poll}; +use futures_test::task::noop_context; +use pin_project::pin_project; +use std::cmp; +use std::io; +use std::pin::Pin; + +// helper for maybe_pending_* tests +fn run(mut f: F) -> F::Output { + let mut cx = noop_context(); + loop { + if let Poll::Ready(x) = f.poll_unpin(&mut cx) { + return x; + } + } +} + +// https://github.com/rust-lang/futures-rs/pull/2489#discussion_r697865719 +#[pin_project(!Unpin)] +struct Cursor { + #[pin] + inner: futures::io::Cursor, +} + +impl Cursor { + fn new(inner: T) -> Self { + Self { inner: futures::io::Cursor::new(inner) } + } +} + +impl AsyncRead for Cursor<&[u8]> { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + self.project().inner.poll_read(cx, buf) + } +} + +impl AsyncBufRead for Cursor<&[u8]> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().inner.poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.project().inner.consume(amt) + } +} + +impl AsyncSeek for Cursor<&[u8]> { + fn poll_seek( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll> { + self.project().inner.poll_seek(cx, pos) + } +} + +struct MaybePending<'a> { + inner: &'a [u8], + ready_read: bool, + ready_fill_buf: bool, +} + +impl<'a> MaybePending<'a> { + fn new(inner: &'a [u8]) -> Self { + Self { inner, ready_read: false, ready_fill_buf: false } + } +} + +impl AsyncRead for MaybePending<'_> { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + if self.ready_read { + self.ready_read = false; + Pin::new(&mut self.inner).poll_read(cx, buf) + } else { + self.ready_read = true; + Poll::Pending + } + } +} + +impl AsyncBufRead for MaybePending<'_> { + fn poll_fill_buf(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + if self.ready_fill_buf { + self.ready_fill_buf = false; + if self.inner.is_empty() { + return Poll::Ready(Ok(&[])); + } + let len = cmp::min(2, self.inner.len()); + Poll::Ready(Ok(&self.inner[0..len])) + } else { + self.ready_fill_buf = true; + Poll::Pending + } + } + + fn consume(mut self: Pin<&mut Self>, amt: usize) { + self.inner = &self.inner[amt..]; + } +} + +#[test] +fn test_buffered_reader() { + block_on(async { + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let mut reader = BufReader::with_capacity(2, inner); + + let mut buf = [0, 0, 0]; + let nread = reader.read(&mut buf).await.unwrap(); + assert_eq!(nread, 3); + assert_eq!(buf, [5, 6, 7]); + assert_eq!(reader.buffer(), []); + + let mut buf = [0, 0]; + let nread = reader.read(&mut buf).await.unwrap(); + assert_eq!(nread, 2); + assert_eq!(buf, [0, 1]); + assert_eq!(reader.buffer(), []); + + 
let mut buf = [0]; + let nread = reader.read(&mut buf).await.unwrap(); + assert_eq!(nread, 1); + assert_eq!(buf, [2]); + assert_eq!(reader.buffer(), [3]); + + let mut buf = [0, 0, 0]; + let nread = reader.read(&mut buf).await.unwrap(); + assert_eq!(nread, 1); + assert_eq!(buf, [3, 0, 0]); + assert_eq!(reader.buffer(), []); + + let nread = reader.read(&mut buf).await.unwrap(); + assert_eq!(nread, 1); + assert_eq!(buf, [4, 0, 0]); + assert_eq!(reader.buffer(), []); + + assert_eq!(reader.read(&mut buf).await.unwrap(), 0); + }); +} + +#[test] +fn test_buffered_reader_seek() { + block_on(async { + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let reader = BufReader::with_capacity(2, Cursor::new(inner)); + pin_mut!(reader); + + assert_eq!(reader.seek(SeekFrom::Start(3)).await.unwrap(), 3); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]); + assert!(reader.seek(SeekFrom::Current(i64::MIN)).await.is_err()); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]); + assert_eq!(reader.seek(SeekFrom::Current(1)).await.unwrap(), 4); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[1, 2][..]); + reader.as_mut().consume(1); + assert_eq!(reader.seek(SeekFrom::Current(-2)).await.unwrap(), 3); + }); +} + +#[test] +fn test_buffered_reader_seek_relative() { + block_on(async { + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let reader = BufReader::with_capacity(2, Cursor::new(inner)); + pin_mut!(reader); + + assert!(reader.as_mut().seek_relative(3).await.is_ok()); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]); + assert!(reader.as_mut().seek_relative(0).await.is_ok()); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]); + assert!(reader.as_mut().seek_relative(1).await.is_ok()); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[1][..]); + assert!(reader.as_mut().seek_relative(-1).await.is_ok()); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1][..]); + assert!(reader.as_mut().seek_relative(2).await.is_ok()); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[2, 3][..]); + }); +} + +#[test] +fn test_buffered_reader_invalidated_after_read() { + block_on(async { + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let reader = BufReader::with_capacity(3, Cursor::new(inner)); + pin_mut!(reader); + + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[5, 6, 7][..]); + reader.as_mut().consume(3); + + let mut buffer = [0, 0, 0, 0, 0]; + assert_eq!(reader.read(&mut buffer).await.unwrap(), 5); + assert_eq!(buffer, [0, 1, 2, 3, 4]); + + assert!(reader.as_mut().seek_relative(-2).await.is_ok()); + let mut buffer = [0, 0]; + assert_eq!(reader.read(&mut buffer).await.unwrap(), 2); + assert_eq!(buffer, [3, 4]); + }); +} + +#[test] +fn test_buffered_reader_invalidated_after_seek() { + block_on(async { + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let reader = BufReader::with_capacity(3, Cursor::new(inner)); + pin_mut!(reader); + + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[5, 6, 7][..]); + reader.as_mut().consume(3); + + assert!(reader.seek(SeekFrom::Current(5)).await.is_ok()); + + assert!(reader.as_mut().seek_relative(-2).await.is_ok()); + let mut buffer = [0, 0]; + assert_eq!(reader.read(&mut buffer).await.unwrap(), 2); + assert_eq!(buffer, [3, 4]); + }); +} + +#[test] +fn test_buffered_reader_seek_underflow() { + // gimmick reader that yields its position modulo 256 for each byte + struct PositionReader { + pos: u64, + } + impl io::Read for PositionReader { + fn read(&mut self, buf: &mut 
[u8]) -> io::Result { + let len = buf.len(); + for x in buf { + *x = self.pos as u8; + self.pos = self.pos.wrapping_add(1); + } + Ok(len) + } + } + impl io::Seek for PositionReader { + fn seek(&mut self, pos: SeekFrom) -> io::Result { + match pos { + SeekFrom::Start(n) => { + self.pos = n; + } + SeekFrom::Current(n) => { + self.pos = self.pos.wrapping_add(n as u64); + } + SeekFrom::End(n) => { + self.pos = u64::MAX.wrapping_add(n as u64); + } + } + Ok(self.pos) + } + } + + block_on(async { + let reader = BufReader::with_capacity(5, AllowStdIo::new(PositionReader { pos: 0 })); + pin_mut!(reader); + assert_eq!(reader.as_mut().fill_buf().await.unwrap(), &[0, 1, 2, 3, 4][..]); + assert_eq!(reader.seek(SeekFrom::End(-5)).await.unwrap(), u64::MAX - 5); + assert_eq!(reader.as_mut().fill_buf().await.unwrap().len(), 5); + // the following seek will require two underlying seeks + let expected = 9_223_372_036_854_775_802; + assert_eq!(reader.seek(SeekFrom::Current(i64::MIN)).await.unwrap(), expected); + assert_eq!(reader.as_mut().fill_buf().await.unwrap().len(), 5); + // seeking to 0 should empty the buffer. + assert_eq!(reader.seek(SeekFrom::Current(0)).await.unwrap(), expected); + assert_eq!(reader.get_ref().get_ref().pos, expected); + }); +} + +#[test] +fn test_short_reads() { + /// A dummy reader intended at testing short-reads propagation. + struct ShortReader { + lengths: Vec, + } + + impl io::Read for ShortReader { + fn read(&mut self, _: &mut [u8]) -> io::Result { + if self.lengths.is_empty() { + Ok(0) + } else { + Ok(self.lengths.remove(0)) + } + } + } + + block_on(async { + let inner = ShortReader { lengths: vec![0, 1, 2, 0, 1, 0] }; + let mut reader = BufReader::new(AllowStdIo::new(inner)); + let mut buf = [0, 0]; + assert_eq!(reader.read(&mut buf).await.unwrap(), 0); + assert_eq!(reader.read(&mut buf).await.unwrap(), 1); + assert_eq!(reader.read(&mut buf).await.unwrap(), 2); + assert_eq!(reader.read(&mut buf).await.unwrap(), 0); + assert_eq!(reader.read(&mut buf).await.unwrap(), 1); + assert_eq!(reader.read(&mut buf).await.unwrap(), 0); + assert_eq!(reader.read(&mut buf).await.unwrap(), 0); + }); +} + +#[test] +fn maybe_pending() { + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let mut reader = BufReader::with_capacity(2, MaybePending::new(inner)); + + let mut buf = [0, 0, 0]; + let nread = run(reader.read(&mut buf)); + assert_eq!(nread.unwrap(), 3); + assert_eq!(buf, [5, 6, 7]); + assert_eq!(reader.buffer(), []); + + let mut buf = [0, 0]; + let nread = run(reader.read(&mut buf)); + assert_eq!(nread.unwrap(), 2); + assert_eq!(buf, [0, 1]); + assert_eq!(reader.buffer(), []); + + let mut buf = [0]; + let nread = run(reader.read(&mut buf)); + assert_eq!(nread.unwrap(), 1); + assert_eq!(buf, [2]); + assert_eq!(reader.buffer(), [3]); + + let mut buf = [0, 0, 0]; + let nread = run(reader.read(&mut buf)); + assert_eq!(nread.unwrap(), 1); + assert_eq!(buf, [3, 0, 0]); + assert_eq!(reader.buffer(), []); + + let nread = run(reader.read(&mut buf)); + assert_eq!(nread.unwrap(), 1); + assert_eq!(buf, [4, 0, 0]); + assert_eq!(reader.buffer(), []); + + assert_eq!(run(reader.read(&mut buf)).unwrap(), 0); +} + +#[test] +fn maybe_pending_buf_read() { + let inner = MaybePending::new(&[0, 1, 2, 3, 1, 0]); + let mut reader = BufReader::with_capacity(2, inner); + let mut v = Vec::new(); + run(reader.read_until(3, &mut v)).unwrap(); + assert_eq!(v, [0, 1, 2, 3]); + v.clear(); + run(reader.read_until(1, &mut v)).unwrap(); + assert_eq!(v, [1]); + v.clear(); + run(reader.read_until(8, &mut v)).unwrap(); + 
assert_eq!(v, [0]); + v.clear(); + run(reader.read_until(9, &mut v)).unwrap(); + assert_eq!(v, []); +} + +// https://github.com/rust-lang/futures-rs/pull/1573#discussion_r281162309 +#[test] +fn maybe_pending_seek() { + #[pin_project] + struct MaybePendingSeek<'a> { + #[pin] + inner: Cursor<&'a [u8]>, + ready: bool, + } + + impl<'a> MaybePendingSeek<'a> { + fn new(inner: &'a [u8]) -> Self { + Self { inner: Cursor::new(inner), ready: true } + } + } + + impl AsyncRead for MaybePendingSeek<'_> { + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + self.project().inner.poll_read(cx, buf) + } + } + + impl AsyncBufRead for MaybePendingSeek<'_> { + fn poll_fill_buf(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().inner.poll_fill_buf(cx) + } + + fn consume(self: Pin<&mut Self>, amt: usize) { + self.project().inner.consume(amt) + } + } + + impl AsyncSeek for MaybePendingSeek<'_> { + fn poll_seek( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll> { + if self.ready { + *self.as_mut().project().ready = false; + self.project().inner.poll_seek(cx, pos) + } else { + *self.project().ready = true; + Poll::Pending + } + } + } + + let inner: &[u8] = &[5, 6, 7, 0, 1, 2, 3, 4]; + let reader = BufReader::with_capacity(2, MaybePendingSeek::new(inner)); + pin_mut!(reader); + + assert_eq!(run(reader.seek(SeekFrom::Current(3))).ok(), Some(3)); + assert_eq!(run(reader.as_mut().fill_buf()).ok(), Some(&[0, 1][..])); + assert_eq!(run(reader.seek(SeekFrom::Current(i64::MIN))).ok(), None); + assert_eq!(run(reader.as_mut().fill_buf()).ok(), Some(&[0, 1][..])); + assert_eq!(run(reader.seek(SeekFrom::Current(1))).ok(), Some(4)); + assert_eq!(run(reader.as_mut().fill_buf()).ok(), Some(&[1, 2][..])); + Pin::new(&mut reader).consume(1); + assert_eq!(run(reader.seek(SeekFrom::Current(-2))).ok(), Some(3)); +} diff --git a/vendor/futures/tests/io_buf_writer.rs b/vendor/futures/tests/io_buf_writer.rs new file mode 100644 index 00000000..b264cd54 --- /dev/null +++ b/vendor/futures/tests/io_buf_writer.rs @@ -0,0 +1,239 @@ +use futures::executor::block_on; +use futures::future::{Future, FutureExt}; +use futures::io::{ + AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt, BufWriter, Cursor, SeekFrom, +}; +use futures::task::{Context, Poll}; +use futures_test::task::noop_context; +use std::io; +use std::pin::Pin; + +struct MaybePending { + inner: Vec, + ready: bool, +} + +impl MaybePending { + fn new(inner: Vec) -> Self { + Self { inner, ready: false } + } +} + +impl AsyncWrite for MaybePending { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + if self.ready { + self.ready = false; + Pin::new(&mut self.inner).poll_write(cx, buf) + } else { + self.ready = true; + Poll::Pending + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_close(cx) + } +} + +fn run(mut f: F) -> F::Output { + let mut cx = noop_context(); + loop { + if let Poll::Ready(x) = f.poll_unpin(&mut cx) { + return x; + } + } +} + +#[test] +fn buf_writer() { + let mut writer = BufWriter::with_capacity(2, Vec::new()); + + block_on(writer.write(&[0, 1])).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(*writer.get_ref(), [0, 1]); + + block_on(writer.write(&[2])).unwrap(); + assert_eq!(writer.buffer(), [2]); + 
assert_eq!(*writer.get_ref(), [0, 1]); + + block_on(writer.write(&[3])).unwrap(); + assert_eq!(writer.buffer(), [2, 3]); + assert_eq!(*writer.get_ref(), [0, 1]); + + block_on(writer.flush()).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(*writer.get_ref(), [0, 1, 2, 3]); + + block_on(writer.write(&[4])).unwrap(); + block_on(writer.write(&[5])).unwrap(); + assert_eq!(writer.buffer(), [4, 5]); + assert_eq!(*writer.get_ref(), [0, 1, 2, 3]); + + block_on(writer.write(&[6])).unwrap(); + assert_eq!(writer.buffer(), [6]); + assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5]); + + block_on(writer.write(&[7, 8])).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8]); + + block_on(writer.write(&[9, 10, 11])).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); + + block_on(writer.flush()).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(*writer.get_ref(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); +} + +#[test] +fn buf_writer_inner_flushes() { + let mut w = BufWriter::with_capacity(3, Vec::new()); + block_on(w.write(&[0, 1])).unwrap(); + assert_eq!(*w.get_ref(), []); + block_on(w.flush()).unwrap(); + let w = w.into_inner(); + assert_eq!(w, [0, 1]); +} + +#[test] +fn buf_writer_seek() { + // FIXME: when https://github.com/rust-lang/futures-rs/issues/1510 fixed, + // use `Vec::new` instead of `vec![0; 8]`. + let mut w = BufWriter::with_capacity(3, Cursor::new(vec![0; 8])); + block_on(w.write_all(&[0, 1, 2, 3, 4, 5])).unwrap(); + block_on(w.write_all(&[6, 7])).unwrap(); + assert_eq!(block_on(w.seek(SeekFrom::Current(0))).ok(), Some(8)); + assert_eq!(&w.get_ref().get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]); + assert_eq!(block_on(w.seek(SeekFrom::Start(2))).ok(), Some(2)); + block_on(w.write_all(&[8, 9])).unwrap(); + block_on(w.flush()).unwrap(); + assert_eq!(&w.into_inner().into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]); +} + +#[test] +fn maybe_pending_buf_writer() { + let mut writer = BufWriter::with_capacity(2, MaybePending::new(Vec::new())); + + run(writer.write(&[0, 1])).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(&writer.get_ref().inner, &[0, 1]); + + run(writer.write(&[2])).unwrap(); + assert_eq!(writer.buffer(), [2]); + assert_eq!(&writer.get_ref().inner, &[0, 1]); + + run(writer.write(&[3])).unwrap(); + assert_eq!(writer.buffer(), [2, 3]); + assert_eq!(&writer.get_ref().inner, &[0, 1]); + + run(writer.flush()).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]); + + run(writer.write(&[4])).unwrap(); + run(writer.write(&[5])).unwrap(); + assert_eq!(writer.buffer(), [4, 5]); + assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3]); + + run(writer.write(&[6])).unwrap(); + assert_eq!(writer.buffer(), [6]); + assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5]); + + run(writer.write(&[7, 8])).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8]); + + run(writer.write(&[9, 10, 11])).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); + + run(writer.flush()).unwrap(); + assert_eq!(writer.buffer(), []); + assert_eq!(&writer.get_ref().inner, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]); +} + +#[test] +fn maybe_pending_buf_writer_inner_flushes() { + let mut w = BufWriter::with_capacity(3, MaybePending::new(Vec::new())); + run(w.write(&[0, 1])).unwrap(); + assert_eq!(&w.get_ref().inner, &[]); + 
run(w.flush()).unwrap(); + let w = w.into_inner().inner; + assert_eq!(w, [0, 1]); +} + +#[test] +fn maybe_pending_buf_writer_seek() { + struct MaybePendingSeek { + inner: Cursor>, + ready_write: bool, + ready_seek: bool, + } + + impl MaybePendingSeek { + fn new(inner: Vec) -> Self { + Self { inner: Cursor::new(inner), ready_write: false, ready_seek: false } + } + } + + impl AsyncWrite for MaybePendingSeek { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + if self.ready_write { + self.ready_write = false; + Pin::new(&mut self.inner).poll_write(cx, buf) + } else { + self.ready_write = true; + Poll::Pending + } + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_close(cx) + } + } + + impl AsyncSeek for MaybePendingSeek { + fn poll_seek( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + pos: SeekFrom, + ) -> Poll> { + if self.ready_seek { + self.ready_seek = false; + Pin::new(&mut self.inner).poll_seek(cx, pos) + } else { + self.ready_seek = true; + Poll::Pending + } + } + } + + // FIXME: when https://github.com/rust-lang/futures-rs/issues/1510 fixed, + // use `Vec::new` instead of `vec![0; 8]`. + let mut w = BufWriter::with_capacity(3, MaybePendingSeek::new(vec![0; 8])); + run(w.write_all(&[0, 1, 2, 3, 4, 5])).unwrap(); + run(w.write_all(&[6, 7])).unwrap(); + assert_eq!(run(w.seek(SeekFrom::Current(0))).ok(), Some(8)); + assert_eq!(&w.get_ref().inner.get_ref()[..], &[0, 1, 2, 3, 4, 5, 6, 7][..]); + assert_eq!(run(w.seek(SeekFrom::Start(2))).ok(), Some(2)); + run(w.write_all(&[8, 9])).unwrap(); + run(w.flush()).unwrap(); + assert_eq!(&w.into_inner().inner.into_inner()[..], &[0, 1, 8, 9, 4, 5, 6, 7]); +} diff --git a/vendor/futures/tests/io_cursor.rs b/vendor/futures/tests/io_cursor.rs new file mode 100644 index 00000000..435ea5a1 --- /dev/null +++ b/vendor/futures/tests/io_cursor.rs @@ -0,0 +1,30 @@ +use assert_matches::assert_matches; +use futures::executor::block_on; +use futures::future::lazy; +use futures::io::{AsyncWrite, Cursor}; +use futures::task::Poll; +use std::pin::Pin; + +#[test] +fn cursor_asyncwrite_vec() { + let mut cursor = Cursor::new(vec![0; 5]); + block_on(lazy(|cx| { + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[1, 2]), Poll::Ready(Ok(2))); + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[3, 4]), Poll::Ready(Ok(2))); + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[5, 6]), Poll::Ready(Ok(2))); + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[6, 7]), Poll::Ready(Ok(2))); + })); + assert_eq!(cursor.into_inner(), [1, 2, 3, 4, 5, 6, 6, 7]); +} + +#[test] +fn cursor_asyncwrite_box() { + let mut cursor = Cursor::new(vec![0; 5].into_boxed_slice()); + block_on(lazy(|cx| { + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[1, 2]), Poll::Ready(Ok(2))); + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[3, 4]), Poll::Ready(Ok(2))); + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[5, 6]), Poll::Ready(Ok(1))); + assert_matches!(Pin::new(&mut cursor).poll_write(cx, &[6, 7]), Poll::Ready(Ok(0))); + })); + assert_eq!(&*cursor.into_inner(), [1, 2, 3, 4, 5]); +} diff --git a/vendor/futures/tests/io_line_writer.rs b/vendor/futures/tests/io_line_writer.rs new file mode 100644 index 00000000..b483e0ff --- /dev/null +++ b/vendor/futures/tests/io_line_writer.rs @@ -0,0 +1,73 @@ +use 
futures::executor::block_on; +use futures::io::{AsyncWriteExt, LineWriter}; +use std::io; + +#[test] +fn line_writer() { + let mut writer = LineWriter::new(Vec::new()); + + block_on(writer.write(&[0])).unwrap(); + assert_eq!(*writer.get_ref(), []); + + block_on(writer.write(&[1])).unwrap(); + assert_eq!(*writer.get_ref(), []); + + block_on(writer.flush()).unwrap(); + assert_eq!(*writer.get_ref(), [0, 1]); + + block_on(writer.write(&[0, b'\n', 1, b'\n', 2])).unwrap(); + assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n']); + + block_on(writer.flush()).unwrap(); + assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2]); + + block_on(writer.write(&[3, b'\n'])).unwrap(); + assert_eq!(*writer.get_ref(), [0, 1, 0, b'\n', 1, b'\n', 2, 3, b'\n']); +} + +#[test] +fn line_vectored() { + let mut line_writer = LineWriter::new(Vec::new()); + assert_eq!( + block_on(line_writer.write_vectored(&[ + io::IoSlice::new(&[]), + io::IoSlice::new(b"\n"), + io::IoSlice::new(&[]), + io::IoSlice::new(b"a"), + ])) + .unwrap(), + 2 + ); + assert_eq!(line_writer.get_ref(), b"\n"); + + assert_eq!( + block_on(line_writer.write_vectored(&[ + io::IoSlice::new(&[]), + io::IoSlice::new(b"b"), + io::IoSlice::new(&[]), + io::IoSlice::new(b"a"), + io::IoSlice::new(&[]), + io::IoSlice::new(b"c"), + ])) + .unwrap(), + 3 + ); + assert_eq!(line_writer.get_ref(), b"\n"); + block_on(line_writer.flush()).unwrap(); + assert_eq!(line_writer.get_ref(), b"\nabac"); + assert_eq!(block_on(line_writer.write_vectored(&[])).unwrap(), 0); + + assert_eq!( + block_on(line_writer.write_vectored(&[ + io::IoSlice::new(&[]), + io::IoSlice::new(&[]), + io::IoSlice::new(&[]), + io::IoSlice::new(&[]), + ])) + .unwrap(), + 0 + ); + + assert_eq!(block_on(line_writer.write_vectored(&[io::IoSlice::new(b"a\nb")])).unwrap(), 3); + assert_eq!(line_writer.get_ref(), b"\nabaca\nb"); +} diff --git a/vendor/futures/tests/io_lines.rs b/vendor/futures/tests/io_lines.rs new file mode 100644 index 00000000..5ce01a69 --- /dev/null +++ b/vendor/futures/tests/io_lines.rs @@ -0,0 +1,60 @@ +use futures::executor::block_on; +use futures::future::{Future, FutureExt}; +use futures::io::{AsyncBufReadExt, Cursor}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use futures::task::Poll; +use futures_test::io::AsyncReadTestExt; +use futures_test::task::noop_context; + +fn run(mut f: F) -> F::Output { + let mut cx = noop_context(); + loop { + if let Poll::Ready(x) = f.poll_unpin(&mut cx) { + return x; + } + } +} + +macro_rules! block_on_next { + ($expr:expr) => { + block_on($expr.next()).unwrap().unwrap() + }; +} + +macro_rules! 
run_next { + ($expr:expr) => { + run($expr.next()).unwrap().unwrap() + }; +} + +#[test] +fn lines() { + let buf = Cursor::new(&b"12\r"[..]); + let mut s = buf.lines(); + assert_eq!(block_on_next!(s), "12\r".to_string()); + assert!(block_on(s.next()).is_none()); + + let buf = Cursor::new(&b"12\r\n\n"[..]); + let mut s = buf.lines(); + assert_eq!(block_on_next!(s), "12".to_string()); + assert_eq!(block_on_next!(s), "".to_string()); + assert!(block_on(s.next()).is_none()); +} + +#[test] +fn maybe_pending() { + let buf = + stream::iter(vec![&b"12"[..], &b"\r"[..]]).map(Ok).into_async_read().interleave_pending(); + let mut s = buf.lines(); + assert_eq!(run_next!(s), "12\r".to_string()); + assert!(run(s.next()).is_none()); + + let buf = stream::iter(vec![&b"12"[..], &b"\r\n"[..], &b"\n"[..]]) + .map(Ok) + .into_async_read() + .interleave_pending(); + let mut s = buf.lines(); + assert_eq!(run_next!(s), "12".to_string()); + assert_eq!(run_next!(s), "".to_string()); + assert!(run(s.next()).is_none()); +} diff --git a/vendor/futures/tests/io_read.rs b/vendor/futures/tests/io_read.rs new file mode 100644 index 00000000..d39a6ea7 --- /dev/null +++ b/vendor/futures/tests/io_read.rs @@ -0,0 +1,64 @@ +use futures::io::AsyncRead; +use futures_test::task::panic_context; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +struct MockReader { + fun: Box Poll>>, +} + +impl MockReader { + fn new(fun: impl FnMut(&mut [u8]) -> Poll> + 'static) -> Self { + Self { fun: Box::new(fun) } + } +} + +impl AsyncRead for MockReader { + fn poll_read( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &mut [u8], + ) -> Poll> { + (self.get_mut().fun)(buf) + } +} + +/// Verifies that the default implementation of `poll_read_vectored` +/// calls `poll_read` with an empty slice if no buffers are provided. +#[test] +fn read_vectored_no_buffers() { + let mut reader = MockReader::new(|buf| { + assert_eq!(buf, b""); + Err(io::ErrorKind::BrokenPipe.into()).into() + }); + let cx = &mut panic_context(); + let bufs = &mut []; + + let res = Pin::new(&mut reader).poll_read_vectored(cx, bufs); + let res = res.map_err(|e| e.kind()); + assert_eq!(res, Poll::Ready(Err(io::ErrorKind::BrokenPipe))) +} + +/// Verifies that the default implementation of `poll_read_vectored` +/// calls `poll_read` with the first non-empty buffer. 
+#[test] +fn read_vectored_first_non_empty() { + let mut reader = MockReader::new(|buf| { + assert_eq!(buf.len(), 4); + buf.copy_from_slice(b"four"); + Poll::Ready(Ok(4)) + }); + let cx = &mut panic_context(); + let mut buf = [0; 4]; + let bufs = &mut [ + io::IoSliceMut::new(&mut []), + io::IoSliceMut::new(&mut []), + io::IoSliceMut::new(&mut buf), + ]; + + let res = Pin::new(&mut reader).poll_read_vectored(cx, bufs); + let res = res.map_err(|e| e.kind()); + assert_eq!(res, Poll::Ready(Ok(4))); + assert_eq!(buf, b"four"[..]); +} diff --git a/vendor/futures/tests/io_read_exact.rs b/vendor/futures/tests/io_read_exact.rs new file mode 100644 index 00000000..6582e50b --- /dev/null +++ b/vendor/futures/tests/io_read_exact.rs @@ -0,0 +1,17 @@ +use futures::executor::block_on; +use futures::io::AsyncReadExt; + +#[test] +fn read_exact() { + let mut reader: &[u8] = &[1, 2, 3, 4, 5]; + let mut out = [0u8; 3]; + + let res = block_on(reader.read_exact(&mut out)); // read 3 bytes out + assert!(res.is_ok()); + assert_eq!(out, [1, 2, 3]); + assert_eq!(reader.len(), 2); + + let res = block_on(reader.read_exact(&mut out)); // read another 3 bytes, but only 2 bytes left + assert!(res.is_err()); + assert_eq!(reader.len(), 0); +} diff --git a/vendor/futures/tests/io_read_line.rs b/vendor/futures/tests/io_read_line.rs new file mode 100644 index 00000000..88a87792 --- /dev/null +++ b/vendor/futures/tests/io_read_line.rs @@ -0,0 +1,58 @@ +use futures::executor::block_on; +use futures::future::{Future, FutureExt}; +use futures::io::{AsyncBufReadExt, Cursor}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use futures::task::Poll; +use futures_test::io::AsyncReadTestExt; +use futures_test::task::noop_context; + +fn run(mut f: F) -> F::Output { + let mut cx = noop_context(); + loop { + if let Poll::Ready(x) = f.poll_unpin(&mut cx) { + return x; + } + } +} + +#[test] +fn read_line() { + let mut buf = Cursor::new(b"12"); + let mut v = String::new(); + assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 2); + assert_eq!(v, "12"); + + let mut buf = Cursor::new(b"12\n\n"); + let mut v = String::new(); + assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 3); + assert_eq!(v, "12\n"); + v.clear(); + assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 1); + assert_eq!(v, "\n"); + v.clear(); + assert_eq!(block_on(buf.read_line(&mut v)).unwrap(), 0); + assert_eq!(v, ""); +} + +#[test] +fn maybe_pending() { + let mut buf = b"12".interleave_pending(); + let mut v = String::new(); + assert_eq!(run(buf.read_line(&mut v)).unwrap(), 2); + assert_eq!(v, "12"); + + let mut buf = + stream::iter(vec![&b"12"[..], &b"\n\n"[..]]).map(Ok).into_async_read().interleave_pending(); + let mut v = String::new(); + assert_eq!(run(buf.read_line(&mut v)).unwrap(), 3); + assert_eq!(v, "12\n"); + v.clear(); + assert_eq!(run(buf.read_line(&mut v)).unwrap(), 1); + assert_eq!(v, "\n"); + v.clear(); + assert_eq!(run(buf.read_line(&mut v)).unwrap(), 0); + assert_eq!(v, ""); + v.clear(); + assert_eq!(run(buf.read_line(&mut v)).unwrap(), 0); + assert_eq!(v, ""); +} diff --git a/vendor/futures/tests/io_read_to_end.rs b/vendor/futures/tests/io_read_to_end.rs new file mode 100644 index 00000000..7122511f --- /dev/null +++ b/vendor/futures/tests/io_read_to_end.rs @@ -0,0 +1,65 @@ +use futures::{ + executor::block_on, + io::{self, AsyncRead, AsyncReadExt}, + task::{Context, Poll}, +}; +use std::pin::Pin; + +#[test] +#[should_panic(expected = "assertion failed: n <= buf.len()")] +fn issue2310() { + struct MyRead { + first: bool, + } + + impl MyRead { 
+ fn new() -> Self { + MyRead { first: false } + } + } + + impl AsyncRead for MyRead { + fn poll_read( + mut self: Pin<&mut Self>, + _cx: &mut Context, + _buf: &mut [u8], + ) -> Poll> { + Poll::Ready(if !self.first { + self.first = true; + // First iteration: return more than the buffer size + Ok(64) + } else { + // Second iteration: indicate that we are done + Ok(0) + }) + } + } + + struct VecWrapper { + inner: Vec, + } + + impl VecWrapper { + fn new() -> Self { + VecWrapper { inner: Vec::new() } + } + } + + impl Drop for VecWrapper { + fn drop(&mut self) { + // Observe uninitialized bytes + println!("{:?}", &self.inner); + // Overwrite heap contents + for b in &mut self.inner { + *b = 0x90; + } + } + } + + block_on(async { + let mut vec = VecWrapper::new(); + let mut read = MyRead::new(); + + read.read_to_end(&mut vec.inner).await.unwrap(); + }) +} diff --git a/vendor/futures/tests/io_read_to_string.rs b/vendor/futures/tests/io_read_to_string.rs new file mode 100644 index 00000000..ae6aaa21 --- /dev/null +++ b/vendor/futures/tests/io_read_to_string.rs @@ -0,0 +1,44 @@ +use futures::executor::block_on; +use futures::future::{Future, FutureExt}; +use futures::io::{AsyncReadExt, Cursor}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use futures::task::Poll; +use futures_test::io::AsyncReadTestExt; +use futures_test::task::noop_context; + +#[test] +fn read_to_string() { + let mut c = Cursor::new(&b""[..]); + let mut v = String::new(); + assert_eq!(block_on(c.read_to_string(&mut v)).unwrap(), 0); + assert_eq!(v, ""); + + let mut c = Cursor::new(&b"1"[..]); + let mut v = String::new(); + assert_eq!(block_on(c.read_to_string(&mut v)).unwrap(), 1); + assert_eq!(v, "1"); + + let mut c = Cursor::new(&b"\xff"[..]); + let mut v = String::new(); + assert!(block_on(c.read_to_string(&mut v)).is_err()); +} + +#[test] +fn interleave_pending() { + fn run(mut f: F) -> F::Output { + let mut cx = noop_context(); + loop { + if let Poll::Ready(x) = f.poll_unpin(&mut cx) { + return x; + } + } + } + let mut buf = stream::iter(vec![&b"12"[..], &b"33"[..], &b"3"[..]]) + .map(Ok) + .into_async_read() + .interleave_pending(); + + let mut v = String::new(); + assert_eq!(run(buf.read_to_string(&mut v)).unwrap(), 5); + assert_eq!(v, "12333"); +} diff --git a/vendor/futures/tests/io_read_until.rs b/vendor/futures/tests/io_read_until.rs new file mode 100644 index 00000000..71f857f4 --- /dev/null +++ b/vendor/futures/tests/io_read_until.rs @@ -0,0 +1,60 @@ +use futures::executor::block_on; +use futures::future::{Future, FutureExt}; +use futures::io::{AsyncBufReadExt, Cursor}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use futures::task::Poll; +use futures_test::io::AsyncReadTestExt; +use futures_test::task::noop_context; + +fn run(mut f: F) -> F::Output { + let mut cx = noop_context(); + loop { + if let Poll::Ready(x) = f.poll_unpin(&mut cx) { + return x; + } + } +} + +#[test] +fn read_until() { + let mut buf = Cursor::new(b"12"); + let mut v = Vec::new(); + assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 2); + assert_eq!(v, b"12"); + + let mut buf = Cursor::new(b"1233"); + let mut v = Vec::new(); + assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 3); + assert_eq!(v, b"123"); + v.truncate(0); + assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 1); + assert_eq!(v, b"3"); + v.truncate(0); + assert_eq!(block_on(buf.read_until(b'3', &mut v)).unwrap(), 0); + assert_eq!(v, []); +} + +#[test] +fn maybe_pending() { + let mut buf = b"12".interleave_pending(); + let mut v = 
Vec::new(); + assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 2); + assert_eq!(v, b"12"); + + let mut buf = stream::iter(vec![&b"12"[..], &b"33"[..], &b"3"[..]]) + .map(Ok) + .into_async_read() + .interleave_pending(); + let mut v = Vec::new(); + assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 3); + assert_eq!(v, b"123"); + v.clear(); + assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 1); + assert_eq!(v, b"3"); + v.clear(); + assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 1); + assert_eq!(v, b"3"); + v.clear(); + assert_eq!(run(buf.read_until(b'3', &mut v)).unwrap(), 0); + assert_eq!(v, []); +} diff --git a/vendor/futures/tests/io_window.rs b/vendor/futures/tests/io_window.rs new file mode 100644 index 00000000..8f0d48bc --- /dev/null +++ b/vendor/futures/tests/io_window.rs @@ -0,0 +1,30 @@ +#![allow(clippy::reversed_empty_ranges)] // This is intentional. + +use futures::io::Window; + +#[test] +fn set() { + let mut buffer = Window::new(&[1, 2, 3]); + buffer.set(..3); + assert_eq!(buffer.as_ref(), &[1, 2, 3]); + buffer.set(3..3); + assert_eq!(buffer.as_ref(), &[]); + buffer.set(3..=2); // == 3..3 + assert_eq!(buffer.as_ref(), &[]); + buffer.set(0..2); + assert_eq!(buffer.as_ref(), &[1, 2]); +} + +#[test] +#[should_panic] +fn set_panic_out_of_bounds() { + let mut buffer = Window::new(&[1, 2, 3]); + buffer.set(2..4); +} + +#[test] +#[should_panic] +fn set_panic_start_is_greater_than_end() { + let mut buffer = Window::new(&[1, 2, 3]); + buffer.set(3..2); +} diff --git a/vendor/futures/tests/io_write.rs b/vendor/futures/tests/io_write.rs new file mode 100644 index 00000000..6af27553 --- /dev/null +++ b/vendor/futures/tests/io_write.rs @@ -0,0 +1,65 @@ +use futures::io::AsyncWrite; +use futures_test::task::panic_context; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; + +struct MockWriter { + fun: Box Poll>>, +} + +impl MockWriter { + fn new(fun: impl FnMut(&[u8]) -> Poll> + 'static) -> Self { + Self { fun: Box::new(fun) } + } +} + +impl AsyncWrite for MockWriter { + fn poll_write( + self: Pin<&mut Self>, + _cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + (self.get_mut().fun)(buf) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + panic!() + } + + fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + panic!() + } +} + +/// Verifies that the default implementation of `poll_write_vectored` +/// calls `poll_write` with an empty slice if no buffers are provided. +#[test] +fn write_vectored_no_buffers() { + let mut writer = MockWriter::new(|buf| { + assert_eq!(buf, b""); + Err(io::ErrorKind::BrokenPipe.into()).into() + }); + let cx = &mut panic_context(); + let bufs = &mut []; + + let res = Pin::new(&mut writer).poll_write_vectored(cx, bufs); + let res = res.map_err(|e| e.kind()); + assert_eq!(res, Poll::Ready(Err(io::ErrorKind::BrokenPipe))) +} + +/// Verifies that the default implementation of `poll_write_vectored` +/// calls `poll_write` with the first non-empty buffer. 
+#[test] +fn write_vectored_first_non_empty() { + let mut writer = MockWriter::new(|buf| { + assert_eq!(buf, b"four"); + Poll::Ready(Ok(4)) + }); + let cx = &mut panic_context(); + let bufs = &mut [io::IoSlice::new(&[]), io::IoSlice::new(&[]), io::IoSlice::new(b"four")]; + + let res = Pin::new(&mut writer).poll_write_vectored(cx, bufs); + let res = res.map_err(|e| e.kind()); + assert_eq!(res, Poll::Ready(Ok(4))); +} diff --git a/vendor/futures/tests/lock_mutex.rs b/vendor/futures/tests/lock_mutex.rs new file mode 100644 index 00000000..c15e76bd --- /dev/null +++ b/vendor/futures/tests/lock_mutex.rs @@ -0,0 +1,69 @@ +use futures::channel::mpsc; +use futures::executor::{block_on, ThreadPool}; +use futures::future::{ready, FutureExt}; +use futures::lock::Mutex; +use futures::stream::StreamExt; +use futures::task::{Context, SpawnExt}; +use futures_test::future::FutureTestExt; +use futures_test::task::{new_count_waker, panic_context}; +use std::sync::Arc; + +#[test] +fn mutex_acquire_uncontested() { + let mutex = Mutex::new(()); + for _ in 0..10 { + assert!(mutex.lock().poll_unpin(&mut panic_context()).is_ready()); + } +} + +#[test] +fn mutex_wakes_waiters() { + let mutex = Mutex::new(()); + let (waker, counter) = new_count_waker(); + let lock = mutex.lock().poll_unpin(&mut panic_context()); + assert!(lock.is_ready()); + + let mut cx = Context::from_waker(&waker); + let mut waiter = mutex.lock(); + assert!(waiter.poll_unpin(&mut cx).is_pending()); + assert_eq!(counter, 0); + + drop(lock); + + assert_eq!(counter, 1); + assert!(waiter.poll_unpin(&mut panic_context()).is_ready()); +} + +#[test] +fn mutex_contested() { + { + let (tx, mut rx) = mpsc::unbounded(); + let pool = ThreadPool::builder().pool_size(16).create().unwrap(); + + let tx = Arc::new(tx); + let mutex = Arc::new(Mutex::new(0)); + + let num_tasks = 1000; + for _ in 0..num_tasks { + let tx = tx.clone(); + let mutex = mutex.clone(); + pool.spawn(async move { + let mut lock = mutex.lock().await; + ready(()).pending_once().await; + *lock += 1; + tx.unbounded_send(()).unwrap(); + drop(lock); + }) + .unwrap(); + } + + block_on(async { + for _ in 0..num_tasks { + rx.next().await.unwrap(); + } + let lock = mutex.lock().await; + assert_eq!(num_tasks, *lock); + }); + } + std::thread::sleep(std::time::Duration::from_millis(500)); // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371 +} diff --git a/vendor/futures/tests/macro_comma_support.rs b/vendor/futures/tests/macro_comma_support.rs new file mode 100644 index 00000000..85871e98 --- /dev/null +++ b/vendor/futures/tests/macro_comma_support.rs @@ -0,0 +1,43 @@ +use futures::{ + executor::block_on, + future::{self, FutureExt}, + join, ready, + task::Poll, + try_join, +}; + +#[test] +fn ready() { + block_on(future::poll_fn(|_| { + ready!(Poll::Ready(()),); + Poll::Ready(()) + })) +} + +#[test] +fn poll() { + use futures::poll; + + block_on(async { + let _ = poll!(async {}.boxed(),); + }) +} + +#[test] +fn join() { + block_on(async { + let future1 = async { 1 }; + let future2 = async { 2 }; + join!(future1, future2,); + }) +} + +#[test] +fn try_join() { + block_on(async { + let future1 = async { 1 }.never_error(); + let future2 = async { 2 }.never_error(); + try_join!(future1, future2,) + }) + .unwrap(); +} diff --git a/vendor/futures/tests/object_safety.rs b/vendor/futures/tests/object_safety.rs new file mode 100644 index 00000000..30c892f5 --- /dev/null +++ b/vendor/futures/tests/object_safety.rs @@ -0,0 +1,49 @@ +fn assert_is_object_safe() {} + +#[test] +fn 
future() { + // `FutureExt`, `TryFutureExt` and `UnsafeFutureObj` are not object safe. + use futures::future::{FusedFuture, Future, TryFuture}; + + assert_is_object_safe::<&dyn Future>(); + assert_is_object_safe::<&dyn FusedFuture>(); + assert_is_object_safe::<&dyn TryFuture>>(); +} + +#[test] +fn stream() { + // `StreamExt` and `TryStreamExt` are not object safe. + use futures::stream::{FusedStream, Stream, TryStream}; + + assert_is_object_safe::<&dyn Stream>(); + assert_is_object_safe::<&dyn FusedStream>(); + assert_is_object_safe::<&dyn TryStream>>(); +} + +#[test] +fn sink() { + // `SinkExt` is not object safe. + use futures::sink::Sink; + + assert_is_object_safe::<&dyn Sink<(), Error = ()>>(); +} + +#[test] +fn io() { + // `AsyncReadExt`, `AsyncWriteExt`, `AsyncSeekExt` and `AsyncBufReadExt` are not object safe. + use futures::io::{AsyncBufRead, AsyncRead, AsyncSeek, AsyncWrite}; + + assert_is_object_safe::<&dyn AsyncRead>(); + assert_is_object_safe::<&dyn AsyncWrite>(); + assert_is_object_safe::<&dyn AsyncSeek>(); + assert_is_object_safe::<&dyn AsyncBufRead>(); +} + +#[test] +fn task() { + // `ArcWake`, `SpawnExt` and `LocalSpawnExt` are not object safe. + use futures::task::{LocalSpawn, Spawn}; + + assert_is_object_safe::<&dyn Spawn>(); + assert_is_object_safe::<&dyn LocalSpawn>(); +} diff --git a/vendor/futures/tests/oneshot.rs b/vendor/futures/tests/oneshot.rs new file mode 100644 index 00000000..34b78a33 --- /dev/null +++ b/vendor/futures/tests/oneshot.rs @@ -0,0 +1,78 @@ +use futures::channel::oneshot; +use futures::future::{FutureExt, TryFutureExt}; +use futures_test::future::FutureTestExt; +use std::sync::mpsc; +use std::thread; + +#[test] +fn oneshot_send1() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = mpsc::channel(); + + let t = thread::spawn(|| tx1.send(1).unwrap()); + rx1.map_ok(move |x| tx2.send(x)).run_in_background(); + assert_eq!(1, rx2.recv().unwrap()); + t.join().unwrap(); +} + +#[test] +fn oneshot_send2() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = mpsc::channel(); + + thread::spawn(|| tx1.send(1).unwrap()).join().unwrap(); + rx1.map_ok(move |x| tx2.send(x).unwrap()).run_in_background(); + assert_eq!(1, rx2.recv().unwrap()); +} + +#[test] +fn oneshot_send3() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = mpsc::channel(); + + rx1.map_ok(move |x| tx2.send(x).unwrap()).run_in_background(); + thread::spawn(|| tx1.send(1).unwrap()).join().unwrap(); + assert_eq!(1, rx2.recv().unwrap()); +} + +#[test] +fn oneshot_drop_tx1() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = mpsc::channel(); + + drop(tx1); + rx1.map(move |result| tx2.send(result).unwrap()).run_in_background(); + + assert_eq!(Err(oneshot::Canceled), rx2.recv().unwrap()); +} + +#[test] +fn oneshot_drop_tx2() { + let (tx1, rx1) = oneshot::channel::(); + let (tx2, rx2) = mpsc::channel(); + + let t = thread::spawn(|| drop(tx1)); + rx1.map(move |result| tx2.send(result).unwrap()).run_in_background(); + t.join().unwrap(); + + assert_eq!(Err(oneshot::Canceled), rx2.recv().unwrap()); +} + +#[test] +fn oneshot_drop_rx() { + let (tx, rx) = oneshot::channel::(); + drop(rx); + assert_eq!(Err(2), tx.send(2)); +} + +#[test] +fn oneshot_debug() { + let (tx, rx) = oneshot::channel::(); + assert_eq!(format!("{:?}", tx), "Sender { complete: false }"); + assert_eq!(format!("{:?}", rx), "Receiver { complete: false }"); + drop(rx); + assert_eq!(format!("{:?}", tx), "Sender { complete: true }"); + let (tx, rx) = oneshot::channel::(); + drop(tx); + 
assert_eq!(format!("{:?}", rx), "Receiver { complete: true }"); +} diff --git a/vendor/futures/tests/ready_queue.rs b/vendor/futures/tests/ready_queue.rs new file mode 100644 index 00000000..c19d6259 --- /dev/null +++ b/vendor/futures/tests/ready_queue.rs @@ -0,0 +1,148 @@ +use futures::channel::oneshot; +use futures::executor::{block_on, block_on_stream}; +use futures::future; +use futures::stream::{FuturesUnordered, StreamExt}; +use futures::task::Poll; +use futures_test::task::noop_context; +use std::panic::{self, AssertUnwindSafe}; +use std::sync::{Arc, Barrier}; +use std::thread; + +#[test] +fn basic_usage() { + block_on(future::lazy(move |cx| { + let mut queue = FuturesUnordered::new(); + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + let (tx3, rx3) = oneshot::channel(); + + queue.push(rx1); + queue.push(rx2); + queue.push(rx3); + + assert!(!queue.poll_next_unpin(cx).is_ready()); + + tx2.send("hello").unwrap(); + + assert_eq!(Poll::Ready(Some(Ok("hello"))), queue.poll_next_unpin(cx)); + assert!(!queue.poll_next_unpin(cx).is_ready()); + + tx1.send("world").unwrap(); + tx3.send("world2").unwrap(); + + assert_eq!(Poll::Ready(Some(Ok("world"))), queue.poll_next_unpin(cx)); + assert_eq!(Poll::Ready(Some(Ok("world2"))), queue.poll_next_unpin(cx)); + assert_eq!(Poll::Ready(None), queue.poll_next_unpin(cx)); + })); +} + +#[test] +fn resolving_errors() { + block_on(future::lazy(move |cx| { + let mut queue = FuturesUnordered::new(); + let (tx1, rx1) = oneshot::channel(); + let (tx2, rx2) = oneshot::channel(); + let (tx3, rx3) = oneshot::channel(); + + queue.push(rx1); + queue.push(rx2); + queue.push(rx3); + + assert!(!queue.poll_next_unpin(cx).is_ready()); + + drop(tx2); + + assert_eq!(Poll::Ready(Some(Err(oneshot::Canceled))), queue.poll_next_unpin(cx)); + assert!(!queue.poll_next_unpin(cx).is_ready()); + + drop(tx1); + tx3.send("world2").unwrap(); + + assert_eq!(Poll::Ready(Some(Err(oneshot::Canceled))), queue.poll_next_unpin(cx)); + assert_eq!(Poll::Ready(Some(Ok("world2"))), queue.poll_next_unpin(cx)); + assert_eq!(Poll::Ready(None), queue.poll_next_unpin(cx)); + })); +} + +#[test] +fn dropping_ready_queue() { + block_on(future::lazy(move |_| { + let queue = FuturesUnordered::new(); + let (mut tx1, rx1) = oneshot::channel::<()>(); + let (mut tx2, rx2) = oneshot::channel::<()>(); + let (mut tx3, rx3) = oneshot::channel::<()>(); + + queue.push(rx1); + queue.push(rx2); + queue.push(rx3); + + { + let cx = &mut noop_context(); + assert!(!tx1.poll_canceled(cx).is_ready()); + assert!(!tx2.poll_canceled(cx).is_ready()); + assert!(!tx3.poll_canceled(cx).is_ready()); + + drop(queue); + + assert!(tx1.poll_canceled(cx).is_ready()); + assert!(tx2.poll_canceled(cx).is_ready()); + assert!(tx3.poll_canceled(cx).is_ready()); + } + })); +} + +#[test] +fn stress() { + const ITER: usize = if cfg!(miri) { 30 } else { 300 }; + + for i in 0..ITER { + let n = (i % 10) + 1; + + let mut queue = FuturesUnordered::new(); + + for _ in 0..5 { + let barrier = Arc::new(Barrier::new(n + 1)); + + for num in 0..n { + let barrier = barrier.clone(); + let (tx, rx) = oneshot::channel(); + + queue.push(rx); + + thread::spawn(move || { + barrier.wait(); + tx.send(num).unwrap(); + }); + } + + barrier.wait(); + + let mut sync = block_on_stream(queue); + + let mut rx: Vec<_> = (&mut sync).take(n).map(|res| res.unwrap()).collect(); + + assert_eq!(rx.len(), n); + + rx.sort_unstable(); + + for (i, x) in rx.into_iter().enumerate() { + assert_eq!(i, x); + } + + queue = sync.into_inner(); + } + } +} + 
+#[test] +fn panicking_future_dropped() { + block_on(future::lazy(move |cx| { + let mut queue = FuturesUnordered::new(); + queue.push(future::poll_fn(|_| -> Poll> { panic!() })); + + let r = panic::catch_unwind(AssertUnwindSafe(|| queue.poll_next_unpin(cx))); + assert!(r.is_err()); + assert!(queue.is_empty()); + assert_eq!(Poll::Ready(None), queue.poll_next_unpin(cx)); + })); +} diff --git a/vendor/futures/tests/recurse.rs b/vendor/futures/tests/recurse.rs new file mode 100644 index 00000000..d81753c9 --- /dev/null +++ b/vendor/futures/tests/recurse.rs @@ -0,0 +1,25 @@ +use futures::executor::block_on; +use futures::future::{self, BoxFuture, FutureExt}; +use std::sync::mpsc; +use std::thread; + +#[test] +fn lots() { + #[cfg(not(futures_sanitizer))] + const N: i32 = 1_000; + #[cfg(futures_sanitizer)] // If N is many, asan reports stack-overflow: https://gist.github.com/taiki-e/099446d21cbec69d4acbacf7a9646136 + const N: i32 = 100; + + fn do_it(input: (i32, i32)) -> BoxFuture<'static, i32> { + let (n, x) = input; + if n == 0 { + future::ready(x).boxed() + } else { + future::ready((n - 1, x + n)).then(do_it).boxed() + } + } + + let (tx, rx) = mpsc::channel(); + thread::spawn(|| block_on(do_it((N, 0)).map(move |x| tx.send(x).unwrap()))); + assert_eq!((0..=N).sum::(), rx.recv().unwrap()); +} diff --git a/vendor/futures/tests/sink.rs b/vendor/futures/tests/sink.rs new file mode 100644 index 00000000..5b691e74 --- /dev/null +++ b/vendor/futures/tests/sink.rs @@ -0,0 +1,554 @@ +use futures::channel::{mpsc, oneshot}; +use futures::executor::block_on; +use futures::future::{self, poll_fn, Future, FutureExt, TryFutureExt}; +use futures::never::Never; +use futures::ready; +use futures::sink::{self, Sink, SinkErrInto, SinkExt}; +use futures::stream::{self, Stream, StreamExt}; +use futures::task::{self, ArcWake, Context, Poll, Waker}; +use futures_test::task::panic_context; +use std::cell::{Cell, RefCell}; +use std::collections::VecDeque; +use std::fmt; +use std::mem; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; + +fn sassert_next(s: &mut S, item: S::Item) +where + S: Stream + Unpin, + S::Item: Eq + fmt::Debug, +{ + match s.poll_next_unpin(&mut panic_context()) { + Poll::Ready(None) => panic!("stream is at its end"), + Poll::Ready(Some(e)) => assert_eq!(e, item), + Poll::Pending => panic!("stream wasn't ready"), + } +} + +fn unwrap(x: Poll>) -> T { + match x { + Poll::Ready(Ok(x)) => x, + Poll::Ready(Err(_)) => panic!("Poll::Ready(Err(_))"), + Poll::Pending => panic!("Poll::Pending"), + } +} + +// An Unpark struct that records unpark events for inspection +struct Flag(AtomicBool); + +impl Flag { + fn new() -> Arc { + Arc::new(Self(AtomicBool::new(false))) + } + + fn take(&self) -> bool { + self.0.swap(false, Ordering::SeqCst) + } + + fn set(&self, v: bool) { + self.0.store(v, Ordering::SeqCst) + } +} + +impl ArcWake for Flag { + fn wake_by_ref(arc_self: &Arc) { + arc_self.set(true) + } +} + +fn flag_cx(f: F) -> R +where + F: FnOnce(Arc, &mut Context<'_>) -> R, +{ + let flag = Flag::new(); + let waker = task::waker_ref(&flag); + let cx = &mut Context::from_waker(&waker); + f(flag.clone(), cx) +} + +// Sends a value on an i32 channel sink +struct StartSendFut + Unpin, Item: Unpin>(Option, Option); + +impl + Unpin, Item: Unpin> StartSendFut { + fn new(sink: S, item: Item) -> Self { + Self(Some(sink), Some(item)) + } +} + +impl + Unpin, Item: Unpin> Future for StartSendFut { + type Output = Result; + + fn poll(self: Pin<&mut Self>, cx: &mut 
Context<'_>) -> Poll { + let Self(inner, item) = self.get_mut(); + { + let mut inner = inner.as_mut().unwrap(); + ready!(Pin::new(&mut inner).poll_ready(cx))?; + Pin::new(&mut inner).start_send(item.take().unwrap())?; + } + Poll::Ready(Ok(inner.take().unwrap())) + } +} + +// Immediately accepts all requests to start pushing, but completion is managed +// by manually flushing +struct ManualFlush { + data: Vec, + waiting_tasks: Vec, +} + +impl Sink> for ManualFlush { + type Error = (); + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(mut self: Pin<&mut Self>, item: Option) -> Result<(), Self::Error> { + if let Some(item) = item { + self.data.push(item); + } else { + self.force_flush(); + } + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.data.is_empty() { + Poll::Ready(Ok(())) + } else { + self.waiting_tasks.push(cx.waker().clone()); + Poll::Pending + } + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.poll_flush(cx) + } +} + +impl ManualFlush { + fn new() -> Self { + Self { data: Vec::new(), waiting_tasks: Vec::new() } + } + + fn force_flush(&mut self) -> Vec { + for task in self.waiting_tasks.drain(..) { + task.wake() + } + mem::take(&mut self.data) + } +} + +struct ManualAllow { + data: Vec, + allow: Rc, +} + +struct Allow { + flag: Cell, + tasks: RefCell>, +} + +impl Allow { + fn new() -> Self { + Self { flag: Cell::new(false), tasks: RefCell::new(Vec::new()) } + } + + fn check(&self, cx: &mut Context<'_>) -> bool { + if self.flag.get() { + true + } else { + self.tasks.borrow_mut().push(cx.waker().clone()); + false + } + } + + fn start(&self) { + self.flag.set(true); + let mut tasks = self.tasks.borrow_mut(); + for task in tasks.drain(..) 
{ + task.wake(); + } + } +} + +impl Sink for ManualAllow { + type Error = (); + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + if self.allow.check(cx) { + Poll::Ready(Ok(())) + } else { + Poll::Pending + } + } + + fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { + self.data.push(item); + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +fn manual_allow() -> (ManualAllow, Rc) { + let allow = Rc::new(Allow::new()); + let manual_allow = ManualAllow { data: Vec::new(), allow: allow.clone() }; + (manual_allow, allow) +} + +#[test] +fn either_sink() { + let mut s = + if true { Vec::::new().left_sink() } else { VecDeque::::new().right_sink() }; + + Pin::new(&mut s).start_send(0).unwrap(); +} + +#[test] +fn vec_sink() { + let mut v = Vec::new(); + Pin::new(&mut v).start_send(0).unwrap(); + Pin::new(&mut v).start_send(1).unwrap(); + assert_eq!(v, vec![0, 1]); + block_on(v.flush()).unwrap(); + assert_eq!(v, vec![0, 1]); +} + +#[test] +fn vecdeque_sink() { + let mut deque = VecDeque::new(); + Pin::new(&mut deque).start_send(2).unwrap(); + Pin::new(&mut deque).start_send(3).unwrap(); + + assert_eq!(deque.pop_front(), Some(2)); + assert_eq!(deque.pop_front(), Some(3)); + assert_eq!(deque.pop_front(), None); +} + +#[test] +fn send() { + let mut v = Vec::new(); + + block_on(v.send(0)).unwrap(); + assert_eq!(v, vec![0]); + + block_on(v.send(1)).unwrap(); + assert_eq!(v, vec![0, 1]); + + block_on(v.send(2)).unwrap(); + assert_eq!(v, vec![0, 1, 2]); +} + +#[test] +fn send_all() { + let mut v = Vec::new(); + + block_on(v.send_all(&mut stream::iter(vec![0, 1]).map(Ok))).unwrap(); + assert_eq!(v, vec![0, 1]); + + block_on(v.send_all(&mut stream::iter(vec![2, 3]).map(Ok))).unwrap(); + assert_eq!(v, vec![0, 1, 2, 3]); + + block_on(v.send_all(&mut stream::iter(vec![4, 5]).map(Ok))).unwrap(); + assert_eq!(v, vec![0, 1, 2, 3, 4, 5]); +} + +// Test that `start_send` on an `mpsc` channel does indeed block when the +// channel is full +#[test] +fn mpsc_blocking_start_send() { + let (mut tx, mut rx) = mpsc::channel::(0); + + block_on(future::lazy(|_| { + tx.start_send(0).unwrap(); + + flag_cx(|flag, cx| { + let mut task = StartSendFut::new(tx, 1); + + assert!(task.poll_unpin(cx).is_pending()); + assert!(!flag.take()); + sassert_next(&mut rx, 0); + assert!(flag.take()); + unwrap(task.poll_unpin(cx)); + assert!(!flag.take()); + sassert_next(&mut rx, 1); + }) + })); +} + +// test `flush` by using `with` to make the first insertion into a sink block +// until a oneshot is completed +#[test] +fn with_flush() { + let (tx, rx) = oneshot::channel(); + let mut block = rx.boxed(); + let mut sink = Vec::new().with(|elem| { + mem::replace(&mut block, future::ok(()).boxed()) + .map_ok(move |()| elem + 1) + .map_err(|_| -> Never { panic!() }) + }); + + assert_eq!(Pin::new(&mut sink).start_send(0).ok(), Some(())); + + flag_cx(|flag, cx| { + let mut task = sink.flush(); + assert!(task.poll_unpin(cx).is_pending()); + tx.send(()).unwrap(); + assert!(flag.take()); + + unwrap(task.poll_unpin(cx)); + + block_on(sink.send(1)).unwrap(); + assert_eq!(sink.get_ref(), &[1, 2]); + }) +} + +// test simple use of with to change data +#[test] +fn with_as_map() { + let mut sink = Vec::new().with(|item| future::ok::(item * 2)); + block_on(sink.send(0)).unwrap(); + block_on(sink.send(1)).unwrap(); + block_on(sink.send(2)).unwrap(); + 
assert_eq!(sink.get_ref(), &[0, 2, 4]); +} + +// test simple use of with_flat_map +#[test] +fn with_flat_map() { + let mut sink = Vec::new().with_flat_map(|item| stream::iter(vec![item; item]).map(Ok)); + block_on(sink.send(0)).unwrap(); + block_on(sink.send(1)).unwrap(); + block_on(sink.send(2)).unwrap(); + block_on(sink.send(3)).unwrap(); + assert_eq!(sink.get_ref(), &[1, 2, 2, 3, 3, 3]); +} + +// Check that `with` propagates `poll_ready` to the inner sink. +// Regression test for the issue #1834. +#[test] +fn with_propagates_poll_ready() { + let (tx, mut rx) = mpsc::channel::(0); + let mut tx = tx.with(|item: i32| future::ok::(item + 10)); + + block_on(future::lazy(|_| { + flag_cx(|flag, cx| { + let mut tx = Pin::new(&mut tx); + + // Should be ready for the first item. + assert_eq!(tx.as_mut().poll_ready(cx), Poll::Ready(Ok(()))); + assert_eq!(tx.as_mut().start_send(0), Ok(())); + + // Should be ready for the second item only after the first one is received. + assert_eq!(tx.as_mut().poll_ready(cx), Poll::Pending); + assert!(!flag.take()); + sassert_next(&mut rx, 10); + assert!(flag.take()); + assert_eq!(tx.as_mut().poll_ready(cx), Poll::Ready(Ok(()))); + assert_eq!(tx.as_mut().start_send(1), Ok(())); + }) + })); +} + +// test that the `with` sink doesn't require the underlying sink to flush, +// but doesn't claim to be flushed until the underlying sink is +#[test] +fn with_flush_propagate() { + let mut sink = ManualFlush::new().with(future::ok::, ()>); + flag_cx(|flag, cx| { + unwrap(Pin::new(&mut sink).poll_ready(cx)); + Pin::new(&mut sink).start_send(Some(0)).unwrap(); + unwrap(Pin::new(&mut sink).poll_ready(cx)); + Pin::new(&mut sink).start_send(Some(1)).unwrap(); + + { + let mut task = sink.flush(); + assert!(task.poll_unpin(cx).is_pending()); + assert!(!flag.take()); + } + assert_eq!(sink.get_mut().force_flush(), vec![0, 1]); + assert!(flag.take()); + unwrap(sink.flush().poll_unpin(cx)); + }) +} + +// test that `Clone` is implemented on `with` sinks +#[test] +fn with_implements_clone() { + let (mut tx, rx) = mpsc::channel(5); + + { + let mut is_positive = tx.clone().with(|item| future::ok::(item > 0)); + + let mut is_long = + tx.clone().with(|item: &str| future::ok::(item.len() > 5)); + + block_on(is_positive.clone().send(-1)).unwrap(); + block_on(is_long.clone().send("123456")).unwrap(); + block_on(is_long.send("123")).unwrap(); + block_on(is_positive.send(1)).unwrap(); + } + + block_on(tx.send(false)).unwrap(); + + block_on(tx.close()).unwrap(); + + assert_eq!(block_on(rx.collect::>()), vec![false, true, false, true, false]); +} + +// test that a buffer is a no-nop around a sink that always accepts sends +#[test] +fn buffer_noop() { + let mut sink = Vec::new().buffer(0); + block_on(sink.send(0)).unwrap(); + block_on(sink.send(1)).unwrap(); + assert_eq!(sink.get_ref(), &[0, 1]); + + let mut sink = Vec::new().buffer(1); + block_on(sink.send(0)).unwrap(); + block_on(sink.send(1)).unwrap(); + assert_eq!(sink.get_ref(), &[0, 1]); +} + +// test basic buffer functionality, including both filling up to capacity, +// and writing out when the underlying sink is ready +#[test] +fn buffer() { + let (sink, allow) = manual_allow::(); + let sink = sink.buffer(2); + + let sink = block_on(StartSendFut::new(sink, 0)).unwrap(); + let mut sink = block_on(StartSendFut::new(sink, 1)).unwrap(); + + flag_cx(|flag, cx| { + let mut task = sink.send(2); + assert!(task.poll_unpin(cx).is_pending()); + assert!(!flag.take()); + allow.start(); + assert!(flag.take()); + unwrap(task.poll_unpin(cx)); + 
assert_eq!(sink.get_ref().data, vec![0, 1, 2]); + }) +} + +#[test] +fn fanout_smoke() { + let sink1 = Vec::new(); + let sink2 = Vec::new(); + let mut sink = sink1.fanout(sink2); + block_on(sink.send_all(&mut stream::iter(vec![1, 2, 3]).map(Ok))).unwrap(); + let (sink1, sink2) = sink.into_inner(); + assert_eq!(sink1, vec![1, 2, 3]); + assert_eq!(sink2, vec![1, 2, 3]); +} + +#[test] +fn fanout_backpressure() { + let (left_send, mut left_recv) = mpsc::channel(0); + let (right_send, mut right_recv) = mpsc::channel(0); + let sink = left_send.fanout(right_send); + + let mut sink = block_on(StartSendFut::new(sink, 0)).unwrap(); + + flag_cx(|flag, cx| { + let mut task = sink.send(2); + assert!(!flag.take()); + assert!(task.poll_unpin(cx).is_pending()); + assert_eq!(block_on(left_recv.next()), Some(0)); + assert!(flag.take()); + assert!(task.poll_unpin(cx).is_pending()); + assert_eq!(block_on(right_recv.next()), Some(0)); + assert!(flag.take()); + + assert!(task.poll_unpin(cx).is_pending()); + assert_eq!(block_on(left_recv.next()), Some(2)); + assert!(flag.take()); + assert!(task.poll_unpin(cx).is_pending()); + assert_eq!(block_on(right_recv.next()), Some(2)); + assert!(flag.take()); + + unwrap(task.poll_unpin(cx)); + // make sure receivers live until end of test to prevent send errors + drop(left_recv); + drop(right_recv); + }) +} + +#[test] +fn sink_map_err() { + { + let cx = &mut panic_context(); + let (tx, _rx) = mpsc::channel(1); + let mut tx = tx.sink_map_err(|_| ()); + assert_eq!(Pin::new(&mut tx).start_send(()), Ok(())); + assert_eq!(Pin::new(&mut tx).poll_flush(cx), Poll::Ready(Ok(()))); + } + + let tx = mpsc::channel(0).0; + assert_eq!(Pin::new(&mut tx.sink_map_err(|_| ())).start_send(()), Err(())); +} + +#[test] +fn sink_unfold() { + block_on(poll_fn(|cx| { + let (tx, mut rx) = mpsc::channel(1); + let unfold = sink::unfold((), |(), i: i32| { + let mut tx = tx.clone(); + async move { + tx.send(i).await.unwrap(); + Ok::<_, String>(()) + } + }); + futures::pin_mut!(unfold); + assert_eq!(unfold.as_mut().start_send(1), Ok(())); + assert_eq!(unfold.as_mut().poll_flush(cx), Poll::Ready(Ok(()))); + assert_eq!(rx.try_next().unwrap(), Some(1)); + + assert_eq!(unfold.as_mut().poll_ready(cx), Poll::Ready(Ok(()))); + assert_eq!(unfold.as_mut().start_send(2), Ok(())); + assert_eq!(unfold.as_mut().poll_ready(cx), Poll::Ready(Ok(()))); + assert_eq!(unfold.as_mut().start_send(3), Ok(())); + assert_eq!(rx.try_next().unwrap(), Some(2)); + assert!(rx.try_next().is_err()); + assert_eq!(unfold.as_mut().poll_ready(cx), Poll::Ready(Ok(()))); + assert_eq!(unfold.as_mut().start_send(4), Ok(())); + assert_eq!(unfold.as_mut().poll_flush(cx), Poll::Pending); // Channel full + assert_eq!(rx.try_next().unwrap(), Some(3)); + assert_eq!(rx.try_next().unwrap(), Some(4)); + + Poll::Ready(()) + })) +} + +#[test] +fn err_into() { + #[derive(Copy, Clone, Debug, PartialEq, Eq)] + struct ErrIntoTest; + + impl From for ErrIntoTest { + fn from(_: mpsc::SendError) -> Self { + Self + } + } + + { + let cx = &mut panic_context(); + let (tx, _rx) = mpsc::channel(1); + let mut tx: SinkErrInto, _, ErrIntoTest> = tx.sink_err_into(); + assert_eq!(Pin::new(&mut tx).start_send(()), Ok(())); + assert_eq!(Pin::new(&mut tx).poll_flush(cx), Poll::Ready(Ok(()))); + } + + let tx = mpsc::channel(0).0; + assert_eq!(Pin::new(&mut tx.sink_err_into()).start_send(()), Err(ErrIntoTest)); +} diff --git a/vendor/futures/tests/sink_fanout.rs b/vendor/futures/tests/sink_fanout.rs new file mode 100644 index 00000000..e57b2d8c --- /dev/null +++ 
b/vendor/futures/tests/sink_fanout.rs @@ -0,0 +1,24 @@ +use futures::channel::mpsc; +use futures::executor::block_on; +use futures::future::join3; +use futures::sink::SinkExt; +use futures::stream::{self, StreamExt}; + +#[test] +fn it_works() { + let (tx1, rx1) = mpsc::channel(1); + let (tx2, rx2) = mpsc::channel(2); + let tx = tx1.fanout(tx2).sink_map_err(|_| ()); + + let src = stream::iter((0..10).map(Ok)); + let fwd = src.forward(tx); + + let collect_fut1 = rx1.collect::>(); + let collect_fut2 = rx2.collect::>(); + let (_, vec1, vec2) = block_on(join3(fwd, collect_fut1, collect_fut2)); + + let expected = (0..10).collect::>(); + + assert_eq!(vec1, expected); + assert_eq!(vec2, expected); +} diff --git a/vendor/futures/tests/stream.rs b/vendor/futures/tests/stream.rs new file mode 100644 index 00000000..6cbef751 --- /dev/null +++ b/vendor/futures/tests/stream.rs @@ -0,0 +1,577 @@ +use std::cell::Cell; +use std::iter; +use std::pin::Pin; +use std::rc::Rc; +use std::sync::Arc; +use std::task::Context; + +use futures::channel::mpsc; +use futures::executor::block_on; +use futures::future::{self, Future}; +use futures::lock::Mutex; +use futures::sink::SinkExt; +use futures::stream::{self, StreamExt}; +use futures::task::Poll; +use futures::{ready, FutureExt}; +use futures_core::Stream; +use futures_executor::ThreadPool; +use futures_test::task::noop_context; + +#[test] +fn select() { + fn select_and_compare(a: Vec, b: Vec, expected: Vec) { + let a = stream::iter(a); + let b = stream::iter(b); + let vec = block_on(stream::select(a, b).collect::>()); + assert_eq!(vec, expected); + } + + select_and_compare(vec![1, 2, 3], vec![4, 5, 6], vec![1, 4, 2, 5, 3, 6]); + select_and_compare(vec![1, 2, 3], vec![4, 5], vec![1, 4, 2, 5, 3]); + select_and_compare(vec![1, 2], vec![4, 5, 6], vec![1, 4, 2, 5, 6]); +} + +#[test] +fn flat_map() { + block_on(async { + let st = + stream::iter(vec![stream::iter(0..=4u8), stream::iter(6..=10), stream::iter(0..=2)]); + + let values: Vec<_> = + st.flat_map(|s| s.filter(|v| futures::future::ready(v % 2 == 0))).collect().await; + + assert_eq!(values, vec![0, 2, 4, 6, 8, 10, 0, 2]); + }); +} + +#[test] +fn scan() { + block_on(async { + let values = stream::iter(vec![1u8, 2, 3, 4, 6, 8, 2]) + .scan(1, |state, e| { + *state += 1; + futures::future::ready(if e < *state { Some(e) } else { None }) + }) + .collect::>() + .await; + + assert_eq!(values, vec![1u8, 2, 3, 4]); + }); +} + +#[test] +fn flatten_unordered() { + use futures::executor::block_on; + use futures::stream::*; + use futures::task::*; + use std::convert::identity; + use std::pin::Pin; + use std::sync::atomic::{AtomicBool, Ordering}; + use std::thread; + use std::time::Duration; + + struct DataStream { + data: Vec, + polled: bool, + wake_immediately: bool, + } + + impl Stream for DataStream { + type Item = u8; + + fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll> { + if !self.polled { + if !self.wake_immediately { + let waker = ctx.waker().clone(); + let sleep_time = + Duration::from_millis(*self.data.first().unwrap_or(&0) as u64 / 10); + thread::spawn(move || { + thread::sleep(sleep_time); + waker.wake_by_ref(); + }); + } else { + ctx.waker().wake_by_ref(); + } + self.polled = true; + Poll::Pending + } else { + self.polled = false; + Poll::Ready(self.data.pop()) + } + } + } + + struct Interchanger { + polled: bool, + base: u8, + wake_immediately: bool, + } + + impl Stream for Interchanger { + type Item = DataStream; + + fn poll_next(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll> { + if 
!self.polled { + self.polled = true; + if !self.wake_immediately { + let waker = ctx.waker().clone(); + let sleep_time = Duration::from_millis(self.base as u64); + thread::spawn(move || { + thread::sleep(sleep_time); + waker.wake_by_ref(); + }); + } else { + ctx.waker().wake_by_ref(); + } + Poll::Pending + } else { + let data: Vec<_> = (0..6).rev().map(|v| v + self.base * 6).collect(); + self.base += 1; + self.polled = false; + Poll::Ready(Some(DataStream { + polled: false, + data, + wake_immediately: self.wake_immediately && self.base % 2 == 0, + })) + } + } + } + + // basic behaviour + { + block_on(async { + let st = stream::iter(vec![ + stream::iter(0..=4u8), + stream::iter(6..=10), + stream::iter(10..=12), + ]); + + let fl_unordered = st.flatten_unordered(3).collect::>().await; + + assert_eq!(fl_unordered, vec![0, 6, 10, 1, 7, 11, 2, 8, 12, 3, 9, 4, 10]); + }); + + block_on(async { + let st = stream::iter(vec![ + stream::iter(0..=4u8), + stream::iter(6..=10), + stream::iter(0..=2), + ]); + + let mut fm_unordered = st + .flat_map_unordered(1, |s| s.filter(|v| futures::future::ready(v % 2 == 0))) + .collect::>() + .await; + + fm_unordered.sort_unstable(); + + assert_eq!(fm_unordered, vec![0, 0, 2, 2, 4, 6, 8, 10]); + }); + } + + // wake up immediately + { + block_on(async { + let mut fl_unordered = Interchanger { polled: false, base: 0, wake_immediately: true } + .take(10) + .map(|s| s.map(identity)) + .flatten_unordered(10) + .collect::>() + .await; + + fl_unordered.sort_unstable(); + + assert_eq!(fl_unordered, (0..60).collect::>()); + }); + + block_on(async { + let mut fm_unordered = Interchanger { polled: false, base: 0, wake_immediately: true } + .take(10) + .flat_map_unordered(10, |s| s.map(identity)) + .collect::>() + .await; + + fm_unordered.sort_unstable(); + + assert_eq!(fm_unordered, (0..60).collect::>()); + }); + } + + // wake up after delay + { + block_on(async { + let mut fl_unordered = Interchanger { polled: false, base: 0, wake_immediately: false } + .take(10) + .map(|s| s.map(identity)) + .flatten_unordered(10) + .collect::>() + .await; + + fl_unordered.sort_unstable(); + + assert_eq!(fl_unordered, (0..60).collect::>()); + }); + + block_on(async { + let mut fm_unordered = Interchanger { polled: false, base: 0, wake_immediately: false } + .take(10) + .flat_map_unordered(10, |s| s.map(identity)) + .collect::>() + .await; + + fm_unordered.sort_unstable(); + + assert_eq!(fm_unordered, (0..60).collect::>()); + }); + + block_on(async { + let (mut fm_unordered, mut fl_unordered) = futures_util::join!( + Interchanger { polled: false, base: 0, wake_immediately: false } + .take(10) + .flat_map_unordered(10, |s| s.map(identity)) + .collect::>(), + Interchanger { polled: false, base: 0, wake_immediately: false } + .take(10) + .map(|s| s.map(identity)) + .flatten_unordered(10) + .collect::>() + ); + + fm_unordered.sort_unstable(); + fl_unordered.sort_unstable(); + + assert_eq!(fm_unordered, fl_unordered); + assert_eq!(fm_unordered, (0..60).collect::>()); + }); + } + + // waker panics + { + let stream = Arc::new(Mutex::new( + Interchanger { polled: false, base: 0, wake_immediately: true } + .take(10) + .flat_map_unordered(10, |s| s.map(identity)), + )); + + struct PanicWaker; + + impl ArcWake for PanicWaker { + fn wake_by_ref(_arc_self: &Arc) { + panic!("WAKE UP"); + } + } + + std::thread::spawn({ + let stream = stream.clone(); + move || { + let mut st = poll_fn(|cx| { + let mut lock = ready!(stream.lock().poll_unpin(cx)); + + let panic_waker = waker(Arc::new(PanicWaker)); + let mut 
panic_cx = Context::from_waker(&panic_waker); + let _ = ready!(lock.poll_next_unpin(&mut panic_cx)); + + Poll::Ready(Some(())) + }); + + block_on(st.next()) + } + }) + .join() + .unwrap_err(); + + block_on(async move { + let mut values: Vec<_> = stream.lock().await.by_ref().collect().await; + values.sort_unstable(); + + assert_eq!(values, (0..60).collect::>()); + }); + } + + // stream panics + { + let st = stream::iter(iter::once( + once(Box::pin(async { panic!("Polled") })).left_stream::(), + )) + .chain( + Interchanger { polled: false, base: 0, wake_immediately: true } + .map(|stream| stream.right_stream()) + .take(10), + ); + + let stream = Arc::new(Mutex::new(st.flatten_unordered(10))); + + std::thread::spawn({ + let stream = stream.clone(); + move || { + let mut st = poll_fn(|cx| { + let mut lock = ready!(stream.lock().poll_unpin(cx)); + let data = ready!(lock.poll_next_unpin(cx)); + + Poll::Ready(data) + }); + + block_on(st.next()) + } + }) + .join() + .unwrap_err(); + + block_on(async move { + let mut values: Vec<_> = stream.lock().await.by_ref().collect().await; + values.sort_unstable(); + + assert_eq!(values, (0..60).collect::>()); + }); + } + + fn timeout(time: Duration, value: I) -> impl Future { + let ready = Arc::new(AtomicBool::new(false)); + let mut spawned = false; + + future::poll_fn(move |cx| { + if !spawned { + let waker = cx.waker().clone(); + let ready = ready.clone(); + + std::thread::spawn(move || { + std::thread::sleep(time); + ready.store(true, Ordering::Release); + + waker.wake_by_ref() + }); + spawned = true; + } + + if ready.load(Ordering::Acquire) { + Poll::Ready(value.clone()) + } else { + Poll::Pending + } + }) + } + + fn build_nested_fu(st: S) -> impl Stream + Unpin + where + S::Item: Clone, + { + let inner = st + .then(|item| timeout(Duration::from_millis(50), item)) + .enumerate() + .map(|(idx, value)| { + stream::once(if idx % 2 == 0 { + future::ready(value).left_future() + } else { + timeout(Duration::from_millis(100), value).right_future() + }) + }) + .flatten_unordered(None); + + stream::once(future::ready(inner)).flatten_unordered(None) + } + + // nested `flatten_unordered` + let te = ThreadPool::new().unwrap(); + let base_handle = te + .spawn_with_handle(async move { + let fu = build_nested_fu(stream::iter(1..=10)); + + assert_eq!(fu.count().await, 10); + }) + .unwrap(); + + block_on(base_handle); + + let empty_state_move_handle = te + .spawn_with_handle(async move { + let mut fu = build_nested_fu(stream::iter(1..10)); + { + let mut cx = noop_context(); + let _ = fu.poll_next_unpin(&mut cx); + let _ = fu.poll_next_unpin(&mut cx); + } + + assert_eq!(fu.count().await, 9); + }) + .unwrap(); + + block_on(empty_state_move_handle); +} + +#[test] +fn take_until() { + fn make_stop_fut(stop_on: u32) -> impl Future { + let mut i = 0; + future::poll_fn(move |_cx| { + i += 1; + if i <= stop_on { + Poll::Pending + } else { + Poll::Ready(()) + } + }) + } + + block_on(async { + // Verify stopping works: + let stream = stream::iter(1u32..=10); + let stop_fut = make_stop_fut(5); + + let stream = stream.take_until(stop_fut); + let last = stream.fold(0, |_, i| async move { i }).await; + assert_eq!(last, 5); + + // Verify take_future() works: + let stream = stream::iter(1..=10); + let stop_fut = make_stop_fut(5); + + let mut stream = stream.take_until(stop_fut); + + assert_eq!(stream.next().await, Some(1)); + assert_eq!(stream.next().await, Some(2)); + + stream.take_future(); + + let last = stream.fold(0, |_, i| async move { i }).await; + assert_eq!(last, 10); + + // 
Verify take_future() returns None if stream is stopped: + let stream = stream::iter(1u32..=10); + let stop_fut = make_stop_fut(1); + let mut stream = stream.take_until(stop_fut); + assert_eq!(stream.next().await, Some(1)); + assert_eq!(stream.next().await, None); + assert!(stream.take_future().is_none()); + + // Verify TakeUntil is fused: + let mut i = 0; + let stream = stream::poll_fn(move |_cx| { + i += 1; + match i { + 1 => Poll::Ready(Some(1)), + 2 => Poll::Ready(None), + _ => panic!("TakeUntil not fused"), + } + }); + + let stop_fut = make_stop_fut(1); + let mut stream = stream.take_until(stop_fut); + assert_eq!(stream.next().await, Some(1)); + assert_eq!(stream.next().await, None); + assert_eq!(stream.next().await, None); + }); +} + +#[test] +#[should_panic] +fn chunks_panic_on_cap_zero() { + let (_, rx1) = mpsc::channel::<()>(1); + + let _ = rx1.chunks(0); +} + +#[test] +#[should_panic] +fn ready_chunks_panic_on_cap_zero() { + let (_, rx1) = mpsc::channel::<()>(1); + + let _ = rx1.ready_chunks(0); +} + +#[test] +fn ready_chunks() { + let (mut tx, rx1) = mpsc::channel::(16); + + let mut s = rx1.ready_chunks(2); + + let mut cx = noop_context(); + assert!(s.next().poll_unpin(&mut cx).is_pending()); + + block_on(async { + tx.send(1).await.unwrap(); + + assert_eq!(s.next().await.unwrap(), vec![1]); + tx.send(2).await.unwrap(); + tx.send(3).await.unwrap(); + tx.send(4).await.unwrap(); + assert_eq!(s.next().await.unwrap(), vec![2, 3]); + assert_eq!(s.next().await.unwrap(), vec![4]); + }); +} + +struct SlowStream { + times_should_poll: usize, + times_polled: Rc>, +} +impl Stream for SlowStream { + type Item = usize; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.times_polled.set(self.times_polled.get() + 1); + if self.times_polled.get() % 2 == 0 { + cx.waker().wake_by_ref(); + return Poll::Pending; + } + if self.times_polled.get() >= self.times_should_poll { + return Poll::Ready(None); + } + Poll::Ready(Some(self.times_polled.get())) + } +} + +#[test] +fn select_with_strategy_doesnt_terminate_early() { + for side in [stream::PollNext::Left, stream::PollNext::Right] { + let times_should_poll = 10; + let count = Rc::new(Cell::new(0)); + let b = stream::iter([10, 20]); + + let mut selected = stream::select_with_strategy( + SlowStream { times_should_poll, times_polled: count.clone() }, + b, + |_: &mut ()| side, + ); + block_on(async move { while selected.next().await.is_some() {} }); + assert_eq!(count.get(), times_should_poll + 1); + } +} + +async fn is_even(number: u8) -> bool { + number % 2 == 0 +} + +#[test] +fn all() { + block_on(async { + let empty: [u8; 0] = []; + let st = stream::iter(empty); + let all = st.all(is_even).await; + assert!(all); + + let st = stream::iter([2, 4, 6, 8]); + let all = st.all(is_even).await; + assert!(all); + + let st = stream::iter([2, 3, 4]); + let all = st.all(is_even).await; + assert!(!all); + }); +} + +#[test] +fn any() { + block_on(async { + let empty: [u8; 0] = []; + let st = stream::iter(empty); + let any = st.any(is_even).await; + assert!(!any); + + let st = stream::iter([1, 2, 3]); + let any = st.any(is_even).await; + assert!(any); + + let st = stream::iter([1, 3, 5]); + let any = st.any(is_even).await; + assert!(!any); + }); +} diff --git a/vendor/futures/tests/stream_abortable.rs b/vendor/futures/tests/stream_abortable.rs new file mode 100644 index 00000000..2339dd05 --- /dev/null +++ b/vendor/futures/tests/stream_abortable.rs @@ -0,0 +1,46 @@ +use futures::channel::mpsc; +use futures::executor::block_on; +use 
futures::stream::{abortable, Stream, StreamExt}; +use futures::task::{Context, Poll}; +use futures::SinkExt; +use futures_test::task::new_count_waker; +use std::pin::Pin; + +#[test] +fn abortable_works() { + let (_tx, a_rx) = mpsc::channel::<()>(1); + let (mut abortable_rx, abort_handle) = abortable(a_rx); + + abort_handle.abort(); + assert!(abortable_rx.is_aborted()); + assert_eq!(None, block_on(abortable_rx.next())); +} + +#[test] +fn abortable_awakens() { + let (_tx, a_rx) = mpsc::channel::<()>(1); + let (mut abortable_rx, abort_handle) = abortable(a_rx); + + let (waker, counter) = new_count_waker(); + let mut cx = Context::from_waker(&waker); + + assert_eq!(counter, 0); + assert_eq!(Poll::Pending, Pin::new(&mut abortable_rx).poll_next(&mut cx)); + assert_eq!(counter, 0); + + abort_handle.abort(); + assert_eq!(counter, 1); + assert!(abortable_rx.is_aborted()); + assert_eq!(Poll::Ready(None), Pin::new(&mut abortable_rx).poll_next(&mut cx)); +} + +#[test] +fn abortable_resolves() { + let (mut tx, a_rx) = mpsc::channel::<()>(1); + let (mut abortable_rx, _abort_handle) = abortable(a_rx); + + block_on(tx.send(())).unwrap(); + + assert!(!abortable_rx.is_aborted()); + assert_eq!(Some(()), block_on(abortable_rx.next())); +} diff --git a/vendor/futures/tests/stream_buffer_unordered.rs b/vendor/futures/tests/stream_buffer_unordered.rs new file mode 100644 index 00000000..9a2ee174 --- /dev/null +++ b/vendor/futures/tests/stream_buffer_unordered.rs @@ -0,0 +1,73 @@ +use futures::channel::{mpsc, oneshot}; +use futures::executor::{block_on, block_on_stream}; +use futures::sink::SinkExt; +use futures::stream::StreamExt; +use std::sync::mpsc as std_mpsc; +use std::thread; + +#[test] +#[ignore] // FIXME: https://github.com/rust-lang/futures-rs/issues/1790 +fn works() { + const N: usize = 4; + + let (mut tx, rx) = mpsc::channel(1); + + let (tx2, rx2) = std_mpsc::channel(); + let (tx3, rx3) = std_mpsc::channel(); + let t1 = thread::spawn(move || { + for _ in 0..=N { + let (mytx, myrx) = oneshot::channel(); + block_on(tx.send(myrx)).unwrap(); + tx3.send(mytx).unwrap(); + } + rx2.recv().unwrap(); + for _ in 0..N { + let (mytx, myrx) = oneshot::channel(); + block_on(tx.send(myrx)).unwrap(); + tx3.send(mytx).unwrap(); + } + }); + + let (tx4, rx4) = std_mpsc::channel(); + let t2 = thread::spawn(move || { + for item in block_on_stream(rx.buffer_unordered(N)) { + tx4.send(item.unwrap()).unwrap(); + } + }); + + let o1 = rx3.recv().unwrap(); + let o2 = rx3.recv().unwrap(); + let o3 = rx3.recv().unwrap(); + let o4 = rx3.recv().unwrap(); + assert!(rx4.try_recv().is_err()); + + o1.send(1).unwrap(); + assert_eq!(rx4.recv(), Ok(1)); + o3.send(3).unwrap(); + assert_eq!(rx4.recv(), Ok(3)); + tx2.send(()).unwrap(); + o2.send(2).unwrap(); + assert_eq!(rx4.recv(), Ok(2)); + o4.send(4).unwrap(); + assert_eq!(rx4.recv(), Ok(4)); + + let o5 = rx3.recv().unwrap(); + let o6 = rx3.recv().unwrap(); + let o7 = rx3.recv().unwrap(); + let o8 = rx3.recv().unwrap(); + let o9 = rx3.recv().unwrap(); + + o5.send(5).unwrap(); + assert_eq!(rx4.recv(), Ok(5)); + o8.send(8).unwrap(); + assert_eq!(rx4.recv(), Ok(8)); + o9.send(9).unwrap(); + assert_eq!(rx4.recv(), Ok(9)); + o7.send(7).unwrap(); + assert_eq!(rx4.recv(), Ok(7)); + o6.send(6).unwrap(); + assert_eq!(rx4.recv(), Ok(6)); + + t1.join().unwrap(); + t2.join().unwrap(); +} diff --git a/vendor/futures/tests/stream_catch_unwind.rs b/vendor/futures/tests/stream_catch_unwind.rs new file mode 100644 index 00000000..8b23a0a7 --- /dev/null +++ b/vendor/futures/tests/stream_catch_unwind.rs @@ 
-0,0 +1,27 @@ +use futures::executor::block_on_stream; +use futures::stream::{self, StreamExt}; + +#[test] +fn panic_in_the_middle_of_the_stream() { + let stream = stream::iter(vec![Some(10), None, Some(11)]); + + // panic on second element + let stream_panicking = stream.map(|o| o.unwrap()); + let mut iter = block_on_stream(stream_panicking.catch_unwind()); + + assert_eq!(10, iter.next().unwrap().ok().unwrap()); + assert!(iter.next().unwrap().is_err()); + assert!(iter.next().is_none()); +} + +#[test] +fn no_panic() { + let stream = stream::iter(vec![10, 11, 12]); + + let mut iter = block_on_stream(stream.catch_unwind()); + + assert_eq!(10, iter.next().unwrap().ok().unwrap()); + assert_eq!(11, iter.next().unwrap().ok().unwrap()); + assert_eq!(12, iter.next().unwrap().ok().unwrap()); + assert!(iter.next().is_none()); +} diff --git a/vendor/futures/tests/stream_futures_ordered.rs b/vendor/futures/tests/stream_futures_ordered.rs new file mode 100644 index 00000000..5a4a3e22 --- /dev/null +++ b/vendor/futures/tests/stream_futures_ordered.rs @@ -0,0 +1,172 @@ +use futures::channel::oneshot; +use futures::executor::{block_on, block_on_stream}; +use futures::future::{self, join, Future, FutureExt, TryFutureExt}; +use futures::stream::{FuturesOrdered, StreamExt}; +use futures::task::Poll; +use futures_test::task::noop_context; +use std::any::Any; + +#[test] +fn works_1() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut stream = vec![a_rx, b_rx, c_rx].into_iter().collect::>(); + + b_tx.send(99).unwrap(); + assert!(stream.poll_next_unpin(&mut noop_context()).is_pending()); + + a_tx.send(33).unwrap(); + c_tx.send(33).unwrap(); + + let mut iter = block_on_stream(stream); + assert_eq!(Some(Ok(33)), iter.next()); + assert_eq!(Some(Ok(99)), iter.next()); + assert_eq!(Some(Ok(33)), iter.next()); + assert_eq!(None, iter.next()); +} + +#[test] +fn works_2() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut stream = vec![a_rx.boxed(), join(b_rx, c_rx).map(|(a, b)| Ok(a? 
+ b?)).boxed()] + .into_iter() + .collect::>(); + + let mut cx = noop_context(); + a_tx.send(33).unwrap(); + b_tx.send(33).unwrap(); + assert!(stream.poll_next_unpin(&mut cx).is_ready()); + assert!(stream.poll_next_unpin(&mut cx).is_pending()); + c_tx.send(33).unwrap(); + assert!(stream.poll_next_unpin(&mut cx).is_ready()); +} + +#[test] +fn test_push_front() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + let (d_tx, d_rx) = oneshot::channel::(); + + let mut stream = FuturesOrdered::new(); + + let mut cx = noop_context(); + + stream.push_back(a_rx); + stream.push_back(b_rx); + stream.push_back(c_rx); + + a_tx.send(1).unwrap(); + b_tx.send(2).unwrap(); + c_tx.send(3).unwrap(); + + // 1 and 2 should be received in order + assert_eq!(Poll::Ready(Some(Ok(1))), stream.poll_next_unpin(&mut cx)); + assert_eq!(Poll::Ready(Some(Ok(2))), stream.poll_next_unpin(&mut cx)); + + stream.push_front(d_rx); + d_tx.send(4).unwrap(); + + // we pushed `d_rx` to the front and sent 4, so we should recieve 4 next + // and then 3 after it + assert_eq!(Poll::Ready(Some(Ok(4))), stream.poll_next_unpin(&mut cx)); + assert_eq!(Poll::Ready(Some(Ok(3))), stream.poll_next_unpin(&mut cx)); +} + +#[test] +fn test_push_back() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + let (d_tx, d_rx) = oneshot::channel::(); + + let mut stream = FuturesOrdered::new(); + + let mut cx = noop_context(); + + stream.push_back(a_rx); + stream.push_back(b_rx); + stream.push_back(c_rx); + + a_tx.send(1).unwrap(); + b_tx.send(2).unwrap(); + c_tx.send(3).unwrap(); + + // All results should be received in order + + assert_eq!(Poll::Ready(Some(Ok(1))), stream.poll_next_unpin(&mut cx)); + assert_eq!(Poll::Ready(Some(Ok(2))), stream.poll_next_unpin(&mut cx)); + + stream.push_back(d_rx); + d_tx.send(4).unwrap(); + + assert_eq!(Poll::Ready(Some(Ok(3))), stream.poll_next_unpin(&mut cx)); + assert_eq!(Poll::Ready(Some(Ok(4))), stream.poll_next_unpin(&mut cx)); +} + +#[test] +fn from_iterator() { + let stream = vec![future::ready::(1), future::ready::(2), future::ready::(3)] + .into_iter() + .collect::>(); + assert_eq!(stream.len(), 3); + assert_eq!(block_on(stream.collect::>()), vec![1, 2, 3]); +} + +#[test] +fn queue_never_unblocked() { + let (_a_tx, a_rx) = oneshot::channel::>(); + let (b_tx, b_rx) = oneshot::channel::>(); + let (c_tx, c_rx) = oneshot::channel::>(); + + let mut stream = vec![ + Box::new(a_rx) as Box + Unpin>, + Box::new( + future::try_select(b_rx, c_rx) + .map_err(|e| e.factor_first().0) + .and_then(|e| future::ok(Box::new(e) as Box)), + ) as _, + ] + .into_iter() + .collect::>(); + + let cx = &mut noop_context(); + for _ in 0..10 { + assert!(stream.poll_next_unpin(cx).is_pending()); + } + + b_tx.send(Box::new(())).unwrap(); + assert!(stream.poll_next_unpin(cx).is_pending()); + c_tx.send(Box::new(())).unwrap(); + assert!(stream.poll_next_unpin(cx).is_pending()); + assert!(stream.poll_next_unpin(cx).is_pending()); +} + +#[test] +fn test_push_front_negative() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut stream = FuturesOrdered::new(); + + let mut cx = noop_context(); + + stream.push_front(a_rx); + stream.push_front(b_rx); + stream.push_front(c_rx); + + a_tx.send(1).unwrap(); + b_tx.send(2).unwrap(); + c_tx.send(3).unwrap(); + + // These should all be recieved in reverse 
order + assert_eq!(Poll::Ready(Some(Ok(3))), stream.poll_next_unpin(&mut cx)); + assert_eq!(Poll::Ready(Some(Ok(2))), stream.poll_next_unpin(&mut cx)); + assert_eq!(Poll::Ready(Some(Ok(1))), stream.poll_next_unpin(&mut cx)); +} diff --git a/vendor/futures/tests/stream_futures_unordered.rs b/vendor/futures/tests/stream_futures_unordered.rs new file mode 100644 index 00000000..b5682804 --- /dev/null +++ b/vendor/futures/tests/stream_futures_unordered.rs @@ -0,0 +1,383 @@ +use futures::channel::oneshot; +use futures::executor::{block_on, block_on_stream}; +use futures::future::{self, join, Future, FutureExt}; +use futures::stream::{FusedStream, FuturesUnordered, StreamExt}; +use futures::task::{Context, Poll}; +use futures_test::future::FutureTestExt; +use futures_test::task::noop_context; +use futures_test::{assert_stream_done, assert_stream_next, assert_stream_pending}; +use std::iter::FromIterator; +use std::pin::Pin; +use std::sync::atomic::{AtomicBool, Ordering}; + +#[test] +fn is_terminated() { + let mut cx = noop_context(); + let mut tasks = FuturesUnordered::new(); + + assert_eq!(tasks.is_terminated(), false); + assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None)); + assert_eq!(tasks.is_terminated(), true); + + // Test that the sentinel value doesn't leak + assert_eq!(tasks.is_empty(), true); + assert_eq!(tasks.len(), 0); + assert_eq!(tasks.iter_mut().len(), 0); + + tasks.push(future::ready(1)); + + assert_eq!(tasks.is_empty(), false); + assert_eq!(tasks.len(), 1); + assert_eq!(tasks.iter_mut().len(), 1); + + assert_eq!(tasks.is_terminated(), false); + assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(Some(1))); + assert_eq!(tasks.is_terminated(), false); + assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None)); + assert_eq!(tasks.is_terminated(), true); +} + +#[test] +fn works_1() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut iter = + block_on_stream(vec![a_rx, b_rx, c_rx].into_iter().collect::>()); + + b_tx.send(99).unwrap(); + assert_eq!(Some(Ok(99)), iter.next()); + + a_tx.send(33).unwrap(); + c_tx.send(33).unwrap(); + assert_eq!(Some(Ok(33)), iter.next()); + assert_eq!(Some(Ok(33)), iter.next()); + assert_eq!(None, iter.next()); +} + +#[test] +fn works_2() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut stream = vec![a_rx.boxed(), join(b_rx, c_rx).map(|(a, b)| Ok(a? 
+ b?)).boxed()] + .into_iter() + .collect::>(); + + a_tx.send(9).unwrap(); + b_tx.send(10).unwrap(); + + let mut cx = noop_context(); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(9)))); + c_tx.send(20).unwrap(); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(30)))); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(None)); +} + +#[test] +fn from_iterator() { + let stream = vec![future::ready::(1), future::ready::(2), future::ready::(3)] + .into_iter() + .collect::>(); + assert_eq!(stream.len(), 3); + assert_eq!(block_on(stream.collect::>()), vec![1, 2, 3]); +} + +#[test] +fn finished_future() { + let (_a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut stream = vec![ + Box::new(a_rx) as Box> + Unpin>, + Box::new(future::select(b_rx, c_rx).map(|e| e.factor_first().0)) as _, + ] + .into_iter() + .collect::>(); + + let cx = &mut noop_context(); + for _ in 0..10 { + assert!(stream.poll_next_unpin(cx).is_pending()); + } + + b_tx.send(12).unwrap(); + c_tx.send(3).unwrap(); + assert!(stream.poll_next_unpin(cx).is_ready()); + assert!(stream.poll_next_unpin(cx).is_pending()); + assert!(stream.poll_next_unpin(cx).is_pending()); +} + +#[test] +fn iter_mut_cancel() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let mut stream = vec![a_rx, b_rx, c_rx].into_iter().collect::>(); + + for rx in stream.iter_mut() { + rx.close(); + } + + let mut iter = block_on_stream(stream); + + assert!(a_tx.is_canceled()); + assert!(b_tx.is_canceled()); + assert!(c_tx.is_canceled()); + + assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled))); + assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled))); + assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled))); + assert_eq!(iter.next(), None); +} + +#[test] +fn iter_mut_len() { + let mut stream = + vec![future::pending::<()>(), future::pending::<()>(), future::pending::<()>()] + .into_iter() + .collect::>(); + + let mut iter_mut = stream.iter_mut(); + assert_eq!(iter_mut.len(), 3); + assert!(iter_mut.next().is_some()); + assert_eq!(iter_mut.len(), 2); + assert!(iter_mut.next().is_some()); + assert_eq!(iter_mut.len(), 1); + assert!(iter_mut.next().is_some()); + assert_eq!(iter_mut.len(), 0); + assert!(iter_mut.next().is_none()); +} + +#[test] +fn iter_cancel() { + struct AtomicCancel { + future: F, + cancel: AtomicBool, + } + + impl Future for AtomicCancel { + type Output = Option<::Output>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + if self.cancel.load(Ordering::Relaxed) { + Poll::Ready(None) + } else { + self.future.poll_unpin(cx).map(Some) + } + } + } + + impl AtomicCancel { + fn new(future: F) -> Self { + Self { future, cancel: AtomicBool::new(false) } + } + } + + let stream = vec![ + AtomicCancel::new(future::pending::<()>()), + AtomicCancel::new(future::pending::<()>()), + AtomicCancel::new(future::pending::<()>()), + ] + .into_iter() + .collect::>(); + + for f in stream.iter() { + f.cancel.store(true, Ordering::Relaxed); + } + + let mut iter = block_on_stream(stream); + + assert_eq!(iter.next(), Some(None)); + assert_eq!(iter.next(), Some(None)); + assert_eq!(iter.next(), Some(None)); + assert_eq!(iter.next(), None); +} + +#[test] +fn iter_len() { + let stream = vec![future::pending::<()>(), future::pending::<()>(), future::pending::<()>()] + .into_iter() + .collect::>(); + + 
let mut iter = stream.iter(); + assert_eq!(iter.len(), 3); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 2); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 1); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); +} + +#[test] +fn into_iter_cancel() { + let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + + let stream = vec![a_rx, b_rx, c_rx].into_iter().collect::>(); + + let stream = stream + .into_iter() + .map(|mut rx| { + rx.close(); + rx + }) + .collect::>(); + + let mut iter = block_on_stream(stream); + + assert!(a_tx.is_canceled()); + assert!(b_tx.is_canceled()); + assert!(c_tx.is_canceled()); + + assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled))); + assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled))); + assert_eq!(iter.next(), Some(Err(futures::channel::oneshot::Canceled))); + assert_eq!(iter.next(), None); +} + +#[test] +fn into_iter_len() { + let stream = vec![future::pending::<()>(), future::pending::<()>(), future::pending::<()>()] + .into_iter() + .collect::>(); + + let mut into_iter = stream.into_iter(); + assert_eq!(into_iter.len(), 3); + assert!(into_iter.next().is_some()); + assert_eq!(into_iter.len(), 2); + assert!(into_iter.next().is_some()); + assert_eq!(into_iter.len(), 1); + assert!(into_iter.next().is_some()); + assert_eq!(into_iter.len(), 0); + assert!(into_iter.next().is_none()); +} + +#[test] +fn into_iter_partial() { + let stream = vec![future::ready(1), future::ready(2), future::ready(3), future::ready(4)] + .into_iter() + .collect::>(); + + let mut into_iter = stream.into_iter(); + assert!(into_iter.next().is_some()); + assert!(into_iter.next().is_some()); + assert!(into_iter.next().is_some()); + assert_eq!(into_iter.len(), 1); + // don't panic when iterator is dropped before completing +} + +#[test] +fn futures_not_moved_after_poll() { + // Future that will be ready after being polled twice, + // asserting that it does not move. + let fut = future::ready(()).pending_once().assert_unmoved(); + let mut stream = vec![fut; 3].into_iter().collect::>(); + assert_stream_pending!(stream); + assert_stream_next!(stream, ()); + assert_stream_next!(stream, ()); + assert_stream_next!(stream, ()); + assert_stream_done!(stream); +} + +#[test] +fn len_valid_during_out_of_order_completion() { + // Complete futures out-of-order and add new futures afterwards to ensure + // length values remain correct. 
+ let (a_tx, a_rx) = oneshot::channel::(); + let (b_tx, b_rx) = oneshot::channel::(); + let (c_tx, c_rx) = oneshot::channel::(); + let (d_tx, d_rx) = oneshot::channel::(); + + let mut cx = noop_context(); + let mut stream = FuturesUnordered::new(); + assert_eq!(stream.len(), 0); + + stream.push(a_rx); + assert_eq!(stream.len(), 1); + stream.push(b_rx); + assert_eq!(stream.len(), 2); + stream.push(c_rx); + assert_eq!(stream.len(), 3); + + b_tx.send(4).unwrap(); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(4)))); + assert_eq!(stream.len(), 2); + + stream.push(d_rx); + assert_eq!(stream.len(), 3); + + c_tx.send(5).unwrap(); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(5)))); + assert_eq!(stream.len(), 2); + + d_tx.send(6).unwrap(); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(6)))); + assert_eq!(stream.len(), 1); + + a_tx.send(7).unwrap(); + assert_eq!(stream.poll_next_unpin(&mut cx), Poll::Ready(Some(Ok(7)))); + assert_eq!(stream.len(), 0); +} + +#[test] +fn polled_only_once_at_most_per_iteration() { + #[derive(Debug, Clone, Copy, Default)] + struct F { + polled: bool, + } + + impl Future for F { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, _: &mut Context) -> Poll { + if self.polled { + panic!("polled twice") + } else { + self.polled = true; + Poll::Pending + } + } + } + + let cx = &mut noop_context(); + + let mut tasks = FuturesUnordered::from_iter(vec![F::default(); 10]); + assert!(tasks.poll_next_unpin(cx).is_pending()); + assert_eq!(10, tasks.iter().filter(|f| f.polled).count()); + + let mut tasks = FuturesUnordered::from_iter(vec![F::default(); 33]); + assert!(tasks.poll_next_unpin(cx).is_pending()); + assert_eq!(33, tasks.iter().filter(|f| f.polled).count()); + + let mut tasks = FuturesUnordered::::new(); + assert_eq!(Poll::Ready(None), tasks.poll_next_unpin(cx)); +} + +#[test] +fn clear() { + let mut tasks = FuturesUnordered::from_iter(vec![future::ready(1), future::ready(2)]); + + assert_eq!(block_on(tasks.next()), Some(1)); + assert!(!tasks.is_empty()); + + tasks.clear(); + assert!(tasks.is_empty()); + + tasks.push(future::ready(3)); + assert!(!tasks.is_empty()); + + tasks.clear(); + assert!(tasks.is_empty()); + + assert_eq!(block_on(tasks.next()), None); + assert!(tasks.is_terminated()); + tasks.clear(); + assert!(!tasks.is_terminated()); +} diff --git a/vendor/futures/tests/stream_into_async_read.rs b/vendor/futures/tests/stream_into_async_read.rs new file mode 100644 index 00000000..60188d3e --- /dev/null +++ b/vendor/futures/tests/stream_into_async_read.rs @@ -0,0 +1,94 @@ +use core::pin::Pin; +use futures::io::{AsyncBufRead, AsyncRead}; +use futures::stream::{self, TryStreamExt}; +use futures::task::Poll; +use futures_test::{stream::StreamTestExt, task::noop_context}; + +macro_rules! assert_read { + ($reader:expr, $buf:expr, $item:expr) => { + let mut cx = noop_context(); + loop { + match Pin::new(&mut $reader).poll_read(&mut cx, $buf) { + Poll::Ready(Ok(x)) => { + assert_eq!(x, $item); + break; + } + Poll::Ready(Err(err)) => { + panic!("assertion failed: expected value but got {}", err); + } + Poll::Pending => { + continue; + } + } + } + }; +} + +macro_rules! 
assert_fill_buf { + ($reader:expr, $buf:expr) => { + let mut cx = noop_context(); + loop { + match Pin::new(&mut $reader).poll_fill_buf(&mut cx) { + Poll::Ready(Ok(x)) => { + assert_eq!(x, $buf); + break; + } + Poll::Ready(Err(err)) => { + panic!("assertion failed: expected value but got {}", err); + } + Poll::Pending => { + continue; + } + } + } + }; +} + +#[test] +fn test_into_async_read() { + let stream = stream::iter((1..=3).flat_map(|_| vec![Ok(vec![]), Ok(vec![1, 2, 3, 4, 5])])); + let mut reader = stream.interleave_pending().into_async_read(); + let mut buf = vec![0; 3]; + + assert_read!(reader, &mut buf, 3); + assert_eq!(&buf, &[1, 2, 3]); + + assert_read!(reader, &mut buf, 2); + assert_eq!(&buf[..2], &[4, 5]); + + assert_read!(reader, &mut buf, 3); + assert_eq!(&buf, &[1, 2, 3]); + + assert_read!(reader, &mut buf, 2); + assert_eq!(&buf[..2], &[4, 5]); + + assert_read!(reader, &mut buf, 3); + assert_eq!(&buf, &[1, 2, 3]); + + assert_read!(reader, &mut buf, 2); + assert_eq!(&buf[..2], &[4, 5]); + + assert_read!(reader, &mut buf, 0); +} + +#[test] +fn test_into_async_bufread() { + let stream = stream::iter((1..=2).flat_map(|_| vec![Ok(vec![]), Ok(vec![1, 2, 3, 4, 5])])); + let mut reader = stream.interleave_pending().into_async_read(); + + let mut reader = Pin::new(&mut reader); + + assert_fill_buf!(reader, &[1, 2, 3, 4, 5][..]); + reader.as_mut().consume(3); + + assert_fill_buf!(reader, &[4, 5][..]); + reader.as_mut().consume(2); + + assert_fill_buf!(reader, &[1, 2, 3, 4, 5][..]); + reader.as_mut().consume(2); + + assert_fill_buf!(reader, &[3, 4, 5][..]); + reader.as_mut().consume(3); + + assert_fill_buf!(reader, &[][..]); +} diff --git a/vendor/futures/tests/stream_peekable.rs b/vendor/futures/tests/stream_peekable.rs new file mode 100644 index 00000000..153fcc25 --- /dev/null +++ b/vendor/futures/tests/stream_peekable.rs @@ -0,0 +1,58 @@ +use futures::executor::block_on; +use futures::pin_mut; +use futures::stream::{self, Peekable, StreamExt}; + +#[test] +fn peekable() { + block_on(async { + let peekable: Peekable<_> = stream::iter(vec![1u8, 2, 3]).peekable(); + pin_mut!(peekable); + assert_eq!(peekable.as_mut().peek().await, Some(&1u8)); + assert_eq!(peekable.collect::>().await, vec![1, 2, 3]); + + let s = stream::once(async { 1 }).peekable(); + pin_mut!(s); + assert_eq!(s.as_mut().peek().await, Some(&1u8)); + assert_eq!(s.collect::>().await, vec![1]); + }); +} + +#[test] +fn peekable_mut() { + block_on(async { + let s = stream::iter(vec![1u8, 2, 3]).peekable(); + pin_mut!(s); + if let Some(p) = s.as_mut().peek_mut().await { + if *p == 1 { + *p = 5; + } + } + assert_eq!(s.collect::>().await, vec![5, 2, 3]); + }); +} + +#[test] +fn peekable_next_if_eq() { + block_on(async { + // first, try on references + let s = stream::iter(vec!["Heart", "of", "Gold"]).peekable(); + pin_mut!(s); + // try before `peek()` + assert_eq!(s.as_mut().next_if_eq(&"trillian").await, None); + assert_eq!(s.as_mut().next_if_eq(&"Heart").await, Some("Heart")); + // try after peek() + assert_eq!(s.as_mut().peek().await, Some(&"of")); + assert_eq!(s.as_mut().next_if_eq(&"of").await, Some("of")); + assert_eq!(s.as_mut().next_if_eq(&"zaphod").await, None); + // make sure `next()` still behaves + assert_eq!(s.next().await, Some("Gold")); + + // make sure comparison works for owned values + let s = stream::iter(vec![String::from("Ludicrous"), "speed".into()]).peekable(); + pin_mut!(s); + // make sure basic functionality works + assert_eq!(s.as_mut().next_if_eq("Ludicrous").await, Some("Ludicrous".into())); + 
assert_eq!(s.as_mut().next_if_eq("speed").await, Some("speed".into())); + assert_eq!(s.as_mut().next_if_eq("").await, None); + }); +} diff --git a/vendor/futures/tests/stream_select_all.rs b/vendor/futures/tests/stream_select_all.rs new file mode 100644 index 00000000..4ae07357 --- /dev/null +++ b/vendor/futures/tests/stream_select_all.rs @@ -0,0 +1,197 @@ +use futures::channel::mpsc; +use futures::executor::{block_on, block_on_stream}; +use futures::future::{self, FutureExt}; +use futures::stream::{self, select_all, FusedStream, SelectAll, StreamExt}; +use futures::task::Poll; +use futures_test::task::noop_context; + +#[test] +fn is_terminated() { + let mut cx = noop_context(); + let mut tasks = SelectAll::new(); + + assert_eq!(tasks.is_terminated(), false); + assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None)); + assert_eq!(tasks.is_terminated(), true); + + // Test that the sentinel value doesn't leak + assert_eq!(tasks.is_empty(), true); + assert_eq!(tasks.len(), 0); + + tasks.push(future::ready(1).into_stream()); + + assert_eq!(tasks.is_empty(), false); + assert_eq!(tasks.len(), 1); + + assert_eq!(tasks.is_terminated(), false); + assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(Some(1))); + assert_eq!(tasks.is_terminated(), false); + assert_eq!(tasks.poll_next_unpin(&mut cx), Poll::Ready(None)); + assert_eq!(tasks.is_terminated(), true); +} + +#[test] +fn issue_1626() { + let a = stream::iter(0..=2); + let b = stream::iter(10..=14); + + let mut s = block_on_stream(stream::select_all(vec![a, b])); + + assert_eq!(s.next(), Some(0)); + assert_eq!(s.next(), Some(10)); + assert_eq!(s.next(), Some(1)); + assert_eq!(s.next(), Some(11)); + assert_eq!(s.next(), Some(2)); + assert_eq!(s.next(), Some(12)); + assert_eq!(s.next(), Some(13)); + assert_eq!(s.next(), Some(14)); + assert_eq!(s.next(), None); +} + +#[test] +fn works_1() { + let (a_tx, a_rx) = mpsc::unbounded::(); + let (b_tx, b_rx) = mpsc::unbounded::(); + let (c_tx, c_rx) = mpsc::unbounded::(); + + let streams = vec![a_rx, b_rx, c_rx]; + + let mut stream = block_on_stream(select_all(streams)); + + b_tx.unbounded_send(99).unwrap(); + a_tx.unbounded_send(33).unwrap(); + assert_eq!(Some(33), stream.next()); + assert_eq!(Some(99), stream.next()); + + b_tx.unbounded_send(99).unwrap(); + a_tx.unbounded_send(33).unwrap(); + assert_eq!(Some(33), stream.next()); + assert_eq!(Some(99), stream.next()); + + c_tx.unbounded_send(42).unwrap(); + assert_eq!(Some(42), stream.next()); + a_tx.unbounded_send(43).unwrap(); + assert_eq!(Some(43), stream.next()); + + drop((a_tx, b_tx, c_tx)); + assert_eq!(None, stream.next()); +} + +#[test] +fn clear() { + let mut tasks = + select_all(vec![stream::iter(vec![1].into_iter()), stream::iter(vec![2].into_iter())]); + + assert_eq!(block_on(tasks.next()), Some(1)); + assert!(!tasks.is_empty()); + + tasks.clear(); + assert!(tasks.is_empty()); + + tasks.push(stream::iter(vec![3].into_iter())); + assert!(!tasks.is_empty()); + + tasks.clear(); + assert!(tasks.is_empty()); + + assert_eq!(block_on(tasks.next()), None); + assert!(tasks.is_terminated()); + tasks.clear(); + assert!(!tasks.is_terminated()); +} + +#[test] +fn iter_mut() { + let mut stream = + vec![stream::pending::<()>(), stream::pending::<()>(), stream::pending::<()>()] + .into_iter() + .collect::>(); + + let mut iter = stream.iter_mut(); + assert_eq!(iter.len(), 3); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 2); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 1); + assert!(iter.next().is_some()); + 
assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); + + let mut stream = vec![stream::iter(vec![]), stream::iter(vec![1]), stream::iter(vec![2])] + .into_iter() + .collect::>(); + + assert_eq!(stream.len(), 3); + assert_eq!(block_on(stream.next()), Some(1)); + assert_eq!(stream.len(), 2); + let mut iter = stream.iter_mut(); + assert_eq!(iter.len(), 2); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 1); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); + + assert_eq!(block_on(stream.next()), Some(2)); + assert_eq!(stream.len(), 2); + assert_eq!(block_on(stream.next()), None); + let mut iter = stream.iter_mut(); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); +} + +#[test] +fn iter() { + let stream = vec![stream::pending::<()>(), stream::pending::<()>(), stream::pending::<()>()] + .into_iter() + .collect::>(); + + let mut iter = stream.iter(); + assert_eq!(iter.len(), 3); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 2); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 1); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); + + let mut stream = vec![stream::iter(vec![]), stream::iter(vec![1]), stream::iter(vec![2])] + .into_iter() + .collect::>(); + + assert_eq!(stream.len(), 3); + assert_eq!(block_on(stream.next()), Some(1)); + assert_eq!(stream.len(), 2); + let mut iter = stream.iter(); + assert_eq!(iter.len(), 2); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 1); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); + + assert_eq!(block_on(stream.next()), Some(2)); + assert_eq!(stream.len(), 2); + assert_eq!(block_on(stream.next()), None); + let mut iter = stream.iter(); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); +} + +#[test] +fn into_iter() { + let stream = vec![stream::pending::<()>(), stream::pending::<()>(), stream::pending::<()>()] + .into_iter() + .collect::>(); + + let mut iter = stream.into_iter(); + assert_eq!(iter.len(), 3); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 2); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 1); + assert!(iter.next().is_some()); + assert_eq!(iter.len(), 0); + assert!(iter.next().is_none()); +} diff --git a/vendor/futures/tests/stream_select_next_some.rs b/vendor/futures/tests/stream_select_next_some.rs new file mode 100644 index 00000000..8252ad7b --- /dev/null +++ b/vendor/futures/tests/stream_select_next_some.rs @@ -0,0 +1,86 @@ +use futures::executor::block_on; +use futures::future::{self, FusedFuture, FutureExt}; +use futures::select; +use futures::stream::{FuturesUnordered, StreamExt}; +use futures::task::{Context, Poll}; +use futures_test::future::FutureTestExt; +use futures_test::task::new_count_waker; + +#[test] +fn is_terminated() { + let (waker, counter) = new_count_waker(); + let mut cx = Context::from_waker(&waker); + + let mut tasks = FuturesUnordered::new(); + + let mut select_next_some = tasks.select_next_some(); + assert_eq!(select_next_some.is_terminated(), false); + assert_eq!(select_next_some.poll_unpin(&mut cx), Poll::Pending); + assert_eq!(counter, 1); + assert_eq!(select_next_some.is_terminated(), true); + drop(select_next_some); + + tasks.push(future::ready(1)); + + let mut select_next_some = tasks.select_next_some(); + assert_eq!(select_next_some.is_terminated(), false); + assert_eq!(select_next_some.poll_unpin(&mut cx), Poll::Ready(1)); + 
assert_eq!(select_next_some.is_terminated(), false); + assert_eq!(select_next_some.poll_unpin(&mut cx), Poll::Pending); + assert_eq!(select_next_some.is_terminated(), true); +} + +#[test] +fn select() { + // Checks that even though `async_tasks` will yield a `None` and return + // `is_terminated() == true` during the first poll, it manages to toggle + // back to having items after a future is pushed into it during the second + // poll (after pending_once completes). + block_on(async { + let mut fut = future::ready(1).pending_once(); + let mut async_tasks = FuturesUnordered::new(); + let mut total = 0; + loop { + select! { + num = fut => { + total += num; + async_tasks.push(async { 5 }); + }, + num = async_tasks.select_next_some() => { + total += num; + } + complete => break, + } + } + assert_eq!(total, 6); + }); +} + +// Check that `select!` macro does not fail when importing from `futures_util`. +#[test] +fn futures_util_select() { + use futures_util::select; + + // Checks that even though `async_tasks` will yield a `None` and return + // `is_terminated() == true` during the first poll, it manages to toggle + // back to having items after a future is pushed into it during the second + // poll (after pending_once completes). + block_on(async { + let mut fut = future::ready(1).pending_once(); + let mut async_tasks = FuturesUnordered::new(); + let mut total = 0; + loop { + select! { + num = fut => { + total += num; + async_tasks.push(async { 5 }); + }, + num = async_tasks.select_next_some() => { + total += num; + } + complete => break, + } + } + assert_eq!(total, 6); + }); +} diff --git a/vendor/futures/tests/stream_split.rs b/vendor/futures/tests/stream_split.rs new file mode 100644 index 00000000..694c1518 --- /dev/null +++ b/vendor/futures/tests/stream_split.rs @@ -0,0 +1,57 @@ +use futures::executor::block_on; +use futures::sink::{Sink, SinkExt}; +use futures::stream::{self, Stream, StreamExt}; +use futures::task::{Context, Poll}; +use pin_project::pin_project; +use std::pin::Pin; + +#[test] +fn test_split() { + #[pin_project] + struct Join { + #[pin] + stream: T, + #[pin] + sink: U, + } + + impl Stream for Join { + type Item = T::Item; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().stream.poll_next(cx) + } + } + + impl, Item> Sink for Join { + type Error = U::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().sink.poll_ready(cx) + } + + fn start_send(self: Pin<&mut Self>, item: Item) -> Result<(), Self::Error> { + self.project().sink.start_send(item) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().sink.poll_flush(cx) + } + + fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + self.project().sink.poll_close(cx) + } + } + + let mut dest: Vec = Vec::new(); + { + let join = Join { stream: stream::iter(vec![10, 20, 30]), sink: &mut dest }; + + let (sink, stream) = join.split(); + let join = sink.reunite(stream).expect("test_split: reunite error"); + let (mut sink, stream) = join.split(); + let mut stream = stream.map(Ok); + block_on(sink.send_all(&mut stream)).unwrap(); + } + assert_eq!(dest, vec![10, 20, 30]); +} diff --git a/vendor/futures/tests/stream_try_stream.rs b/vendor/futures/tests/stream_try_stream.rs new file mode 100644 index 00000000..ef38c510 --- /dev/null +++ b/vendor/futures/tests/stream_try_stream.rs @@ -0,0 +1,183 @@ +use core::pin::Pin; +use std::convert::Infallible; + +use futures::{ + stream::{self, repeat, Repeat, 
StreamExt, TryStreamExt}, + task::Poll, + Stream, +}; +use futures_executor::block_on; +use futures_task::Context; +use futures_test::task::noop_context; + +#[test] +fn try_filter_map_after_err() { + let cx = &mut noop_context(); + let mut s = stream::iter(1..=3) + .map(Ok) + .try_filter_map(|v| async move { Err::, _>(v) }) + .filter_map(|r| async move { r.ok() }) + .boxed(); + assert_eq!(Poll::Ready(None), s.poll_next_unpin(cx)); +} + +#[test] +fn try_skip_while_after_err() { + let cx = &mut noop_context(); + let mut s = stream::iter(1..=3) + .map(Ok) + .try_skip_while(|_| async move { Err::<_, ()>(()) }) + .filter_map(|r| async move { r.ok() }) + .boxed(); + assert_eq!(Poll::Ready(None), s.poll_next_unpin(cx)); +} + +#[test] +fn try_take_while_after_err() { + let cx = &mut noop_context(); + let mut s = stream::iter(1..=3) + .map(Ok) + .try_take_while(|_| async move { Err::<_, ()>(()) }) + .filter_map(|r| async move { r.ok() }) + .boxed(); + assert_eq!(Poll::Ready(None), s.poll_next_unpin(cx)); +} + +#[test] +fn try_flatten_unordered() { + let test_st = stream::iter(1..7) + .map(|val: u32| { + if val % 2 == 0 { + Ok(stream::unfold((val, 1), |(val, pow)| async move { + Some((val.pow(pow), (val, pow + 1))) + }) + .take(3) + .map(move |val| if val % 16 != 0 { Ok(val) } else { Err(val) })) + } else { + Err(val) + } + }) + .map_ok(Box::pin) + .try_flatten_unordered(None); + + block_on(async move { + assert_eq!( + // All numbers can be divided by 16 and odds must be `Err` + // For all basic evens we must have powers from 1 to 3 + vec![ + Err(1), + Err(3), + Err(5), + Ok(2), + Ok(4), + Ok(6), + Ok(4), + Err(16), + Ok(36), + Ok(8), + Err(64), + Ok(216) + ], + test_st.collect::>().await + ) + }); + + #[derive(Clone, Debug)] + struct ErrorStream { + error_after: usize, + polled: usize, + } + + impl Stream for ErrorStream { + type Item = Result>, ()>; + + fn poll_next(mut self: Pin<&mut Self>, _: &mut Context) -> Poll> { + if self.polled > self.error_after { + panic!("Polled after error"); + } else { + let out = + if self.polled == self.error_after { Err(()) } else { Ok(repeat(Ok(()))) }; + self.polled += 1; + Poll::Ready(Some(out)) + } + } + } + + block_on(async move { + let mut st = ErrorStream { error_after: 3, polled: 0 }.try_flatten_unordered(None); + let mut ctr = 0; + while (st.try_next().await).is_ok() { + ctr += 1; + } + assert_eq!(ctr, 0); + + assert_eq!( + ErrorStream { error_after: 10, polled: 0 } + .try_flatten_unordered(None) + .inspect_ok(|_| panic!("Unexpected `Ok`")) + .try_collect::>() + .await, + Err(()) + ); + + let mut taken = 0; + assert_eq!( + ErrorStream { error_after: 10, polled: 0 } + .map_ok(|st| st.take(3)) + .try_flatten_unordered(1) + .inspect(|_| taken += 1) + .try_fold((), |(), res| async move { Ok(res) }) + .await, + Err(()) + ); + assert_eq!(taken, 31); + }) +} + +async fn is_even(number: u8) -> bool { + number % 2 == 0 +} + +#[test] +fn try_all() { + block_on(async { + let empty: [Result; 0] = []; + let st = stream::iter(empty); + let all = st.try_all(is_even).await; + assert_eq!(Ok(true), all); + + let st = stream::iter([Ok::<_, Infallible>(2), Ok(4), Ok(6), Ok(8)]); + let all = st.try_all(is_even).await; + assert_eq!(Ok(true), all); + + let st = stream::iter([Ok::<_, Infallible>(2), Ok(3), Ok(4)]); + let all = st.try_all(is_even).await; + assert_eq!(Ok(false), all); + + let st = stream::iter([Ok(2), Ok(4), Err("err"), Ok(8)]); + let all = st.try_all(is_even).await; + assert_eq!(Err("err"), all); + }); +} + +#[test] +fn try_any() { + block_on(async { + let empty: 
[Result; 0] = []; + let st = stream::iter(empty); + let any = st.try_any(is_even).await; + assert_eq!(Ok(false), any); + + let st = stream::iter([Ok::<_, Infallible>(1), Ok(2), Ok(3)]); + let any = st.try_any(is_even).await; + assert_eq!(Ok(true), any); + + let st = stream::iter([Ok::<_, Infallible>(1), Ok(3), Ok(5)]); + let any = st.try_any(is_even).await; + assert_eq!(Ok(false), any); + + let st = stream::iter([Ok(1), Ok(3), Err("err"), Ok(8)]); + let any = st.try_any(is_even).await; + assert_eq!(Err("err"), any); + }); +} diff --git a/vendor/futures/tests/stream_unfold.rs b/vendor/futures/tests/stream_unfold.rs new file mode 100644 index 00000000..16b10813 --- /dev/null +++ b/vendor/futures/tests/stream_unfold.rs @@ -0,0 +1,32 @@ +use futures::future; +use futures::stream; +use futures_test::future::FutureTestExt; +use futures_test::{assert_stream_done, assert_stream_next, assert_stream_pending}; + +#[test] +fn unfold1() { + let mut stream = stream::unfold(0, |state| { + if state <= 2 { + future::ready(Some((state * 2, state + 1))).pending_once() + } else { + future::ready(None).pending_once() + } + }); + + // Creates the future with the closure + // Not ready (delayed future) + assert_stream_pending!(stream); + // Future is ready, yields the item + assert_stream_next!(stream, 0); + + // Repeat + assert_stream_pending!(stream); + assert_stream_next!(stream, 2); + + assert_stream_pending!(stream); + assert_stream_next!(stream, 4); + + // No more items + assert_stream_pending!(stream); + assert_stream_done!(stream); +} diff --git a/vendor/futures/tests/task_arc_wake.rs b/vendor/futures/tests/task_arc_wake.rs new file mode 100644 index 00000000..aedc15bc --- /dev/null +++ b/vendor/futures/tests/task_arc_wake.rs @@ -0,0 +1,79 @@ +use futures::task::{self, ArcWake, Waker}; +use std::panic; +use std::sync::{Arc, Mutex}; + +struct CountingWaker { + nr_wake: Mutex, +} + +impl CountingWaker { + fn new() -> Self { + Self { nr_wake: Mutex::new(0) } + } + + fn wakes(&self) -> i32 { + *self.nr_wake.lock().unwrap() + } +} + +impl ArcWake for CountingWaker { + fn wake_by_ref(arc_self: &Arc) { + let mut lock = arc_self.nr_wake.lock().unwrap(); + *lock += 1; + } +} + +#[test] +fn create_from_arc() { + let some_w = Arc::new(CountingWaker::new()); + + let w1: Waker = task::waker(some_w.clone()); + assert_eq!(2, Arc::strong_count(&some_w)); + w1.wake_by_ref(); + assert_eq!(1, some_w.wakes()); + + let w2 = w1.clone(); + assert_eq!(3, Arc::strong_count(&some_w)); + + w2.wake_by_ref(); + assert_eq!(2, some_w.wakes()); + + drop(w2); + assert_eq!(2, Arc::strong_count(&some_w)); + drop(w1); + assert_eq!(1, Arc::strong_count(&some_w)); +} + +#[test] +fn ref_wake_same() { + let some_w = Arc::new(CountingWaker::new()); + + let w1: Waker = task::waker(some_w.clone()); + let w2 = task::waker_ref(&some_w); + let w3 = w2.clone(); + + assert!(w1.will_wake(&w2)); + assert!(w2.will_wake(&w3)); +} + +#[test] +fn proper_refcount_on_wake_panic() { + struct PanicWaker; + + impl ArcWake for PanicWaker { + fn wake_by_ref(_arc_self: &Arc) { + panic!("WAKE UP"); + } + } + + let some_w = Arc::new(PanicWaker); + + let w1: Waker = task::waker(some_w.clone()); + assert_eq!( + "WAKE UP", + *panic::catch_unwind(|| w1.wake_by_ref()).unwrap_err().downcast::<&str>().unwrap() + ); + assert_eq!(2, Arc::strong_count(&some_w)); // some_w + w1 + drop(w1); + assert_eq!(1, Arc::strong_count(&some_w)); // some_w +} diff --git a/vendor/futures/tests/task_atomic_waker.rs b/vendor/futures/tests/task_atomic_waker.rs new file mode 100644 index 
00000000..cec3db28 --- /dev/null +++ b/vendor/futures/tests/task_atomic_waker.rs @@ -0,0 +1,48 @@ +use futures::executor::block_on; +use futures::future::poll_fn; +use futures::task::{AtomicWaker, Poll}; +use std::sync::atomic::AtomicUsize; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::thread; + +#[test] +fn basic() { + let atomic_waker = Arc::new(AtomicWaker::new()); + let atomic_waker_copy = atomic_waker.clone(); + + let returned_pending = Arc::new(AtomicUsize::new(0)); + let returned_pending_copy = returned_pending.clone(); + + let woken = Arc::new(AtomicUsize::new(0)); + let woken_copy = woken.clone(); + + let t = thread::spawn(move || { + let mut pending_count = 0; + + block_on(poll_fn(move |cx| { + if woken_copy.load(Ordering::Relaxed) == 1 { + Poll::Ready(()) + } else { + // Assert we return pending exactly once + assert_eq!(0, pending_count); + pending_count += 1; + atomic_waker_copy.register(cx.waker()); + + returned_pending_copy.store(1, Ordering::Relaxed); + + Poll::Pending + } + })) + }); + + while returned_pending.load(Ordering::Relaxed) == 0 {} + + // give spawned thread some time to sleep in `block_on` + thread::yield_now(); + + woken.store(1, Ordering::Relaxed); + atomic_waker.wake(); + + t.join().unwrap(); +} diff --git a/vendor/futures/tests/test_macro.rs b/vendor/futures/tests/test_macro.rs new file mode 100644 index 00000000..6adf51d8 --- /dev/null +++ b/vendor/futures/tests/test_macro.rs @@ -0,0 +1,20 @@ +#[futures_test::test] +async fn it_works() { + let fut = async { true }; + assert!(fut.await); + + let fut = async { false }; + assert!(!fut.await); +} + +#[should_panic] +#[futures_test::test] +async fn it_is_being_run() { + let fut = async { false }; + assert!(fut.await); +} + +#[futures_test::test] +async fn return_ty() -> Result<(), ()> { + Ok(()) +} diff --git a/vendor/futures/tests/try_join.rs b/vendor/futures/tests/try_join.rs new file mode 100644 index 00000000..0281ab89 --- /dev/null +++ b/vendor/futures/tests/try_join.rs @@ -0,0 +1,35 @@ +#![deny(unreachable_code)] + +use futures::{executor::block_on, try_join}; + +// TODO: This abuses https://github.com/rust-lang/rust/issues/58733 in order to +// test behavior of the `try_join!` macro with the never type before it is +// stabilized. Once `!` is again stabilized this can be removed and replaced +// with direct use of `!` below where `Never` is used. +trait MyTrait { + type Output; +} +impl MyTrait for fn() -> T { + type Output = T; +} +type Never = ! 
as MyTrait>::Output; + +#[test] +fn try_join_never_error() { + block_on(async { + let future1 = async { Ok::<(), Never>(()) }; + let future2 = async { Ok::<(), Never>(()) }; + try_join!(future1, future2) + }) + .unwrap(); +} + +#[test] +fn try_join_never_ok() { + block_on(async { + let future1 = async { Err::(()) }; + let future2 = async { Err::(()) }; + try_join!(future1, future2) + }) + .unwrap_err(); +} diff --git a/vendor/futures/tests_disabled/all.rs b/vendor/futures/tests_disabled/all.rs new file mode 100644 index 00000000..a7a57104 --- /dev/null +++ b/vendor/futures/tests_disabled/all.rs @@ -0,0 +1,400 @@ +use futures::channel::oneshot::{self, Canceled}; +use futures::executor::block_on; +use futures::future; +use std::sync::mpsc::{channel, TryRecvError}; + +// mod support; +// use support::*; + +fn unselect(r: Result, Either<(E, B), (E, A)>>) -> Result { + match r { + Ok(Either::Left((t, _))) | Ok(Either::Right((t, _))) => Ok(t), + Err(Either::Left((e, _))) | Err(Either::Right((e, _))) => Err(e), + } +} + +#[test] +fn result_smoke() { + fn is_future_v(_: C) + where + A: Send + 'static, + B: Send + 'static, + C: Future, + { + } + + is_future_v::(f_ok(1).map(|a| a + 1)); + is_future_v::(f_ok(1).map_err(|a| a + 1)); + is_future_v::(f_ok(1).and_then(Ok)); + is_future_v::(f_ok(1).or_else(Err)); + is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3))); + is_future_v::(f_ok(1).map(f_ok).flatten()); + + assert_done(|| f_ok(1), r_ok(1)); + assert_done(|| f_err(1), r_err(1)); + assert_done(|| result(Ok(1)), r_ok(1)); + assert_done(|| result(Err(1)), r_err(1)); + assert_done(|| ok(1), r_ok(1)); + assert_done(|| err(1), r_err(1)); + assert_done(|| f_ok(1).map(|a| a + 2), r_ok(3)); + assert_done(|| f_err(1).map(|a| a + 2), r_err(1)); + assert_done(|| f_ok(1).map_err(|a| a + 2), r_ok(1)); + assert_done(|| f_err(1).map_err(|a| a + 2), r_err(3)); + assert_done(|| f_ok(1).and_then(|a| Ok(a + 2)), r_ok(3)); + assert_done(|| f_err(1).and_then(|a| Ok(a + 2)), r_err(1)); + assert_done(|| f_ok(1).and_then(|a| Err(a as u32 + 3)), r_err(4)); + assert_done(|| f_err(1).and_then(|a| Err(a as u32 + 4)), r_err(1)); + assert_done(|| f_ok(1).or_else(|a| Ok(a as i32 + 2)), r_ok(1)); + assert_done(|| f_err(1).or_else(|a| Ok(a as i32 + 2)), r_ok(3)); + assert_done(|| f_ok(1).or_else(|a| Err(a + 3)), r_ok(1)); + assert_done(|| f_err(1).or_else(|a| Err(a + 4)), r_err(5)); + assert_done(|| f_ok(1).select(f_err(2)).then(unselect), r_ok(1)); + assert_done(|| f_ok(1).select(Ok(2)).then(unselect), r_ok(1)); + assert_done(|| f_err(1).select(f_ok(1)).then(unselect), r_err(1)); + assert_done(|| f_ok(1).select(empty()).then(unselect), Ok(1)); + assert_done(|| empty().select(f_ok(1)).then(unselect), Ok(1)); + assert_done(|| f_ok(1).join(f_err(1)), Err(1)); + assert_done(|| f_ok(1).join(Ok(2)), Ok((1, 2))); + assert_done(|| f_err(1).join(f_ok(1)), Err(1)); + assert_done(|| f_ok(1).then(|_| Ok(2)), r_ok(2)); + assert_done(|| f_ok(1).then(|_| Err(2)), r_err(2)); + assert_done(|| f_err(1).then(|_| Ok(2)), r_ok(2)); + assert_done(|| f_err(1).then(|_| Err(2)), r_err(2)); +} + +#[test] +fn test_empty() { + fn empty() -> Empty { + future::empty() + } + + assert_empty(|| empty()); + assert_empty(|| empty().select(empty())); + assert_empty(|| empty().join(empty())); + assert_empty(|| empty().join(f_ok(1))); + assert_empty(|| f_ok(1).join(empty())); + assert_empty(|| empty().or_else(move |_| empty())); + assert_empty(|| empty().and_then(move |_| empty())); + assert_empty(|| f_err(1).or_else(move |_| empty())); + assert_empty(|| 
f_ok(1).and_then(move |_| empty())); + assert_empty(|| empty().map(|a| a + 1)); + assert_empty(|| empty().map_err(|a| a + 1)); + assert_empty(|| empty().then(|a| a)); +} + +#[test] +fn test_ok() { + assert_done(|| ok(1), r_ok(1)); + assert_done(|| err(1), r_err(1)); +} + +#[test] +fn flatten() { + fn ok(a: T) -> FutureResult { + future::ok(a) + } + fn err(b: E) -> FutureResult { + future::err(b) + } + + assert_done(|| ok(ok(1)).flatten(), r_ok(1)); + assert_done(|| ok(err(1)).flatten(), r_err(1)); + assert_done(|| err(1u32).map(ok).flatten(), r_err(1)); + assert_done(|| future::ok(future::ok(1)).flatten(), r_ok(1)); + assert_empty(|| ok(empty::()).flatten()); + assert_empty(|| empty::().map(ok).flatten()); +} + +#[test] +fn smoke_oneshot() { + assert_done( + || { + let (c, p) = oneshot::channel(); + c.send(1).unwrap(); + p + }, + Ok(1), + ); + assert_done( + || { + let (c, p) = oneshot::channel::(); + drop(c); + p + }, + Err(Canceled), + ); + let mut completes = Vec::new(); + assert_empty(|| { + let (a, b) = oneshot::channel::(); + completes.push(a); + b + }); + + let (c, mut p) = oneshot::channel::(); + drop(c); + let res = panic_waker_lw(|lw| p.poll(lw)); + assert!(res.is_err()); + let (c, p) = oneshot::channel::(); + drop(c); + let (tx, rx) = channel(); + p.then(move |_| tx.send(())).forget(); + rx.recv().unwrap(); +} + +#[test] +fn select_cancels() { + let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { + btx.send(b).unwrap(); + b + }); + let d = d.map(move |d| { + dtx.send(d).unwrap(); + d + }); + + let mut f = b.select(d).then(unselect); + // assert!(f.poll(&mut Task::new()).is_pending()); + assert!(brx.try_recv().is_err()); + assert!(drx.try_recv().is_err()); + a.send(1).unwrap(); + noop_waker_lw(|lw| { + let res = f.poll(lw); + assert!(res.ok().unwrap().is_ready()); + assert_eq!(brx.recv().unwrap(), 1); + drop(c); + assert!(drx.recv().is_err()); + + let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, _brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { + btx.send(b).unwrap(); + b + }); + let d = d.map(move |d| { + dtx.send(d).unwrap(); + d + }); + + let mut f = b.select(d).then(unselect); + assert!(f.poll(lw).ok().unwrap().is_pending()); + assert!(f.poll(lw).ok().unwrap().is_pending()); + a.send(1).unwrap(); + assert!(f.poll(lw).ok().unwrap().is_ready()); + drop((c, f)); + assert!(drx.recv().is_err()); + }) +} + +#[test] +fn join_cancels() { + let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, _brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { + btx.send(b).unwrap(); + b + }); + let d = d.map(move |d| { + dtx.send(d).unwrap(); + d + }); + + let mut f = b.join(d); + drop(a); + let res = panic_waker_lw(|lw| f.poll(lw)); + assert!(res.is_err()); + drop(c); + assert!(drx.recv().is_err()); + + let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, _brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |b| { + btx.send(b).unwrap(); + b + }); + let d = d.map(move |d| { + dtx.send(d).unwrap(); + d + }); + + let (tx, rx) = channel(); + let f = b.join(d); + f.then(move |_| { + tx.send(()).unwrap(); + let res: Result<(), ()> = Ok(()); + res + }) + .forget(); + assert!(rx.try_recv().is_err()); + drop(a); + rx.recv().unwrap(); + drop(c); + assert!(drx.recv().is_err()); +} + +#[test] +fn join_incomplete() { + let (a, b) = oneshot::channel::(); + let (tx, rx) = 
channel(); + noop_waker_lw(|lw| { + let mut f = ok(1).join(b).map(move |r| tx.send(r).unwrap()); + assert!(f.poll(lw).ok().unwrap().is_pending()); + assert!(rx.try_recv().is_err()); + a.send(2).unwrap(); + assert!(f.poll(lw).ok().unwrap().is_ready()); + assert_eq!(rx.recv().unwrap(), (1, 2)); + + let (a, b) = oneshot::channel::(); + let (tx, rx) = channel(); + let mut f = b.join(Ok(2)).map(move |r| tx.send(r).unwrap()); + assert!(f.poll(lw).ok().unwrap().is_pending()); + assert!(rx.try_recv().is_err()); + a.send(1).unwrap(); + assert!(f.poll(lw).ok().unwrap().is_ready()); + assert_eq!(rx.recv().unwrap(), (1, 2)); + + let (a, b) = oneshot::channel::(); + let (tx, rx) = channel(); + let mut f = ok(1).join(b).map_err(move |_r| tx.send(2).unwrap()); + assert!(f.poll(lw).ok().unwrap().is_pending()); + assert!(rx.try_recv().is_err()); + drop(a); + assert!(f.poll(lw).is_err()); + assert_eq!(rx.recv().unwrap(), 2); + + let (a, b) = oneshot::channel::(); + let (tx, rx) = channel(); + let mut f = b.join(Ok(2)).map_err(move |_r| tx.send(1).unwrap()); + assert!(f.poll(lw).ok().unwrap().is_pending()); + assert!(rx.try_recv().is_err()); + drop(a); + assert!(f.poll(lw).is_err()); + assert_eq!(rx.recv().unwrap(), 1); + }) +} + +#[test] +fn select2() { + assert_done(|| f_ok(2).select(empty()).then(unselect), Ok(2)); + assert_done(|| empty().select(f_ok(2)).then(unselect), Ok(2)); + assert_done(|| f_err(2).select(empty()).then(unselect), Err(2)); + assert_done(|| empty().select(f_err(2)).then(unselect), Err(2)); + + assert_done( + || { + f_ok(1).select(f_ok(2)).map_err(|_| 0).and_then(|either_tup| { + let (a, b) = either_tup.into_inner(); + b.map(move |b| a + b) + }) + }, + Ok(3), + ); + + // Finish one half of a select and then fail the second, ensuring that we + // get the notification of the second one. 
+ { + let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); + let f = b.select(d); + let (tx, rx) = channel(); + f.map(move |r| tx.send(r).unwrap()).forget(); + a.send(1).unwrap(); + let (val, next) = rx.recv().unwrap().into_inner(); + assert_eq!(val, 1); + let (tx, rx) = channel(); + next.map_err(move |_r| tx.send(2).unwrap()).forget(); + assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty); + drop(c); + assert_eq!(rx.recv().unwrap(), 2); + } + + // Fail the second half and ensure that we see the first one finish + { + let ((a, b), (c, d)) = (oneshot::channel::(), oneshot::channel::()); + let f = b.select(d); + let (tx, rx) = channel(); + f.map_err(move |r| tx.send((1, r.into_inner().1)).unwrap()).forget(); + drop(c); + let (val, next) = rx.recv().unwrap(); + assert_eq!(val, 1); + let (tx, rx) = channel(); + next.map(move |r| tx.send(r).unwrap()).forget(); + assert_eq!(rx.try_recv().err().unwrap(), TryRecvError::Empty); + a.send(2).unwrap(); + assert_eq!(rx.recv().unwrap(), 2); + } + + // Cancelling the first half should cancel the second + { + let ((_a, b), (_c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |v| { + btx.send(v).unwrap(); + v + }); + let d = d.map(move |v| { + dtx.send(v).unwrap(); + v + }); + let f = b.select(d); + drop(f); + assert!(drx.recv().is_err()); + assert!(brx.recv().is_err()); + } + + // Cancel after a schedule + { + let ((_a, b), (_c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |v| { + btx.send(v).unwrap(); + v + }); + let d = d.map(move |v| { + dtx.send(v).unwrap(); + v + }); + let mut f = b.select(d); + let _res = noop_waker_lw(|lw| f.poll(lw)); + drop(f); + assert!(drx.recv().is_err()); + assert!(brx.recv().is_err()); + } + + // Cancel propagates + { + let ((a, b), (_c, d)) = (oneshot::channel::(), oneshot::channel::()); + let ((btx, brx), (dtx, drx)) = (channel(), channel()); + let b = b.map(move |v| { + btx.send(v).unwrap(); + v + }); + let d = d.map(move |v| { + dtx.send(v).unwrap(); + v + }); + let (tx, rx) = channel(); + b.select(d).map(move |_| tx.send(()).unwrap()).forget(); + drop(a); + assert!(drx.recv().is_err()); + assert!(brx.recv().is_err()); + assert!(rx.recv().is_err()); + } + + // Cancel on early drop + { + let (tx, rx) = channel(); + let f = f_ok(1).select(empty::<_, ()>().map(move |()| { + tx.send(()).unwrap(); + 1 + })); + drop(f); + assert!(rx.recv().is_err()); + } +} + +#[test] +fn option() { + assert_eq!(Ok(Some(())), block_on(Some(ok::<(), ()>(())).into_future())); + assert_eq!(Ok::<_, ()>(None::<()>), block_on(None::>.into_future())); +} diff --git a/vendor/futures/tests_disabled/stream.rs b/vendor/futures/tests_disabled/stream.rs new file mode 100644 index 00000000..a4eec2c7 --- /dev/null +++ b/vendor/futures/tests_disabled/stream.rs @@ -0,0 +1,368 @@ +use futures::channel::mpsc; +use futures::channel::oneshot; +use futures::executor::{block_on, block_on_stream}; +use futures::future::{err, ok}; +use futures::stream::{empty, iter_ok, poll_fn, Peekable}; + +// mod support; +// use support::*; + +pub struct Iter { + iter: I, +} + +pub fn iter(i: J) -> Iter +where + J: IntoIterator>, +{ + Iter { iter: i.into_iter() } +} + +impl Stream for Iter +where + I: Iterator>, +{ + type Item = T; + type Error = E; + + fn poll_next(&mut self, _: &mut Context<'_>) -> Poll, E> { + match self.iter.next() { + Some(Ok(e)) => Ok(Poll::Ready(Some(e))), + 
Some(Err(e)) => Err(e), + None => Ok(Poll::Ready(None)), + } + } +} + +fn list() -> Box + Send> { + let (tx, rx) = mpsc::channel(1); + tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2))).and_then(|tx| tx.send(Ok(3))).forget(); + Box::new(rx.then(|r| r.unwrap())) +} + +fn err_list() -> Box + Send> { + let (tx, rx) = mpsc::channel(1); + tx.send(Ok(1)).and_then(|tx| tx.send(Ok(2))).and_then(|tx| tx.send(Err(3))).forget(); + Box::new(rx.then(|r| r.unwrap())) +} + +#[test] +fn map() { + assert_done(|| list().map(|a| a + 1).collect(), Ok(vec![2, 3, 4])); +} + +#[test] +fn map_err() { + assert_done(|| err_list().map_err(|a| a + 1).collect::>(), Err(4)); +} + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +struct FromErrTest(u32); + +impl From for FromErrTest { + fn from(i: u32) -> Self { + Self(i) + } +} + +#[test] +fn from_err() { + assert_done(|| err_list().err_into().collect::>(), Err(FromErrTest(3))); +} + +#[test] +fn fold() { + assert_done(|| list().fold(0, |a, b| ok::(a + b)), Ok(6)); + assert_done(|| err_list().fold(0, |a, b| ok::(a + b)), Err(3)); +} + +#[test] +fn filter() { + assert_done(|| list().filter(|a| ok(*a % 2 == 0)).collect(), Ok(vec![2])); +} + +#[test] +fn filter_map() { + assert_done( + || list().filter_map(|x| ok(if x % 2 == 0 { Some(x + 10) } else { None })).collect(), + Ok(vec![12]), + ); +} + +#[test] +fn and_then() { + assert_done(|| list().and_then(|a| Ok(a + 1)).collect(), Ok(vec![2, 3, 4])); + assert_done(|| list().and_then(|a| err::(a as u32)).collect::>(), Err(1)); +} + +#[test] +fn then() { + assert_done(|| list().then(|a| a.map(|e| e + 1)).collect(), Ok(vec![2, 3, 4])); +} + +#[test] +fn or_else() { + assert_done(|| err_list().or_else(|a| ok::(a as i32)).collect(), Ok(vec![1, 2, 3])); +} + +#[test] +fn flatten() { + assert_done(|| list().map(|_| list()).flatten().collect(), Ok(vec![1, 2, 3, 1, 2, 3, 1, 2, 3])); +} + +#[test] +fn skip() { + assert_done(|| list().skip(2).collect(), Ok(vec![3])); +} + +#[test] +fn skip_passes_errors_through() { + let mut s = block_on_stream(iter(vec![Err(1), Err(2), Ok(3), Ok(4), Ok(5)]).skip(1)); + assert_eq!(s.next(), Some(Err(1))); + assert_eq!(s.next(), Some(Err(2))); + assert_eq!(s.next(), Some(Ok(4))); + assert_eq!(s.next(), Some(Ok(5))); + assert_eq!(s.next(), None); +} + +#[test] +fn skip_while() { + assert_done(|| list().skip_while(|e| Ok(*e % 2 == 1)).collect(), Ok(vec![2, 3])); +} +#[test] +fn take() { + assert_done(|| list().take(2).collect(), Ok(vec![1, 2])); +} + +#[test] +fn take_while() { + assert_done(|| list().take_while(|e| Ok(*e < 3)).collect(), Ok(vec![1, 2])); +} + +#[test] +fn take_passes_errors_through() { + let mut s = block_on_stream(iter(vec![Err(1), Err(2), Ok(3), Ok(4), Err(4)]).take(1)); + assert_eq!(s.next(), Some(Err(1))); + assert_eq!(s.next(), Some(Err(2))); + assert_eq!(s.next(), Some(Ok(3))); + assert_eq!(s.next(), None); + + let mut s = block_on_stream(iter(vec![Ok(1), Err(2)]).take(1)); + assert_eq!(s.next(), Some(Ok(1))); + assert_eq!(s.next(), None); +} + +#[test] +fn peekable() { + assert_done(|| list().peekable().collect(), Ok(vec![1, 2, 3])); +} + +#[test] +fn fuse() { + let mut stream = block_on_stream(list().fuse()); + assert_eq!(stream.next(), Some(Ok(1))); + assert_eq!(stream.next(), Some(Ok(2))); + assert_eq!(stream.next(), Some(Ok(3))); + assert_eq!(stream.next(), None); + assert_eq!(stream.next(), None); + assert_eq!(stream.next(), None); +} + +#[test] +fn buffered() { + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::(); + let (c, d) = oneshot::channel::(); + + 
tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) + .and_then(|tx| tx.send(Box::new(d.map_err(|_| panic!())))) + .forget(); + + let mut rx = rx.buffered(2); + sassert_empty(&mut rx); + c.send(3).unwrap(); + sassert_empty(&mut rx); + a.send(5).unwrap(); + let mut rx = block_on_stream(rx); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), Some(Ok(3))); + assert_eq!(rx.next(), None); + + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::(); + let (c, d) = oneshot::channel::(); + + tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) + .and_then(|tx| tx.send(Box::new(d.map_err(|_| panic!())))) + .forget(); + + let mut rx = rx.buffered(1); + sassert_empty(&mut rx); + c.send(3).unwrap(); + sassert_empty(&mut rx); + a.send(5).unwrap(); + let mut rx = block_on_stream(rx); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), Some(Ok(3))); + assert_eq!(rx.next(), None); +} + +#[test] +fn unordered() { + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::(); + let (c, d) = oneshot::channel::(); + + tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) + .and_then(|tx| tx.send(Box::new(d.recover(|_| panic!())))) + .forget(); + + let mut rx = rx.buffer_unordered(2); + sassert_empty(&mut rx); + let mut rx = block_on_stream(rx); + c.send(3).unwrap(); + assert_eq!(rx.next(), Some(Ok(3))); + a.send(5).unwrap(); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), None); + + let (tx, rx) = mpsc::channel(1); + let (a, b) = oneshot::channel::(); + let (c, d) = oneshot::channel::(); + + tx.send(Box::new(b.recover(|_| panic!())) as Box + Send>) + .and_then(|tx| tx.send(Box::new(d.recover(|_| panic!())))) + .forget(); + + // We don't even get to see `c` until `a` completes. + let mut rx = rx.buffer_unordered(1); + sassert_empty(&mut rx); + c.send(3).unwrap(); + sassert_empty(&mut rx); + a.send(5).unwrap(); + let mut rx = block_on_stream(rx); + assert_eq!(rx.next(), Some(Ok(5))); + assert_eq!(rx.next(), Some(Ok(3))); + assert_eq!(rx.next(), None); +} + +#[test] +fn zip() { + assert_done(|| list().zip(list()).collect(), Ok(vec![(1, 1), (2, 2), (3, 3)])); + assert_done(|| list().zip(list().take(2)).collect(), Ok(vec![(1, 1), (2, 2)])); + assert_done(|| list().take(2).zip(list()).collect(), Ok(vec![(1, 1), (2, 2)])); + assert_done(|| err_list().zip(list()).collect::>(), Err(3)); + assert_done(|| list().zip(list().map(|x| x + 1)).collect(), Ok(vec![(1, 2), (2, 3), (3, 4)])); +} + +#[test] +fn peek() { + struct Peek { + inner: Peekable + Send>>, + } + + impl Future for Peek { + type Item = (); + type Error = u32; + + fn poll(&mut self, cx: &mut Context<'_>) -> Poll<(), u32> { + { + let res = ready!(self.inner.peek(cx))?; + assert_eq!(res, Some(&1)); + } + assert_eq!(self.inner.peek(cx).unwrap(), Some(&1).into()); + assert_eq!(self.inner.poll_next(cx).unwrap(), Some(1).into()); + Ok(Poll::Ready(())) + } + } + + block_on(Peek { inner: list().peekable() }).unwrap() +} + +#[test] +fn wait() { + assert_eq!(block_on_stream(list()).collect::, _>>(), Ok(vec![1, 2, 3])); +} + +#[test] +fn chunks() { + assert_done(|| list().chunks(3).collect(), Ok(vec![vec![1, 2, 3]])); + assert_done(|| list().chunks(1).collect(), Ok(vec![vec![1], vec![2], vec![3]])); + assert_done(|| list().chunks(2).collect(), Ok(vec![vec![1, 2], vec![3]])); + let mut list = block_on_stream(err_list().chunks(3)); + let i = list.next().unwrap().unwrap(); + assert_eq!(i, vec![1, 2]); + let i = list.next().unwrap().unwrap_err(); + assert_eq!(i, 3); +} + +#[test] +#[should_panic] +fn 
chunks_panic_on_cap_zero() { + let _ = list().chunks(0); +} + +#[test] +fn forward() { + let v = Vec::new(); + let v = block_on(iter_ok::<_, Never>(vec![0, 1]).forward(v)).unwrap().1; + assert_eq!(v, vec![0, 1]); + + let v = block_on(iter_ok::<_, Never>(vec![2, 3]).forward(v)).unwrap().1; + assert_eq!(v, vec![0, 1, 2, 3]); + + assert_done( + move || iter_ok::<_, Never>(vec![4, 5]).forward(v).map(|(_, s)| s), + Ok(vec![0, 1, 2, 3, 4, 5]), + ); +} + +#[test] +fn concat() { + let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); + assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9])); + + let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]); + assert_done(move || b.concat(), Err(())); +} + +#[test] +fn concat2() { + let a = iter_ok::<_, ()>(vec![vec![1, 2, 3], vec![4, 5, 6], vec![7, 8, 9]]); + assert_done(move || a.concat(), Ok(vec![1, 2, 3, 4, 5, 6, 7, 8, 9])); + + let b = iter(vec![Ok::<_, ()>(vec![1, 2, 3]), Err(()), Ok(vec![7, 8, 9])]); + assert_done(move || b.concat(), Err(())); + + let c = empty::, ()>(); + assert_done(move || c.concat(), Ok(vec![])) +} + +#[test] +fn stream_poll_fn() { + let mut counter = 5usize; + + let read_stream = poll_fn(move |_| -> Poll, std::io::Error> { + if counter == 0 { + return Ok(Poll::Ready(None)); + } + counter -= 1; + Ok(Poll::Ready(Some(counter))) + }); + + assert_eq!(block_on_stream(read_stream).count(), 5); +} + +#[test] +fn inspect() { + let mut seen = vec![]; + assert_done(|| list().inspect(|&a| seen.push(a)).collect(), Ok(vec![1, 2, 3])); + assert_eq!(seen, [1, 2, 3]); +} + +#[test] +fn inspect_err() { + let mut seen = vec![]; + assert_done(|| err_list().inspect_err(|&a| seen.push(a)).collect::>(), Err(3)); + assert_eq!(seen, [3]); +} diff --git a/vendor/serial_test/.cargo-checksum.json b/vendor/serial_test/.cargo-checksum.json new file mode 100644 index 00000000..f4d0ae46 --- /dev/null +++ b/vendor/serial_test/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.toml":"ea2e9ea5de16a27bc738199c6c86ef3e406d75dc51b0ed3ebf06acb348d8537b","LICENSE":"ac7e05bd11cc1cfc3f9452c1b9986a9b1d54e180fa88e44e69caf955f95dc8a6","README.md":"007276a4a3e2567175306b75f261b83e08b865ba6bc7a9a26530f8c0e335f42b","src/code_lock.rs":"f9d148fc3f5ac20e75350e0195040f768fc86e58803b85b209bb47ef2cac6c05","src/file_lock.rs":"3fda415db24307c42c0b87d4c4fb6df3887b2d69f16095da74c032611fe02b95","src/lib.rs":"b2e77bc55ee60dd84a1af4709e142e8ad0b8fb5322396b567555a00cf66cdff3","src/parallel_code_lock.rs":"1ec502242ec6fa80b9ac5b566c4d90b3d8b8fa91cb9767cc6c2ae55f07e2a346","src/parallel_file_lock.rs":"e46b8e9a16163399ca622be2a7a3ae750dfef4790a1562f2c7f3c959dc1487c2","src/rwlock.rs":"168e0f12615ebb03f51793f223e4ff93cc93af283badcbd2283f8f9729c8a911","src/serial_code_lock.rs":"512ddfed92121d33b63f15df8ab944e5169980a6a06acd0c643ffd3b5c43815d","src/serial_file_lock.rs":"6322cb6418e4786ea949184e0ae6bcb80134a0aea5d4c8c47e466793605a5e22","tests/tests.rs":"76ed1916b07416be281e407bd078c323f64c0830cc5c24898631a51a065bd8d9"},"package":"953ad9342b3aaca7cb43c45c097dd008d4907070394bd0751a0aa8817e5a018d"} \ No newline at end of file diff --git a/vendor/serial_test/Cargo.toml b/vendor/serial_test/Cargo.toml new file mode 100644 index 00000000..dfc41933 --- /dev/null +++ b/vendor/serial_test/Cargo.toml @@ -0,0 +1,90 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` 
dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "serial_test" +version = "3.0.0" +authors = ["Tom Parker-Shemilt "] +description = "Allows for the creation of serialised Rust tests" +readme = "README.md" +keywords = [ + "sequential", + "testing", + "parallel", +] +categories = ["development-tools::testing"] +license = "MIT" +repository = "https://github.com/palfrey/serial_test/" + +[package.metadata.cargo-all-features] +denylist = ["docsrs"] +skip_optional_dependencies = true + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = [ + "--cfg", + "docsrs", +] + +[dependencies.dashmap] +version = "5" + +[dependencies.document-features] +version = "0.2" +optional = true + +[dependencies.fslock] +version = "0.2" +optional = true + +[dependencies.futures] +version = "^0.3" +features = ["executor"] +optional = true +default_features = false + +[dependencies.lazy_static] +version = "1.2" + +[dependencies.log] +version = "0.4" +optional = true + +[dependencies.parking_lot] +version = "^0.12" + +[dependencies.serial_test_derive] +version = "~3.0.0" + +[dev-dependencies.itertools] +version = "0.10" + +[dev-dependencies.tokio] +version = "^1.27" +features = [ + "macros", + "rt", +] + +[features] +async = [ + "futures", + "serial_test_derive/async", +] +default = [ + "logging", + "async", +] +docsrs = ["document-features"] +file_locks = ["fslock"] +logging = ["log"] diff --git a/vendor/serial_test/LICENSE b/vendor/serial_test/LICENSE new file mode 100644 index 00000000..5a8adc1d --- /dev/null +++ b/vendor/serial_test/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2018 Tom Parker-Shemilt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/serial_test/README.md b/vendor/serial_test/README.md new file mode 100644 index 00000000..545443fb --- /dev/null +++ b/vendor/serial_test/README.md @@ -0,0 +1,49 @@ +# serial_test +[![Version](https://img.shields.io/crates/v/serial_test.svg)](https://crates.io/crates/serial_test) +[![Downloads](https://img.shields.io/crates/d/serial_test)](https://crates.io/crates/serial_test) +[![Docs](https://docs.rs/serial_test/badge.svg)](https://docs.rs/serial_test/) +[![MIT license](https://img.shields.io/crates/l/serial_test.svg)](./LICENSE) +[![Build Status](https://github.com/palfrey/serial_test/workflows/Continuous%20integration/badge.svg?branch=main)](https://github.com/palfrey/serial_test/actions) +[![MSRV: 1.68.2](https://flat.badgen.net/badge/MSRV/1.68.2/purple)](https://blog.rust-lang.org/2023/03/28/Rust-1.68.2.html) + +`serial_test` allows for the creation of serialised Rust tests using the `serial` attribute +e.g. +```rust +#[test] +#[serial] +fn test_serial_one() { + // Do things +} + +#[test] +#[serial] +fn test_serial_another() { + // Do things +} + +#[tokio::test] +#[serial] +async fn test_serial_another() { + // Do things asynchronously +} +``` +Multiple tests with the `serial` attribute are guaranteed to be executed in serial. Ordering of the tests is not guaranteed however. Other tests with the `parallel` attribute may run at the same time as each other, but not at the same time as a test with `serial`. Tests with neither attribute may run at any time and no guarantees are made about their timing! Both support optional keys for defining subsets of tests to run in serial together, see docs for more details. + +For cases like doctests and integration tests where the tests are run as separate processes, we also support `file_serial`, with +similar properties but based off file locking. Note that there are no guarantees about one test with `serial` and another with +`file_serial` as they lock using different methods. + +All of the attributes can also be applied at a `mod` level and will be automagically applied to all test functions in that block. + +## Usage +The minimum supported Rust version here is 1.68.2. Note this is minimum _supported_, as it may well compile with lower versions, but they're not supported at all. Upgrades to this will require at a major version bump. 1.x supports 1.51 if you need a lower version than that. + +Add to your Cargo.toml +```toml +[dev-dependencies] +serial_test = "*" +``` + +plus `use serial_test::serial;` in your imports section. + +You can then either add `#[serial]` or `#[serial(some_key)]` to tests as required. 
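For reference alongside the README above, a minimal sketch of the keyed grouping it mentions ("Both support optional keys for defining subsets of tests to run in serial together"). The test names and the `db` key are illustrative only and do not appear in the vendored crate.

```rust
// Hypothetical example based on the serial_test README; names are illustrative.
use serial_test::{parallel, serial};

#[test]
#[serial(db)] // runs serially with every other test keyed `db`
fn writes_to_db() {
    // exclusive access to the shared resource
}

#[test]
#[serial(db)]
fn also_writes_to_db() {
    // guaranteed never to overlap with `writes_to_db`
}

#[test]
#[parallel(db)] // may overlap with other `parallel(db)` tests, but not with `serial(db)` ones
fn reads_from_db() {
    // read-only work
}
```

Tests with different keys (or with no attribute at all) are unaffected by this group and may run at any time, as the README notes.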
diff --git a/vendor/serial_test/src/code_lock.rs b/vendor/serial_test/src/code_lock.rs new file mode 100644 index 00000000..24cb9525 --- /dev/null +++ b/vendor/serial_test/src/code_lock.rs @@ -0,0 +1,89 @@ +use crate::rwlock::{Locks, MutexGuardWrapper}; +use dashmap::{try_result::TryResult, DashMap}; +use lazy_static::lazy_static; +#[cfg(feature = "logging")] +use log::debug; +use std::sync::{atomic::AtomicU32, Arc}; +#[cfg(feature = "logging")] +use std::time::Instant; + +#[derive(Clone)] +pub(crate) struct UniqueReentrantMutex { + locks: Locks, + + // Only actually used for tests + #[allow(dead_code)] + pub(crate) id: u32, +} + +impl UniqueReentrantMutex { + pub(crate) fn lock(&self) -> MutexGuardWrapper { + self.locks.serial() + } + + pub(crate) fn start_parallel(&self) { + self.locks.start_parallel(); + } + + pub(crate) fn end_parallel(&self) { + self.locks.end_parallel(); + } + + #[cfg(test)] + pub fn parallel_count(&self) -> u32 { + self.locks.parallel_count() + } + + #[cfg(test)] + pub fn is_locked(&self) -> bool { + self.locks.is_locked() + } +} + +lazy_static! { + pub(crate) static ref LOCKS: Arc> = + Arc::new(DashMap::new()); + static ref MUTEX_ID: Arc = Arc::new(AtomicU32::new(1)); +} + +impl Default for UniqueReentrantMutex { + fn default() -> Self { + Self { + locks: Locks::new(), + id: MUTEX_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst), + } + } +} + +pub(crate) fn check_new_key(name: &str) { + #[cfg(feature = "logging")] + let start = Instant::now(); + loop { + #[cfg(feature = "logging")] + { + let duration = start.elapsed(); + debug!("Waiting for '{}' {:?}", name, duration); + } + // Check if a new key is needed. Just need a read lock, which can be done in sync with everyone else + match LOCKS.try_get(name) { + TryResult::Present(_) => { + return; + } + TryResult::Locked => { + continue; // wasn't able to get read lock + } + TryResult::Absent => {} // do the write path below + }; + + // This is the rare path, which avoids the multi-writer situation mostly + let try_entry = LOCKS.try_entry(name.to_string()); + + if let Some(entry) = try_entry { + entry.or_default(); + return; + } + + // If the try_entry fails, then go around the loop again + // Odds are another test was also locking on the write and has now written the key + } +} diff --git a/vendor/serial_test/src/file_lock.rs b/vendor/serial_test/src/file_lock.rs new file mode 100644 index 00000000..6e17fa05 --- /dev/null +++ b/vendor/serial_test/src/file_lock.rs @@ -0,0 +1,136 @@ +use fslock::LockFile; +#[cfg(feature = "logging")] +use log::debug; +use std::{ + env, + fs::{self, File}, + io::{Read, Write}, + path::Path, + thread, + time::Duration, +}; + +pub(crate) struct Lock { + lockfile: LockFile, + pub(crate) parallel_count: u32, + path: String, +} + +impl Lock { + // Can't use the same file as fslock truncates it + fn gen_count_file(path: &str) -> String { + format!("{}-count", path) + } + + fn read_parallel_count(path: &str) -> u32 { + let parallel_count = match File::open(Lock::gen_count_file(path)) { + Ok(mut file) => { + let mut count_buf = [0; 4]; + match file.read_exact(&mut count_buf) { + Ok(_) => u32::from_ne_bytes(count_buf), + Err(_err) => { + #[cfg(feature = "logging")] + debug!("Error loading count file: {}", _err); + 0u32 + } + } + } + Err(_) => 0, + }; + + #[cfg(feature = "logging")] + debug!("Parallel count for {:?} is {}", path, parallel_count); + parallel_count + } + + pub(crate) fn new(path: &str) -> Lock { + if !Path::new(path).exists() { + fs::write(path, "").unwrap_or_else(|_| panic!("Lock 
file path was {:?}", path)) + } + let mut lockfile = LockFile::open(path).unwrap(); + + #[cfg(feature = "logging")] + debug!("Waiting on {:?}", path); + + lockfile.lock().unwrap(); + + #[cfg(feature = "logging")] + debug!("Locked for {:?}", path); + + Lock { + lockfile, + parallel_count: Lock::read_parallel_count(path), + path: String::from(path), + } + } + + pub(crate) fn start_serial(self: &mut Lock) { + loop { + if self.parallel_count == 0 { + return; + } + #[cfg(feature = "logging")] + debug!("Waiting because parallel count is {}", self.parallel_count); + // unlock here is safe because we re-lock before returning + self.unlock(); + thread::sleep(Duration::from_secs(1)); + self.lockfile.lock().unwrap(); + #[cfg(feature = "logging")] + debug!("Locked for {:?}", self.path); + self.parallel_count = Lock::read_parallel_count(&self.path) + } + } + + fn unlock(self: &mut Lock) { + #[cfg(feature = "logging")] + debug!("Unlocking {}", self.path); + self.lockfile.unlock().unwrap(); + } + + pub(crate) fn end_serial(mut self: Lock) { + self.unlock(); + } + + fn write_parallel(self: &Lock) { + let mut file = File::create(&Lock::gen_count_file(&self.path)).unwrap(); + file.write_all(&self.parallel_count.to_ne_bytes()).unwrap(); + } + + pub(crate) fn start_parallel(self: &mut Lock) { + self.parallel_count += 1; + self.write_parallel(); + self.unlock(); + } + + pub(crate) fn end_parallel(mut self: Lock) { + assert!(self.parallel_count > 0); + self.parallel_count -= 1; + self.write_parallel(); + self.unlock(); + } +} + +pub(crate) fn path_for_name(name: &str) -> String { + let mut pathbuf = env::temp_dir(); + pathbuf.push(format!("serial-test-{}", name)); + pathbuf.into_os_string().into_string().unwrap() +} + +fn make_lock_for_name_and_path(name: &str, path: Option<&str>) -> Lock { + if let Some(opt_path) = path { + Lock::new(opt_path) + } else { + let default_path = path_for_name(name); + Lock::new(&default_path) + } +} + +pub(crate) fn get_locks(names: &Vec<&str>, path: Option<&str>) -> Vec { + if names.len() > 1 && path.is_some() { + panic!("Can't do file_parallel with both more than one name _and_ a specific path"); + } + names + .iter() + .map(|name| make_lock_for_name_and_path(name, path)) + .collect::>() +} diff --git a/vendor/serial_test/src/lib.rs b/vendor/serial_test/src/lib.rs new file mode 100644 index 00000000..d6519f04 --- /dev/null +++ b/vendor/serial_test/src/lib.rs @@ -0,0 +1,114 @@ +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +#![deny(unused_variables)] +#![deny(missing_docs)] +#![deny(unused_imports)] + +//! # serial_test +//! `serial_test` allows for the creation of serialised Rust tests using the [serial](macro@serial) attribute +//! e.g. +//! ```` +//! #[test] +//! #[serial] +//! fn test_serial_one() { +//! // Do things +//! } +//! +//! #[test] +//! #[serial(some_key)] +//! fn test_serial_another() { +//! // Do things +//! } +//! +//! #[test] +//! #[parallel] +//! fn test_parallel_another() { +//! // Do parallel things +//! } +//! ```` +//! Multiple tests with the [serial](macro@serial) attribute are guaranteed to be executed in serial. Ordering +//! of the tests is not guaranteed however. Other tests with the [parallel](macro@parallel) attribute may run +//! at the same time as each other, but not at the same time as a test with [serial](macro@serial). Tests with +//! neither attribute may run at any time and no guarantees are made about their timing! +//! +//! 
For cases like doctests and integration tests where the tests are run as separate processes, we also support +//! [file_serial](macro@file_serial)/[file_parallel](macro@file_parallel), with similar properties but based off file locking. Note that there are no +//! guarantees about one test with [serial](macro@serial)/[parallel](macro@parallel) and another with [file_serial](macro@file_serial)/[file_parallel](macro@file_parallel) +//! as they lock using different methods. +//! ```` +//! #[test] +//! #[file_serial] +//! fn test_serial_three() { +//! // Do things +//! } +//! ```` +//! +//! All of the attributes can also be applied at a `mod` level and will be automagically applied to all test functions in that block +//! ```` +//! #[cfg(test)] +//! #[serial] +//! mod serial_attr_tests { +//! fn foo() { +//! // Won't have `serial` applied, because not a test function +//! println!("Nothing"); +//! } +//! +//! #[test] +//! fn test_bar() { +//! // Will be run serially +//! } +//!} +//! ```` +//! +//! ## Feature flags +#![cfg_attr( + feature = "docsrs", + cfg_attr(doc, doc = ::document_features::document_features!()) +)] + +mod code_lock; +mod parallel_code_lock; +mod rwlock; +mod serial_code_lock; + +#[cfg(feature = "file_locks")] +mod file_lock; +#[cfg(feature = "file_locks")] +mod parallel_file_lock; +#[cfg(feature = "file_locks")] +mod serial_file_lock; + +#[cfg(feature = "async")] +#[doc(hidden)] +pub use parallel_code_lock::{local_async_parallel_core, local_async_parallel_core_with_return}; + +#[doc(hidden)] +pub use parallel_code_lock::{local_parallel_core, local_parallel_core_with_return}; + +#[cfg(feature = "async")] +#[doc(hidden)] +pub use serial_code_lock::{local_async_serial_core, local_async_serial_core_with_return}; + +#[doc(hidden)] +pub use serial_code_lock::{local_serial_core, local_serial_core_with_return}; + +#[cfg(all(feature = "file_locks", feature = "async"))] +#[doc(hidden)] +pub use serial_file_lock::{fs_async_serial_core, fs_async_serial_core_with_return}; + +#[cfg(feature = "file_locks")] +#[doc(hidden)] +pub use serial_file_lock::{fs_serial_core, fs_serial_core_with_return}; + +#[cfg(all(feature = "file_locks", feature = "async"))] +#[doc(hidden)] +pub use parallel_file_lock::{fs_async_parallel_core, fs_async_parallel_core_with_return}; + +#[cfg(feature = "file_locks")] +#[doc(hidden)] +pub use parallel_file_lock::{fs_parallel_core, fs_parallel_core_with_return}; + +// Re-export #[serial/parallel]. 
+pub use serial_test_derive::{parallel, serial}; + +#[cfg(feature = "file_locks")] +pub use serial_test_derive::{file_parallel, file_serial}; diff --git a/vendor/serial_test/src/parallel_code_lock.rs b/vendor/serial_test/src/parallel_code_lock.rs new file mode 100644 index 00000000..91cf9bb0 --- /dev/null +++ b/vendor/serial_test/src/parallel_code_lock.rs @@ -0,0 +1,193 @@ +#![allow(clippy::await_holding_lock)] + +use crate::code_lock::{check_new_key, LOCKS}; +#[cfg(feature = "async")] +use futures::FutureExt; +use std::panic; + +fn get_locks( + names: Vec<&str>, +) -> Vec> { + names + .into_iter() + .map(|name| { + check_new_key(name); + LOCKS.get(name).expect("key to be set") + }) + .collect::>() +} + +#[doc(hidden)] +pub fn local_parallel_core_with_return( + names: Vec<&str>, + _path: Option<&str>, + function: fn() -> Result<(), E>, +) -> Result<(), E> { + let locks = get_locks(names); + + locks.iter().for_each(|lock| lock.start_parallel()); + let res = panic::catch_unwind(function); + locks.iter().for_each(|lock| lock.end_parallel()); + match res { + Ok(ret) => ret, + Err(err) => { + panic::resume_unwind(err); + } + } +} + +#[doc(hidden)] +pub fn local_parallel_core(names: Vec<&str>, _path: Option<&str>, function: fn()) { + let locks = get_locks(names); + locks.iter().for_each(|lock| lock.start_parallel()); + let res = panic::catch_unwind(|| { + function(); + }); + locks.iter().for_each(|lock| lock.end_parallel()); + if let Err(err) = res { + panic::resume_unwind(err); + } +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn local_async_parallel_core_with_return( + names: Vec<&str>, + _path: Option<&str>, + fut: impl std::future::Future> + panic::UnwindSafe, +) -> Result<(), E> { + let locks = get_locks(names); + locks.iter().for_each(|lock| lock.start_parallel()); + let res = fut.catch_unwind().await; + locks.iter().for_each(|lock| lock.end_parallel()); + match res { + Ok(ret) => ret, + Err(err) => { + panic::resume_unwind(err); + } + } +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn local_async_parallel_core( + names: Vec<&str>, + _path: Option<&str>, + fut: impl std::future::Future + panic::UnwindSafe, +) { + let locks = get_locks(names); + locks.iter().for_each(|lock| lock.start_parallel()); + let res = fut.catch_unwind().await; + locks.iter().for_each(|lock| lock.end_parallel()); + if let Err(err) = res { + panic::resume_unwind(err); + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "async")] + use crate::{local_async_parallel_core, local_async_parallel_core_with_return}; + + use crate::{code_lock::LOCKS, local_parallel_core, local_parallel_core_with_return}; + use std::{io::Error, panic}; + + #[test] + fn unlock_on_assert_sync_without_return() { + let _ = panic::catch_unwind(|| { + local_parallel_core(vec!["unlock_on_assert_sync_without_return"], None, || { + assert!(false); + }) + }); + assert_eq!( + LOCKS + .get("unlock_on_assert_sync_without_return") + .unwrap() + .parallel_count(), + 0 + ); + } + + #[test] + fn unlock_on_assert_sync_with_return() { + let _ = panic::catch_unwind(|| { + local_parallel_core_with_return( + vec!["unlock_on_assert_sync_with_return"], + None, + || -> Result<(), Error> { + assert!(false); + Ok(()) + }, + ) + }); + assert_eq!( + LOCKS + .get("unlock_on_assert_sync_with_return") + .unwrap() + .parallel_count(), + 0 + ); + } + + #[tokio::test] + #[cfg(feature = "async")] + async fn unlock_on_assert_async_without_return() { + async fn demo_assert() { + assert!(false); + } + async fn call_serial_test_fn() { + 
local_async_parallel_core( + vec!["unlock_on_assert_async_without_return"], + None, + demo_assert(), + ) + .await + } + // as per https://stackoverflow.com/a/66529014/320546 + let _ = panic::catch_unwind(|| { + let handle = tokio::runtime::Handle::current(); + let _enter_guard = handle.enter(); + futures::executor::block_on(call_serial_test_fn()); + }); + assert_eq!( + LOCKS + .get("unlock_on_assert_async_without_return") + .unwrap() + .parallel_count(), + 0 + ); + } + + #[tokio::test] + #[cfg(feature = "async")] + async fn unlock_on_assert_async_with_return() { + async fn demo_assert() -> Result<(), Error> { + assert!(false); + Ok(()) + } + + #[allow(unused_must_use)] + async fn call_serial_test_fn() { + local_async_parallel_core_with_return( + vec!["unlock_on_assert_async_with_return"], + None, + demo_assert(), + ) + .await; + } + + // as per https://stackoverflow.com/a/66529014/320546 + let _ = panic::catch_unwind(|| { + let handle = tokio::runtime::Handle::current(); + let _enter_guard = handle.enter(); + futures::executor::block_on(call_serial_test_fn()); + }); + assert_eq!( + LOCKS + .get("unlock_on_assert_async_with_return") + .unwrap() + .parallel_count(), + 0 + ); + } +} diff --git a/vendor/serial_test/src/parallel_file_lock.rs b/vendor/serial_test/src/parallel_file_lock.rs new file mode 100644 index 00000000..beed7e10 --- /dev/null +++ b/vendor/serial_test/src/parallel_file_lock.rs @@ -0,0 +1,187 @@ +use std::panic; + +#[cfg(feature = "async")] +use futures::FutureExt; + +use crate::file_lock::get_locks; + +#[doc(hidden)] +pub fn fs_parallel_core(names: Vec<&str>, path: Option<&str>, function: fn()) { + get_locks(&names, path) + .iter_mut() + .for_each(|lock| lock.start_parallel()); + let res = panic::catch_unwind(|| { + function(); + }); + get_locks(&names, path) + .into_iter() + .for_each(|lock| lock.end_parallel()); + if let Err(err) = res { + panic::resume_unwind(err); + } +} + +#[doc(hidden)] +pub fn fs_parallel_core_with_return( + names: Vec<&str>, + path: Option<&str>, + function: fn() -> Result<(), E>, +) -> Result<(), E> { + get_locks(&names, path) + .iter_mut() + .for_each(|lock| lock.start_parallel()); + let res = panic::catch_unwind(function); + get_locks(&names, path) + .into_iter() + .for_each(|lock| lock.end_parallel()); + match res { + Ok(ret) => ret, + Err(err) => { + panic::resume_unwind(err); + } + } +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn fs_async_parallel_core_with_return( + names: Vec<&str>, + path: Option<&str>, + fut: impl std::future::Future> + panic::UnwindSafe, +) -> Result<(), E> { + get_locks(&names, path) + .iter_mut() + .for_each(|lock| lock.start_parallel()); + let res = fut.catch_unwind().await; + get_locks(&names, path) + .into_iter() + .for_each(|lock| lock.end_parallel()); + match res { + Ok(ret) => ret, + Err(err) => { + panic::resume_unwind(err); + } + } +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn fs_async_parallel_core( + names: Vec<&str>, + path: Option<&str>, + fut: impl std::future::Future + panic::UnwindSafe, +) { + get_locks(&names, path) + .iter_mut() + .for_each(|lock| lock.start_parallel()); + + let res = fut.catch_unwind().await; + get_locks(&names, path) + .into_iter() + .for_each(|lock| lock.end_parallel()); + if let Err(err) = res { + panic::resume_unwind(err); + } +} + +#[cfg(test)] +mod tests { + #[cfg(feature = "async")] + use crate::{fs_async_parallel_core, fs_async_parallel_core_with_return}; + + use crate::{ + file_lock::{path_for_name, Lock}, + fs_parallel_core, 
fs_parallel_core_with_return, + }; + use std::{io::Error, panic}; + + fn unlock_ok(lock_path: &str) { + let lock = Lock::new(lock_path); + assert_eq!(lock.parallel_count, 0); + } + + #[test] + fn unlock_on_assert_sync_without_return() { + let lock_path = path_for_name("unlock_on_assert_sync_without_return"); + let _ = panic::catch_unwind(|| { + fs_parallel_core( + vec!["unlock_on_assert_sync_without_return"], + Some(&lock_path), + || { + assert!(false); + }, + ) + }); + unlock_ok(&lock_path); + } + + #[test] + fn unlock_on_assert_sync_with_return() { + let lock_path = path_for_name("unlock_on_assert_sync_with_return"); + let _ = panic::catch_unwind(|| { + fs_parallel_core_with_return( + vec!["unlock_on_assert_sync_with_return"], + Some(&lock_path), + || -> Result<(), Error> { + assert!(false); + Ok(()) + }, + ) + }); + unlock_ok(&lock_path); + } + + #[tokio::test] + #[cfg(feature = "async")] + async fn unlock_on_assert_async_without_return() { + let lock_path = path_for_name("unlock_on_assert_async_without_return"); + async fn demo_assert() { + assert!(false); + } + async fn call_serial_test_fn(lock_path: &str) { + fs_async_parallel_core( + vec!["unlock_on_assert_async_without_return"], + Some(&lock_path), + demo_assert(), + ) + .await + } + + // as per https://stackoverflow.com/a/66529014/320546 + let _ = panic::catch_unwind(|| { + let handle = tokio::runtime::Handle::current(); + let _enter_guard = handle.enter(); + futures::executor::block_on(call_serial_test_fn(&lock_path)); + }); + unlock_ok(&lock_path); + } + + #[tokio::test] + #[cfg(feature = "async")] + async fn unlock_on_assert_async_with_return() { + let lock_path = path_for_name("unlock_on_assert_async_with_return"); + + async fn demo_assert() -> Result<(), Error> { + assert!(false); + Ok(()) + } + + #[allow(unused_must_use)] + async fn call_serial_test_fn(lock_path: &str) { + fs_async_parallel_core_with_return( + vec!["unlock_on_assert_async_with_return"], + Some(&lock_path), + demo_assert(), + ) + .await; + } + + // as per https://stackoverflow.com/a/66529014/320546 + let _ = panic::catch_unwind(|| { + let handle = tokio::runtime::Handle::current(); + let _enter_guard = handle.enter(); + futures::executor::block_on(call_serial_test_fn(&lock_path)); + }); + unlock_ok(&lock_path); + } +} diff --git a/vendor/serial_test/src/rwlock.rs b/vendor/serial_test/src/rwlock.rs new file mode 100644 index 00000000..c61944b3 --- /dev/null +++ b/vendor/serial_test/src/rwlock.rs @@ -0,0 +1,102 @@ +use parking_lot::{Condvar, Mutex, ReentrantMutex, ReentrantMutexGuard}; +use std::{sync::Arc, time::Duration}; + +struct LockState { + parallels: u32, +} + +struct LockData { + mutex: Mutex, + serial: ReentrantMutex<()>, + condvar: Condvar, +} + +#[derive(Clone)] +pub(crate) struct Locks { + arc: Arc, +} + +pub(crate) struct MutexGuardWrapper<'a> { + #[allow(dead_code)] // need it around to get dropped + mutex_guard: ReentrantMutexGuard<'a, ()>, + locks: Locks, +} + +impl<'a> Drop for MutexGuardWrapper<'a> { + fn drop(&mut self) { + self.locks.arc.condvar.notify_one(); + } +} + +impl Locks { + pub fn new() -> Locks { + Locks { + arc: Arc::new(LockData { + mutex: Mutex::new(LockState { parallels: 0 }), + condvar: Condvar::new(), + serial: Default::default(), + }), + } + } + + #[cfg(test)] + pub fn is_locked(&self) -> bool { + self.arc.serial.is_locked() + } + + pub fn serial(&self) -> MutexGuardWrapper { + let mut lock_state = self.arc.mutex.lock(); + loop { + // If all the things we want are true, try to lock out serial + if lock_state.parallels == 0 
{ + let possible_serial_lock = self.arc.serial.try_lock(); + if let Some(serial_lock) = possible_serial_lock { + return MutexGuardWrapper { + mutex_guard: serial_lock, + locks: self.clone(), + }; + } + } + + self.arc + .condvar + .wait_for(&mut lock_state, Duration::from_secs(1)); + } + } + + pub fn start_parallel(&self) { + let mut lock_state = self.arc.mutex.lock(); + loop { + if lock_state.parallels > 0 { + // fast path, as someone else already has it locked + lock_state.parallels += 1; + return; + } + + let possible_serial_lock = self.arc.serial.try_lock(); + if possible_serial_lock.is_some() { + // We now know no-one else has the serial lock, so we can add to parallel + lock_state.parallels = 1; // Had to have been 0 before, as otherwise we'd have hit the fast path + return; + } + + self.arc + .condvar + .wait_for(&mut lock_state, Duration::from_secs(1)); + } + } + + pub fn end_parallel(&self) { + let mut lock_state = self.arc.mutex.lock(); + assert!(lock_state.parallels > 0); + lock_state.parallels -= 1; + drop(lock_state); + self.arc.condvar.notify_one(); + } + + #[cfg(test)] + pub fn parallel_count(&self) -> u32 { + let lock_state = self.arc.mutex.lock(); + lock_state.parallels + } +} diff --git a/vendor/serial_test/src/serial_code_lock.rs b/vendor/serial_test/src/serial_code_lock.rs new file mode 100644 index 00000000..10454a93 --- /dev/null +++ b/vendor/serial_test/src/serial_code_lock.rs @@ -0,0 +1,118 @@ +#![allow(clippy::await_holding_lock)] + +use crate::code_lock::{check_new_key, LOCKS}; + +#[doc(hidden)] +macro_rules! core_internal { + ($names: ident) => { + let unlocks: Vec<_> = $names + .into_iter() + .map(|name| { + check_new_key(name); + LOCKS.get(name).expect("key to be set") + }) + .collect(); + let _guards: Vec<_> = unlocks.iter().map(|unlock| unlock.lock()).collect(); + }; +} + +#[doc(hidden)] +pub fn local_serial_core_with_return( + names: Vec<&str>, + _path: Option, + function: fn() -> Result<(), E>, +) -> Result<(), E> { + core_internal!(names); + function() +} + +#[doc(hidden)] +pub fn local_serial_core(names: Vec<&str>, _path: Option<&str>, function: fn()) { + core_internal!(names); + function(); +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn local_async_serial_core_with_return( + names: Vec<&str>, + _path: Option<&str>, + fut: impl std::future::Future> + std::marker::Send, +) -> Result<(), E> { + core_internal!(names); + fut.await +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn local_async_serial_core( + names: Vec<&str>, + _path: Option<&str>, + fut: impl std::future::Future, +) { + core_internal!(names); + fut.await; +} + +#[cfg(test)] +#[allow(clippy::print_stdout)] +mod tests { + use super::local_serial_core; + use crate::code_lock::{check_new_key, LOCKS}; + use itertools::Itertools; + use parking_lot::RwLock; + use std::{ + sync::{Arc, Barrier}, + thread, + time::Duration, + }; + + #[test] + fn test_hammer_check_new_key() { + let ptrs = Arc::new(RwLock::new(Vec::new())); + let mut threads = Vec::new(); + + let count = 100; + let barrier = Arc::new(Barrier::new(count)); + + for _ in 0..count { + let local_locks = LOCKS.clone(); + let local_ptrs = ptrs.clone(); + let c = barrier.clone(); + threads.push(thread::spawn(move || { + c.wait(); + check_new_key("foo"); + { + let unlock = local_locks.get("foo").expect("read didn't work"); + let mutex = unlock.value(); + + let mut ptr_guard = local_ptrs + .try_write_for(Duration::from_secs(1)) + .expect("write lock didn't work"); + ptr_guard.push(mutex.id); + } + + c.wait(); + })); + } + 
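+        // The joins and assertions below verify that every spawned thread observed the
+        // same mutex id for "foo": even with `check_new_key` racing across `count`
+        // threads, only one lock entry is ever created for a given key.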
for thread in threads { + thread.join().expect("thread join worked"); + } + let ptrs_read_lock = ptrs + .try_read_recursive_for(Duration::from_secs(1)) + .expect("ptrs read work"); + assert_eq!(ptrs_read_lock.len(), count); + println!("{:?}", ptrs_read_lock); + assert_eq!(ptrs_read_lock.iter().unique().count(), 1); + } + + #[test] + fn unlock_on_assert() { + let _ = std::panic::catch_unwind(|| { + local_serial_core(vec!["assert"], None, || { + assert!(false); + }) + }); + assert!(!LOCKS.get("assert").unwrap().is_locked()); + } +} diff --git a/vendor/serial_test/src/serial_file_lock.rs b/vendor/serial_test/src/serial_file_lock.rs new file mode 100644 index 00000000..ba921720 --- /dev/null +++ b/vendor/serial_test/src/serial_file_lock.rs @@ -0,0 +1,86 @@ +use std::panic; + +use crate::file_lock::get_locks; + +#[doc(hidden)] +pub fn fs_serial_core(names: Vec<&str>, path: Option<&str>, function: fn()) { + let mut locks = get_locks(&names, path); + locks.iter_mut().for_each(|lock| lock.start_serial()); + let res = panic::catch_unwind(function); + locks.into_iter().for_each(|lock| lock.end_serial()); + if let Err(err) = res { + panic::resume_unwind(err); + } +} + +#[doc(hidden)] +pub fn fs_serial_core_with_return( + names: Vec<&str>, + path: Option<&str>, + function: fn() -> Result<(), E>, +) -> Result<(), E> { + let mut locks = get_locks(&names, path); + locks.iter_mut().for_each(|lock| lock.start_serial()); + let res = panic::catch_unwind(function); + locks.into_iter().for_each(|lock| lock.end_serial()); + match res { + Ok(ret) => ret, + Err(err) => { + panic::resume_unwind(err); + } + } +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn fs_async_serial_core_with_return( + names: Vec<&str>, + path: Option<&str>, + fut: impl std::future::Future>, +) -> Result<(), E> { + let mut locks = get_locks(&names, path); + locks.iter_mut().for_each(|lock| lock.start_serial()); + let ret: Result<(), E> = fut.await; + locks.into_iter().for_each(|lock| lock.end_serial()); + ret +} + +#[doc(hidden)] +#[cfg(feature = "async")] +pub async fn fs_async_serial_core( + names: Vec<&str>, + path: Option<&str>, + fut: impl std::future::Future, +) { + let mut locks = get_locks(&names, path); + locks.iter_mut().for_each(|lock| lock.start_serial()); + fut.await; + locks.into_iter().for_each(|lock| lock.end_serial()); +} + +#[cfg(test)] +mod tests { + use std::panic; + + use fslock::LockFile; + + use super::fs_serial_core; + use crate::file_lock::path_for_name; + + #[test] + fn test_serial() { + fs_serial_core(vec!["test"], None, || {}); + } + + #[test] + fn unlock_on_assert_sync_without_return() { + let lock_path = path_for_name("unlock_on_assert_sync_without_return"); + let _ = panic::catch_unwind(|| { + fs_serial_core(vec!["foo"], Some(&lock_path), || { + assert!(false); + }) + }); + let mut lockfile = LockFile::open(&lock_path).unwrap(); + assert!(lockfile.try_lock().unwrap()); + } +} diff --git a/vendor/serial_test/tests/tests.rs b/vendor/serial_test/tests/tests.rs new file mode 100644 index 00000000..123dc351 --- /dev/null +++ b/vendor/serial_test/tests/tests.rs @@ -0,0 +1,8 @@ +use serial_test::local_serial_core; + +#[test] +fn test_empty_serial_call() { + local_serial_core(vec!["beta"], None, || { + println!("Bar"); + }); +} diff --git a/vendor/serial_test_derive/.cargo-checksum.json b/vendor/serial_test_derive/.cargo-checksum.json new file mode 100644 index 00000000..5bfbcdda --- /dev/null +++ b/vendor/serial_test_derive/.cargo-checksum.json @@ -0,0 +1 @@ 
+{"files":{"Cargo.toml":"bf81287df4268ee025f5114a1378300dc4fde80b10cdf02cf30ed68c8baf2ee7","LICENSE":"ac7e05bd11cc1cfc3f9452c1b9986a9b1d54e180fa88e44e69caf955f95dc8a6","README.md":"007276a4a3e2567175306b75f261b83e08b865ba6bc7a9a26530f8c0e335f42b","src/lib.rs":"4660aa68ec05f57ef4dc35e811e93e08654cff229c925ac0c3bf5bf6f45eb610"},"package":"b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212"} \ No newline at end of file diff --git a/vendor/serial_test_derive/Cargo.toml b/vendor/serial_test_derive/Cargo.toml new file mode 100644 index 00000000..065a8cd9 --- /dev/null +++ b/vendor/serial_test_derive/Cargo.toml @@ -0,0 +1,43 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite `path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2018" +name = "serial_test_derive" +version = "3.0.0" +authors = ["Tom Parker-Shemilt "] +description = "Helper crate for serial_test" +readme = "README.md" +categories = ["development-tools::testing"] +license = "MIT" +repository = "https://github.com/palfrey/serial_test/" + +[lib] +proc-macro = true + +[dependencies.proc-macro2] +version = "1.0.60" + +[dependencies.quote] +version = "1.0" + +[dependencies.syn] +version = "2" +features = ["full"] + +[dev-dependencies.env_logger] +version = "0.10" + +[dev-dependencies.prettyplease] +version = "0.2" + +[features] +async = [] diff --git a/vendor/serial_test_derive/LICENSE b/vendor/serial_test_derive/LICENSE new file mode 100644 index 00000000..5a8adc1d --- /dev/null +++ b/vendor/serial_test_derive/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2018 Tom Parker-Shemilt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/serial_test_derive/README.md b/vendor/serial_test_derive/README.md new file mode 100644 index 00000000..545443fb --- /dev/null +++ b/vendor/serial_test_derive/README.md @@ -0,0 +1,49 @@ +# serial_test +[![Version](https://img.shields.io/crates/v/serial_test.svg)](https://crates.io/crates/serial_test) +[![Downloads](https://img.shields.io/crates/d/serial_test)](https://crates.io/crates/serial_test) +[![Docs](https://docs.rs/serial_test/badge.svg)](https://docs.rs/serial_test/) +[![MIT license](https://img.shields.io/crates/l/serial_test.svg)](./LICENSE) +[![Build Status](https://github.com/palfrey/serial_test/workflows/Continuous%20integration/badge.svg?branch=main)](https://github.com/palfrey/serial_test/actions) +[![MSRV: 1.68.2](https://flat.badgen.net/badge/MSRV/1.68.2/purple)](https://blog.rust-lang.org/2023/03/28/Rust-1.68.2.html) + +`serial_test` allows for the creation of serialised Rust tests using the `serial` attribute +e.g. +```rust +#[test] +#[serial] +fn test_serial_one() { + // Do things +} + +#[test] +#[serial] +fn test_serial_another() { + // Do things +} + +#[tokio::test] +#[serial] +async fn test_serial_another() { + // Do things asynchronously +} +``` +Multiple tests with the `serial` attribute are guaranteed to be executed in serial. Ordering of the tests is not guaranteed however. Other tests with the `parallel` attribute may run at the same time as each other, but not at the same time as a test with `serial`. Tests with neither attribute may run at any time and no guarantees are made about their timing! Both support optional keys for defining subsets of tests to run in serial together, see docs for more details. + +For cases like doctests and integration tests where the tests are run as separate processes, we also support `file_serial`, with +similar properties but based off file locking. Note that there are no guarantees about one test with `serial` and another with +`file_serial` as they lock using different methods. + +All of the attributes can also be applied at a `mod` level and will be automagically applied to all test functions in that block. + +## Usage +The minimum supported Rust version here is 1.68.2. Note this is minimum _supported_, as it may well compile with lower versions, but they're not supported at all. Upgrades to this will require at a major version bump. 1.x supports 1.51 if you need a lower version than that. + +Add to your Cargo.toml +```toml +[dev-dependencies] +serial_test = "*" +``` + +plus `use serial_test::serial;` in your imports section. + +You can then either add `#[serial]` or `#[serial(some_key)]` to tests as required. diff --git a/vendor/serial_test_derive/src/lib.rs b/vendor/serial_test_derive/src/lib.rs new file mode 100644 index 00000000..ba69ee0f --- /dev/null +++ b/vendor/serial_test_derive/src/lib.rs @@ -0,0 +1,715 @@ +//! # serial_test_derive +//! 
Helper crate for [serial_test](../serial_test/index.html) + +#![cfg_attr(docsrs, feature(doc_cfg))] +#![deny(missing_docs)] + +extern crate proc_macro; + +use proc_macro::TokenStream; +use proc_macro2::{Literal, TokenTree}; +use quote::{format_ident, quote, ToTokens, TokenStreamExt}; +use std::ops::Deref; +use syn::Result as SynResult; + +/// Allows for the creation of serialised Rust tests +/// ```` +/// #[test] +/// #[serial] +/// fn test_serial_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[serial] +/// fn test_serial_another() { +/// // Do things +/// } +/// ```` +/// Multiple tests with the [serial](macro@serial) attribute are guaranteed to be executed in serial. Ordering +/// of the tests is not guaranteed however. If you have other tests that can be run in parallel, but would clash +/// if run at the same time as the [serial](macro@serial) tests, you can use the [parallel](macro@parallel) attribute. +/// +/// If you want different subsets of tests to be serialised with each +/// other, but not depend on other subsets, you can add a key argument to [serial](macro@serial), and all calls +/// with identical arguments will be called in serial. Multiple comma-separated keys will make a test run in serial with all of the sets with any of those keys. +/// +/// ```` +/// #[test] +/// #[serial(something)] +/// fn test_serial_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[serial(something)] +/// fn test_serial_another() { +/// // Do things +/// } +/// +/// #[test] +/// #[serial(other)] +/// fn test_serial_third() { +/// // Do things +/// } +/// +/// #[test] +/// #[serial(other)] +/// fn test_serial_fourth() { +/// // Do things +/// } +/// +/// #[test] +/// #[serial(something, other)] +/// fn test_serial_fifth() { +/// // Do things, eventually +/// } +/// ```` +/// `test_serial_one` and `test_serial_another` will be executed in serial, as will `test_serial_third` and `test_serial_fourth` +/// but neither sequence will be blocked by the other. `test_serial_fifth` is blocked by tests in either sequence. +/// +/// Nested serialised tests (i.e. a [serial](macro@serial) tagged test calling another) are supported. +#[proc_macro_attribute] +pub fn serial(attr: TokenStream, input: TokenStream) -> TokenStream { + local_serial_core(attr.into(), input.into()).into() +} + +/// Allows for the creation of parallel Rust tests that won't clash with serial tests +/// ```` +/// #[test] +/// #[serial] +/// fn test_serial_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[parallel] +/// fn test_parallel_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[parallel] +/// fn test_parallel_two() { +/// // Do things +/// } +/// ```` +/// Multiple tests with the [parallel](macro@parallel) attribute may run in parallel, but not at the +/// same time as [serial](macro@serial) tests. e.g. in the example code above, `test_parallel_one` +/// and `test_parallel_two` may run at the same time, but `test_serial_one` is guaranteed not to run +/// at the same time as either of them. [parallel](macro@parallel) also takes key arguments for groups +/// of tests as per [serial](macro@serial). +/// +/// Note that this has zero effect on [file_serial](macro@file_serial) tests, as that uses a different +/// serialisation mechanism. For that, you want [file_parallel](macro@file_parallel). 
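To make the keyed-group behaviour described above concrete, here is a sketch with hypothetical test names (the `db` key coordinates the first three tests, while `cache` is an independent group):

```rust
use serial_test::{parallel, serial};

#[test]
#[serial(db)]
fn resets_database() {
    // Excludes every other `db`-keyed test, serial or parallel.
}

#[test]
#[parallel(db)]
fn query_a() {
    // May overlap with `query_b`, but never with `resets_database`.
}

#[test]
#[parallel(db)]
fn query_b() {}

#[test]
#[serial(cache)]
fn rebuilds_cache() {
    // Different key: neither blocks nor waits for the `db` group.
}
```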
+#[proc_macro_attribute] +pub fn parallel(attr: TokenStream, input: TokenStream) -> TokenStream { + local_parallel_core(attr.into(), input.into()).into() +} + +/// Allows for the creation of file-serialised Rust tests +/// ```` +/// #[test] +/// #[file_serial] +/// fn test_serial_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[file_serial] +/// fn test_serial_another() { +/// // Do things +/// } +/// ```` +/// +/// Multiple tests with the [file_serial](macro@file_serial) attribute are guaranteed to run in serial, as per the [serial](macro@serial) +/// attribute. Note that there are no guarantees about one test with [serial](macro@serial) and another with [file_serial](macro@file_serial) +/// as they lock using different methods, and [file_serial](macro@file_serial) does not support nested serialised tests, but otherwise acts +/// like [serial](macro@serial). If you have other tests that can be run in parallel, but would clash +/// if run at the same time as the [file_serial](macro@file_serial) tests, you can use the [file_parallel](macro@file_parallel) attribute. +/// +/// It also supports an optional `path` arg as well as key(s) as per [serial](macro@serial). +/// ```` +/// #[test] +/// #[file_serial(key)] +/// fn test_serial_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[file_serial(key, path => "/tmp/foo")] +/// fn test_serial_another() { +/// // Do things +/// } +/// ```` +/// The path defaults to a reasonable temp directory for the OS if not specified. If the `path` is specified, you can only use one key. +#[proc_macro_attribute] +#[cfg_attr(docsrs, doc(cfg(feature = "file_locks")))] +pub fn file_serial(attr: TokenStream, input: TokenStream) -> TokenStream { + fs_serial_core(attr.into(), input.into()).into() +} + +/// Allows for the creation of file-serialised parallel Rust tests that won't clash with file-serialised serial tests +/// ```` +/// #[test] +/// #[file_serial] +/// fn test_serial_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[file_parallel] +/// fn test_parallel_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[file_parallel] +/// fn test_parallel_two() { +/// // Do things +/// } +/// ```` +/// Effectively, this should behave like [parallel](macro@parallel) but for [file_serial](macro@file_serial). +/// Note that as per [file_serial](macro@file_serial) this doesn't do anything for [serial](macro@serial)/[parallel](macro@parallel) tests. +/// +/// It also supports an optional `path` arg as well as key(s) as per [serial](macro@serial). +/// ```` +/// #[test] +/// #[file_parallel(key, path => "/tmp/foo")] +/// fn test_parallel_one() { +/// // Do things +/// } +/// +/// #[test] +/// #[file_parallel(key, path => "/tmp/foo")] +/// fn test_parallel_another() { +/// // Do things +/// } +/// ```` +/// The path defaults to a reasonable temp directory for the OS if not specified. If the `path` is specified, you can only use one key. +#[proc_macro_attribute] +#[cfg_attr(docsrs, doc(cfg(feature = "file_locks")))] +pub fn file_parallel(attr: TokenStream, input: TokenStream) -> TokenStream { + fs_parallel_core(attr.into(), input.into()).into() +} + +// Based off of https://github.com/dtolnay/quote/issues/20#issuecomment-437341743 +#[derive(Default, Debug, Clone)] +struct QuoteOption(Option); + +impl ToTokens for QuoteOption { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + tokens.append_all(match self.0 { + Some(ref t) => quote! { ::std::option::Option::Some(#t) }, + None => quote! 
{ ::std::option::Option::None }, + }); + } +} + +#[derive(Default, Debug)] +struct Config { + names: Vec, + path: QuoteOption, +} + +fn string_from_literal(literal: Literal) -> String { + let string_literal = literal.to_string(); + if !string_literal.starts_with('\"') || !string_literal.ends_with('\"') { + panic!("Expected a string literal, got '{}'", string_literal); + } + // Hacky way of getting a string without the enclosing quotes + string_literal[1..string_literal.len() - 1].to_string() +} + +fn get_config(attr: proc_macro2::TokenStream) -> Config { + let mut attrs = attr.into_iter().collect::>(); + let mut raw_args: Vec = Vec::new(); + let mut in_path: bool = false; + let mut path: Option = None; + while !attrs.is_empty() { + match attrs.remove(0) { + TokenTree::Ident(id) if id.to_string().eq_ignore_ascii_case("path") => { + in_path = true; + } + TokenTree::Ident(id) => { + let name = id.to_string(); + raw_args.push(name); + } + x => { + panic!( + "Expected literal as key args (or a 'path => '\"foo\"'), not {}", + x + ); + } + } + if in_path { + if attrs.len() < 3 { + panic!("Expected a '=> ' after 'path'"); + } + match attrs.remove(0) { + TokenTree::Punct(p) if p.as_char() == '=' => {} + x => { + panic!("Expected = after path, not {}", x); + } + } + match attrs.remove(0) { + TokenTree::Punct(p) if p.as_char() == '>' => {} + x => { + panic!("Expected > after path, not {}", x); + } + } + match attrs.remove(0) { + TokenTree::Literal(literal) => { + path = Some(string_from_literal(literal)); + } + x => { + panic!("Expected literals as path arg, not {}", x); + } + } + in_path = false; + } + if !attrs.is_empty() { + match attrs.remove(0) { + TokenTree::Punct(p) if p.as_char() == ',' => {} + x => { + panic!("Expected , between args, not {}", x); + } + } + } + } + if raw_args.is_empty() { + raw_args.push(String::new()); + } + raw_args.sort(); // So the keys are always requested in the same order. Avoids dining philosopher issues. 
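+    // For example, a test tagged `#[serial(a, b)]` and another tagged `#[serial(b, a)]`
+    // both end up requesting their locks as ["a", "b"] after this sort; without it, one
+    // test could hold "a" while waiting for "b" and the other hold "b" while waiting for
+    // "a", since the generated wrappers take the per-key guards one at a time.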
+ Config { + names: raw_args, + path: QuoteOption(path), + } +} + +fn local_serial_core( + attr: proc_macro2::TokenStream, + input: proc_macro2::TokenStream, +) -> proc_macro2::TokenStream { + let config = get_config(attr); + serial_setup(input, config, "local") +} + +fn local_parallel_core( + attr: proc_macro2::TokenStream, + input: proc_macro2::TokenStream, +) -> proc_macro2::TokenStream { + let config = get_config(attr); + parallel_setup(input, config, "local") +} + +fn fs_serial_core( + attr: proc_macro2::TokenStream, + input: proc_macro2::TokenStream, +) -> proc_macro2::TokenStream { + let config = get_config(attr); + serial_setup(input, config, "fs") +} + +fn fs_parallel_core( + attr: proc_macro2::TokenStream, + input: proc_macro2::TokenStream, +) -> proc_macro2::TokenStream { + let config = get_config(attr); + parallel_setup(input, config, "fs") +} + +#[allow(clippy::cmp_owned)] +fn core_setup( + input: proc_macro2::TokenStream, + config: &Config, + prefix: &str, + kind: &str, +) -> proc_macro2::TokenStream { + let fn_ast: SynResult = syn::parse2(input.clone()); + if let Ok(ast) = fn_ast { + return fn_setup(ast, config, prefix, kind); + }; + let mod_ast: SynResult = syn::parse2(input); + match mod_ast { + Ok(mut ast) => { + let new_content = ast.content.clone().map(|(brace, items)| { + let new_items = items + .into_iter() + .map(|item| match item { + syn::Item::Fn(item_fn) + if item_fn.attrs.iter().any(|attr| { + attr.meta + .path() + .segments + .first() + .unwrap() + .ident + .to_string() + .contains("test") + }) => + { + syn::parse2(fn_setup(item_fn, config, prefix, kind)).unwrap() + } + other => other, + }) + .collect(); + (brace, new_items) + }); + if let Some(nc) = new_content { + ast.content.replace(nc); + } + ast.attrs.retain(|attr| { + attr.meta.path().segments.first().unwrap().ident.to_string() != "serial" + }); + ast.into_token_stream() + } + Err(_) => { + panic!("Attribute applied to something other than mod or fn!"); + } + } +} + +fn fn_setup( + ast: syn::ItemFn, + config: &Config, + prefix: &str, + kind: &str, +) -> proc_macro2::TokenStream { + let asyncness = ast.sig.asyncness; + if asyncness.is_some() && cfg!(not(feature = "async")) { + panic!("async testing attempted with async feature disabled in serial_test!"); + } + let vis = ast.vis; + let name = ast.sig.ident; + let return_type = match ast.sig.output { + syn::ReturnType::Default => None, + syn::ReturnType::Type(_rarrow, ref box_type) => Some(box_type.deref()), + }; + let block = ast.block; + let attrs: Vec = ast.attrs.into_iter().collect(); + let names = config.names.clone(); + let path = config.path.clone(); + if let Some(ret) = return_type { + match asyncness { + Some(_) => { + let fnname = format_ident!("{}_async_{}_core_with_return", prefix, kind); + let temp_fn = format_ident!("_{}_internal", name); + quote! { + async fn #temp_fn () -> #ret + #block + + #(#attrs) + * + #vis async fn #name () -> #ret { + serial_test::#fnname(vec![#(#names ),*], #path, #temp_fn()).await + } + } + } + None => { + let fnname = format_ident!("{}_{}_core_with_return", prefix, kind); + quote! { + #(#attrs) + * + #vis fn #name () -> #ret { + serial_test::#fnname(vec![#(#names ),*], #path, || #block ) + } + } + } + } + } else { + match asyncness { + Some(_) => { + let fnname = format_ident!("{}_async_{}_core", prefix, kind); + let temp_fn = format_ident!("_{}_internal", name); + quote! 
{ + async fn #temp_fn () + #block + + #(#attrs) + * + #vis async fn #name () { + serial_test::#fnname(vec![#(#names ),*], #path, #temp_fn()).await; + } + } + } + None => { + let fnname = format_ident!("{}_{}_core", prefix, kind); + quote! { + #(#attrs) + * + #vis fn #name () { + serial_test::#fnname(vec![#(#names ),*], #path, || #block ); + } + } + } + } + } +} + +fn serial_setup( + input: proc_macro2::TokenStream, + config: Config, + prefix: &str, +) -> proc_macro2::TokenStream { + core_setup(input, &config, prefix, "serial") +} + +fn parallel_setup( + input: proc_macro2::TokenStream, + config: Config, + prefix: &str, +) -> proc_macro2::TokenStream { + core_setup(input, &config, prefix, "parallel") +} + +#[cfg(test)] +mod tests { + use super::{fs_serial_core, local_serial_core}; + use proc_macro2::TokenStream; + use quote::quote; + use std::iter::FromIterator; + + fn unparse(input: TokenStream) -> String { + let item = syn::parse2(input).unwrap(); + let file = syn::File { + attrs: vec![], + items: vec![item], + shebang: None, + }; + + prettyplease::unparse(&file) + } + + fn compare_streams(first: TokenStream, second: TokenStream) { + let f = unparse(first); + assert_eq!(f, unparse(second)); + } + + #[test] + fn test_serial() { + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + #[test] + fn foo() {} + }; + let stream = local_serial_core(attrs.into(), input); + let compare = quote! { + #[test] + fn foo () { + serial_test::local_serial_core(vec![""], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_serial_with_pub() { + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + #[test] + pub fn foo() {} + }; + let stream = local_serial_core(attrs.into(), input); + let compare = quote! { + #[test] + pub fn foo () { + serial_test::local_serial_core(vec![""], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_other_attributes() { + let _ = env_logger::builder().is_test(true).try_init(); + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + #[test] + #[ignore] + #[should_panic(expected = "Testing panic")] + #[something_else] + fn foo() {} + }; + let stream = local_serial_core(attrs.into(), input); + let compare = quote! { + #[test] + #[ignore] + #[should_panic(expected = "Testing panic")] + #[something_else] + fn foo () { + serial_test::local_serial_core(vec![""], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + #[cfg(feature = "async")] + fn test_serial_async() { + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + async fn foo() {} + }; + let stream = local_serial_core(attrs.into(), input); + let compare = quote! { + async fn _foo_internal () { } + async fn foo () { + serial_test::local_async_serial_core(vec![""], ::std::option::Option::None, _foo_internal() ).await; + } + }; + assert_eq!(format!("{}", compare), format!("{}", stream)); + } + + #[test] + #[cfg(feature = "async")] + fn test_serial_async_return() { + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + async fn foo() -> Result<(), ()> { Ok(()) } + }; + let stream = local_serial_core(attrs.into(), input); + let compare = quote! 
{ + async fn _foo_internal () -> Result<(), ()> { Ok(()) } + async fn foo () -> Result<(), ()> { + serial_test::local_async_serial_core_with_return(vec![""], ::std::option::Option::None, _foo_internal() ).await + } + }; + assert_eq!(format!("{}", compare), format!("{}", stream)); + } + + #[test] + fn test_file_serial() { + let attrs: Vec<_> = quote! { foo }.into_iter().collect(); + let input = quote! { + #[test] + fn foo() {} + }; + let stream = fs_serial_core( + proc_macro2::TokenStream::from_iter(attrs.into_iter()), + input, + ); + let compare = quote! { + #[test] + fn foo () { + serial_test::fs_serial_core(vec!["foo"], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_file_serial_no_args() { + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + #[test] + fn foo() {} + }; + let stream = fs_serial_core( + proc_macro2::TokenStream::from_iter(attrs.into_iter()), + input, + ); + let compare = quote! { + #[test] + fn foo () { + serial_test::fs_serial_core(vec![""], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_file_serial_with_path() { + let attrs: Vec<_> = quote! { foo, path => "bar_path" }.into_iter().collect(); + let input = quote! { + #[test] + fn foo() {} + }; + let stream = fs_serial_core( + proc_macro2::TokenStream::from_iter(attrs.into_iter()), + input, + ); + let compare = quote! { + #[test] + fn foo () { + serial_test::fs_serial_core(vec!["foo"], ::std::option::Option::Some("bar_path"), || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_single_attr() { + let attrs: Vec<_> = quote! { one}.into_iter().collect(); + let input = quote! { + #[test] + fn single() {} + }; + let stream = local_serial_core( + proc_macro2::TokenStream::from_iter(attrs.into_iter()), + input, + ); + let compare = quote! { + #[test] + fn single () { + serial_test::local_serial_core(vec!["one"], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_multiple_attr() { + let attrs: Vec<_> = quote! { two, one }.into_iter().collect(); + let input = quote! { + #[test] + fn multiple() {} + }; + let stream = local_serial_core( + proc_macro2::TokenStream::from_iter(attrs.into_iter()), + input, + ); + let compare = quote! { + #[test] + fn multiple () { + serial_test::local_serial_core(vec!["one", "two"], ::std::option::Option::None, || {} ); + } + }; + compare_streams(compare, stream); + } + + #[test] + fn test_mod() { + let attrs = proc_macro2::TokenStream::new(); + let input = quote! { + #[cfg(test)] + #[serial] + mod serial_attr_tests { + pub fn foo() { + println!("Nothing"); + } + + #[test] + fn bar() {} + } + }; + let stream = local_serial_core( + proc_macro2::TokenStream::from_iter(attrs.into_iter()), + input, + ); + let compare = quote! { + #[cfg(test)] + mod serial_attr_tests { + pub fn foo() { + println!("Nothing"); + } + + #[test] + fn bar() { + serial_test::local_serial_core(vec![""], ::std::option::Option::None, || {} ); + } + } + }; + compare_streams(compare, stream); + } +}
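The test module above covers the `serial` and `fs_serial` expansions; from `fn_setup` it follows that the `parallel` attribute expands the same way but targets `local_parallel_core`. A sketch of an additional test in the same style (not present in the file above) could look like:

```rust
#[test]
fn test_parallel() {
    let attrs = proc_macro2::TokenStream::new();
    let input = quote! {
        #[test]
        fn foo() {}
    };
    // `super::local_parallel_core` is the derive-side entry point defined above,
    // not the facade crate's runtime function of the same name.
    let stream = super::local_parallel_core(attrs, input);
    let compare = quote! {
        #[test]
        fn foo () {
            serial_test::local_parallel_core(vec![""], ::std::option::Option::None, || {} );
        }
    };
    compare_streams(compare, stream);
}
```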