Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/rand/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"76b505678de234d2eef751593feec6d9debb76c20d45564a9f23c9e9783dbc63","COPYRIGHT":"90eb64f0279b0d9432accfa6023ff803bc4965212383697eee27a0f426d5f8d5","Cargo.lock":"3fcf8e3c5b6093e73a0f33d4c4c24ebfbe962ede3156b29ea97e42ad68460711","Cargo.toml":"9bb028fb3b697653beb433ddcf4c1292b3db10ea5ed27a695df6e4e604ba6d4b","LICENSE-APACHE":"35242e7a83f69875e6edeff02291e688c97caafe2f8902e4e19b49d3e78b4cab","LICENSE-MIT":"209fbbe0ad52d9235e37badf9cadfe4dbdc87203179c0899e738b39ade42177b","README.md":"ddb5a1fa9442c6cab92a3510365937e729f839c94b97e75d3f0430bf3a4dd2bd","src/distributions/bernoulli.rs":"437e61c41f73b6fffad11a65cc45d05df198ab05b37328eba687a9779b86948a","src/distributions/distribution.rs":"36086233c9682eb16874ba87bb1ec39db71559c5ce2ca618dc8c6bd9710d3b3a","src/distributions/float.rs":"ef894cbfeab9c734894468175c4553100b4a261f431047f2bbc4949aa43e2ccd","src/distributions/integer.rs":"a380e0627c97cfad0d94e0fdfb4dad73060d23073cc1d379f06c4dbd2a4fc2db","src/distributions/mod.rs":"f87a133e704e38ad554c8b4f62497d6320c74ef7d37df7871c61bde48d200b5b","src/distributions/other.rs":"e60568f8eadc0594636641a2070e53f5127fb532a74101ed4767f424a6e92622","src/distributions/slice.rs":"94f5abfe602679e980e4561bb03dcac28bbd3bb5f7bd2821f396a6293c0878db","src/distributions/uniform.rs":"9eb0769b7c268c2e4f502ede0d779cb1ab5243d70a1fb39f2f5e316bcf9586e2","src/distributions/utils.rs":"41304f5e2d74e750fc62f7871443c6e9d510a6c99be4614fb5c756682e0344d7","src/distributions/weighted.rs":"ae019d9b688e33cb912c9a04668cce3e7df86abab994db88478c6c339f98222f","src/distributions/weighted_index.rs":"874d1db2e258d9c049be08ae80b72ec2c75af0f2571f83091a26a3f6c747a6f0","src/lib.rs":"a773ff7b0dad376e5ef23661c40b7a96df4233fef90dab303db93f209aee314f","src/prelude.rs":"2f2132d74ce9f70513224baad3b161b1585a639f9136a254cdb0e7f8ffceb25b","src/rng.rs":"5d9b55069197f9f98298e8d930b13d8c65ab2701660bfbf52d83c6d7d7aff8c6","src/rngs/adapter/mod.rs":"28318871529da61dccc0fe8c0bcafa99b133c721d4bb506fa34d0831f4ca2639","src/rngs/adapter/read.rs":"b044061c46d0b8e6a4f25c69d3e8bb6f9df08cd8df9b5eae131a1d4934020e03","src/rngs/adapter/reseeding.rs":"89abebade9bca847889155ac3357c0021d2c6181dd47478332a644820ade0c6e","src/rngs/mock.rs":"0074abe04cf84b1263218f50140931fa4188f4e0a43fe3205556a00e4c36d1e9","src/rngs/mod.rs":"a6dec3d19e1726ba05f130ab9b20719d79177b8c1584cdd7b5f37b9996315ed3","src/rngs/small.rs":"a8e61c6e0bad62f06db1325e3b93eff1d4aa9e82cf0316fbfd02da2ef5b85b83","src/rngs/std.rs":"3cee48bf1fea18b84f585680a947f3aeea949b756cc37d99217291f9759be7c9","src/rngs/thread.rs":"c3cc07465bf02d08182afc47a40e50095d7c83633e09dcd071974b2a902e6fce","src/rngs/xoshiro128plusplus.rs":"deca2450a2d5ea826ca6f47cccb9ee06daeac38799a30a107b78c5dae78ae30c","src/rngs/xoshiro256plusplus.rs":"d7e214f8288041cede7ef26e829dd2196f7b4843455d7f1b9a3ef080d570bc5f","src/seq/index.rs":"5247833f7bfc8c5c11337ce7dc0a55a6979ea664ddddd70b6e2b9598058ab44d","src/seq/mod.rs":"dd97a635e89e1d50153c57ec03d8a346a063486998ef14ca4fdc60659f1612fb"},"package":"34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"}

699
vendor/rand/CHANGELOG.md vendored Normal file

@@ -0,0 +1,699 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
A [separate changelog is kept for rand_core](rand_core/CHANGELOG.md).
You may also find the [Upgrade Guide](https://rust-random.github.io/book/update.html) useful.
## [0.8.5] - 2021-08-20
### Fixes
- Fix build on non-32/64-bit architectures (#1144)
- Fix "min_const_gen" feature for `no_std` (#1173)
- Check `libc::pthread_atfork` return value with panic on error (#1178)
- More robust reseeding in case `ReseedingRng` is used from a fork handler (#1178)
- Fix nightly: remove unused `slice_partition_at_index` feature (#1215)
- Fix nightly + `simd_support`: update `packed_simd` (#1216)
### Rngs
- `StdRng`: Switch from HC128 to ChaCha12 on emscripten (#1142).
We now use ChaCha12 on all platforms.
### Documentation
- Added docs about rand's use of const generics (#1150)
- Better random chars example (#1157)
## [0.8.4] - 2021-06-15
### Additions
- Use const-generics to support arrays of all sizes (#1104)
- Implement `Clone` and `Copy` for `Alphanumeric` (#1126)
- Add `Distribution::map` to derive a distribution using a closure (#1129)
- Add `Slice` distribution (#1107)
- Add `DistString` trait with impls for `Standard` and `Alphanumeric` (#1133)
### Other
- Reorder asserts in `Uniform` float distributions for easier debugging of non-finite arguments
(#1094, #1108)
- Add range overflow check in `Uniform` float distributions (#1108)
- Deprecate `rngs::adapter::ReadRng` (#1130)
## [0.8.3] - 2021-01-25
### Fixes
- Fix `no-std` + `alloc` build by gating `choose_multiple_weighted` on `std` (#1088)
## [0.8.2] - 2021-01-12
### Fixes
- Fix panic in `UniformInt::sample_single_inclusive` and `Rng::gen_range` when
providing a full integer range (e.g. `0..=MAX`) (#1087)
## [0.8.1] - 2020-12-31
### Other
- Enable all stable features in the playground (#1081)
## [0.8.0] - 2020-12-18
### Platform support
- The minimum supported Rust version is now 1.36 (#1011)
- `getrandom` updated to v0.2 (#1041)
- Remove `wasm-bindgen` and `stdweb` feature flags. For details of WASM support,
see the [getrandom documentation](https://docs.rs/getrandom/latest). (#948)
- `ReadRng::next_u32` and `next_u64` now use little-Endian conversion instead
of native-Endian, affecting results on Big-Endian platforms (#1061)
- The `nightly` feature no longer implies the `simd_support` feature (#1048)
- Fix `simd_support` feature to work on current nightlies (#1056)
### Rngs
- `ThreadRng` is no longer `Copy` to enable safe usage within thread-local destructors (#1035)
- `gen_range(a, b)` was replaced with `gen_range(a..b)`. `gen_range(a..=b)` is
also supported. Note that `a` and `b` can no longer be references or SIMD types. (#744, #1003)
- Replace `AsByteSliceMut` with `Fill` and add support for `[bool], [char], [f32], [f64]` (#940)
- Restrict `rand::rngs::adapter` to `std` (#1027; see also #928)
- `StdRng`: add new `std_rng` feature flag (enabled by default, but might need
to be used if disabling default crate features) (#948)
- `StdRng`: Switch from ChaCha20 to ChaCha12 for better performance (#1028)
- `SmallRng`: Replace PCG algorithm with xoshiro{128,256}++ (#1038)
### Sequences
- Add `IteratorRandom::choose_stable` as an alternative to `choose` which does
not depend on size hints (#1057)
- Improve accuracy and performance of `IteratorRandom::choose` (#1059)
- Implement `IntoIterator` for `IndexVec`, replacing the `into_iter` method (#1007)
- Add value stability tests for `seq` module (#933)
### Misc
- Support `PartialEq` and `Eq` for `StdRng`, `SmallRng` and `StepRng` (#979)
- Added a `serde1` feature and added Serialize/Deserialize to `UniformInt` and `WeightedIndex` (#974)
- Drop some unsafe code (#962, #963, #1011)
- Reduce packaged crate size (#983)
- Migrate to GitHub Actions from Travis+AppVeyor (#1073)
### Distributions
- `Alphanumeric` samples bytes instead of chars (#935)
- `Uniform` now supports `char`, enabling `rng.gen_range('A'..='Z')` (#1068)
- Add `UniformSampler::sample_single_inclusive` (#1003)
#### Weighted sampling
- Implement weighted sampling without replacement (#976, #1013)
- `rand::distributions::alias_method::WeightedIndex` was moved to `rand_distr::WeightedAliasIndex`.
The simpler alternative `rand::distributions::WeightedIndex` remains. (#945)
- Improve treatment of rounding errors in `WeightedIndex::update_weights` (#956)
- `WeightedIndex`: return error on NaN instead of panic (#1005)
### Documentation
- Document types supported by `random` (#994)
- Document notes on password generation (#995)
- Note that `SmallRng` may not be the best choice for performance and in some
other cases (#1038)
- Use `doc(cfg)` to annotate feature-gated items (#1019)
- Adjust README (#1065)
## [0.7.3] - 2020-01-10
### Fixes
- The `Bernoulli` distribution constructors now report an error on NaN and on
`denominator == 0`. (#925)
- Use `std::sync::Once` to register fork handler, avoiding possible atomicity violation (#928)
- Fix documentation on the precision of generated floating-point values
### Changes
- Unix: make libc dependency optional; only use fork protection with std feature (#928)
### Additions
- Implement `std::error::Error` for `BernoulliError` (#919)
## [0.7.2] - 2019-09-16
### Fixes
- Fix dependency on `rand_core` 0.5.1 (#890)
### Additions
- Unit tests for value stability of distributions added (#888)
## [0.7.1] - 2019-09-13
### Yanked
This release was yanked since it depends on `rand_core::OsRng` added in 0.5.1
but specifies a dependency on version 0.5.0 (#890), causing broken builds
when updating from `rand 0.7.0` without also updating `rand_core`.
### Fixes
- Fix `no_std` behaviour, appropriately enable c2-chacha's `std` feature (#844)
- `alloc` feature in `no_std` is available since Rust 1.36 (#856)
- Fix or squelch issues from Clippy lints (#840)
### Additions
- Add a `no_std` target to CI to continuously evaluate `no_std` status (#844)
- `WeightedIndex`: allow adjusting a sub-set of weights (#866)
## [0.7.0] - 2019-06-28
### Fixes
- Fix incorrect pointer usages revealed by Miri testing (#780, #781)
- Fix (tiny!) bias in `Uniform` for 8- and 16-bit ints (#809)
### Crate
- Bumped MSRV (min supported Rust version) to 1.32.0
- Updated to Rust Edition 2018 (#823, #824)
- Removed dependence on `rand_xorshift`, `rand_isaac`, `rand_jitter` crates (#759, #765)
- Remove dependency on `winapi` (#724)
- Removed all `build.rs` files (#824)
- Removed code already deprecated in version 0.6 (#757)
- Removed the serde1 feature (It's still available for backwards compatibility, but it does not do anything. #830)
- Many documentation changes
### rand_core
- Updated to `rand_core` 0.5.0
- `Error` type redesigned with new API (#800)
- Move `from_entropy` method to `SeedableRng` and remove `FromEntropy` (#800)
- `SeedableRng::from_rng` is now expected to be value-stable (#815)
### Standard RNGs
- OS interface moved from `rand_os` to new `getrandom` crate (#765, [getrandom](https://github.com/rust-random/getrandom))
- Use ChaCha for `StdRng` and `ThreadRng` (#792)
- Feature-gate `SmallRng` (#792)
- `ThreadRng` now supports `Copy` (#758)
- Deprecated `EntropyRng` (#765)
- Enable fork protection of ReseedingRng without `std` (#724)
### Distributions
- Many distributions have been moved to `rand_distr` (#761)
- `Bernoulli::new` constructor now returns a `Result` (#803)
- `Distribution::sample_iter` adjusted for more flexibility (#758)
- Added `distributions::weighted::alias_method::WeightedIndex` for `O(1)` sampling (#692)
- Support sampling `NonZeroU*` types with the `Standard` distribution (#728)
- Optimised `Binomial` distribution sampling (#735, #740, #752)
- Optimised SIMD float sampling (#739)
### Sequences
- Make results portable across 32- and 64-bit by using `u32` samples for `usize` where possible (#809)
## [0.6.5] - 2019-01-28
### Crates
- Update `rand_core` to 0.4 (#703)
- Move `JitterRng` to its own crate (#685)
- Add a wasm-bindgen test crate (#696)
### Platforms
- Fuchsia: Replaced fuchsia-zircon with fuchsia-cprng
### Doc
- Use RFC 1946 for doc links (#691)
- Fix some doc links and notes (#711)
## [0.6.4] - 2019-01-08
### Fixes
- Move wasm-bindgen shims to correct crate (#686)
- Make `wasm32-unknown-unknown` compile but fail at run-time if bindings are missing (#686)
## [0.6.3] - 2019-01-04
### Fixes
- Make the `std` feature require the optional `rand_os` dependency (#675)
- Re-export the optional WASM dependencies of `rand_os` from `rand` to avoid breakage (#674)
## [0.6.2] - 2019-01-04
### Additions
- Add `Default` for `ThreadRng` (#657)
- Move `rngs::OsRng` to `rand_os` sub-crate; clean up code; use as dependency (#643)
- Add `rand_xoshiro` sub-crate, plus benchmarks (#642, #668)
### Fixes
- Fix bias in `UniformInt::sample_single` (#662)
- Use `autocfg` instead of `rustc_version` for rustc version detection (#664)
- Disable `i128` and `u128` if the `target_os` is `emscripten` (#671: work-around Emscripten limitation)
- CI fixes (#660, #671)
### Optimisations
- Optimise memory usage of `UnitCircle` and `UnitSphereSurface` distributions (no PR)
## [0.6.1] - 2018-11-22
- Support sampling `Duration` also for `no_std` (only since Rust 1.25) (#649)
- Disable default features of `libc` (#647)
## [0.6.0] - 2018-11-14
### Project organisation
- Rand has moved from [rust-lang-nursery](https://github.com/rust-lang-nursery/rand)
to [rust-random](https://github.com/rust-random/rand)! (#578)
- Created [The Rust Random Book](https://rust-random.github.io/book/)
([source](https://github.com/rust-random/book))
- Update copyright and licence notices (#591, #611)
- Migrate policy documentation from the wiki (#544)
### Platforms
- Add fork protection on Unix (#466)
- Added support for wasm-bindgen. (#541, #559, #562, #600)
- Enable `OsRng` for powerpc64, sparc and sparc64 (#609)
- Use `syscall` from `libc` on Linux instead of redefining it (#629)
### RNGs
- Switch `SmallRng` to use PCG (#623)
- Implement `Pcg32` and `Pcg64Mcg` generators (#632)
- Move ISAAC RNGs to a dedicated crate (#551)
- Move Xorshift RNG to its own crate (#557)
- Move ChaCha and HC128 RNGs to dedicated crates (#607, #636)
- Remove usage of `Rc` from `ThreadRng` (#615)
### Sampling and distributions
- Implement `Rng.gen_ratio()` and `Bernoulli::new_ratio()` (#491)
- Make `Uniform` strictly respect `f32` / `f64` high/low bounds (#477)
- Allow `gen_range` and `Uniform` to work on non-`Copy` types (#506)
- `Uniform` supports inclusive ranges: `Uniform::from(a..=b)`. This is
automatically enabled for Rust >= 1.27. (#566)
- Implement `TrustedLen` and `FusedIterator` for `DistIter` (#620)
#### New distributions
- Add the `Dirichlet` distribution (#485)
- Added sampling from the unit sphere and circle. (#567)
- Implement the triangular distribution (#575)
- Implement the Weibull distribution (#576)
- Implement the Beta distribution (#574)
#### Optimisations
- Optimise `Bernoulli::new` (#500)
- Optimise `char` sampling (#519)
- Optimise sampling of `std::time::Duration` (#583)
### Sequences
- Redesign the `seq` module (#483, #515)
- Add `WeightedIndex` and `choose_weighted` (#518, #547)
- Optimised and changed return type of the `sample_indices` function. (#479)
- Use `Iterator::size_hint()` to speed up `IteratorRandom::choose` (#593)
### SIMD
- Support for generating SIMD types (#523, #542, #561, #630)
### Other
- Revise CI scripts (#632, #635)
- Remove functionality already deprecated in 0.5 (#499)
- Support for `i128` and `u128` is automatically enabled for Rust >= 1.26. This
renders the `i128_support` feature obsolete. It still exists for backwards
compatibility but does not have any effect. This breaks programs using Rand
with `i128_support` on nightlies older than Rust 1.26. (#571)
## [0.5.5] - 2018-08-07
### Documentation
- Fix links in documentation (#582)
## [0.5.4] - 2018-07-11
### Platform support
- Make `OsRng` work via WASM/stdweb for WebWorkers
## [0.5.3] - 2018-06-26
### Platform support
- OpenBSD, Bitrig: fix compilation (broken in 0.5.1) (#530)
## [0.5.2] - 2018-06-18
### Platform support
- Hide `OsRng` and `JitterRng` on unsupported platforms (#512; fixes #503).
## [0.5.1] - 2018-06-08
### New distributions
- Added Cauchy distribution. (#474, #486)
- Added Pareto distribution. (#495)
### Platform support and `OsRng`
- Remove blanket Unix implementation. (#484)
- Remove Wasm unimplemented stub. (#484)
- Dragonfly BSD: read from `/dev/random`. (#484)
- Bitrig: use `getentropy` like OpenBSD. (#484)
- Solaris: (untested) use `getrandom` if available, otherwise `/dev/random`. (#484)
- Emscripten, `stdweb`: split the read up in chunks. (#484)
- Emscripten, Haiku: don't do an extra blocking read from `/dev/random`. (#484)
- Linux, NetBSD, Solaris: read in blocking mode on first use in `fill_bytes`. (#484)
- Fuchsia, CloudABI: fix compilation (broken in Rand 0.5). (#484)
## [0.5.0] - 2018-05-21
### Crate features and organisation
- Minimum Rust version update: 1.22.0. (#239)
- Create a separate `rand_core` crate. (#288)
- Deprecate `rand_derive`. (#256)
- Add `prelude` (and module reorganisation). (#435)
- Add `log` feature. Logging is now available in `JitterRng`, `OsRng`, `EntropyRng` and `ReseedingRng`. (#246)
- Add `serde1` feature for some PRNGs. (#189)
- `stdweb` feature for `OsRng` support on WASM via stdweb. (#272, #336)
### `Rng` trait
- Split `Rng` into `RngCore` and an `Rng` extension trait.
`next_u32`, `next_u64` and `fill_bytes` are now part of `RngCore`. (#265)
- Add `Rng::sample`. (#256)
- Deprecate `Rng::gen_weighted_bool`. (#308)
- Add `Rng::gen_bool`. (#308)
- Remove `Rng::next_f32` and `Rng::next_f64`. (#273)
- Add optimized `Rng::fill` and `Rng::try_fill` methods. (#247)
- Deprecate `Rng::gen_iter`. (#286)
- Deprecate `Rng::gen_ascii_chars`. (#279)
### `rand_core` crate
- `rand` now depends on new `rand_core` crate (#288)
- `RngCore` and `SeedableRng` are now part of `rand_core`. (#288)
- Add modules `impl` and `le` to help with implementing RNGs. (#209, #228)
- Add `Error` and `ErrorKind`. (#225)
- Add `CryptoRng` marker trait. (#273)
- Add `BlockRngCore` trait. (#281)
- Add `BlockRng` and `BlockRng64` wrappers to help implementations. (#281, #325)
- Revise the `SeedableRng` trait. (#233)
- Remove default implementations for `RngCore::next_u64` and `RngCore::fill_bytes`. (#288)
- Add `RngCore::try_fill_bytes`. (#225)
### Other traits and types
- Add `FromEntropy` trait. (#233, #375)
- Add `SmallRng` wrapper. (#296)
- Rewrite `ReseedingRng` to only work with `BlockRngCore` (substantial performance improvement). (#281)
- Deprecate `weak_rng`. Use `SmallRng` instead. (#296)
- Deprecate `AsciiGenerator`. (#279)
### Random number generators
- Switch `StdRng` and `thread_rng` to HC-128. (#277)
- `StdRng` must now be created with `from_entropy` instead of `new`
- Change `thread_rng` reseeding threshold to 32 MiB. (#277)
- PRNGs no longer implement `Copy`. (#209)
- `Debug` implementations no longer show internals. (#209)
- Implement `Clone` for `ReseedingRng`, `JitterRng`, `OsRng`. (#383, #384)
- Implement serialization for `XorShiftRng`, `IsaacRng` and `Isaac64Rng` under the `serde1` feature. (#189)
- Implement `BlockRngCore` for `ChaChaCore` and `Hc128Core`. (#281)
- All PRNGs are now portable across big- and little-endian architectures. (#209)
- `Isaac64Rng::next_u32` no longer throws away half the results. (#209)
- Add `IsaacRng::new_from_u64` and `Isaac64Rng::new_from_u64`. (#209)
- Add the HC-128 CSPRNG `Hc128Rng`. (#210)
- Change ChaCha20 to have 64-bit counter and 64-bit stream. (#349)
- Changes to `JitterRng` to get its size down from 2112 to 24 bytes. (#251)
- Various performance improvements to all PRNGs.
### Platform support and `OsRng`
- Add support for CloudABI. (#224)
- Remove support for NaCl. (#225)
- WASM support for `OsRng` via stdweb, behind the `stdweb` feature. (#272, #336)
- Use `getrandom` on more platforms for Linux, and on Android. (#338)
- Use the `SecRandomCopyBytes` interface on macOS. (#322)
- On systems that do not have a syscall interface, only keep a single file descriptor open for `OsRng`. (#239)
- On Unix, first try a single read from `/dev/random`, then `/dev/urandom`. (#338)
- Better error handling and reporting in `OsRng` (using new error type). (#225)
- `OsRng` now uses non-blocking when available. (#225)
- Add `EntropyRng`, which provides `OsRng`, but has `JitterRng` as a fallback. (#235)
### Distributions
- New `Distribution` trait. (#256)
- Add `Distribution::sample_iter` and `Rng::sample_iter`. (#361)
- Deprecate `Rand`, `Sample` and `IndependentSample` traits. (#256)
- Add a `Standard` distribution (replaces most `Rand` implementations). (#256)
- Add `Binomial` and `Poisson` distributions. (#96)
- Add `Bernoulli` distribution. (#411)
- Add `Alphanumeric` distribution. (#279)
- Remove `Closed01` distribution, add `OpenClosed01`. (#274, #420)
- Rework `Range` type, making it possible to implement it for user types. (#274)
- Rename `Range` to `Uniform`. (#395)
- Add `Uniform::new_inclusive` for inclusive ranges. (#274)
- Use widening multiply method for much faster integer range reduction. (#274)
- `Standard` distribution for `char` uses `Uniform` internally. (#274)
- `Standard` distribution for `bool` uses sign test. (#274)
- Implement `Standard` distribution for `Wrapping<T>`. (#436)
- Implement `Uniform` distribution for `Duration`. (#427)
## [0.4.3] - 2018-08-16
### Fixed
- Use correct syscall number for PowerPC (#589)
## [0.4.2] - 2018-01-06
### Changed
- Use `winapi` on Windows
- Update for Fuchsia OS
- Remove dev-dependency on `log`
## [0.4.1] - 2017-12-17
### Added
- `no_std` support
## [0.4.0-pre.0] - 2017-12-11
### Added
- `JitterRng` added as a high-quality alternative entropy source using the
system timer
- new `seq` module with `sample_iter`, `sample_slice`, etc.
- WASM support via dummy implementations (fail at run-time)
- Additional benchmarks, covering generators and new seq code
### Changed
- `thread_rng` uses `JitterRng` if seeding from system time fails
(slower but more secure than previous method)
### Deprecated
- `sample` function deprecated (replaced by `sample_iter`)
## [0.3.20] - 2018-01-06
### Changed
- Remove dev-dependency on `log`
- Update `fuchsia-zircon` dependency to 0.3.2
## [0.3.19] - 2017-12-27
### Changed
- Require `log <= 0.3.8` for dev builds
- Update `fuchsia-zircon` dependency to 0.3
- Fix broken links in docs (to unblock compiler docs testing CI)
## [0.3.18] - 2017-11-06
### Changed
- `thread_rng` is seeded from the system time if `OsRng` fails
- `weak_rng` now uses `thread_rng` internally
## [0.3.17] - 2017-10-07
### Changed
- Fuchsia: Magenta was renamed Zircon
## [0.3.16] - 2017-07-27
### Added
- Implement Debug for more non-public types
- Implement `Rand` for `i128` and `u128`
- Support for Fuchsia
### Changed
- Add inline attribute to `SampleRange::construct_range`.
This improves the benchmark for `sample` by 11% and for `shuffle` by 16%.
- Use `RtlGenRandom` instead of `CryptGenRandom`
## [0.3.15] - 2016-11-26
### Added
- Add `Rng` trait method `choose_mut`
- Redox support
### Changed
- Use `arc4rand` for `OsRng` on FreeBSD.
- Use `arc4random(3)` for `OsRng` on OpenBSD.
### Fixed
- Fix filling buffers 4 GiB or larger with `OsRng::fill_bytes` on Windows
## [0.3.14] - 2016-02-13
### Fixed
- Inline definitions from winapi/advapi32, which decreases build times
## [0.3.13] - 2016-01-09
### Fixed
- Compatible with Rust 1.7.0-nightly (needed some extra type annotations)
## [0.3.12] - 2015-11-09
### Changed
- Replaced the methods `next_f32` and `next_f64` with the technique described by
Saito & Matsumoto at MCQMC'08. The new method should exhibit a slightly more
uniform distribution.
- Depend on libc 0.2
### Fixed
- Fix iterator protocol issue in `rand::sample`
## [0.3.11] - 2015-08-31
### Added
- Implement `Rand` for arrays with n <= 32
## [0.3.10] - 2015-08-17
### Added
- Support for NaCl platforms
### Changed
- Allow `Rng` to be `?Sized`, impl for `&mut R` and `Box<R>` where `R: ?Sized + Rng`
## [0.3.9] - 2015-06-18
### Changed
- Use `winapi` for Windows API things
### Fixed
- Fixed test on stable/nightly
- Fix `getrandom` syscall number for aarch64-unknown-linux-gnu
## [0.3.8] - 2015-04-23
### Changed
- `log` is a dev dependency
### Fixed
- Fix race condition of atomics in `is_getrandom_available`
## [0.3.7] - 2015-04-03
### Fixed
- Derive Copy/Clone changes
## [0.3.6] - 2015-04-02
### Changed
- Move to stable Rust!
## [0.3.5] - 2015-04-01
### Fixed
- Compatible with Rust master
## [0.3.4] - 2015-03-31
### Added
- Implement Clone for `Weighted`
### Fixed
- Compatible with Rust master
## [0.3.3] - 2015-03-26
### Fixed
- Fix compile on Windows
## [0.3.2] - 2015-03-26
## [0.3.1] - 2015-03-26
### Fixed
- Fix compile on Windows
## [0.3.0] - 2015-03-25
### Changed
- Update to use log version 0.3.x
## [0.2.1] - 2015-03-22
### Fixed
- Compatible with Rust master
- Fixed iOS compilation
## [0.2.0] - 2015-03-06
### Fixed
- Compatible with Rust master (move from `old_io` to `std::io`)
## [0.1.4] - 2015-03-04
### Fixed
- Compatible with Rust master (use wrapping ops)
## [0.1.3] - 2015-02-20
### Fixed
- Compatible with Rust master
### Removed
- Removed Copy implementations from RNGs
## [0.1.2] - 2015-02-03
### Added
- Imported functionality from `std::rand`, including:
- `StdRng`, `SeedableRng`, `ThreadRng`, `weak_rng()`
- `ReaderRng`: A wrapper around any Reader to treat it as an RNG.
- Imported documentation from `std::rand`
- Imported tests from `std::rand`
## [0.1.1] - 2015-02-03
### Added
- Migrate to a cargo-compatible directory structure.
### Fixed
- Do not use entropy during `gen_weighted_bool(1)`
## [Rust 0.12.0] - 2014-10-09
### Added
- Impl Rand for tuples of arity 11 and 12
- Include ChaCha pseudorandom generator
- Add `next_f64` and `next_f32` to Rng
- Implement Clone for PRNGs
### Changed
- Rename `TaskRng` to `ThreadRng` and `task_rng` to `thread_rng` (since a
runtime is removed from Rust).
### Fixed
- Improved performance of ISAAC and ISAAC64 by 30% and 12% respectively, by
informing the optimiser that indexing is never out-of-bounds.
### Removed
- Removed the deprecated `choose_option`
## [Rust 0.11.0] - 2014-07-02
### Added
- document when to use `OSRng` in cryptographic context, and explain why we use `/dev/urandom` instead of `/dev/random`
- `Rng::gen_iter()` which will return an infinite stream of random values
- `Rng::gen_ascii_chars()` which will return an infinite stream of random ascii characters
### Changed
- Now only depends on libcore!
- Remove `Rng.choose()`, rename `Rng.choose_option()` to `.choose()`
- Rename OSRng to OsRng
- The WeightedChoice structure is no longer built with a `Vec<Weighted<T>>`,
but rather a `&mut [Weighted<T>]`. This means that the WeightedChoice
structure now has a lifetime associated with it.
- The `sample` method on `Rng` has been moved to a top-level function in the
`rand` module due to its dependence on `Vec`.
### Removed
- `Rng::gen_vec()` was removed. Previous behavior can be regained with
`rng.gen_iter().take(n).collect()`
- `Rng::gen_ascii_str()` was removed. Previous behavior can be regained with
`rng.gen_ascii_chars().take(n).collect()`
- {IsaacRng, Isaac64Rng, XorShiftRng}::new() have all been removed. These all
relied on being able to use an OSRng for seeding, but this is no longer
available in librand (where these types are defined). To retain the same
functionality, these types now implement the `Rand` trait so they can be
generated with a random seed from another random number generator. This allows
the stdlib to use an OSRng to create seeded instances of these RNGs.
- Rand implementations for `Box<T>` and `@T` were removed. These seemed to be
pretty rare in the codebase, and it allows for librand to not depend on
liballoc. Additionally, other pointer types like Rc<T> and Arc<T> were not
supported.
- Remove a slew of old deprecated functions
## [Rust 0.10] - 2014-04-03
### Changed
- replace `Rng.shuffle`'s functionality with `.shuffle_mut`
- bubble up IO errors when creating an OSRng
### Fixed
- Use `fill()` instead of `read()`
- Rewrite OsRng in Rust for windows
## [0.10-pre] - 2014-03-02
### Added
- Separate `rand` out of the standard library

12
vendor/rand/COPYRIGHT vendored Normal file

@@ -0,0 +1,12 @@
Copyrights in the Rand project are retained by their contributors. No
copyright assignment is required to contribute to the Rand project.
For full authorship information, see the version control history.
Except as otherwise noted (below and/or in individual files), Rand is
licensed under the Apache License, Version 2.0 <LICENSE-APACHE> or
<http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<LICENSE-MIT> or <http://opensource.org/licenses/MIT>, at your option.
The Rand project includes code from the Rust project
published under these same licenses.

170
vendor/rand/Cargo.lock generated vendored Normal file

@@ -0,0 +1,170 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "bincode"
version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
dependencies = [
"serde",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "getrandom"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "libc"
version = "0.2.112"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125"
[[package]]
name = "libm"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a"
[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
"cfg-if",
]
[[package]]
name = "packed_simd_2"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "defdcfef86dcc44ad208f71d9ff4ce28df6537a4e0d6b0e8e845cb8ca10059a6"
dependencies = [
"cfg-if",
"libm",
]
[[package]]
name = "ppv-lite86"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
[[package]]
name = "proc-macro2"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quote"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rand"
version = "0.8.5"
dependencies = [
"bincode",
"libc",
"log",
"packed_simd_2",
"rand_chacha",
"rand_core",
"rand_pcg",
"serde",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
"getrandom",
"serde",
]
[[package]]
name = "rand_pcg"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e"
dependencies = [
"rand_core",
]
[[package]]
name = "serde"
version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "syn"
version = "1.0.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"

75
vendor/rand/Cargo.toml vendored Normal file

@@ -0,0 +1,75 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "rand"
version = "0.8.5"
authors = ["The Rand Project Developers", "The Rust Project Developers"]
include = ["src/", "LICENSE-*", "README.md", "CHANGELOG.md", "COPYRIGHT"]
autobenches = true
description = "Random number generators and other randomness functionality.\n"
homepage = "https://rust-random.github.io/book"
documentation = "https://docs.rs/rand"
readme = "README.md"
keywords = ["random", "rng"]
categories = ["algorithms", "no-std"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-random/rand"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "doc_cfg"]
[package.metadata.playground]
features = ["small_rng", "serde1"]
[dependencies.log]
version = "0.4.4"
optional = true
[dependencies.packed_simd]
version = "0.3.7"
features = ["into_bits"]
optional = true
package = "packed_simd_2"
[dependencies.rand_chacha]
version = "0.3.0"
optional = true
default-features = false
[dependencies.rand_core]
version = "0.6.0"
[dependencies.serde]
version = "1.0.103"
features = ["derive"]
optional = true
[dev-dependencies.bincode]
version = "1.2.1"
[dev-dependencies.rand_pcg]
version = "0.3.0"
[features]
alloc = ["rand_core/alloc"]
default = ["std", "std_rng"]
getrandom = ["rand_core/getrandom"]
min_const_gen = []
nightly = []
serde1 = ["serde", "rand_core/serde1"]
simd_support = ["packed_simd"]
small_rng = []
std = ["rand_core/std", "rand_chacha/std", "alloc", "getrandom", "libc"]
std_rng = ["rand_chacha"]
[target."cfg(unix)".dependencies.libc]
version = "0.2.22"
optional = true
default-features = false

176
vendor/rand/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

26
vendor/rand/LICENSE-MIT vendored Normal file

@@ -0,0 +1,26 @@
Copyright 2018 Developers of the Rand project
Copyright (c) 2014 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

158
vendor/rand/README.md vendored Normal file

@@ -0,0 +1,158 @@
# Rand
[![Test Status](https://github.com/rust-random/rand/workflows/Tests/badge.svg?event=push)](https://github.com/rust-random/rand/actions)
[![Crate](https://img.shields.io/crates/v/rand.svg)](https://crates.io/crates/rand)
[![Book](https://img.shields.io/badge/book-master-yellow.svg)](https://rust-random.github.io/book/)
[![API](https://img.shields.io/badge/api-master-yellow.svg)](https://rust-random.github.io/rand/rand)
[![API](https://docs.rs/rand/badge.svg)](https://docs.rs/rand)
[![Minimum rustc version](https://img.shields.io/badge/rustc-1.36+-lightgray.svg)](https://github.com/rust-random/rand#rust-version-requirements)
A Rust library for random number generation, featuring:
- Easy random value generation and usage via the [`Rng`](https://docs.rs/rand/*/rand/trait.Rng.html),
[`SliceRandom`](https://docs.rs/rand/*/rand/seq/trait.SliceRandom.html) and
[`IteratorRandom`](https://docs.rs/rand/*/rand/seq/trait.IteratorRandom.html) traits
- Secure seeding via the [`getrandom` crate](https://crates.io/crates/getrandom)
and fast, convenient generation via [`thread_rng`](https://docs.rs/rand/*/rand/fn.thread_rng.html)
- A modular design built over [`rand_core`](https://crates.io/crates/rand_core)
([see the book](https://rust-random.github.io/book/crates.html))
- Fast implementations of the best-in-class [cryptographic](https://rust-random.github.io/book/guide-rngs.html#cryptographically-secure-pseudo-random-number-generators-csprngs) and
[non-cryptographic](https://rust-random.github.io/book/guide-rngs.html#basic-pseudo-random-number-generators-prngs) generators
- A flexible [`distributions`](https://docs.rs/rand/*/rand/distributions/index.html) module
- Samplers for a large number of random number distributions via our own
[`rand_distr`](https://docs.rs/rand_distr) and via
the [`statrs`](https://docs.rs/statrs/0.13.0/statrs/) crate
- [Portably reproducible output](https://rust-random.github.io/book/portability.html)
- `#[no_std]` compatibility (partial)
- *Many* performance optimisations
It's also worth pointing out what `rand` *is not*:
- Small. Most low-level crates are small, but the higher-level `rand` and
`rand_distr` each contain a lot of functionality.
- Simple (implementation). We have a strong focus on correctness, speed and flexibility, but
not simplicity. If you prefer a small-and-simple library, there are
alternatives including [fastrand](https://crates.io/crates/fastrand)
and [oorandom](https://crates.io/crates/oorandom).
- Slow. We take performance seriously, with considerations also for set-up
time of new distributions, commonly-used parameters, and parameters of the
current sampler.
Documentation:
- [The Rust Rand Book](https://rust-random.github.io/book)
- [API reference (master branch)](https://rust-random.github.io/rand)
- [API reference (docs.rs)](https://docs.rs/rand)
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
rand = "0.8.4"
```
To get started using Rand, see [The Book](https://rust-random.github.io/book).
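As a quick smoke test that the dependency resolves, a minimal sketch (not part of the crate's own docs) using the documented `thread_rng` and `Rng::gen_range` API might look like this:
```rust
use rand::Rng;

fn main() {
    // Thread-local generator, lazily seeded by the operating system.
    let mut rng = rand::thread_rng();

    // Uniform sample from a half-open integer range.
    let die: u8 = rng.gen_range(1..7);

    // Uniform float in [0, 1) via the Standard distribution.
    let x: f64 = rng.gen();

    println!("die roll: {}, uniform float: {}", die, x);
}
```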
## Versions
Rand is *mature* (suitable for general usage, with infrequent breaking releases
which minimise breakage) but not yet at 1.0. We maintain compatibility with
pinned versions of the Rust compiler (see below).
Current Rand versions are:
- Version 0.7 was released in June 2019, moving most non-uniform distributions
to an external crate, moving `from_entropy` to `SeedableRng`, and many small
changes and fixes.
- Version 0.8 was released in December 2020 with many small changes.
A detailed [changelog](CHANGELOG.md) is available for releases.
When upgrading to the next minor series (especially 0.4 → 0.5), we recommend
reading the [Upgrade Guide](https://rust-random.github.io/book/update.html).
Rand has not yet reached 1.0 implying some breaking changes may arrive in the
future ([SemVer](https://semver.org/) allows each 0.x.0 release to include
breaking changes), but is considered *mature*: breaking changes are minimised
and breaking releases are infrequent.
Rand libs have inter-dependencies and make use of the
[semver trick](https://github.com/dtolnay/semver-trick/) in order to make traits
compatible across crate versions. (This is especially important for `RngCore`
and `SeedableRng`.) A few crate releases are thus compatibility shims,
depending on the *next* lib version (e.g. `rand_core` versions `0.2.2` and
`0.3.1`). This means, for example, that `rand_core_0_4_0::SeedableRng` and
`rand_core_0_3_0::SeedableRng` are distinct, incompatible traits, which can
cause build errors. Usually, running `cargo update` is enough to fix any issues.
### Yanked versions
Some versions of Rand crates have been yanked ("unreleased"). Where this occurs,
the crate's CHANGELOG *should* be updated with a rationale, and a search on the
issue tracker with the keyword `yank` *should* uncover the motivation.
### Rust version requirements
Since version 0.8, Rand requires **Rustc version 1.36 or greater**.
Rand 0.7 requires Rustc 1.32 or greater while versions 0.5 require Rustc 1.22 or
greater, and 0.4 and 0.3 (since approx. June 2017) require Rustc version 1.15 or
greater. Subsets of the Rand code may work with older Rust versions, but this is
not supported.
Continuous Integration (CI) will always test the minimum supported Rustc version
(the MSRV). The current policy is that this can be updated in any
Rand release if required, but the change must be noted in the changelog.
## Crate Features
Rand is built with these features enabled by default:
- `std` enables functionality dependent on the `std` lib
- `alloc` (implied by `std`) enables functionality requiring an allocator
- `getrandom` (implied by `std`) is an optional dependency providing the code
behind `rngs::OsRng`
- `std_rng` enables inclusion of `StdRng`, `thread_rng` and `random`
(the latter two *also* require that `std` be enabled)
Optionally, the following dependencies can be enabled:
- `log` enables logging via the `log` crate
Additionally, these features configure Rand:
- `small_rng` enables inclusion of the `SmallRng` PRNG
- `nightly` enables some optimizations requiring nightly Rust
- `simd_support` (experimental) enables sampling of SIMD values
(uniformly random SIMD integers and floats), requiring nightly Rust
- `min_const_gen` enables generating random arrays of
any size using min-const-generics, requiring Rust ≥ 1.51.
Note that nightly features are not stable and therefore not all library and
compiler versions will be compatible. This is especially true of Rand's
experimental `simd_support` feature.
Rand supports limited functionality in `no_std` mode (enabled via
`default-features = false`). In this case, `OsRng` and `from_entropy` are
unavailable (unless `getrandom` is enabled), large parts of `seq` are
unavailable (unless `alloc` is enabled), and `thread_rng` and `random` are
unavailable.
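For the `no_std` path just described, a hedged sketch (assuming `rand` is pulled in with `default-features = false` plus the `small_rng` feature, and that deterministic seeding is acceptable) seeds `SmallRng` explicitly instead of using `thread_rng`/`from_entropy`; the `main` and `println!` below are only for demonstration and themselves need `std`:
```rust
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};

// Sampling needs no `std`: seed a small, non-cryptographic PRNG explicitly,
// since `thread_rng` and `OsRng` are unavailable without default features.
fn roll(seed: u64) -> u8 {
    let mut rng = SmallRng::seed_from_u64(seed);
    rng.gen_range(1..7)
}

fn main() {
    println!("seeded roll: {}", roll(42));
}
```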
### WASM support
The WASM target `wasm32-unknown-unknown` is not *automatically* supported by
`rand` or `getrandom`. To solve this, either use a different target such as
`wasm32-wasi` or add a direct dependency on `getrandom` with the `js` feature
(if the target supports JavaScript). See
[getrandom#WebAssembly support](https://docs.rs/getrandom/latest/getrandom/#webassembly-support).
# License
Rand is distributed under the terms of both the MIT license and the
Apache License (Version 2.0).
See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT), and
[COPYRIGHT](COPYRIGHT) for details.

219
vendor/rand/src/distributions/bernoulli.rs vendored Normal file

@@ -0,0 +1,219 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Bernoulli distribution.
use crate::distributions::Distribution;
use crate::Rng;
use core::{fmt, u64};
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// The Bernoulli distribution.
///
/// This is a special case of the Binomial distribution where `n = 1`.
///
/// # Example
///
/// ```rust
/// use rand::distributions::{Bernoulli, Distribution};
///
/// let d = Bernoulli::new(0.3).unwrap();
/// let v = d.sample(&mut rand::thread_rng());
/// println!("{} is from a Bernoulli distribution", v);
/// ```
///
/// # Precision
///
/// This `Bernoulli` distribution uses 64 bits from the RNG (a `u64`),
/// so only probabilities that are multiples of 2<sup>-64</sup> can be
/// represented.
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Bernoulli {
/// Probability of success, relative to the maximal integer.
p_int: u64,
}
// To sample from the Bernoulli distribution we use a method that compares a
// random `u64` value `v < (p * 2^64)`.
//
// If `p == 1.0`, the integer `v` to compare against cannot be represented as a
// `u64`. We manually set it to `u64::MAX` instead (2^64 - 1 instead of 2^64).
// Note that a value of `p < 1.0` can never result in `u64::MAX`, because an
// `f64` only has 53 bits of precision, and the next largest value of `p` will
// result in `2^64 - 2048`.
//
// Also there is a 100% theoretical concern: if someone consistently wants to
// generate `true` using the Bernoulli distribution (i.e. by using a probability
// of `1.0`), just using `u64::MAX` is not enough. On average it would return
// false once every 2^64 iterations. Some people apparently care about this
// case.
//
// That is why we special-case `u64::MAX` to always return `true`, without using
// the RNG, and pay the performance price for all uses that *are* reasonable.
// Luckily, if `new()` and `sample` are close, the compiler can optimize out the
// extra check.
const ALWAYS_TRUE: u64 = u64::MAX;
// This is just `2.0.powi(64)`, but written this way because it is not available
// in `no_std` mode.
const SCALE: f64 = 2.0 * (1u64 << 63) as f64;
/// Error type returned from `Bernoulli::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BernoulliError {
/// `p < 0` or `p > 1`.
InvalidProbability,
}
impl fmt::Display for BernoulliError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
BernoulliError::InvalidProbability => "p is outside [0, 1] in Bernoulli distribution",
})
}
}
#[cfg(feature = "std")]
impl ::std::error::Error for BernoulliError {}
impl Bernoulli {
/// Construct a new `Bernoulli` with the given probability of success `p`.
///
/// # Precision
///
/// For `p = 1.0`, the resulting distribution will always generate true.
/// For `p = 0.0`, the resulting distribution will always generate false.
///
/// This method is accurate for any input `p` in the range `[0, 1]` which is
/// a multiple of 2<sup>-64</sup>. (Note that not all multiples of
/// 2<sup>-64</sup> in `[0, 1]` can be represented as a `f64`.)
#[inline]
pub fn new(p: f64) -> Result<Bernoulli, BernoulliError> {
if !(0.0..1.0).contains(&p) {
if p == 1.0 {
return Ok(Bernoulli { p_int: ALWAYS_TRUE });
}
return Err(BernoulliError::InvalidProbability);
}
Ok(Bernoulli {
p_int: (p * SCALE) as u64,
})
}
/// Construct a new `Bernoulli` with the probability of success of
/// `numerator`-in-`denominator`. I.e. `new_ratio(2, 3)` will return
/// a `Bernoulli` with a 2-in-3 chance, or about 67%, of returning `true`.
///
/// If `numerator == denominator`, the samples are always `true`; if
/// `numerator == 0`, the samples are always `false`. For
/// `numerator > denominator` or `denominator == 0`, this returns an
/// error.
#[inline]
pub fn from_ratio(numerator: u32, denominator: u32) -> Result<Bernoulli, BernoulliError> {
if numerator > denominator || denominator == 0 {
return Err(BernoulliError::InvalidProbability);
}
if numerator == denominator {
return Ok(Bernoulli { p_int: ALWAYS_TRUE });
}
let p_int = ((f64::from(numerator) / f64::from(denominator)) * SCALE) as u64;
Ok(Bernoulli { p_int })
}
}
impl Distribution<bool> for Bernoulli {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
// Make sure to always return true for p = 1.0.
if self.p_int == ALWAYS_TRUE {
return true;
}
let v: u64 = rng.gen();
v < self.p_int
}
}
#[cfg(test)]
mod test {
use super::Bernoulli;
use crate::distributions::Distribution;
use crate::Rng;
#[test]
#[cfg(feature="serde1")]
fn test_serializing_deserializing_bernoulli() {
let coin_flip = Bernoulli::new(0.5).unwrap();
let de_coin_flip : Bernoulli = bincode::deserialize(&bincode::serialize(&coin_flip).unwrap()).unwrap();
assert_eq!(coin_flip.p_int, de_coin_flip.p_int);
}
#[test]
fn test_trivial() {
// We prefer to be explicit here.
#![allow(clippy::bool_assert_comparison)]
let mut r = crate::test::rng(1);
let always_false = Bernoulli::new(0.0).unwrap();
let always_true = Bernoulli::new(1.0).unwrap();
for _ in 0..5 {
assert_eq!(r.sample::<bool, _>(&always_false), false);
assert_eq!(r.sample::<bool, _>(&always_true), true);
assert_eq!(Distribution::<bool>::sample(&always_false, &mut r), false);
assert_eq!(Distribution::<bool>::sample(&always_true, &mut r), true);
}
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_average() {
const P: f64 = 0.3;
const NUM: u32 = 3;
const DENOM: u32 = 10;
let d1 = Bernoulli::new(P).unwrap();
let d2 = Bernoulli::from_ratio(NUM, DENOM).unwrap();
const N: u32 = 100_000;
let mut sum1: u32 = 0;
let mut sum2: u32 = 0;
let mut rng = crate::test::rng(2);
for _ in 0..N {
if d1.sample(&mut rng) {
sum1 += 1;
}
if d2.sample(&mut rng) {
sum2 += 1;
}
}
let avg1 = (sum1 as f64) / (N as f64);
assert!((avg1 - P).abs() < 5e-3);
let avg2 = (sum2 as f64) / (N as f64);
assert!((avg2 - (NUM as f64) / (DENOM as f64)).abs() < 5e-3);
}
#[test]
fn value_stability() {
let mut rng = crate::test::rng(3);
let distr = Bernoulli::new(0.4532).unwrap();
let mut buf = [false; 10];
for x in &mut buf {
*x = rng.sample(&distr);
}
assert_eq!(buf, [
true, false, false, true, false, false, true, true, true, true
]);
}
#[test]
fn bernoulli_distributions_can_be_compared() {
assert_eq!(Bernoulli::new(1.0), Bernoulli::new(1.0));
}
}

272
vendor/rand/src/distributions/distribution.rs vendored Normal file

@@ -0,0 +1,272 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Distribution trait and associates
use crate::Rng;
use core::iter;
#[cfg(feature = "alloc")]
use alloc::string::String;
/// Types (distributions) that can be used to create a random instance of `T`.
///
/// It is possible to sample from a distribution through both the
/// `Distribution` and [`Rng`] traits, via `distr.sample(&mut rng)` and
/// `rng.sample(distr)`. They also both offer the [`sample_iter`] method, which
/// produces an iterator that samples from the distribution.
///
/// All implementations are expected to be immutable; this has the significant
/// advantage of not needing to consider thread safety, and for most
/// distributions efficient state-less sampling algorithms are available.
///
/// Implementations are typically expected to be portable with reproducible
/// results when used with a PRNG with fixed seed; see the
/// [portability chapter](https://rust-random.github.io/book/portability.html)
/// of The Rust Rand Book. In some cases this does not apply, e.g. the `usize`
/// type requires different sampling on 32-bit and 64-bit machines.
///
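/// # Example
///
/// A minimal sketch of the two equivalent calling styles described above:
///
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::{Distribution, Uniform};
///
/// let mut rng = thread_rng();
/// let die = Uniform::new_inclusive(1, 6);
/// let a = die.sample(&mut rng); // via the `Distribution` trait
/// let b = rng.sample(die);      // via the `Rng` trait
/// assert!((1..=6).contains(&a) && (1..=6).contains(&b));
/// ```
///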
/// [`sample_iter`]: Distribution::sample_iter
pub trait Distribution<T> {
/// Generate a random value of `T`, using `rng` as the source of randomness.
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T;
/// Create an iterator that generates random values of `T`, using `rng` as
/// the source of randomness.
///
/// Note that this function takes `self` by value. This works since
/// `Distribution<T>` is impl'd for `&D` where `D: Distribution<T>`,
/// however borrowing is not automatic hence `distr.sample_iter(...)` may
/// need to be replaced with `(&distr).sample_iter(...)` to borrow or
/// `(&*distr).sample_iter(...)` to reborrow an existing reference.
///
/// # Example
///
/// ```
/// use rand::thread_rng;
/// use rand::distributions::{Distribution, Alphanumeric, Uniform, Standard};
///
/// let mut rng = thread_rng();
///
/// // Vec of 16 x f32:
/// let v: Vec<f32> = Standard.sample_iter(&mut rng).take(16).collect();
///
/// // String:
/// let s: String = Alphanumeric
/// .sample_iter(&mut rng)
/// .take(7)
/// .map(char::from)
/// .collect();
///
/// // Dice-rolling:
/// let die_range = Uniform::new_inclusive(1, 6);
/// let mut roll_die = die_range.sample_iter(&mut rng);
/// while roll_die.next().unwrap() != 6 {
/// println!("Not a 6; rolling again!");
/// }
/// ```
fn sample_iter<R>(self, rng: R) -> DistIter<Self, R, T>
where
R: Rng,
Self: Sized,
{
DistIter {
distr: self,
rng,
phantom: ::core::marker::PhantomData,
}
}
    /// Create a distribution of values of type `S` by mapping the output of
    /// `Self` through the closure `F`.
///
/// # Example
///
/// ```
/// use rand::thread_rng;
/// use rand::distributions::{Distribution, Uniform};
///
/// let mut rng = thread_rng();
///
/// let die = Uniform::new_inclusive(1, 6);
/// let even_number = die.map(|num| num % 2 == 0);
/// while !even_number.sample(&mut rng) {
/// println!("Still odd; rolling again!");
/// }
/// ```
fn map<F, S>(self, func: F) -> DistMap<Self, F, T, S>
where
F: Fn(T) -> S,
Self: Sized,
{
DistMap {
distr: self,
func,
phantom: ::core::marker::PhantomData,
}
}
}
impl<'a, T, D: Distribution<T>> Distribution<T> for &'a D {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> T {
(*self).sample(rng)
}
}
/// An iterator that generates random values of `T` with distribution `D`,
/// using `R` as the source of randomness.
///
/// This `struct` is created by the [`sample_iter`] method on [`Distribution`].
/// See its documentation for more.
///
/// [`sample_iter`]: Distribution::sample_iter
#[derive(Debug)]
pub struct DistIter<D, R, T> {
distr: D,
rng: R,
phantom: ::core::marker::PhantomData<T>,
}
impl<D, R, T> Iterator for DistIter<D, R, T>
where
D: Distribution<T>,
R: Rng,
{
type Item = T;
#[inline(always)]
fn next(&mut self) -> Option<T> {
// Here, self.rng may be a reference, but we must take &mut anyway.
// Even if sample could take an R: Rng by value, we would need to do this
// since Rng is not copyable and we cannot enforce that this is "reborrowable".
Some(self.distr.sample(&mut self.rng))
}
fn size_hint(&self) -> (usize, Option<usize>) {
(usize::max_value(), None)
}
}
impl<D, R, T> iter::FusedIterator for DistIter<D, R, T>
where
D: Distribution<T>,
R: Rng,
{
}
#[cfg(feature = "nightly")]
impl<D, R, T> iter::TrustedLen for DistIter<D, R, T>
where
D: Distribution<T>,
R: Rng,
{
}
/// A distribution of values of type `S` derived from the distribution `D`
/// by mapping its output of type `T` through the closure `F`.
///
/// This `struct` is created by the [`Distribution::map`] method.
/// See its documentation for more.
#[derive(Debug)]
pub struct DistMap<D, F, T, S> {
distr: D,
func: F,
phantom: ::core::marker::PhantomData<fn(T) -> S>,
}
impl<D, F, T, S> Distribution<S> for DistMap<D, F, T, S>
where
D: Distribution<T>,
F: Fn(T) -> S,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> S {
(self.func)(self.distr.sample(rng))
}
}
/// `String` sampler
///
/// Sampling a `String` of random characters is not quite the same as collecting
/// a sequence of chars. This trait contains some helpers.
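///
/// # Example
///
/// A minimal sketch using the `Alphanumeric` implementation:
///
/// ```
/// use rand::distributions::{Alphanumeric, DistString};
///
/// let s = Alphanumeric.sample_string(&mut rand::thread_rng(), 8);
/// assert_eq!(s.len(), 8);
/// ```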
#[cfg(feature = "alloc")]
pub trait DistString {
/// Append `len` random chars to `string`
fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize);
/// Generate a `String` of `len` random chars
#[inline]
fn sample_string<R: Rng + ?Sized>(&self, rng: &mut R, len: usize) -> String {
let mut s = String::new();
self.append_string(rng, &mut s, len);
s
}
}
#[cfg(test)]
mod tests {
use crate::distributions::{Distribution, Uniform};
use crate::Rng;
#[test]
fn test_distributions_iter() {
use crate::distributions::Open01;
let mut rng = crate::test::rng(210);
let distr = Open01;
let mut iter = Distribution::<f32>::sample_iter(distr, &mut rng);
let mut sum: f32 = 0.;
for _ in 0..100 {
sum += iter.next().unwrap();
}
assert!(0. < sum && sum < 100.);
}
#[test]
fn test_distributions_map() {
let dist = Uniform::new_inclusive(0, 5).map(|val| val + 15);
let mut rng = crate::test::rng(212);
let val = dist.sample(&mut rng);
assert!((15..=20).contains(&val));
}
#[test]
fn test_make_an_iter() {
fn ten_dice_rolls_other_than_five<R: Rng>(
rng: &mut R,
) -> impl Iterator<Item = i32> + '_ {
Uniform::new_inclusive(1, 6)
.sample_iter(rng)
.filter(|x| *x != 5)
.take(10)
}
let mut rng = crate::test::rng(211);
let mut count = 0;
for val in ten_dice_rolls_other_than_five(&mut rng) {
assert!((1..=6).contains(&val) && val != 5);
count += 1;
}
assert_eq!(count, 10);
}
#[test]
#[cfg(feature = "alloc")]
fn test_dist_string() {
use core::str;
use crate::distributions::{Alphanumeric, DistString, Standard};
let mut rng = crate::test::rng(213);
let s1 = Alphanumeric.sample_string(&mut rng, 20);
assert_eq!(s1.len(), 20);
assert_eq!(str::from_utf8(s1.as_bytes()), Ok(s1.as_str()));
let s2 = Standard.sample_string(&mut rng, 20);
assert_eq!(s2.chars().count(), 20);
assert_eq!(str::from_utf8(s2.as_bytes()), Ok(s2.as_str()));
}
}

312
vendor/rand/src/distributions/float.rs vendored Normal file

@@ -0,0 +1,312 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Basic floating-point number distributions
use crate::distributions::utils::FloatSIMDUtils;
use crate::distributions::{Distribution, Standard};
use crate::Rng;
use core::mem;
#[cfg(feature = "simd_support")] use packed_simd::*;
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// A distribution to sample floating point numbers uniformly in the half-open
/// interval `(0, 1]`, i.e. including 1 but not 0.
///
/// All values that can be generated are of the form `n * ε/2`. For `f32`
/// the 24 most significant random bits of a `u32` are used and for `f64` the
/// 53 most significant bits of a `u64` are used. The conversion uses the
/// multiplicative method.
///
/// See also: [`Standard`] which samples from `[0, 1)`, [`Open01`]
/// which samples from `(0, 1)` and [`Uniform`] which samples from arbitrary
/// ranges.
///
/// # Example
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::OpenClosed01;
///
/// let val: f32 = thread_rng().sample(OpenClosed01);
/// println!("f32 from (0, 1): {}", val);
/// ```
///
/// [`Standard`]: crate::distributions::Standard
/// [`Open01`]: crate::distributions::Open01
/// [`Uniform`]: crate::distributions::uniform::Uniform
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct OpenClosed01;
/// A distribution to sample floating point numbers uniformly in the open
/// interval `(0, 1)`, i.e. not including either endpoint.
///
/// All values that can be generated are of the form `n * ε + ε/2`. For `f32`
/// the 23 most significant random bits of a `u32` are used, for `f64` 52 from
/// a `u64`. The conversion uses a transmute-based method.
///
/// See also: [`Standard`] which samples from `[0, 1)`, [`OpenClosed01`]
/// which samples from `(0, 1]` and [`Uniform`] which samples from arbitrary
/// ranges.
///
/// # Example
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::Open01;
///
/// let val: f32 = thread_rng().sample(Open01);
/// println!("f32 from (0, 1): {}", val);
/// ```
///
/// [`Standard`]: crate::distributions::Standard
/// [`OpenClosed01`]: crate::distributions::OpenClosed01
/// [`Uniform`]: crate::distributions::uniform::Uniform
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Open01;
// This trait is needed by both this lib and rand_distr hence is a hidden export
#[doc(hidden)]
pub trait IntoFloat {
type F;
/// Helper method to combine the fraction and a constant exponent into a
/// float.
///
/// Only the least significant bits of `self` may be set, 23 for `f32` and
/// 52 for `f64`.
/// The resulting value will fall in a range that depends on the exponent.
/// As an example the range with exponent 0 will be
/// [2<sup>0</sup>..2<sup>1</sup>), which is [1..2).
fn into_float_with_exponent(self, exponent: i32) -> Self::F;
}
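// An illustrative check of the layout described above (comment only): with
// exponent 0 the result lies in [1, 2); a zero fraction gives exactly 1.0,
// because the `f64` bit pattern of 1.0 is `(1023 + 0) << 52`:
//
//     assert_eq!(f64::from_bits(1023u64 << 52), 1.0);
//     assert_eq!(f64::from_bits((1023u64 << 52) | 1), 1.0 + f64::EPSILON);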
macro_rules! float_impls {
($ty:ident, $uty:ident, $f_scalar:ident, $u_scalar:ty,
$fraction_bits:expr, $exponent_bias:expr) => {
impl IntoFloat for $uty {
type F = $ty;
#[inline(always)]
fn into_float_with_exponent(self, exponent: i32) -> $ty {
// The exponent is encoded using an offset-binary representation
let exponent_bits: $u_scalar =
(($exponent_bias + exponent) as $u_scalar) << $fraction_bits;
$ty::from_bits(self | exponent_bits)
}
}
impl Distribution<$ty> for Standard {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
// Multiply-based method; 24/53 random bits; [0, 1) interval.
// We use the most significant bits because for simple RNGs
// those are usually more random.
let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
let precision = $fraction_bits + 1;
let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
let value: $uty = rng.gen();
let value = value >> (float_size - precision);
scale * $ty::cast_from_int(value)
}
}
impl Distribution<$ty> for OpenClosed01 {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
// Multiply-based method; 24/53 random bits; (0, 1] interval.
// We use the most significant bits because for simple RNGs
// those are usually more random.
let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
let precision = $fraction_bits + 1;
let scale = 1.0 / ((1 as $u_scalar << precision) as $f_scalar);
let value: $uty = rng.gen();
let value = value >> (float_size - precision);
// Add 1 to shift up; will not overflow because of right-shift:
scale * $ty::cast_from_int(value + 1)
}
}
impl Distribution<$ty> for Open01 {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
// Transmute-based method; 23/52 random bits; (0, 1) interval.
// We use the most significant bits because for simple RNGs
// those are usually more random.
use core::$f_scalar::EPSILON;
let float_size = mem::size_of::<$f_scalar>() as u32 * 8;
let value: $uty = rng.gen();
let fraction = value >> (float_size - $fraction_bits);
fraction.into_float_with_exponent(0) - (1.0 - EPSILON / 2.0)
}
}
}
}
float_impls! { f32, u32, f32, u32, 23, 127 }
float_impls! { f64, u64, f64, u64, 52, 1023 }
#[cfg(feature = "simd_support")]
float_impls! { f32x2, u32x2, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { f32x4, u32x4, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { f32x8, u32x8, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { f32x16, u32x16, f32, u32, 23, 127 }
#[cfg(feature = "simd_support")]
float_impls! { f64x2, u64x2, f64, u64, 52, 1023 }
#[cfg(feature = "simd_support")]
float_impls! { f64x4, u64x4, f64, u64, 52, 1023 }
#[cfg(feature = "simd_support")]
float_impls! { f64x8, u64x8, f64, u64, 52, 1023 }
#[cfg(test)]
mod tests {
use super::*;
use crate::rngs::mock::StepRng;
const EPSILON32: f32 = ::core::f32::EPSILON;
const EPSILON64: f64 = ::core::f64::EPSILON;
macro_rules! test_f32 {
($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
#[test]
fn $fnn() {
// Standard
let mut zeros = StepRng::new(0, 0);
assert_eq!(zeros.gen::<$ty>(), $ZERO);
let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
let mut max = StepRng::new(!0, 0);
assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
// OpenClosed01
let mut zeros = StepRng::new(0, 0);
assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), 0.0 + $EPSILON / 2.0);
let mut one = StepRng::new(1 << 8 | 1 << (8 + 32), 0);
assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
let mut max = StepRng::new(!0, 0);
assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
// Open01
let mut zeros = StepRng::new(0, 0);
assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
let mut one = StepRng::new(1 << 9 | 1 << (9 + 32), 0);
assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
let mut max = StepRng::new(!0, 0);
assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
}
};
}
test_f32! { f32_edge_cases, f32, 0.0, EPSILON32 }
#[cfg(feature = "simd_support")]
test_f32! { f32x2_edge_cases, f32x2, f32x2::splat(0.0), f32x2::splat(EPSILON32) }
#[cfg(feature = "simd_support")]
test_f32! { f32x4_edge_cases, f32x4, f32x4::splat(0.0), f32x4::splat(EPSILON32) }
#[cfg(feature = "simd_support")]
test_f32! { f32x8_edge_cases, f32x8, f32x8::splat(0.0), f32x8::splat(EPSILON32) }
#[cfg(feature = "simd_support")]
test_f32! { f32x16_edge_cases, f32x16, f32x16::splat(0.0), f32x16::splat(EPSILON32) }
macro_rules! test_f64 {
($fnn:ident, $ty:ident, $ZERO:expr, $EPSILON:expr) => {
#[test]
fn $fnn() {
// Standard
let mut zeros = StepRng::new(0, 0);
assert_eq!(zeros.gen::<$ty>(), $ZERO);
let mut one = StepRng::new(1 << 11, 0);
assert_eq!(one.gen::<$ty>(), $EPSILON / 2.0);
let mut max = StepRng::new(!0, 0);
assert_eq!(max.gen::<$ty>(), 1.0 - $EPSILON / 2.0);
// OpenClosed01
let mut zeros = StepRng::new(0, 0);
assert_eq!(zeros.sample::<$ty, _>(OpenClosed01), 0.0 + $EPSILON / 2.0);
let mut one = StepRng::new(1 << 11, 0);
assert_eq!(one.sample::<$ty, _>(OpenClosed01), $EPSILON);
let mut max = StepRng::new(!0, 0);
assert_eq!(max.sample::<$ty, _>(OpenClosed01), $ZERO + 1.0);
// Open01
let mut zeros = StepRng::new(0, 0);
assert_eq!(zeros.sample::<$ty, _>(Open01), 0.0 + $EPSILON / 2.0);
let mut one = StepRng::new(1 << 12, 0);
assert_eq!(one.sample::<$ty, _>(Open01), $EPSILON / 2.0 * 3.0);
let mut max = StepRng::new(!0, 0);
assert_eq!(max.sample::<$ty, _>(Open01), 1.0 - $EPSILON / 2.0);
}
};
}
test_f64! { f64_edge_cases, f64, 0.0, EPSILON64 }
#[cfg(feature = "simd_support")]
test_f64! { f64x2_edge_cases, f64x2, f64x2::splat(0.0), f64x2::splat(EPSILON64) }
#[cfg(feature = "simd_support")]
test_f64! { f64x4_edge_cases, f64x4, f64x4::splat(0.0), f64x4::splat(EPSILON64) }
#[cfg(feature = "simd_support")]
test_f64! { f64x8_edge_cases, f64x8, f64x8::splat(0.0), f64x8::splat(EPSILON64) }
#[test]
fn value_stability() {
fn test_samples<T: Copy + core::fmt::Debug + PartialEq, D: Distribution<T>>(
distr: &D, zero: T, expected: &[T],
) {
let mut rng = crate::test::rng(0x6f44f5646c2a7334);
let mut buf = [zero; 3];
for x in &mut buf {
*x = rng.sample(&distr);
}
assert_eq!(&buf, expected);
}
test_samples(&Standard, 0f32, &[0.0035963655, 0.7346052, 0.09778172]);
test_samples(&Standard, 0f64, &[
0.7346051961657583,
0.20298547462974248,
0.8166436635290655,
]);
test_samples(&OpenClosed01, 0f32, &[0.003596425, 0.73460525, 0.09778178]);
test_samples(&OpenClosed01, 0f64, &[
0.7346051961657584,
0.2029854746297426,
0.8166436635290656,
]);
test_samples(&Open01, 0f32, &[0.0035963655, 0.73460525, 0.09778172]);
test_samples(&Open01, 0f64, &[
0.7346051961657584,
0.20298547462974248,
0.8166436635290656,
]);
#[cfg(feature = "simd_support")]
{
// We only test a sub-set of types here. Values are identical to
// non-SIMD types; we assume this pattern continues across all
// SIMD types.
test_samples(&Standard, f32x2::new(0.0, 0.0), &[
f32x2::new(0.0035963655, 0.7346052),
f32x2::new(0.09778172, 0.20298547),
f32x2::new(0.34296435, 0.81664366),
]);
test_samples(&Standard, f64x2::new(0.0, 0.0), &[
f64x2::new(0.7346051961657583, 0.20298547462974248),
f64x2::new(0.8166436635290655, 0.7423708925400552),
f64x2::new(0.16387782224016323, 0.9087068770169618),
]);
}
}
}

274
vendor/rand/src/distributions/integer.rs vendored Normal file

@@ -0,0 +1,274 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The implementations of the `Standard` distribution for integer types.
use crate::distributions::{Distribution, Standard};
use crate::Rng;
#[cfg(all(target_arch = "x86", feature = "simd_support"))]
use core::arch::x86::{__m128i, __m256i};
#[cfg(all(target_arch = "x86_64", feature = "simd_support"))]
use core::arch::x86_64::{__m128i, __m256i};
use core::num::{NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize,
NonZeroU128};
#[cfg(feature = "simd_support")] use packed_simd::*;
impl Distribution<u8> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
rng.next_u32() as u8
}
}
impl Distribution<u16> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u16 {
rng.next_u32() as u16
}
}
impl Distribution<u32> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u32 {
rng.next_u32()
}
}
impl Distribution<u64> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
rng.next_u64()
}
}
impl Distribution<u128> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u128 {
// Use LE; we explicitly generate one value before the next.
let x = u128::from(rng.next_u64());
let y = u128::from(rng.next_u64());
(y << 64) | x
}
}
impl Distribution<usize> for Standard {
#[inline]
#[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
rng.next_u32() as usize
}
#[inline]
#[cfg(target_pointer_width = "64")]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
rng.next_u64() as usize
}
}
macro_rules! impl_int_from_uint {
($ty:ty, $uty:ty) => {
impl Distribution<$ty> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
rng.gen::<$uty>() as $ty
}
}
};
}
impl_int_from_uint! { i8, u8 }
impl_int_from_uint! { i16, u16 }
impl_int_from_uint! { i32, u32 }
impl_int_from_uint! { i64, u64 }
impl_int_from_uint! { i128, u128 }
impl_int_from_uint! { isize, usize }
macro_rules! impl_nzint {
($ty:ty, $new:path) => {
impl Distribution<$ty> for Standard {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
loop {
if let Some(nz) = $new(rng.gen()) {
break nz;
}
}
}
}
};
}
impl_nzint!(NonZeroU8, NonZeroU8::new);
impl_nzint!(NonZeroU16, NonZeroU16::new);
impl_nzint!(NonZeroU32, NonZeroU32::new);
impl_nzint!(NonZeroU64, NonZeroU64::new);
impl_nzint!(NonZeroU128, NonZeroU128::new);
impl_nzint!(NonZeroUsize, NonZeroUsize::new);
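// An illustrative sketch of what `impl_nzint!` expands to for `NonZeroU32`
// (comment only): sampling simply retries until a non-zero value appears.
//
//     impl Distribution<NonZeroU32> for Standard {
//         fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> NonZeroU32 {
//             loop {
//                 if let Some(nz) = NonZeroU32::new(rng.gen()) {
//                     break nz;
//                 }
//             }
//         }
//     }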
#[cfg(feature = "simd_support")]
macro_rules! simd_impl {
($(($intrinsic:ident, $vec:ty),)+) => {$(
impl Distribution<$intrinsic> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $intrinsic {
$intrinsic::from_bits(rng.gen::<$vec>())
}
}
)+};
($bits:expr,) => {};
($bits:expr, $ty:ty, $($ty_more:ty,)*) => {
simd_impl!($bits, $($ty_more,)*);
impl Distribution<$ty> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> $ty {
let mut vec: $ty = Default::default();
unsafe {
let ptr = &mut vec;
let b_ptr = &mut *(ptr as *mut $ty as *mut [u8; $bits/8]);
rng.fill_bytes(b_ptr);
}
vec.to_le()
}
}
};
}
#[cfg(feature = "simd_support")]
simd_impl!(16, u8x2, i8x2,);
#[cfg(feature = "simd_support")]
simd_impl!(32, u8x4, i8x4, u16x2, i16x2,);
#[cfg(feature = "simd_support")]
simd_impl!(64, u8x8, i8x8, u16x4, i16x4, u32x2, i32x2,);
#[cfg(feature = "simd_support")]
simd_impl!(128, u8x16, i8x16, u16x8, i16x8, u32x4, i32x4, u64x2, i64x2,);
#[cfg(feature = "simd_support")]
simd_impl!(256, u8x32, i8x32, u16x16, i16x16, u32x8, i32x8, u64x4, i64x4,);
#[cfg(feature = "simd_support")]
simd_impl!(512, u8x64, i8x64, u16x32, i16x32, u32x16, i32x16, u64x8, i64x8,);
#[cfg(all(
feature = "simd_support",
any(target_arch = "x86", target_arch = "x86_64")
))]
simd_impl!((__m128i, u8x16), (__m256i, u8x32),);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_integers() {
let mut rng = crate::test::rng(806);
rng.sample::<isize, _>(Standard);
rng.sample::<i8, _>(Standard);
rng.sample::<i16, _>(Standard);
rng.sample::<i32, _>(Standard);
rng.sample::<i64, _>(Standard);
rng.sample::<i128, _>(Standard);
rng.sample::<usize, _>(Standard);
rng.sample::<u8, _>(Standard);
rng.sample::<u16, _>(Standard);
rng.sample::<u32, _>(Standard);
rng.sample::<u64, _>(Standard);
rng.sample::<u128, _>(Standard);
}
#[test]
fn value_stability() {
fn test_samples<T: Copy + core::fmt::Debug + PartialEq>(zero: T, expected: &[T])
where Standard: Distribution<T> {
let mut rng = crate::test::rng(807);
let mut buf = [zero; 3];
for x in &mut buf {
*x = rng.sample(Standard);
}
assert_eq!(&buf, expected);
}
test_samples(0u8, &[9, 247, 111]);
test_samples(0u16, &[32265, 42999, 38255]);
test_samples(0u32, &[2220326409, 2575017975, 2018088303]);
test_samples(0u64, &[
11059617991457472009,
16096616328739788143,
1487364411147516184,
]);
test_samples(0u128, &[
296930161868957086625409848350820761097,
145644820879247630242265036535529306392,
111087889832015897993126088499035356354,
]);
#[cfg(any(target_pointer_width = "32", target_pointer_width = "16"))]
test_samples(0usize, &[2220326409, 2575017975, 2018088303]);
#[cfg(target_pointer_width = "64")]
test_samples(0usize, &[
11059617991457472009,
16096616328739788143,
1487364411147516184,
]);
test_samples(0i8, &[9, -9, 111]);
// Skip further i* types: they are simple reinterpretation of u* samples
#[cfg(feature = "simd_support")]
{
// We only test a sub-set of types here and make assumptions about the rest.
test_samples(u8x2::default(), &[
u8x2::new(9, 126),
u8x2::new(247, 167),
u8x2::new(111, 149),
]);
test_samples(u8x4::default(), &[
u8x4::new(9, 126, 87, 132),
u8x4::new(247, 167, 123, 153),
u8x4::new(111, 149, 73, 120),
]);
test_samples(u8x8::default(), &[
u8x8::new(9, 126, 87, 132, 247, 167, 123, 153),
u8x8::new(111, 149, 73, 120, 68, 171, 98, 223),
u8x8::new(24, 121, 1, 50, 13, 46, 164, 20),
]);
test_samples(i64x8::default(), &[
i64x8::new(
-7387126082252079607,
-2350127744969763473,
1487364411147516184,
7895421560427121838,
602190064936008898,
6022086574635100741,
-5080089175222015595,
-4066367846667249123,
),
i64x8::new(
9180885022207963908,
3095981199532211089,
6586075293021332726,
419343203796414657,
3186951873057035255,
5287129228749947252,
444726432079249540,
-1587028029513790706,
),
i64x8::new(
6075236523189346388,
1351763722368165432,
-6192309979959753740,
-7697775502176768592,
-4482022114172078123,
7522501477800909500,
-1837258847956201231,
-586926753024886735,
),
]);
}
}
}

218
vendor/rand/src/distributions/mod.rs vendored Normal file

@@ -0,0 +1,218 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Generating random samples from probability distributions
//!
//! This module is the home of the [`Distribution`] trait and several of its
//! implementations. It is the workhorse behind some of the convenient
//! functionality of the [`Rng`] trait, e.g. [`Rng::gen`] and of course
//! [`Rng::sample`].
//!
//! Abstractly, a [probability distribution] describes the probability of
//! occurrence of each value in its sample space.
//!
//! More concretely, an implementation of `Distribution<T>` for type `X` is an
//! algorithm for choosing values from the sample space (a subset of `T`)
//! according to the distribution `X` represents, using an external source of
//! randomness (an RNG supplied to the `sample` function).
//!
//! A type `X` may implement `Distribution<T>` for multiple types `T`.
//! Any type implementing [`Distribution`] is stateless (i.e. immutable),
//! but it may have internal parameters set at construction time (for example,
//! [`Uniform`] allows specification of its sample space as a range within `T`).
//!
//!
//! # The `Standard` distribution
//!
//! The [`Standard`] distribution is important to mention. This is the
//! distribution used by [`Rng::gen`] and represents the "default" way to
//! produce a random value for many different types, including most primitive
//! types, tuples, arrays, and a few derived types. See the documentation of
//! [`Standard`] for more details.
//!
//! Implementing `Distribution<T>` for [`Standard`] for user types `T` makes it
//! possible to generate type `T` with [`Rng::gen`], and by extension also
//! with the [`random`] function.
//!
//! ## Random characters
//!
//! [`Alphanumeric`] is a simple distribution to sample random letters and
//! numbers of the `char` type; in contrast [`Standard`] may sample any valid
//! `char`.
//!
//!
//! # Uniform numeric ranges
//!
//! The [`Uniform`] distribution is more flexible than [`Standard`], but also
//! more specialised: it supports fewer target types, but allows the sample
//! space to be specified as an arbitrary range within its target type `T`.
//! Both [`Standard`] and [`Uniform`] are in some sense uniform distributions.
//!
//! Values may be sampled from this distribution using [`Rng::sample(Range)`] or
//! by creating a distribution object with [`Uniform::new`],
//! [`Uniform::new_inclusive`] or `From<Range>`. When the range limits are not
//! known at compile time it is typically faster to reuse an existing
//! `Uniform` object than to call [`Rng::sample(Range)`].
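//!
//! A minimal sketch of reusing a `Uniform` object:
//!
//! ```
//! use rand::distributions::{Distribution, Uniform};
//!
//! let mut rng = rand::thread_rng();
//! let die = Uniform::new_inclusive(1, 6);
//! let total: i32 = (0..3).map(|_| die.sample(&mut rng)).sum();
//! assert!((3..=18).contains(&total));
//! ```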
//!
//! User types `T` may also implement `Distribution<T>` for [`Uniform`],
//! although this is less straightforward than for [`Standard`] (see the
//! documentation in the [`uniform`] module). Doing so enables generation of
//! values of type `T` with [`Rng::sample(Range)`].
//!
//! ## Open and half-open ranges
//!
//! There are surprisingly many ways to uniformly generate random floats. A
//! range between 0 and 1 is standard, but the exact bounds (open vs closed)
//! and accuracy differ. In addition to the [`Standard`] distribution Rand offers
//! [`Open01`] and [`OpenClosed01`]. See "Floating point implementation" section of
//! [`Standard`] documentation for more details.
//!
//! # Non-uniform sampling
//!
//! Sampling a simple true/false outcome with a given probability has a name:
//! the [`Bernoulli`] distribution (this is used by [`Rng::gen_bool`]).
//!
//! For weighted sampling from a sequence of discrete values, use the
//! [`WeightedIndex`] distribution.
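//!
//! A minimal sketch of weighted sampling with [`WeightedIndex`]:
//!
//! ```
//! use rand::distributions::{Distribution, WeightedIndex};
//!
//! let weights = [2, 1, 1];
//! let dist = WeightedIndex::new(&weights).unwrap();
//! let idx = dist.sample(&mut rand::thread_rng());
//! assert!(idx < weights.len());
//! ```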
//!
//! This crate no longer includes other non-uniform distributions; instead
//! it is recommended that you use either [`rand_distr`] or [`statrs`].
//!
//!
//! [probability distribution]: https://en.wikipedia.org/wiki/Probability_distribution
//! [`rand_distr`]: https://crates.io/crates/rand_distr
//! [`statrs`]: https://crates.io/crates/statrs
//! [`random`]: crate::random
mod bernoulli;
mod distribution;
mod float;
mod integer;
mod other;
mod slice;
mod utils;
#[cfg(feature = "alloc")]
mod weighted_index;
#[doc(hidden)]
pub mod hidden_export {
pub use super::float::IntoFloat; // used by rand_distr
}
pub mod uniform;
#[deprecated(
since = "0.8.0",
note = "use rand::distributions::{WeightedIndex, WeightedError} instead"
)]
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod weighted;
pub use self::bernoulli::{Bernoulli, BernoulliError};
pub use self::distribution::{Distribution, DistIter, DistMap};
#[cfg(feature = "alloc")]
pub use self::distribution::DistString;
pub use self::float::{Open01, OpenClosed01};
pub use self::other::Alphanumeric;
pub use self::slice::Slice;
#[doc(inline)]
pub use self::uniform::Uniform;
#[cfg(feature = "alloc")]
pub use self::weighted_index::{WeightedError, WeightedIndex};
#[allow(unused)]
use crate::Rng;
/// A generic random value distribution, implemented for many primitive types.
/// Usually generates values with a numerically uniform distribution, and with a
/// range appropriate to the type.
///
/// ## Provided implementations
///
/// Assuming the provided `Rng` is well-behaved, these implementations
/// generate values with the following ranges and distributions:
///
/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed
/// over all values of the type.
/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all
/// code points in the range `0...0x10_FFFF`, except for the range
/// `0xD800...0xDFFF` (the surrogate code points). This includes
/// unassigned/reserved code points.
/// * `bool`: Generates `false` or `true`, each with probability 0.5.
/// * Floating point types (`f32` and `f64`): Uniformly distributed in the
/// half-open range `[0, 1)`. See notes below.
/// * Wrapping integers (`Wrapping<T>`): identical to their normal integer
///   variants, apart from the wrapper type.
///
/// The `Standard` distribution also supports generation of the following
/// compound types where all component types are supported:
///
/// * Tuples (up to 12 elements): each element is generated sequentially.
/// * Arrays (up to 32 elements): each element is generated sequentially;
/// see also [`Rng::fill`] which supports arbitrary array length for integer
/// and float types and tends to be faster for `u32` and smaller types.
/// When using `rustc` ≥ 1.51, enable the `min_const_gen` feature to support
/// arrays larger than 32 elements.
/// Note that [`Rng::fill`] and `Standard`'s array support are *not* equivalent:
/// the former is optimised for integer types (using fewer RNG calls for
/// element types smaller than the RNG word size), while the latter supports
/// any element type supported by `Standard`.
/// * `Option<T>` first generates a `bool`, and if true generates and returns
/// `Some(value)` where `value: T`, otherwise returning `None`.
///
/// ## Custom implementations
///
/// The [`Standard`] distribution may be implemented for user types as follows:
///
/// ```
/// # #![allow(dead_code)]
/// use rand::Rng;
/// use rand::distributions::{Distribution, Standard};
///
/// struct MyF32 {
/// x: f32,
/// }
///
/// impl Distribution<MyF32> for Standard {
/// fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> MyF32 {
/// MyF32 { x: rng.gen() }
/// }
/// }
/// ```
///
/// ## Example usage
/// ```
/// use rand::prelude::*;
/// use rand::distributions::Standard;
///
/// let val: f32 = StdRng::from_entropy().sample(Standard);
/// println!("f32 from [0, 1): {}", val);
/// ```
///
/// # Floating point implementation
/// The floating point implementations for `Standard` generate a random value in
/// the half-open interval `[0, 1)`, i.e. including 0 but not 1.
///
/// All values that can be generated are of the form `n * ε/2`. For `f32`
/// the 24 most significant random bits of a `u32` are used and for `f64` the
/// 53 most significant bits of a `u64` are used. The conversion uses the
/// multiplicative method: `(rng.gen::<$uty>() >> N) as $ty * (ε/2)`.
///
/// See also: [`Open01`] which samples from `(0, 1)`, [`OpenClosed01`] which
/// samples from `(0, 1]` and `Rng::gen_range(0..1)` which also samples from
/// `[0, 1)`. Note that `Open01` uses transmute-based methods which yield 1 bit
/// less precision but may perform faster on some architectures (on modern Intel
/// CPUs all methods have approximately equal performance).
///
/// [`Uniform`]: uniform::Uniform
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Standard;

365
vendor/rand/src/distributions/other.rs vendored Normal file

@@ -0,0 +1,365 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The implementations of the `Standard` distribution for other built-in types.
use core::char;
use core::num::Wrapping;
#[cfg(feature = "alloc")]
use alloc::string::String;
use crate::distributions::{Distribution, Standard, Uniform};
#[cfg(feature = "alloc")]
use crate::distributions::DistString;
use crate::Rng;
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
#[cfg(feature = "min_const_gen")]
use core::mem::{self, MaybeUninit};
// ----- Sampling distributions -----
/// Sample a `u8`, uniformly distributed over ASCII letters and numbers:
/// a-z, A-Z and 0-9.
///
/// # Example
///
/// ```
/// use rand::{Rng, thread_rng};
/// use rand::distributions::Alphanumeric;
///
/// let mut rng = thread_rng();
/// let chars: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
/// println!("Random chars: {}", chars);
/// ```
///
/// The [`DistString`] trait provides an easier method of generating
/// a random `String`, and offers more efficient allocation:
/// ```
/// use rand::distributions::{Alphanumeric, DistString};
/// let string = Alphanumeric.sample_string(&mut rand::thread_rng(), 16);
/// println!("Random string: {}", string);
/// ```
///
/// # Passwords
///
/// Users sometimes ask whether it is safe to use a string of random characters
/// as a password. In principle, all RNGs in Rand implementing `CryptoRng` are
/// suitable as a source of randomness for generating passwords (if they are
/// properly seeded), but it is more conservative to only use randomness
/// directly from the operating system via the `getrandom` crate, or the
/// corresponding bindings of a crypto library.
///
/// When generating passwords or keys, it is important to consider the threat
/// model and in some cases the memorability of the password. This is out of
/// scope of the Rand project, and therefore we defer to the following
/// references:
///
/// - [Wikipedia article on Password Strength](https://en.wikipedia.org/wiki/Password_strength)
/// - [Diceware for generating memorable passwords](https://en.wikipedia.org/wiki/Diceware)
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Alphanumeric;
// ----- Implementations of distributions -----
impl Distribution<char> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
// A valid `char` is either in the interval `[0, 0xD800)` or
// `(0xDFFF, 0x11_0000)`. All `char`s must therefore be in
// `[0, 0x11_0000)` but not in the "gap" `[0xD800, 0xDFFF]` which is
// reserved for surrogates. This is the size of that gap.
const GAP_SIZE: u32 = 0xDFFF - 0xD800 + 1;
// Uniform::new(0, 0x11_0000 - GAP_SIZE) can also be used but it
// seemed slower.
let range = Uniform::new(GAP_SIZE, 0x11_0000);
let mut n = range.sample(rng);
if n <= 0xDFFF {
n -= GAP_SIZE;
}
unsafe { char::from_u32_unchecked(n) }
}
}
/// Note: the `String` is potentially left with excess capacity; optionally the
/// user may call `string.shrink_to_fit()` afterwards.
#[cfg(feature = "alloc")]
impl DistString for Standard {
fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, s: &mut String, len: usize) {
// A char is encoded with at most four bytes, thus this reservation is
// guaranteed to be sufficient. We do not shrink_to_fit afterwards so
// that repeated usage on the same `String` buffer does not reallocate.
s.reserve(4 * len);
s.extend(Distribution::<char>::sample_iter(self, rng).take(len));
}
}
impl Distribution<u8> for Alphanumeric {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u8 {
const RANGE: u32 = 26 + 26 + 10;
const GEN_ASCII_STR_CHARSET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789";
// We can pick from 62 characters. This is so close to a power of 2, 64,
// that we can do better than `Uniform`. Use a simple bitshift and
// rejection sampling. We do not use a bitmask, because for small RNGs
// the most significant bits are usually of higher quality.
loop {
let var = rng.next_u32() >> (32 - 6);
if var < RANGE {
return GEN_ASCII_STR_CHARSET[var as usize];
}
}
}
}
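// A minimal sketch of the rejection step above (illustration only; `next` is a
// hypothetical source of raw `u32` values):
//
//     fn alnum_index(mut next: impl FnMut() -> u32) -> usize {
//         loop {
//             let var = next() >> (32 - 6); // top 6 bits: a value in 0..64
//             if var < 62 {
//                 return var as usize; // uniform over the 62 characters
//             }
//             // otherwise reject and retry
//         }
//     }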
#[cfg(feature = "alloc")]
impl DistString for Alphanumeric {
fn append_string<R: Rng + ?Sized>(&self, rng: &mut R, string: &mut String, len: usize) {
unsafe {
let v = string.as_mut_vec();
v.extend(self.sample_iter(rng).take(len));
}
}
}
impl Distribution<bool> for Standard {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> bool {
        // We can compare against an arbitrary bit of a `u32` to get a `bool`.
        // Because the least significant bits of a lower-quality RNG can have
        // simple patterns, we compare against the most significant bit. This
        // is most easily done using a sign test.
(rng.next_u32() as i32) < 0
}
}
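// An illustrative check of the sign test above (comment only): casting to
// `i32` makes the comparison equivalent to testing the most significant bit.
//
//     assert!((0x8000_0000u32 as i32) < 0);  // MSB set  -> true
//     assert!((0x7FFF_FFFFu32 as i32) >= 0); // MSB clear -> false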
macro_rules! tuple_impl {
// use variables to indicate the arity of the tuple
($($tyvar:ident),* ) => {
// the trailing commas are for the 1 tuple
impl< $( $tyvar ),* >
Distribution<( $( $tyvar ),* , )>
for Standard
where $( Standard: Distribution<$tyvar> ),*
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> ( $( $tyvar ),* , ) {
(
// use the $tyvar's to get the appropriate number of
// repeats (they're not actually needed)
$(
_rng.gen::<$tyvar>()
),*
,
)
}
}
}
}
impl Distribution<()> for Standard {
#[allow(clippy::unused_unit)]
#[inline]
fn sample<R: Rng + ?Sized>(&self, _: &mut R) -> () {
()
}
}
tuple_impl! {A}
tuple_impl! {A, B}
tuple_impl! {A, B, C}
tuple_impl! {A, B, C, D}
tuple_impl! {A, B, C, D, E}
tuple_impl! {A, B, C, D, E, F}
tuple_impl! {A, B, C, D, E, F, G}
tuple_impl! {A, B, C, D, E, F, G, H}
tuple_impl! {A, B, C, D, E, F, G, H, I}
tuple_impl! {A, B, C, D, E, F, G, H, I, J}
tuple_impl! {A, B, C, D, E, F, G, H, I, J, K}
tuple_impl! {A, B, C, D, E, F, G, H, I, J, K, L}
#[cfg(feature = "min_const_gen")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "min_const_gen")))]
impl<T, const N: usize> Distribution<[T; N]> for Standard
where Standard: Distribution<T>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; N] {
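        // An array of `MaybeUninit<T>` requires no initialization itself, so
        // the outer `assume_init` is sound here; every element is written
        // below before `transmute_copy` turns the buffer into `[T; N]`.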
let mut buff: [MaybeUninit<T>; N] = unsafe { MaybeUninit::uninit().assume_init() };
for elem in &mut buff {
*elem = MaybeUninit::new(_rng.gen());
}
unsafe { mem::transmute_copy::<_, _>(&buff) }
}
}
#[cfg(not(feature = "min_const_gen"))]
macro_rules! array_impl {
// recursive, given at least one type parameter:
{$n:expr, $t:ident, $($ts:ident,)*} => {
array_impl!{($n - 1), $($ts,)*}
impl<T> Distribution<[T; $n]> for Standard where Standard: Distribution<T> {
#[inline]
fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] {
[_rng.gen::<$t>(), $(_rng.gen::<$ts>()),*]
}
}
};
// empty case:
{$n:expr,} => {
impl<T> Distribution<[T; $n]> for Standard {
fn sample<R: Rng + ?Sized>(&self, _rng: &mut R) -> [T; $n] { [] }
}
};
}
#[cfg(not(feature = "min_const_gen"))]
array_impl! {32, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T,}
impl<T> Distribution<Option<T>> for Standard
where Standard: Distribution<T>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Option<T> {
// UFCS is needed here: https://github.com/rust-lang/rust/issues/24066
if rng.gen::<bool>() {
Some(rng.gen())
} else {
None
}
}
}
impl<T> Distribution<Wrapping<T>> for Standard
where Standard: Distribution<T>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Wrapping<T> {
Wrapping(rng.gen())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::RngCore;
#[cfg(feature = "alloc")] use alloc::string::String;
#[test]
fn test_misc() {
let rng: &mut dyn RngCore = &mut crate::test::rng(820);
rng.sample::<char, _>(Standard);
rng.sample::<bool, _>(Standard);
}
#[cfg(feature = "alloc")]
#[test]
fn test_chars() {
use core::iter;
let mut rng = crate::test::rng(805);
// Test by generating a relatively large number of chars, so we also
// take the rejection sampling path.
let word: String = iter::repeat(())
.map(|()| rng.gen::<char>())
.take(1000)
.collect();
assert!(!word.is_empty());
}
#[test]
fn test_alphanumeric() {
let mut rng = crate::test::rng(806);
// Test by generating a relatively large number of chars, so we also
// take the rejection sampling path.
let mut incorrect = false;
for _ in 0..100 {
let c: char = rng.sample(Alphanumeric).into();
incorrect |= !(('0'..='9').contains(&c) ||
('A'..='Z').contains(&c) ||
('a'..='z').contains(&c) );
}
assert!(!incorrect);
}
#[test]
fn value_stability() {
fn test_samples<T: Copy + core::fmt::Debug + PartialEq, D: Distribution<T>>(
distr: &D, zero: T, expected: &[T],
) {
let mut rng = crate::test::rng(807);
let mut buf = [zero; 5];
for x in &mut buf {
*x = rng.sample(&distr);
}
assert_eq!(&buf, expected);
}
test_samples(&Standard, 'a', &[
'\u{8cdac}',
'\u{a346a}',
'\u{80120}',
'\u{ed692}',
'\u{35888}',
]);
test_samples(&Alphanumeric, 0, &[104, 109, 101, 51, 77]);
test_samples(&Standard, false, &[true, true, false, true, false]);
test_samples(&Standard, None as Option<bool>, &[
Some(true),
None,
Some(false),
None,
Some(false),
]);
test_samples(&Standard, Wrapping(0i32), &[
Wrapping(-2074640887),
Wrapping(-1719949321),
Wrapping(2018088303),
Wrapping(-547181756),
Wrapping(838957336),
]);
// We test only sub-sets of tuple and array impls
test_samples(&Standard, (), &[(), (), (), (), ()]);
test_samples(&Standard, (false,), &[
(true,),
(true,),
(false,),
(true,),
(false,),
]);
test_samples(&Standard, (false, false), &[
(true, true),
(false, true),
(false, false),
(true, false),
(false, false),
]);
test_samples(&Standard, [0u8; 0], &[[], [], [], [], []]);
test_samples(&Standard, [0u8; 3], &[
[9, 247, 111],
[68, 24, 13],
[174, 19, 194],
[172, 69, 213],
[149, 207, 29],
]);
}
}

117
vendor/rand/src/distributions/slice.rs vendored Normal file

@@ -0,0 +1,117 @@
// Copyright 2021 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::distributions::{Distribution, Uniform};
/// A distribution to sample items uniformly from a slice.
///
/// [`Slice::new`] constructs a distribution referencing a slice and uniformly
/// samples references from the items in the slice. It may do extra work up
/// front to make sampling of multiple values faster; if only one sample from
/// the slice is required, [`SliceRandom::choose`] can be more efficient.
///
/// Steps are taken to avoid bias which might be present in naive
/// implementations; for example `slice[rng.gen() % slice.len()]` samples from
/// the slice, but may be more likely to select numbers in the low range than
/// other values.
///
/// This distribution samples with replacement; each sample is independent.
/// Sampling without replacement requires state to be retained, and therefore
/// cannot be handled by a distribution; you should instead consider methods
/// on [`SliceRandom`], such as [`SliceRandom::choose_multiple`].
///
/// # Example
///
/// ```
/// use rand::Rng;
/// use rand::distributions::Slice;
///
/// let vowels = ['a', 'e', 'i', 'o', 'u'];
/// let vowels_dist = Slice::new(&vowels).unwrap();
/// let rng = rand::thread_rng();
///
/// // build a string of 10 vowels
/// let vowel_string: String = rng
/// .sample_iter(&vowels_dist)
/// .take(10)
/// .collect();
///
/// println!("{}", vowel_string);
/// assert_eq!(vowel_string.len(), 10);
/// assert!(vowel_string.chars().all(|c| vowels.contains(&c)));
/// ```
///
/// For a single sample, [`SliceRandom::choose`][crate::seq::SliceRandom::choose]
/// may be preferred:
///
/// ```
/// use rand::seq::SliceRandom;
///
/// let vowels = ['a', 'e', 'i', 'o', 'u'];
/// let mut rng = rand::thread_rng();
///
/// println!("{}", vowels.choose(&mut rng).unwrap())
/// ```
///
/// [`SliceRandom`]: crate::seq::SliceRandom
/// [`SliceRandom::choose`]: crate::seq::SliceRandom::choose
/// [`SliceRandom::choose_multiple`]: crate::seq::SliceRandom::choose_multiple
#[derive(Debug, Clone, Copy)]
pub struct Slice<'a, T> {
slice: &'a [T],
range: Uniform<usize>,
}
impl<'a, T> Slice<'a, T> {
/// Create a new `Slice` instance which samples uniformly from the slice.
/// Returns `Err` if the slice is empty.
pub fn new(slice: &'a [T]) -> Result<Self, EmptySlice> {
match slice.len() {
0 => Err(EmptySlice),
len => Ok(Self {
slice,
range: Uniform::new(0, len),
}),
}
}
}
impl<'a, T> Distribution<&'a T> for Slice<'a, T> {
fn sample<R: crate::Rng + ?Sized>(&self, rng: &mut R) -> &'a T {
let idx = self.range.sample(rng);
debug_assert!(
idx < self.slice.len(),
"Uniform::new(0, {}) somehow returned {}",
self.slice.len(),
idx
);
// Safety: at construction time, it was ensured that the slice was
// non-empty, and that the `Uniform` range produces values in range
// for the slice
unsafe { self.slice.get_unchecked(idx) }
}
}
/// Error type indicating that a [`Slice`] distribution was improperly
/// constructed with an empty slice.
#[derive(Debug, Clone, Copy)]
pub struct EmptySlice;
impl core::fmt::Display for EmptySlice {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(
f,
"Tried to create a `distributions::Slice` with an empty slice"
)
}
}
#[cfg(feature = "std")]
impl std::error::Error for EmptySlice {}

1658
vendor/rand/src/distributions/uniform.rs vendored Normal file

File diff suppressed because it is too large

429
vendor/rand/src/distributions/utils.rs vendored Normal file

@@ -0,0 +1,429 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Math helper functions
#[cfg(feature = "simd_support")] use packed_simd::*;
pub(crate) trait WideningMultiply<RHS = Self> {
type Output;
fn wmul(self, x: RHS) -> Self::Output;
}
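// An illustrative example (comment only): `wmul` returns the high and low
// halves of the double-width product. For `u8`:
//
//     // 200 * 3 = 600 = 2 * 256 + 88, so:
//     assert_eq!(200u8.wmul(3), (2, 88));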
macro_rules! wmul_impl {
($ty:ty, $wide:ty, $shift:expr) => {
impl WideningMultiply for $ty {
type Output = ($ty, $ty);
#[inline(always)]
fn wmul(self, x: $ty) -> Self::Output {
let tmp = (self as $wide) * (x as $wide);
((tmp >> $shift) as $ty, tmp as $ty)
}
}
};
// simd bulk implementation
($(($ty:ident, $wide:ident),)+, $shift:expr) => {
$(
impl WideningMultiply for $ty {
type Output = ($ty, $ty);
#[inline(always)]
fn wmul(self, x: $ty) -> Self::Output {
// For supported vectors, this should compile to a couple
// supported multiply & swizzle instructions (no actual
// casting).
// TODO: optimize
let y: $wide = self.cast();
let x: $wide = x.cast();
let tmp = y * x;
let hi: $ty = (tmp >> $shift).cast();
let lo: $ty = tmp.cast();
(hi, lo)
}
}
)+
};
}
wmul_impl! { u8, u16, 8 }
wmul_impl! { u16, u32, 16 }
wmul_impl! { u32, u64, 32 }
wmul_impl! { u64, u128, 64 }
// This code is a translation of the __mulddi3 function in LLVM's
// compiler-rt. It is an optimised variant of the common method
// `(a + b) * (c + d) = ac + ad + bc + bd`.
//
// For some reason LLVM can optimise the C version very well, but
// keeps shuffling registers in this Rust translation.
macro_rules! wmul_impl_large {
($ty:ty, $half:expr) => {
impl WideningMultiply for $ty {
type Output = ($ty, $ty);
#[inline(always)]
fn wmul(self, b: $ty) -> Self::Output {
const LOWER_MASK: $ty = !0 >> $half;
let mut low = (self & LOWER_MASK).wrapping_mul(b & LOWER_MASK);
let mut t = low >> $half;
low &= LOWER_MASK;
t += (self >> $half).wrapping_mul(b & LOWER_MASK);
low += (t & LOWER_MASK) << $half;
let mut high = t >> $half;
t = low >> $half;
low &= LOWER_MASK;
t += (b >> $half).wrapping_mul(self & LOWER_MASK);
low += (t & LOWER_MASK) << $half;
high += t >> $half;
high += (self >> $half).wrapping_mul(b >> $half);
(high, low)
}
}
};
// simd bulk implementation
(($($ty:ty,)+) $scalar:ty, $half:expr) => {
$(
impl WideningMultiply for $ty {
type Output = ($ty, $ty);
#[inline(always)]
fn wmul(self, b: $ty) -> Self::Output {
// needs wrapping multiplication
const LOWER_MASK: $scalar = !0 >> $half;
let mut low = (self & LOWER_MASK) * (b & LOWER_MASK);
let mut t = low >> $half;
low &= LOWER_MASK;
t += (self >> $half) * (b & LOWER_MASK);
low += (t & LOWER_MASK) << $half;
let mut high = t >> $half;
t = low >> $half;
low &= LOWER_MASK;
t += (b >> $half) * (self & LOWER_MASK);
low += (t & LOWER_MASK) << $half;
high += t >> $half;
high += (self >> $half) * (b >> $half);
(high, low)
}
}
)+
};
}
wmul_impl_large! { u128, 64 }
macro_rules! wmul_impl_usize {
($ty:ty) => {
impl WideningMultiply for usize {
type Output = (usize, usize);
#[inline(always)]
fn wmul(self, x: usize) -> Self::Output {
let (high, low) = (self as $ty).wmul(x as $ty);
(high as usize, low as usize)
}
}
};
}
#[cfg(target_pointer_width = "16")]
wmul_impl_usize! { u16 }
#[cfg(target_pointer_width = "32")]
wmul_impl_usize! { u32 }
#[cfg(target_pointer_width = "64")]
wmul_impl_usize! { u64 }
#[cfg(feature = "simd_support")]
mod simd_wmul {
use super::*;
#[cfg(target_arch = "x86")] use core::arch::x86::*;
#[cfg(target_arch = "x86_64")] use core::arch::x86_64::*;
wmul_impl! {
(u8x2, u16x2),
(u8x4, u16x4),
(u8x8, u16x8),
(u8x16, u16x16),
(u8x32, u16x32),,
8
}
wmul_impl! { (u16x2, u32x2),, 16 }
wmul_impl! { (u16x4, u32x4),, 16 }
#[cfg(not(target_feature = "sse2"))]
wmul_impl! { (u16x8, u32x8),, 16 }
#[cfg(not(target_feature = "avx2"))]
wmul_impl! { (u16x16, u32x16),, 16 }
// 16-bit lane widths allow use of the x86 `mulhi` instructions, which
// means `wmul` can be implemented with only two instructions.
#[allow(unused_macros)]
macro_rules! wmul_impl_16 {
($ty:ident, $intrinsic:ident, $mulhi:ident, $mullo:ident) => {
impl WideningMultiply for $ty {
type Output = ($ty, $ty);
#[inline(always)]
fn wmul(self, x: $ty) -> Self::Output {
let b = $intrinsic::from_bits(x);
let a = $intrinsic::from_bits(self);
let hi = $ty::from_bits(unsafe { $mulhi(a, b) });
let lo = $ty::from_bits(unsafe { $mullo(a, b) });
(hi, lo)
}
}
};
}
#[cfg(target_feature = "sse2")]
wmul_impl_16! { u16x8, __m128i, _mm_mulhi_epu16, _mm_mullo_epi16 }
#[cfg(target_feature = "avx2")]
wmul_impl_16! { u16x16, __m256i, _mm256_mulhi_epu16, _mm256_mullo_epi16 }
// FIXME: there are no `__m512i` types in stdsimd yet, so `wmul::<u16x32>`
// cannot use the same implementation.
wmul_impl! {
(u32x2, u64x2),
(u32x4, u64x4),
(u32x8, u64x8),,
32
}
// TODO: optimize, this seems to seriously slow things down
wmul_impl_large! { (u8x64,) u8, 4 }
wmul_impl_large! { (u16x32,) u16, 8 }
wmul_impl_large! { (u32x16,) u32, 16 }
wmul_impl_large! { (u64x2, u64x4, u64x8,) u64, 32 }
}
/// Helper trait when dealing with scalar and SIMD floating point types.
pub(crate) trait FloatSIMDUtils {
// `PartialOrd` for vectors compares lexicographically. We want to compare all
// the individual SIMD lanes instead, and get the combined result over all
// lanes. This is possible using something like `a.lt(b).all()`, but we
// implement it as a trait so we can write the same code for `f32` and `f64`.
// Only the comparison functions we need are implemented.
fn all_lt(self, other: Self) -> bool;
fn all_le(self, other: Self) -> bool;
fn all_finite(self) -> bool;
type Mask;
fn finite_mask(self) -> Self::Mask;
fn gt_mask(self, other: Self) -> Self::Mask;
fn ge_mask(self, other: Self) -> Self::Mask;
// Decrease all lanes where the mask is `true` to the next lower value
// representable by the floating-point type. At least one of the lanes
// must be set.
fn decrease_masked(self, mask: Self::Mask) -> Self;
// Convert from int value. Conversion is done while retaining the numerical
// value, not by retaining the binary representation.
type UInt;
fn cast_from_int(i: Self::UInt) -> Self;
}
/// Implement functions available in std builds but missing from core primitives
#[cfg(not(std))]
// False positive: We are following `std` here.
#[allow(clippy::wrong_self_convention)]
pub(crate) trait Float: Sized {
fn is_nan(self) -> bool;
fn is_infinite(self) -> bool;
fn is_finite(self) -> bool;
}
/// Implement functions on f32/f64 to give them APIs similar to SIMD types
pub(crate) trait FloatAsSIMD: Sized {
#[inline(always)]
fn lanes() -> usize {
1
}
#[inline(always)]
fn splat(scalar: Self) -> Self {
scalar
}
#[inline(always)]
fn extract(self, index: usize) -> Self {
debug_assert_eq!(index, 0);
self
}
#[inline(always)]
fn replace(self, index: usize, new_value: Self) -> Self {
debug_assert_eq!(index, 0);
new_value
}
}
pub(crate) trait BoolAsSIMD: Sized {
fn any(self) -> bool;
fn all(self) -> bool;
fn none(self) -> bool;
}
impl BoolAsSIMD for bool {
#[inline(always)]
fn any(self) -> bool {
self
}
#[inline(always)]
fn all(self) -> bool {
self
}
#[inline(always)]
fn none(self) -> bool {
!self
}
}
macro_rules! scalar_float_impl {
($ty:ident, $uty:ident) => {
#[cfg(not(std))]
impl Float for $ty {
#[inline]
fn is_nan(self) -> bool {
self != self
}
#[inline]
fn is_infinite(self) -> bool {
self == ::core::$ty::INFINITY || self == ::core::$ty::NEG_INFINITY
}
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
}
impl FloatSIMDUtils for $ty {
type Mask = bool;
type UInt = $uty;
#[inline(always)]
fn all_lt(self, other: Self) -> bool {
self < other
}
#[inline(always)]
fn all_le(self, other: Self) -> bool {
self <= other
}
#[inline(always)]
fn all_finite(self) -> bool {
self.is_finite()
}
#[inline(always)]
fn finite_mask(self) -> Self::Mask {
self.is_finite()
}
#[inline(always)]
fn gt_mask(self, other: Self) -> Self::Mask {
self > other
}
#[inline(always)]
fn ge_mask(self, other: Self) -> Self::Mask {
self >= other
}
#[inline(always)]
fn decrease_masked(self, mask: Self::Mask) -> Self {
debug_assert!(mask, "At least one lane must be set");
<$ty>::from_bits(self.to_bits() - 1)
}
#[inline]
fn cast_from_int(i: Self::UInt) -> Self {
i as $ty
}
}
impl FloatAsSIMD for $ty {}
};
}
scalar_float_impl!(f32, u32);
scalar_float_impl!(f64, u64);
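// Editor's sketch (not part of the upstream crate): for scalars,
// `decrease_masked` steps to the next lower representable float by subtracting
// one from the bit pattern; e.g. starting from 1.0_f64 it yields the largest
// float strictly below 1.0. The helper name is hypothetical.
#[allow(dead_code)]
fn next_below_sketch(x: f64) -> f64 {
    debug_assert!(x.is_finite() && x > 0.0);
    f64::from_bits(x.to_bits() - 1)
}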
#[cfg(feature = "simd_support")]
macro_rules! simd_impl {
($ty:ident, $f_scalar:ident, $mty:ident, $uty:ident) => {
impl FloatSIMDUtils for $ty {
type Mask = $mty;
type UInt = $uty;
#[inline(always)]
fn all_lt(self, other: Self) -> bool {
self.lt(other).all()
}
#[inline(always)]
fn all_le(self, other: Self) -> bool {
self.le(other).all()
}
#[inline(always)]
fn all_finite(self) -> bool {
self.finite_mask().all()
}
#[inline(always)]
fn finite_mask(self) -> Self::Mask {
// This can possibly be done faster by checking bit patterns
let neg_inf = $ty::splat(::core::$f_scalar::NEG_INFINITY);
let pos_inf = $ty::splat(::core::$f_scalar::INFINITY);
self.gt(neg_inf) & self.lt(pos_inf)
}
#[inline(always)]
fn gt_mask(self, other: Self) -> Self::Mask {
self.gt(other)
}
#[inline(always)]
fn ge_mask(self, other: Self) -> Self::Mask {
self.ge(other)
}
#[inline(always)]
fn decrease_masked(self, mask: Self::Mask) -> Self {
// Casting a mask into ints will produce all bits set for
// true, and 0 for false. Adding that to the binary
// representation of a float means subtracting one from
// the binary representation, resulting in the next lower
// value representable by $ty. This works even when the
// current value is infinity.
debug_assert!(mask.any(), "At least one lane must be set");
<$ty>::from_bits(<$uty>::from_bits(self) + <$uty>::from_bits(mask))
}
#[inline]
fn cast_from_int(i: Self::UInt) -> Self {
i.cast()
}
}
};
}
#[cfg(feature="simd_support")] simd_impl! { f32x2, f32, m32x2, u32x2 }
#[cfg(feature="simd_support")] simd_impl! { f32x4, f32, m32x4, u32x4 }
#[cfg(feature="simd_support")] simd_impl! { f32x8, f32, m32x8, u32x8 }
#[cfg(feature="simd_support")] simd_impl! { f32x16, f32, m32x16, u32x16 }
#[cfg(feature="simd_support")] simd_impl! { f64x2, f64, m64x2, u64x2 }
#[cfg(feature="simd_support")] simd_impl! { f64x4, f64, m64x4, u64x4 }
#[cfg(feature="simd_support")] simd_impl! { f64x8, f64, m64x8, u64x8 }


@@ -0,0 +1,47 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Weighted index sampling
//!
//! This module is deprecated. Use [`crate::distributions::WeightedIndex`] and
//! [`crate::distributions::WeightedError`] instead.
pub use super::{WeightedIndex, WeightedError};
#[allow(missing_docs)]
#[deprecated(since = "0.8.0", note = "moved to rand_distr crate")]
pub mod alias_method {
// This module exists to provide a deprecation warning which minimises
// compile errors, but still fails to compile if ever used.
use core::marker::PhantomData;
use alloc::vec::Vec;
use super::WeightedError;
#[derive(Debug)]
pub struct WeightedIndex<W: Weight> {
_phantom: PhantomData<W>,
}
impl<W: Weight> WeightedIndex<W> {
pub fn new(_weights: Vec<W>) -> Result<Self, WeightedError> {
Err(WeightedError::NoItem)
}
}
pub trait Weight {}
macro_rules! impl_weight {
() => {};
($T:ident, $($more:ident,)*) => {
impl Weight for $T {}
impl_weight!($($more,)*);
};
}
impl_weight!(f64, f32,);
impl_weight!(u8, u16, u32, u64, usize,);
impl_weight!(i8, i16, i32, i64, isize,);
impl_weight!(u128, i128,);
}


@@ -0,0 +1,458 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Weighted index sampling
use crate::distributions::uniform::{SampleBorrow, SampleUniform, UniformSampler};
use crate::distributions::Distribution;
use crate::Rng;
use core::cmp::PartialOrd;
use core::fmt;
// Note that this whole module is only imported if feature="alloc" is enabled.
use alloc::vec::Vec;
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// A distribution using weighted sampling of discrete items
///
/// Sampling a `WeightedIndex` distribution returns the index of a randomly
/// selected element from the iterator used when the `WeightedIndex` was
/// created. The chance of a given element being picked is proportional to the
/// value of the element. The weights can use any type `X` for which an
/// implementation of [`Uniform<X>`] exists.
///
/// # Performance
///
/// Time complexity of sampling from `WeightedIndex` is `O(log N)` where
/// `N` is the number of weights. As an alternative,
/// [`rand_distr::weighted_alias`](https://docs.rs/rand_distr/*/rand_distr/weighted_alias/index.html)
/// supports `O(1)` sampling, but with much higher initialisation cost.
///
/// A `WeightedIndex<X>` contains a `Vec<X>` and a [`Uniform<X>`] and so its
/// size is the sum of the size of those objects, possibly plus some alignment.
///
/// Creating a `WeightedIndex<X>` will allocate enough space to hold `N - 1`
/// weights of type `X`, where `N` is the number of weights. However, since
/// `Vec` doesn't guarantee a particular growth strategy, additional memory
/// might be allocated but not used. Since the `WeightedIndex` object also
/// contains a [`Uniform<X>`], this might cause additional allocations, though
/// for primitive
/// types, [`Uniform<X>`] doesn't allocate any memory.
///
/// Sampling from `WeightedIndex` will result in a single call to
/// `Uniform<X>::sample` (method of the [`Distribution`] trait), which typically
/// will request a single value from the underlying [`RngCore`], though the
/// exact number depends on the implementation of `Uniform<X>::sample`.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
/// use rand::distributions::WeightedIndex;
///
/// let choices = ['a', 'b', 'c'];
/// let weights = [2, 1, 1];
/// let dist = WeightedIndex::new(&weights).unwrap();
/// let mut rng = thread_rng();
/// for _ in 0..100 {
/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
/// println!("{}", choices[dist.sample(&mut rng)]);
/// }
///
/// let items = [('a', 0), ('b', 3), ('c', 7)];
/// let dist2 = WeightedIndex::new(items.iter().map(|item| item.1)).unwrap();
/// for _ in 0..100 {
/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c'
/// println!("{}", items[dist2.sample(&mut rng)].0);
/// }
/// ```
///
/// [`Uniform<X>`]: crate::distributions::Uniform
/// [`RngCore`]: crate::RngCore
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub struct WeightedIndex<X: SampleUniform + PartialOrd> {
cumulative_weights: Vec<X>,
total_weight: X,
weight_distribution: X::Sampler,
}
impl<X: SampleUniform + PartialOrd> WeightedIndex<X> {
/// Creates a new a `WeightedIndex` [`Distribution`] using the values
/// in `weights`. The weights can use any type `X` for which an
/// implementation of [`Uniform<X>`] exists.
///
/// Returns an error if the iterator is empty, if any weight is `< 0`, or
/// if its total value is 0.
///
/// [`Uniform<X>`]: crate::distributions::uniform::Uniform
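/// # Example
///
/// A minimal sketch of the error cases described above (editor's addition,
/// not part of the upstream documentation):
///
/// ```
/// use rand::distributions::{WeightedIndex, WeightedError};
///
/// assert_eq!(WeightedIndex::new(&[10u32][0..0]).unwrap_err(), WeightedError::NoItem);
/// assert_eq!(WeightedIndex::new(&[0u32, 0]).unwrap_err(), WeightedError::AllWeightsZero);
/// assert_eq!(WeightedIndex::new(&[1i32, -1]).unwrap_err(), WeightedError::InvalidWeight);
/// ```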
pub fn new<I>(weights: I) -> Result<WeightedIndex<X>, WeightedError>
where
I: IntoIterator,
I::Item: SampleBorrow<X>,
X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default,
{
let mut iter = weights.into_iter();
let mut total_weight: X = iter.next().ok_or(WeightedError::NoItem)?.borrow().clone();
let zero = <X as Default>::default();
if !(total_weight >= zero) {
return Err(WeightedError::InvalidWeight);
}
let mut weights = Vec::<X>::with_capacity(iter.size_hint().0);
for w in iter {
// Note that `!(w >= x)` is not equivalent to `w < x` for partially
// ordered types, because a NaN compares as neither less than, equal to,
// nor greater than any value.
if !(w.borrow() >= &zero) {
return Err(WeightedError::InvalidWeight);
}
weights.push(total_weight.clone());
total_weight += w.borrow();
}
if total_weight == zero {
return Err(WeightedError::AllWeightsZero);
}
let distr = X::Sampler::new(zero, total_weight.clone());
Ok(WeightedIndex {
cumulative_weights: weights,
total_weight,
weight_distribution: distr,
})
}
/// Update a subset of weights, without changing the number of weights.
///
/// `new_weights` must be sorted by the index.
///
/// Using this method instead of `new` might be more efficient if only a small number of
/// weights is modified. No allocations are performed, unless the weight type `X` uses
/// allocation internally.
///
/// In case of error, `self` is not modified.
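/// # Example
///
/// A minimal usage sketch (editor's addition, not part of the upstream
/// documentation):
///
/// ```
/// use rand::distributions::WeightedIndex;
///
/// let mut dist = WeightedIndex::new(&[1u32, 2, 3]).unwrap();
/// // Indices must be given in increasing order.
/// dist.update_weights(&[(0, &5u32), (2, &1u32)]).unwrap();
/// ```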
pub fn update_weights(&mut self, new_weights: &[(usize, &X)]) -> Result<(), WeightedError>
where X: for<'a> ::core::ops::AddAssign<&'a X>
+ for<'a> ::core::ops::SubAssign<&'a X>
+ Clone
+ Default {
if new_weights.is_empty() {
return Ok(());
}
let zero = <X as Default>::default();
let mut total_weight = self.total_weight.clone();
// Check for errors first, so we don't modify `self` in case something
// goes wrong.
let mut prev_i = None;
for &(i, w) in new_weights {
if let Some(old_i) = prev_i {
if old_i >= i {
return Err(WeightedError::InvalidWeight);
}
}
if !(*w >= zero) {
return Err(WeightedError::InvalidWeight);
}
if i > self.cumulative_weights.len() {
return Err(WeightedError::TooMany);
}
let mut old_w = if i < self.cumulative_weights.len() {
self.cumulative_weights[i].clone()
} else {
self.total_weight.clone()
};
if i > 0 {
old_w -= &self.cumulative_weights[i - 1];
}
total_weight -= &old_w;
total_weight += w;
prev_i = Some(i);
}
if total_weight <= zero {
return Err(WeightedError::AllWeightsZero);
}
// Update the weights. Because we checked all the preconditions in the
// previous loop, this should never panic.
let mut iter = new_weights.iter();
let mut prev_weight = zero.clone();
let mut next_new_weight = iter.next();
let &(first_new_index, _) = next_new_weight.unwrap();
let mut cumulative_weight = if first_new_index > 0 {
self.cumulative_weights[first_new_index - 1].clone()
} else {
zero.clone()
};
for i in first_new_index..self.cumulative_weights.len() {
match next_new_weight {
Some(&(j, w)) if i == j => {
cumulative_weight += w;
next_new_weight = iter.next();
}
_ => {
let mut tmp = self.cumulative_weights[i].clone();
tmp -= &prev_weight; // We know this is positive.
cumulative_weight += &tmp;
}
}
prev_weight = cumulative_weight.clone();
core::mem::swap(&mut prev_weight, &mut self.cumulative_weights[i]);
}
self.total_weight = total_weight;
self.weight_distribution = X::Sampler::new(zero, self.total_weight.clone());
Ok(())
}
}
impl<X> Distribution<usize> for WeightedIndex<X>
where X: SampleUniform + PartialOrd
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
use ::core::cmp::Ordering;
let chosen_weight = self.weight_distribution.sample(rng);
// Find the first item which has a weight *higher* than the chosen weight.
self.cumulative_weights
.binary_search_by(|w| {
if *w <= chosen_weight {
Ordering::Less
} else {
Ordering::Greater
}
})
.unwrap_err()
}
}
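// Editor's sketch (not part of the upstream crate): the binary search above
// simply finds the first cumulative weight strictly greater than the drawn
// value. For weights [2, 1, 1] the stored cumulative weights are [2, 3], so a
// draw of 2 maps to index 1. The standalone helper below is hypothetical.
#[allow(dead_code)]
fn cumulative_lookup_sketch(cumulative: &[u32], chosen: u32) -> usize {
    cumulative.partition_point(|&w| w <= chosen)
}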
#[cfg(test)]
mod test {
use super::*;
#[cfg(feature = "serde1")]
#[test]
fn test_weightedindex_serde1() {
let weighted_index = WeightedIndex::new(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).unwrap();
let ser_weighted_index = bincode::serialize(&weighted_index).unwrap();
let de_weighted_index: WeightedIndex<i32> =
bincode::deserialize(&ser_weighted_index).unwrap();
assert_eq!(
de_weighted_index.cumulative_weights,
weighted_index.cumulative_weights
);
assert_eq!(de_weighted_index.total_weight, weighted_index.total_weight);
}
#[test]
fn test_accepting_nan(){
assert_eq!(
WeightedIndex::new(&[core::f32::NAN, 0.5]).unwrap_err(),
WeightedError::InvalidWeight,
);
assert_eq!(
WeightedIndex::new(&[core::f32::NAN]).unwrap_err(),
WeightedError::InvalidWeight,
);
assert_eq!(
WeightedIndex::new(&[0.5, core::f32::NAN]).unwrap_err(),
WeightedError::InvalidWeight,
);
assert_eq!(
WeightedIndex::new(&[0.5, 7.0])
.unwrap()
.update_weights(&[(0, &core::f32::NAN)])
.unwrap_err(),
WeightedError::InvalidWeight,
)
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weightedindex() {
let mut r = crate::test::rng(700);
const N_REPS: u32 = 5000;
let weights = [1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7];
let total_weight = weights.iter().sum::<u32>() as f32;
let verify = |result: [i32; 14]| {
for (i, count) in result.iter().enumerate() {
let exp = (weights[i] * N_REPS) as f32 / total_weight;
let mut err = (*count as f32 - exp).abs();
if err != 0.0 {
err /= exp;
}
assert!(err <= 0.25);
}
};
// WeightedIndex from vec
let mut chosen = [0i32; 14];
let distr = WeightedIndex::new(weights.to_vec()).unwrap();
for _ in 0..N_REPS {
chosen[distr.sample(&mut r)] += 1;
}
verify(chosen);
// WeightedIndex from slice
chosen = [0i32; 14];
let distr = WeightedIndex::new(&weights[..]).unwrap();
for _ in 0..N_REPS {
chosen[distr.sample(&mut r)] += 1;
}
verify(chosen);
// WeightedIndex from iterator
chosen = [0i32; 14];
let distr = WeightedIndex::new(weights.iter()).unwrap();
for _ in 0..N_REPS {
chosen[distr.sample(&mut r)] += 1;
}
verify(chosen);
for _ in 0..5 {
assert_eq!(WeightedIndex::new(&[0, 1]).unwrap().sample(&mut r), 1);
assert_eq!(WeightedIndex::new(&[1, 0]).unwrap().sample(&mut r), 0);
assert_eq!(
WeightedIndex::new(&[0, 0, 0, 0, 10, 0])
.unwrap()
.sample(&mut r),
4
);
}
assert_eq!(
WeightedIndex::new(&[10][0..0]).unwrap_err(),
WeightedError::NoItem
);
assert_eq!(
WeightedIndex::new(&[0]).unwrap_err(),
WeightedError::AllWeightsZero
);
assert_eq!(
WeightedIndex::new(&[10, 20, -1, 30]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedIndex::new(&[-10, 20, 1, 30]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedIndex::new(&[-10]).unwrap_err(),
WeightedError::InvalidWeight
);
}
#[test]
fn test_update_weights() {
let data = [
(
&[10u32, 2, 3, 4][..],
&[(1, &100), (2, &4)][..], // positive change
&[10, 100, 4, 4][..],
),
(
&[1u32, 2, 3, 0, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7][..],
&[(2, &1), (5, &1), (13, &100)][..], // negative change and last element
&[1u32, 2, 1, 0, 5, 1, 7, 1, 2, 3, 4, 5, 6, 100][..],
),
];
for (weights, update, expected_weights) in data.iter() {
let total_weight = weights.iter().sum::<u32>();
let mut distr = WeightedIndex::new(weights.to_vec()).unwrap();
assert_eq!(distr.total_weight, total_weight);
distr.update_weights(update).unwrap();
let expected_total_weight = expected_weights.iter().sum::<u32>();
let expected_distr = WeightedIndex::new(expected_weights.to_vec()).unwrap();
assert_eq!(distr.total_weight, expected_total_weight);
assert_eq!(distr.total_weight, expected_distr.total_weight);
assert_eq!(distr.cumulative_weights, expected_distr.cumulative_weights);
}
}
#[test]
fn value_stability() {
fn test_samples<X: SampleUniform + PartialOrd, I>(
weights: I, buf: &mut [usize], expected: &[usize],
) where
I: IntoIterator,
I::Item: SampleBorrow<X>,
X: for<'a> ::core::ops::AddAssign<&'a X> + Clone + Default,
{
assert_eq!(buf.len(), expected.len());
let distr = WeightedIndex::new(weights).unwrap();
let mut rng = crate::test::rng(701);
for r in buf.iter_mut() {
*r = rng.sample(&distr);
}
assert_eq!(buf, expected);
}
let mut buf = [0; 10];
test_samples(&[1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[
0, 6, 2, 6, 3, 4, 7, 8, 2, 5,
]);
test_samples(&[0.7f32, 0.1, 0.1, 0.1], &mut buf, &[
0, 0, 0, 1, 0, 0, 2, 3, 0, 0,
]);
test_samples(&[1.0f64, 0.999, 0.998, 0.997], &mut buf, &[
2, 2, 1, 3, 2, 1, 3, 3, 2, 1,
]);
}
#[test]
fn weighted_index_distributions_can_be_compared() {
assert_eq!(WeightedIndex::new(&[1, 2]), WeightedIndex::new(&[1, 2]));
}
}
/// Error type returned from `WeightedIndex` operations such as `new` and `update_weights`.
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum WeightedError {
/// The provided weight collection contains no items.
NoItem,
/// A weight is either less than zero, greater than the supported maximum,
/// NaN, or otherwise invalid.
InvalidWeight,
/// All items in the provided weight collection are zero.
AllWeightsZero,
/// Too many weights are provided (length greater than `u32::MAX`)
TooMany,
}
#[cfg(feature = "std")]
impl std::error::Error for WeightedError {}
impl fmt::Display for WeightedError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
WeightedError::NoItem => "No weights provided in distribution",
WeightedError::InvalidWeight => "A weight is invalid in distribution",
WeightedError::AllWeightsZero => "All weights are zero in distribution",
WeightedError::TooMany => "Too many weights (hit u32::MAX) in distribution",
})
}
}

214
vendor/rand/src/lib.rs vendored Normal file

@@ -0,0 +1,214 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Utilities for random number generation
//!
//! Rand provides utilities to generate random numbers, to convert them to
//! useful types and distributions, and some randomness-related algorithms.
//!
//! # Quick Start
//!
//! To get you started quickly, the easiest and highest-level way to get
//! a random value is to use [`random()`]; alternatively you can use
//! [`thread_rng()`]. The [`Rng`] trait provides a useful API on all RNGs, while
//! the [`distributions`] and [`seq`] modules provide further
//! functionality on top of RNGs.
//!
//! ```
//! use rand::prelude::*;
//!
//! if rand::random() { // generates a boolean
//! // Try printing a random unicode code point (probably a bad idea)!
//! println!("char: {}", rand::random::<char>());
//! }
//!
//! let mut rng = rand::thread_rng();
//! let y: f64 = rng.gen(); // generates a float between 0 and 1
//!
//! let mut nums: Vec<i32> = (1..100).collect();
//! nums.shuffle(&mut rng);
//! ```
//!
//! # The Book
//!
//! For the user guide and further documentation, please read
//! [The Rust Rand Book](https://rust-random.github.io/book).
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://rust-random.github.io/rand/"
)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![doc(test(attr(allow(unused_variables), deny(warnings))))]
#![no_std]
#![cfg_attr(feature = "simd_support", feature(stdsimd))]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![allow(
clippy::float_cmp,
clippy::neg_cmp_op_on_partial_ord,
)]
#[cfg(feature = "std")] extern crate std;
#[cfg(feature = "alloc")] extern crate alloc;
#[allow(unused)]
macro_rules! trace { ($($x:tt)*) => (
#[cfg(feature = "log")] {
log::trace!($($x)*)
}
) }
#[allow(unused)]
macro_rules! debug { ($($x:tt)*) => (
#[cfg(feature = "log")] {
log::debug!($($x)*)
}
) }
#[allow(unused)]
macro_rules! info { ($($x:tt)*) => (
#[cfg(feature = "log")] {
log::info!($($x)*)
}
) }
#[allow(unused)]
macro_rules! warn { ($($x:tt)*) => (
#[cfg(feature = "log")] {
log::warn!($($x)*)
}
) }
#[allow(unused)]
macro_rules! error { ($($x:tt)*) => (
#[cfg(feature = "log")] {
log::error!($($x)*)
}
) }
// Re-exports from rand_core
pub use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
// Public modules
pub mod distributions;
pub mod prelude;
mod rng;
pub mod rngs;
pub mod seq;
// Public exports
#[cfg(all(feature = "std", feature = "std_rng"))]
pub use crate::rngs::thread::thread_rng;
pub use rng::{Fill, Rng};
#[cfg(all(feature = "std", feature = "std_rng"))]
use crate::distributions::{Distribution, Standard};
/// Generates a random value using the thread-local random number generator.
///
/// This is simply a shortcut for `thread_rng().gen()`. See [`thread_rng`] for
/// documentation of the entropy source and [`Standard`] for documentation of
/// distributions and type-specific generation.
///
/// # Provided implementations
///
/// The following types have provided implementations that
/// generate values with the following ranges and distributions:
///
/// * Integers (`i32`, `u32`, `isize`, `usize`, etc.): Uniformly distributed
/// over all values of the type.
/// * `char`: Uniformly distributed over all Unicode scalar values, i.e. all
/// code points in the range `0...0x10_FFFF`, except for the range
/// `0xD800...0xDFFF` (the surrogate code points). This includes
/// unassigned/reserved code points.
/// * `bool`: Generates `false` or `true`, each with probability 0.5.
/// * Floating point types (`f32` and `f64`): Uniformly distributed in the
/// half-open range `[0, 1)`. See notes below.
/// * Wrapping integers (`Wrapping<T>`), besides the type identical to their
/// normal integer variants.
///
/// Also supported is the generation of the following
/// compound types where all component types are supported:
///
/// * Tuples (up to 12 elements): each element is generated sequentially.
/// * Arrays (up to 32 elements): each element is generated sequentially;
/// see also [`Rng::fill`] which supports arbitrary array length for integer
/// types and tends to be faster for `u32` and smaller types.
/// * `Option<T>` first generates a `bool`, and if true generates and returns
/// `Some(value)` where `value: T`, otherwise returning `None`.
///
/// # Examples
///
/// ```
/// let x = rand::random::<u8>();
/// println!("{}", x);
///
/// let y = rand::random::<f64>();
/// println!("{}", y);
///
/// if rand::random() { // generates a boolean
/// println!("Better lucky than good!");
/// }
/// ```
///
/// If you're calling `random()` in a loop, caching the generator as in the
/// following example can increase performance.
///
/// ```
/// use rand::Rng;
///
/// let mut v = vec![1, 2, 3];
///
/// for x in v.iter_mut() {
/// *x = rand::random()
/// }
///
/// // can be made faster by caching thread_rng
///
/// let mut rng = rand::thread_rng();
///
/// for x in v.iter_mut() {
/// *x = rng.gen();
/// }
/// ```
///
/// [`Standard`]: distributions::Standard
#[cfg(all(feature = "std", feature = "std_rng"))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng"))))]
#[inline]
pub fn random<T>() -> T
where Standard: Distribution<T> {
thread_rng().gen()
}
#[cfg(test)]
mod test {
use super::*;
/// Construct a deterministic RNG with the given seed
pub fn rng(seed: u64) -> impl RngCore {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
#[test]
#[cfg(all(feature = "std", feature = "std_rng"))]
fn test_random() {
let _n: usize = random();
let _f: f32 = random();
let _o: Option<Option<i8>> = random();
#[allow(clippy::type_complexity)]
let _many: (
(),
(usize, isize, Option<(u32, (bool,))>),
(u8, i8, u16, i16, u32, i32, u64, i64),
(f32, (f64, (f64,))),
) = random();
}
}

34
vendor/rand/src/prelude.rs vendored Normal file

@@ -0,0 +1,34 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Convenience re-export of common members
//!
//! Like the standard library's prelude, this module simplifies importing of
//! common items. Unlike the standard prelude, the contents of this module must
//! be imported manually:
//!
//! ```
//! use rand::prelude::*;
//! # let mut r = StdRng::from_rng(thread_rng()).unwrap();
//! # let _: f32 = r.gen();
//! ```
#[doc(no_inline)] pub use crate::distributions::Distribution;
#[cfg(feature = "small_rng")]
#[doc(no_inline)]
pub use crate::rngs::SmallRng;
#[cfg(feature = "std_rng")]
#[doc(no_inline)] pub use crate::rngs::StdRng;
#[doc(no_inline)]
#[cfg(all(feature = "std", feature = "std_rng"))]
pub use crate::rngs::ThreadRng;
#[doc(no_inline)] pub use crate::seq::{IteratorRandom, SliceRandom};
#[doc(no_inline)]
#[cfg(all(feature = "std", feature = "std_rng"))]
pub use crate::{random, thread_rng};
#[doc(no_inline)] pub use crate::{CryptoRng, Rng, RngCore, SeedableRng};

600
vendor/rand/src/rng.rs vendored Normal file

@@ -0,0 +1,600 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! [`Rng`] trait
use rand_core::{Error, RngCore};
use crate::distributions::uniform::{SampleRange, SampleUniform};
use crate::distributions::{self, Distribution, Standard};
use core::num::Wrapping;
use core::{mem, slice};
/// An automatically-implemented extension trait on [`RngCore`] providing high-level
/// generic methods for sampling values and other convenience methods.
///
/// This is the primary trait to use when generating random values.
///
/// # Generic usage
///
/// The basic pattern is `fn foo<R: Rng + ?Sized>(rng: &mut R)`. Some
/// things are worth noting here:
///
/// - Since `Rng: RngCore` and every `RngCore` implements `Rng`, it makes no
/// difference whether we use `R: Rng` or `R: RngCore`.
/// - The `+ ?Sized` un-bounding allows functions to be called directly on
/// type-erased references; i.e. `foo(r)` where `r: &mut dyn RngCore`. Without
/// this it would be necessary to write `foo(&mut r)`.
///
/// An alternative pattern is possible: `fn foo<R: Rng>(rng: R)`. This has some
/// trade-offs. It allows the argument to be consumed directly without a `&mut`
/// (which is how `from_rng(thread_rng())` works); also it still works directly
/// on references (including type-erased references). Unfortunately within the
/// function `foo` it is not known whether `rng` is a reference type or not,
/// hence many uses of `rng` require an extra reference, either explicitly
/// (`distr.sample(&mut rng)`) or implicitly (`rng.gen()`); one may hope the
/// optimiser can remove redundant references later.
///
/// Example:
///
/// ```
/// # use rand::thread_rng;
/// use rand::Rng;
///
/// fn foo<R: Rng + ?Sized>(rng: &mut R) -> f32 {
/// rng.gen()
/// }
///
/// # let v = foo(&mut thread_rng());
/// ```
pub trait Rng: RngCore {
/// Return a random value supporting the [`Standard`] distribution.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let x: u32 = rng.gen();
/// println!("{}", x);
/// println!("{:?}", rng.gen::<(f64, bool)>());
/// ```
///
/// # Arrays and tuples
///
/// The `rng.gen()` method is able to generate arrays (up to 32 elements)
/// and tuples (up to 12 elements), so long as all element types can be
/// generated.
/// When using `rustc` ≥ 1.51, enable the `min_const_gen` feature to support
/// arrays larger than 32 elements.
///
/// For arrays of integers, especially for those with small element types
/// (< 64 bit), it will likely be faster to instead use [`Rng::fill`].
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// let tuple: (u8, i32, char) = rng.gen(); // arbitrary tuple support
///
/// let arr1: [f32; 32] = rng.gen(); // array construction
/// let mut arr2 = [0u8; 128];
/// rng.fill(&mut arr2); // array fill
/// ```
///
/// [`Standard`]: distributions::Standard
#[inline]
fn gen<T>(&mut self) -> T
where Standard: Distribution<T> {
Standard.sample(self)
}
/// Generate a random value in the given range.
///
/// This function is optimised for the case that only a single sample is
/// made from the given range. See also the [`Uniform`] distribution
/// type which may be faster if sampling from the same range repeatedly.
///
/// Only `gen_range(low..high)` and `gen_range(low..=high)` are supported.
///
/// # Panics
///
/// Panics if the range is empty.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
///
/// // Exclusive range
/// let n: u32 = rng.gen_range(0..10);
/// println!("{}", n);
/// let m: f64 = rng.gen_range(-40.0..1.3e5);
/// println!("{}", m);
///
/// // Inclusive range
/// let n: u32 = rng.gen_range(0..=10);
/// println!("{}", n);
/// ```
///
/// [`Uniform`]: distributions::uniform::Uniform
fn gen_range<T, R>(&mut self, range: R) -> T
where
T: SampleUniform,
R: SampleRange<T>
{
assert!(!range.is_empty(), "cannot sample empty range");
range.sample_single(self)
}
/// Sample a new value, using the given distribution.
///
/// ### Example
///
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::Uniform;
///
/// let mut rng = thread_rng();
/// let x = rng.sample(Uniform::new(10u32, 15));
/// // Type annotation requires two types, the type and distribution; the
/// // distribution can be inferred.
/// let y = rng.sample::<u16, _>(Uniform::new(10, 15));
/// ```
fn sample<T, D: Distribution<T>>(&mut self, distr: D) -> T {
distr.sample(self)
}
/// Create an iterator that generates values using the given distribution.
///
/// Note that this function takes its arguments by value. This works since
/// `(&mut R): Rng where R: Rng` and
/// `(&D): Distribution where D: Distribution`,
/// however borrowing is not automatic hence `rng.sample_iter(...)` may
/// need to be replaced with `(&mut rng).sample_iter(...)`.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
/// use rand::distributions::{Alphanumeric, Uniform, Standard};
///
/// let mut rng = thread_rng();
///
/// // Vec of 16 x f32:
/// let v: Vec<f32> = (&mut rng).sample_iter(Standard).take(16).collect();
///
/// // String:
/// let s: String = (&mut rng).sample_iter(Alphanumeric)
/// .take(7)
/// .map(char::from)
/// .collect();
///
/// // Combined values
/// println!("{:?}", (&mut rng).sample_iter(Standard).take(5)
/// .collect::<Vec<(f64, bool)>>());
///
/// // Dice-rolling:
/// let die_range = Uniform::new_inclusive(1, 6);
/// let mut roll_die = (&mut rng).sample_iter(die_range);
/// while roll_die.next().unwrap() != 6 {
/// println!("Not a 6; rolling again!");
/// }
/// ```
fn sample_iter<T, D>(self, distr: D) -> distributions::DistIter<D, Self, T>
where
D: Distribution<T>,
Self: Sized,
{
distr.sample_iter(self)
}
/// Fill any type implementing [`Fill`] with random data
///
/// The distribution is expected to be uniform with portable results, but
/// this cannot be guaranteed for third-party implementations.
///
/// This is identical to [`try_fill`] except that it panics on error.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut arr = [0i8; 20];
/// thread_rng().fill(&mut arr[..]);
/// ```
///
/// [`fill_bytes`]: RngCore::fill_bytes
/// [`try_fill`]: Rng::try_fill
fn fill<T: Fill + ?Sized>(&mut self, dest: &mut T) {
dest.try_fill(self).unwrap_or_else(|_| panic!("Rng::fill failed"))
}
/// Fill any type implementing [`Fill`] with random data
///
/// The distribution is expected to be uniform with portable results, but
/// this cannot be guaranteed for third-party implementations.
///
/// This is identical to [`fill`] except that it forwards errors.
///
/// # Example
///
/// ```
/// # use rand::Error;
/// use rand::{thread_rng, Rng};
///
/// # fn try_inner() -> Result<(), Error> {
/// let mut arr = [0u64; 4];
/// thread_rng().try_fill(&mut arr[..])?;
/// # Ok(())
/// # }
///
/// # try_inner().unwrap()
/// ```
///
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
/// [`fill`]: Rng::fill
fn try_fill<T: Fill + ?Sized>(&mut self, dest: &mut T) -> Result<(), Error> {
dest.try_fill(self)
}
/// Return a bool with a probability `p` of being true.
///
/// See also the [`Bernoulli`] distribution, which may be faster if
/// sampling from the same probability repeatedly.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// println!("{}", rng.gen_bool(1.0 / 3.0));
/// ```
///
/// # Panics
///
/// If `p < 0` or `p > 1`.
///
/// [`Bernoulli`]: distributions::Bernoulli
#[inline]
fn gen_bool(&mut self, p: f64) -> bool {
let d = distributions::Bernoulli::new(p).unwrap();
self.sample(d)
}
/// Return a bool with a probability of `numerator/denominator` of being
/// true. I.e. `gen_ratio(2, 3)` has chance of 2 in 3, or about 67%, of
/// returning true. If `numerator == denominator`, then the returned value
/// is guaranteed to be `true`. If `numerator == 0`, then the returned
/// value is guaranteed to be `false`.
///
/// See also the [`Bernoulli`] distribution, which may be faster if
/// sampling from the same `numerator` and `denominator` repeatedly.
///
/// # Panics
///
/// If `denominator == 0` or `numerator > denominator`.
///
/// # Example
///
/// ```
/// use rand::{thread_rng, Rng};
///
/// let mut rng = thread_rng();
/// println!("{}", rng.gen_ratio(2, 3));
/// ```
///
/// [`Bernoulli`]: distributions::Bernoulli
#[inline]
fn gen_ratio(&mut self, numerator: u32, denominator: u32) -> bool {
let d = distributions::Bernoulli::from_ratio(numerator, denominator).unwrap();
self.sample(d)
}
}
impl<R: RngCore + ?Sized> Rng for R {}
/// Types which may be filled with random data
///
/// This trait allows arrays to be efficiently filled with random data.
///
/// Implementations are expected to be portable across machines unless
/// clearly documented otherwise (see the
/// [Chapter on Portability](https://rust-random.github.io/book/portability.html)).
pub trait Fill {
/// Fill self with random data
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error>;
}
macro_rules! impl_fill_each {
() => {};
($t:ty) => {
impl Fill for [$t] {
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
for elt in self.iter_mut() {
*elt = rng.gen();
}
Ok(())
}
}
};
($t:ty, $($tt:ty,)*) => {
impl_fill_each!($t);
impl_fill_each!($($tt,)*);
};
}
impl_fill_each!(bool, char, f32, f64,);
impl Fill for [u8] {
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
rng.try_fill_bytes(self)
}
}
macro_rules! impl_fill {
() => {};
($t:ty) => {
impl Fill for [$t] {
#[inline(never)] // in micro benchmarks, this improves performance
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
if self.len() > 0 {
rng.try_fill_bytes(unsafe {
slice::from_raw_parts_mut(self.as_mut_ptr()
as *mut u8,
self.len() * mem::size_of::<$t>()
)
})?;
for x in self {
*x = x.to_le();
}
}
Ok(())
}
}
impl Fill for [Wrapping<$t>] {
#[inline(never)]
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
if self.len() > 0 {
rng.try_fill_bytes(unsafe {
slice::from_raw_parts_mut(self.as_mut_ptr()
as *mut u8,
self.len() * mem::size_of::<$t>()
)
})?;
for x in self {
*x = Wrapping(x.0.to_le());
}
}
Ok(())
}
}
};
($t:ty, $($tt:ty,)*) => {
impl_fill!($t);
// TODO: this could replace above impl once Rust #32463 is fixed
// impl_fill!(Wrapping<$t>);
impl_fill!($($tt,)*);
}
}
impl_fill!(u16, u32, u64, usize, u128,);
impl_fill!(i8, i16, i32, i64, isize, i128,);
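// Editor's sketch (not part of the upstream crate): after the raw bytes are
// filled, each element is normalised with `to_le()`, so a given byte stream
// produces the same integer values on little- and big-endian targets. The
// standalone helper below shows the resulting interpretation and is
// hypothetical.
#[allow(dead_code)]
fn u32_from_filled_bytes_sketch(bytes: [u8; 4]) -> u32 {
    u32::from_le_bytes(bytes)
}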
#[cfg_attr(doc_cfg, doc(cfg(feature = "min_const_gen")))]
#[cfg(feature = "min_const_gen")]
impl<T, const N: usize> Fill for [T; N]
where [T]: Fill
{
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
self[..].try_fill(rng)
}
}
#[cfg(not(feature = "min_const_gen"))]
macro_rules! impl_fill_arrays {
($n:expr,) => {};
($n:expr, $N:ident) => {
impl<T> Fill for [T; $n] where [T]: Fill {
fn try_fill<R: Rng + ?Sized>(&mut self, rng: &mut R) -> Result<(), Error> {
self[..].try_fill(rng)
}
}
};
($n:expr, $N:ident, $($NN:ident,)*) => {
impl_fill_arrays!($n, $N);
impl_fill_arrays!($n - 1, $($NN,)*);
};
(!div $n:expr,) => {};
(!div $n:expr, $N:ident, $($NN:ident,)*) => {
impl_fill_arrays!($n, $N);
impl_fill_arrays!(!div $n / 2, $($NN,)*);
};
}
#[cfg(not(feature = "min_const_gen"))]
#[rustfmt::skip]
impl_fill_arrays!(32, N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,);
#[cfg(not(feature = "min_const_gen"))]
impl_fill_arrays!(!div 4096, N,N,N,N,N,N,N,);
#[cfg(test)]
mod test {
use super::*;
use crate::test::rng;
use crate::rngs::mock::StepRng;
#[cfg(feature = "alloc")] use alloc::boxed::Box;
#[test]
fn test_fill_bytes_default() {
let mut r = StepRng::new(0x11_22_33_44_55_66_77_88, 0);
// check every remainder mod 8, both in small and big vectors.
let lengths = [0, 1, 2, 3, 4, 5, 6, 7, 80, 81, 82, 83, 84, 85, 86, 87];
for &n in lengths.iter() {
let mut buffer = [0u8; 87];
let v = &mut buffer[0..n];
r.fill_bytes(v);
// use this to get nicer error messages.
for (i, &byte) in v.iter().enumerate() {
if byte == 0 {
panic!("byte {} of {} is zero", i, n)
}
}
}
}
#[test]
fn test_fill() {
let x = 9041086907909331047; // a random u64
let mut rng = StepRng::new(x, 0);
// Convert to byte sequence and back to u64; byte-swap twice if BE.
let mut array = [0u64; 2];
rng.fill(&mut array[..]);
assert_eq!(array, [x, x]);
assert_eq!(rng.next_u64(), x);
// Convert to bytes then u32 in LE order
let mut array = [0u32; 2];
rng.fill(&mut array[..]);
assert_eq!(array, [x as u32, (x >> 32) as u32]);
assert_eq!(rng.next_u32(), x as u32);
// Check equivalence using wrapped arrays
let mut warray = [Wrapping(0u32); 2];
rng.fill(&mut warray[..]);
assert_eq!(array[0], warray[0].0);
assert_eq!(array[1], warray[1].0);
// Check equivalence for generated floats
let mut array = [0f32; 2];
rng.fill(&mut array);
let gen: [f32; 2] = rng.gen();
assert_eq!(array, gen);
}
#[test]
fn test_fill_empty() {
let mut array = [0u32; 0];
let mut rng = StepRng::new(0, 1);
rng.fill(&mut array);
rng.fill(&mut array[..]);
}
#[test]
fn test_gen_range_int() {
let mut r = rng(101);
for _ in 0..1000 {
let a = r.gen_range(-4711..17);
assert!((-4711..17).contains(&a));
let a: i8 = r.gen_range(-3..42);
assert!((-3..42).contains(&a));
let a: u16 = r.gen_range(10..99);
assert!((10..99).contains(&a));
let a: i32 = r.gen_range(-100..2000);
assert!((-100..2000).contains(&a));
let a: u32 = r.gen_range(12..=24);
assert!((12..=24).contains(&a));
assert_eq!(r.gen_range(0u32..1), 0u32);
assert_eq!(r.gen_range(-12i64..-11), -12i64);
assert_eq!(r.gen_range(3_000_000..3_000_001), 3_000_000);
}
}
#[test]
fn test_gen_range_float() {
let mut r = rng(101);
for _ in 0..1000 {
let a = r.gen_range(-4.5..1.7);
assert!((-4.5..1.7).contains(&a));
let a = r.gen_range(-1.1..=-0.3);
assert!((-1.1..=-0.3).contains(&a));
assert_eq!(r.gen_range(0.0f32..=0.0), 0.);
assert_eq!(r.gen_range(-11.0..=-11.0), -11.);
assert_eq!(r.gen_range(3_000_000.0..=3_000_000.0), 3_000_000.);
}
}
#[test]
#[should_panic]
fn test_gen_range_panic_int() {
#![allow(clippy::reversed_empty_ranges)]
let mut r = rng(102);
r.gen_range(5..-2);
}
#[test]
#[should_panic]
fn test_gen_range_panic_usize() {
#![allow(clippy::reversed_empty_ranges)]
let mut r = rng(103);
r.gen_range(5..2);
}
#[test]
fn test_gen_bool() {
#![allow(clippy::bool_assert_comparison)]
let mut r = rng(105);
for _ in 0..5 {
assert_eq!(r.gen_bool(0.0), false);
assert_eq!(r.gen_bool(1.0), true);
}
}
#[test]
fn test_rng_trait_object() {
use crate::distributions::{Distribution, Standard};
let mut rng = rng(109);
let mut r = &mut rng as &mut dyn RngCore;
r.next_u32();
r.gen::<i32>();
assert_eq!(r.gen_range(0..1), 0);
let _c: u8 = Standard.sample(&mut r);
}
#[test]
#[cfg(feature = "alloc")]
fn test_rng_boxed_trait() {
use crate::distributions::{Distribution, Standard};
let rng = rng(110);
let mut r = Box::new(rng) as Box<dyn RngCore>;
r.next_u32();
r.gen::<i32>();
assert_eq!(r.gen_range(0..1), 0);
let _c: u8 = Standard.sample(&mut r);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_gen_ratio_average() {
const NUM: u32 = 3;
const DENOM: u32 = 10;
const N: u32 = 100_000;
let mut sum: u32 = 0;
let mut rng = rng(111);
for _ in 0..N {
if rng.gen_ratio(NUM, DENOM) {
sum += 1;
}
}
// Have Binomial(N, NUM/DENOM) distribution
let expected = (NUM * N) / DENOM; // exact integer
assert!(((sum - expected) as i32).abs() < 500);
}
}

16
vendor/rand/src/rngs/adapter/mod.rs vendored Normal file

@@ -0,0 +1,16 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Wrappers / adapters forming RNGs
mod read;
mod reseeding;
#[allow(deprecated)]
pub use self::read::{ReadError, ReadRng};
pub use self::reseeding::ReseedingRng;

150
vendor/rand/src/rngs/adapter/read.rs vendored Normal file

@@ -0,0 +1,150 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around any Read to treat it as an RNG.
#![allow(deprecated)]
use std::fmt;
use std::io::Read;
use rand_core::{impls, Error, RngCore};
/// An RNG that reads random bytes straight from any type supporting
/// [`std::io::Read`], for example files.
///
/// This will work best with an infinite reader, but that is not required.
///
/// This can be used with `/dev/urandom` on Unix but it is recommended to use
/// [`OsRng`] instead.
///
/// # Panics
///
/// `ReadRng` uses [`std::io::Read::read_exact`], which retries on interrupts.
/// All other errors from the underlying reader, including when it does not
/// have enough data, will only be reported through [`try_fill_bytes`].
/// The other [`RngCore`] methods will panic in case of an error.
///
/// [`OsRng`]: crate::rngs::OsRng
/// [`try_fill_bytes`]: RngCore::try_fill_bytes
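/// # Example
///
/// A minimal sketch (editor's addition, not part of the upstream
/// documentation), reading random bytes from an in-memory buffer:
///
/// ```
/// #![allow(deprecated)]
/// use rand::rngs::adapter::ReadRng;
/// use rand::RngCore;
///
/// let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
/// let mut rng = ReadRng::new(&data[..]);
/// let _word = rng.next_u32();
/// ```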
#[derive(Debug)]
#[deprecated(since="0.8.4", note="removal due to lack of usage")]
pub struct ReadRng<R> {
reader: R,
}
impl<R: Read> ReadRng<R> {
/// Create a new `ReadRng` from a `Read`.
pub fn new(r: R) -> ReadRng<R> {
ReadRng { reader: r }
}
}
impl<R: Read> RngCore for ReadRng<R> {
fn next_u32(&mut self) -> u32 {
impls::next_u32_via_fill(self)
}
fn next_u64(&mut self) -> u64 {
impls::next_u64_via_fill(self)
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.try_fill_bytes(dest).unwrap_or_else(|err| {
panic!(
"reading random bytes from Read implementation failed; error: {}",
err
)
});
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
if dest.is_empty() {
return Ok(());
}
// Use `std::io::read_exact`, which retries on `ErrorKind::Interrupted`.
self.reader
.read_exact(dest)
.map_err(|e| Error::new(ReadError(e)))
}
}
/// `ReadRng` error type
#[derive(Debug)]
#[deprecated(since="0.8.4")]
pub struct ReadError(std::io::Error);
impl fmt::Display for ReadError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ReadError: {}", self.0)
}
}
impl std::error::Error for ReadError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&self.0)
}
}
#[cfg(test)]
mod test {
use std::println;
use super::ReadRng;
use crate::RngCore;
#[test]
fn test_reader_rng_u64() {
// transmute from the target to avoid endianness concerns.
#[rustfmt::skip]
let v = [0u8, 0, 0, 0, 0, 0, 0, 1,
0, 4, 0, 0, 3, 0, 0, 2,
5, 0, 0, 0, 0, 0, 0, 0];
let mut rng = ReadRng::new(&v[..]);
assert_eq!(rng.next_u64(), 1 << 56);
assert_eq!(rng.next_u64(), (2 << 56) + (3 << 32) + (4 << 8));
assert_eq!(rng.next_u64(), 5);
}
#[test]
fn test_reader_rng_u32() {
let v = [0u8, 0, 0, 1, 0, 0, 2, 0, 3, 0, 0, 0];
let mut rng = ReadRng::new(&v[..]);
assert_eq!(rng.next_u32(), 1 << 24);
assert_eq!(rng.next_u32(), 2 << 16);
assert_eq!(rng.next_u32(), 3);
}
#[test]
fn test_reader_rng_fill_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8; 8];
let mut rng = ReadRng::new(&v[..]);
rng.fill_bytes(&mut w);
assert!(v == w);
}
#[test]
fn test_reader_rng_insufficient_bytes() {
let v = [1u8, 2, 3, 4, 5, 6, 7, 8];
let mut w = [0u8; 9];
let mut rng = ReadRng::new(&v[..]);
let result = rng.try_fill_bytes(&mut w);
assert!(result.is_err());
println!("Error: {}", result.unwrap_err());
}
}


@@ -0,0 +1,386 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A wrapper around another PRNG that reseeds it after it
//! generates a certain number of random bytes.
use core::mem::size_of;
use rand_core::block::{BlockRng, BlockRngCore};
use rand_core::{CryptoRng, Error, RngCore, SeedableRng};
/// A wrapper around any PRNG that implements [`BlockRngCore`], that adds the
/// ability to reseed it.
///
/// `ReseedingRng` reseeds the underlying PRNG in the following cases:
///
/// - On a manual call to [`reseed()`].
/// - After `clone()`, the clone will be reseeded on first use.
/// - When a process is forked on UNIX, the RNGs in both the parent and child
/// processes will be reseeded just before the next call to
/// [`BlockRngCore::generate`], i.e. "soon". For ChaCha and Hc128 this is a
/// maximum of fifteen `u32` values before reseeding.
/// - After the PRNG has generated a configurable number of random bytes.
///
/// # When should reseeding after a fixed number of generated bytes be used?
///
/// Reseeding after a fixed number of generated bytes is never strictly
/// *necessary*. Cryptographic PRNGs don't have a limited number of bytes they
/// can output, or at least not a limit reachable in any practical way. There is
/// no such thing as 'running out of entropy'.
///
/// Occasionally reseeding can be seen as some form of 'security in depth'. Even
/// if in the future a cryptographic weakness is found in the CSPRNG being used,
/// or a flaw in the implementation, occasionally reseeding should make
/// exploiting it much more difficult or even impossible.
///
/// Use [`ReseedingRng::new`] with a `threshold` of `0` to disable reseeding
/// after a fixed number of generated bytes.
///
/// # Limitations
///
/// It is recommended that a `ReseedingRng` (including `ThreadRng`) not be used
/// from a fork handler.
/// Use `OsRng` or `getrandom`, or defer your use of the RNG until later.
///
/// # Error handling
///
/// Although unlikely, reseeding the wrapped PRNG can fail. `ReseedingRng` will
/// never panic but try to handle the error intelligently through some
/// combination of retrying and delaying reseeding until later.
/// If handling the source error fails, `ReseedingRng` will continue generating
/// data from the wrapped PRNG without reseeding.
///
/// Manually calling [`reseed()`] will not have this retry or delay logic, but
/// reports the error.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
/// use rand_chacha::ChaCha20Core; // Internal part of ChaChaRng that
/// // implements BlockRngCore
/// use rand::rngs::OsRng;
/// use rand::rngs::adapter::ReseedingRng;
///
/// let prng = ChaCha20Core::from_entropy();
/// let mut reseeding_rng = ReseedingRng::new(prng, 0, OsRng);
///
/// println!("{}", reseeding_rng.gen::<u64>());
///
/// let mut cloned_rng = reseeding_rng.clone();
/// assert!(reseeding_rng.gen::<u64>() != cloned_rng.gen::<u64>());
/// ```
///
/// [`BlockRngCore`]: rand_core::block::BlockRngCore
/// [`ReseedingRng::new`]: ReseedingRng::new
/// [`reseed()`]: ReseedingRng::reseed
#[derive(Debug)]
pub struct ReseedingRng<R, Rsdr>(BlockRng<ReseedingCore<R, Rsdr>>)
where
R: BlockRngCore + SeedableRng,
Rsdr: RngCore;
impl<R, Rsdr> ReseedingRng<R, Rsdr>
where
R: BlockRngCore + SeedableRng,
Rsdr: RngCore,
{
/// Create a new `ReseedingRng` from an existing PRNG, combined with a RNG
/// to use as reseeder.
///
/// `threshold` sets the number of generated bytes after which to reseed the
/// PRNG. Set it to zero to never reseed based on the number of generated
/// values.
pub fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
ReseedingRng(BlockRng::new(ReseedingCore::new(rng, threshold, reseeder)))
}
/// Reseed the internal PRNG.
pub fn reseed(&mut self) -> Result<(), Error> {
self.0.core.reseed()
}
}
// TODO: this should be implemented for any type where the inner type
// implements RngCore, but we can't specify that because ReseedingCore is private
impl<R, Rsdr: RngCore> RngCore for ReseedingRng<R, Rsdr>
where
R: BlockRngCore<Item = u32> + SeedableRng,
<R as BlockRngCore>::Results: AsRef<[u32]> + AsMut<[u32]>,
{
#[inline(always)]
fn next_u32(&mut self) -> u32 {
self.0.next_u32()
}
#[inline(always)]
fn next_u64(&mut self) -> u64 {
self.0.next_u64()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.0.fill_bytes(dest)
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.0.try_fill_bytes(dest)
}
}
impl<R, Rsdr> Clone for ReseedingRng<R, Rsdr>
where
R: BlockRngCore + SeedableRng + Clone,
Rsdr: RngCore + Clone,
{
fn clone(&self) -> ReseedingRng<R, Rsdr> {
// Recreating `BlockRng` seems easier than cloning it and resetting
// the index.
ReseedingRng(BlockRng::new(self.0.core.clone()))
}
}
impl<R, Rsdr> CryptoRng for ReseedingRng<R, Rsdr>
where
R: BlockRngCore + SeedableRng + CryptoRng,
Rsdr: RngCore + CryptoRng,
{
}
#[derive(Debug)]
struct ReseedingCore<R, Rsdr> {
inner: R,
reseeder: Rsdr,
threshold: i64,
bytes_until_reseed: i64,
fork_counter: usize,
}
impl<R, Rsdr> BlockRngCore for ReseedingCore<R, Rsdr>
where
R: BlockRngCore + SeedableRng,
Rsdr: RngCore,
{
type Item = <R as BlockRngCore>::Item;
type Results = <R as BlockRngCore>::Results;
fn generate(&mut self, results: &mut Self::Results) {
let global_fork_counter = fork::get_fork_counter();
if self.bytes_until_reseed <= 0 || self.is_forked(global_fork_counter) {
// We get better performance by returning directly from a
// non-inlined function than by calling `reseed` here and then
// continuing with the rest of this function.
return self.reseed_and_generate(results, global_fork_counter);
}
let num_bytes = results.as_ref().len() * size_of::<Self::Item>();
self.bytes_until_reseed -= num_bytes as i64;
self.inner.generate(results);
}
}
impl<R, Rsdr> ReseedingCore<R, Rsdr>
where
R: BlockRngCore + SeedableRng,
Rsdr: RngCore,
{
/// Create a new `ReseedingCore`.
fn new(rng: R, threshold: u64, reseeder: Rsdr) -> Self {
use ::core::i64::MAX;
fork::register_fork_handler();
// Because generating more values than `i64::MAX` takes centuries on
// current hardware, we just clamp to that value.
// A threshold of 0, which indicates no limit, is also mapped to that
// value.
let threshold = if threshold == 0 {
MAX
} else if threshold <= MAX as u64 {
threshold as i64
} else {
MAX
};
ReseedingCore {
inner: rng,
reseeder,
threshold: threshold as i64,
bytes_until_reseed: threshold as i64,
fork_counter: 0,
}
}
/// Reseed the internal PRNG.
fn reseed(&mut self) -> Result<(), Error> {
R::from_rng(&mut self.reseeder).map(|result| {
self.bytes_until_reseed = self.threshold;
self.inner = result
})
}
fn is_forked(&self, global_fork_counter: usize) -> bool {
// In theory, on 32-bit platforms, it is possible for
// `global_fork_counter` to wrap around after ~4e9 forks.
//
// This check will detect a fork in the normal case where
// `fork_counter < global_fork_counter`, and also when the difference
// between both is greater than `isize::MAX` (wrapped around).
//
// It will still fail to detect a fork if there have been more than
// `isize::MAX` forks, without any reseed in between. Seems unlikely
// enough.
(self.fork_counter.wrapping_sub(global_fork_counter) as isize) < 0
}
#[inline(never)]
fn reseed_and_generate(
&mut self, results: &mut <Self as BlockRngCore>::Results, global_fork_counter: usize,
) {
#![allow(clippy::if_same_then_else)] // false positive
if self.is_forked(global_fork_counter) {
info!("Fork detected, reseeding RNG");
} else {
trace!("Reseeding RNG (periodic reseed)");
}
let num_bytes = results.as_ref().len() * size_of::<<R as BlockRngCore>::Item>();
if let Err(e) = self.reseed() {
warn!("Reseeding RNG failed: {}", e);
let _ = e;
}
self.fork_counter = global_fork_counter;
self.bytes_until_reseed = self.threshold - num_bytes as i64;
self.inner.generate(results);
}
}
impl<R, Rsdr> Clone for ReseedingCore<R, Rsdr>
where
R: BlockRngCore + SeedableRng + Clone,
Rsdr: RngCore + Clone,
{
fn clone(&self) -> ReseedingCore<R, Rsdr> {
ReseedingCore {
inner: self.inner.clone(),
reseeder: self.reseeder.clone(),
threshold: self.threshold,
bytes_until_reseed: 0, // reseed clone on first use
fork_counter: self.fork_counter,
}
}
}
impl<R, Rsdr> CryptoRng for ReseedingCore<R, Rsdr>
where
R: BlockRngCore + SeedableRng + CryptoRng,
Rsdr: RngCore + CryptoRng,
{
}
#[cfg(all(unix, not(target_os = "emscripten")))]
mod fork {
use core::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Once;
// Fork protection
//
// We implement fork protection on Unix using `pthread_atfork`.
// When the process is forked, we increment `RESEEDING_RNG_FORK_COUNTER`.
// Every `ReseedingRng` stores the last known value of the static in
// `fork_counter`. If the cached `fork_counter` is less than
// `RESEEDING_RNG_FORK_COUNTER`, it is time to reseed this RNG.
//
// If reseeding fails, we don't deal with this by setting a delay, but just
// don't update `fork_counter`, so a reseed is attempted as soon as
// possible.
static RESEEDING_RNG_FORK_COUNTER: AtomicUsize = AtomicUsize::new(0);
pub fn get_fork_counter() -> usize {
RESEEDING_RNG_FORK_COUNTER.load(Ordering::Relaxed)
}
extern "C" fn fork_handler() {
// Note: fetch_add is defined to wrap on overflow
// (which is what we want).
RESEEDING_RNG_FORK_COUNTER.fetch_add(1, Ordering::Relaxed);
}
pub fn register_fork_handler() {
static REGISTER: Once = Once::new();
REGISTER.call_once(|| {
// Bump the counter before and after forking (see #1169):
let ret = unsafe { libc::pthread_atfork(
Some(fork_handler),
Some(fork_handler),
Some(fork_handler),
) };
if ret != 0 {
panic!("libc::pthread_atfork failed with code {}", ret);
}
});
}
}
#[cfg(not(all(unix, not(target_os = "emscripten"))))]
mod fork {
pub fn get_fork_counter() -> usize {
0
}
pub fn register_fork_handler() {}
}
#[cfg(feature = "std_rng")]
#[cfg(test)]
mod test {
use super::ReseedingRng;
use crate::rngs::mock::StepRng;
use crate::rngs::std::Core;
use crate::{Rng, SeedableRng};
#[test]
fn test_reseeding() {
let mut zero = StepRng::new(0, 0);
let rng = Core::from_rng(&mut zero).unwrap();
let thresh = 1; // reseed every time the buffer is exhausted
let mut reseeding = ReseedingRng::new(rng, thresh, zero);
// RNG buffer size is [u32; 64]
// Debug is only implemented up to length 32 so use two arrays
let mut buf = ([0u32; 32], [0u32; 32]);
reseeding.fill(&mut buf.0);
reseeding.fill(&mut buf.1);
let seq = buf;
for _ in 0..10 {
reseeding.fill(&mut buf.0);
reseeding.fill(&mut buf.1);
assert_eq!(buf, seq);
}
}
#[test]
fn test_clone_reseeding() {
#![allow(clippy::redundant_clone)]
let mut zero = StepRng::new(0, 0);
let rng = Core::from_rng(&mut zero).unwrap();
let mut rng1 = ReseedingRng::new(rng, 32 * 4, zero);
let first: u32 = rng1.gen();
for _ in 0..10 {
let _ = rng1.gen::<u32>();
}
let mut rng2 = rng1.clone();
assert_eq!(first, rng2.gen::<u32>());
}
}
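The test above drives `ReseedingRng` with a constant reseeder; in real use it is wired much as `thread.rs` (later in this diff) does for `ThreadRng`. A minimal sketch, assuming default crate features (for `OsRng` and `from_entropy`) and `rand_chacha` as a direct dependency:

```
use rand::rngs::adapter::ReseedingRng;
use rand::rngs::OsRng;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha12Core;

fn main() {
    // Wrap a ChaCha12 core so it is transparently reseeded from OsRng
    // after roughly 64 KiB of output (and after a fork on Unix).
    let core = ChaCha12Core::from_entropy();
    let mut rng = ReseedingRng::new(core, 1024 * 64, OsRng);
    let x: u64 = rng.gen();
    println!("{}", x);
}
```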

87
vendor/rand/src/rngs/mock.rs vendored Normal file
View File

@@ -0,0 +1,87 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Mock random number generator
use rand_core::{impls, Error, RngCore};
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// A simple implementation of `RngCore` for testing purposes.
///
/// This generates an arithmetic sequence (i.e. adds a constant each step)
/// over a `u64` number, using wrapping arithmetic. If the increment is 0
/// the generator yields a constant.
///
/// ```
/// use rand::Rng;
/// use rand::rngs::mock::StepRng;
///
/// let mut my_rng = StepRng::new(2, 1);
/// let sample: [u64; 3] = my_rng.gen();
/// assert_eq!(sample, [2, 3, 4]);
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct StepRng {
v: u64,
a: u64,
}
impl StepRng {
/// Create a `StepRng`, yielding an arithmetic sequence starting with
/// `initial` and incremented by `increment` each time.
pub fn new(initial: u64, increment: u64) -> Self {
StepRng {
v: initial,
a: increment,
}
}
}
impl RngCore for StepRng {
#[inline]
fn next_u32(&mut self) -> u32 {
self.next_u64() as u32
}
#[inline]
fn next_u64(&mut self) -> u64 {
let result = self.v;
self.v = self.v.wrapping_add(self.a);
result
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
impls::fill_bytes_via_next(self, dest);
}
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
#[test]
#[cfg(feature = "serde1")]
fn test_serialization_step_rng() {
use super::StepRng;
let some_rng = StepRng::new(42, 7);
let de_some_rng: StepRng =
bincode::deserialize(&bincode::serialize(&some_rng).unwrap()).unwrap();
assert_eq!(some_rng.v, de_some_rng.v);
assert_eq!(some_rng.a, de_some_rng.a);
}
}
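Beyond the doc example above, a common pattern is to drive code under test with a `StepRng` so its output is fully predictable. A small illustrative sketch:

```
use rand::Rng;
use rand::rngs::mock::StepRng;

fn main() {
    // Zero increment: the generator yields a constant, making any code
    // that consumes randomness fully deterministic in tests.
    let mut constant = StepRng::new(42, 0);
    assert_eq!(constant.gen::<u64>(), 42);
    assert_eq!(constant.gen::<u64>(), 42);

    // Non-zero increment: an arithmetic sequence, wrapping on overflow.
    let mut counting = StepRng::new(0, 3);
    let xs: [u64; 4] = counting.gen();
    assert_eq!(xs, [0, 3, 6, 9]);
}
```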

119
vendor/rand/src/rngs/mod.rs vendored Normal file
View File

@@ -0,0 +1,119 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Random number generators and adapters
//!
//! ## Background: Random number generators (RNGs)
//!
//! Computers cannot produce random numbers from nowhere. We classify
//! random number generators as follows:
//!
//! - "True" random number generators (TRNGs) use hard-to-predict data sources
//! (e.g. the high-resolution parts of event timings and sensor jitter) to
//! harvest random bit-sequences, apply algorithms to remove bias and
//! estimate available entropy, then combine these bits into a byte-sequence
//! or an entropy pool. This job is usually done by the operating system or
//! a hardware generator (HRNG).
//! - "Pseudo"-random number generators (PRNGs) use algorithms to transform a
//! seed into a sequence of pseudo-random numbers. These generators can be
//! fast and produce well-distributed unpredictable random numbers (or not).
//! They are usually deterministic: given algorithm and seed, the output
//! sequence can be reproduced. They have finite period and eventually loop;
//! with many algorithms this period is fixed and can be proven sufficiently
//! long, while others are chaotic and the period depends on the seed.
//! - "Cryptographically secure" pseudo-random number generators (CSPRNGs)
//! are the sub-set of PRNGs which are secure. Security of the generator
//! relies both on hiding the internal state and using a strong algorithm.
//!
//! ## Traits and functionality
//!
//! All RNGs implement the [`RngCore`] trait, as a consequence of which the
//! [`Rng`] extension trait is automatically implemented. Secure RNGs may
//! additionally implement the [`CryptoRng`] trait.
//!
//! All PRNGs require a seed to produce their random number sequence. The
//! [`SeedableRng`] trait provides four ways of constructing PRNGs:
//!
//! - `from_seed` accepts a type specific to the PRNG
//! - `from_rng` allows a PRNG to be seeded from any other RNG
//! - `seed_from_u64` allows any PRNG to be seeded from a `u64` insecurely
//! - `from_entropy` securely seeds a PRNG from fresh entropy
//!
//! Use the [`rand_core`] crate when implementing your own RNGs.
//!
//! ## Our generators
//!
//! This crate provides several random number generators:
//!
//! - [`OsRng`] is an interface to the operating system's random number
//! source. Typically the operating system uses a CSPRNG with entropy
//! provided by a TRNG and some type of on-going re-seeding.
//! - [`ThreadRng`], provided by the [`thread_rng`] function, is a handle to a
//! thread-local CSPRNG with periodic seeding from [`OsRng`]. Because this
//! is local, it is typically much faster than [`OsRng`]. It should be
//! secure, though the paranoid may prefer [`OsRng`].
//! - [`StdRng`] is a CSPRNG chosen for good performance and trust of security
//! (based on reviews, maturity and usage). The current algorithm is ChaCha12,
//! which is well established and rigorously analysed.
//! [`StdRng`] provides the algorithm used by [`ThreadRng`] but without
//! periodic reseeding.
//! - [`SmallRng`] is an **insecure** PRNG designed to be fast, simple, require
//! little memory, and have good output quality.
//!
//! The algorithms selected for [`StdRng`] and [`SmallRng`] may change in any
//! release and may be platform-dependent, therefore they should be considered
//! **not reproducible**.
//!
//! ## Additional generators
//!
//! **TRNGs**: The [`rdrand`] crate provides an interface to the RDRAND and
//! RDSEED instructions available in modern Intel and AMD CPUs.
//! The [`rand_jitter`] crate provides a user-space implementation of
//! entropy harvesting from CPU timer jitter, but is very slow and has
//! [security issues](https://github.com/rust-random/rand/issues/699).
//!
//! **PRNGs**: Several companion crates are available, providing individual or
//! families of PRNG algorithms. These provide the implementations behind
//! [`StdRng`] and [`SmallRng`] but can also be used directly, indeed *should*
//! be used directly when **reproducibility** matters.
//! Some suggestions are: [`rand_chacha`], [`rand_pcg`], [`rand_xoshiro`].
//! A full list can be found by searching for crates with the [`rng` tag].
//!
//! [`Rng`]: crate::Rng
//! [`RngCore`]: crate::RngCore
//! [`CryptoRng`]: crate::CryptoRng
//! [`SeedableRng`]: crate::SeedableRng
//! [`thread_rng`]: crate::thread_rng
//! [`rdrand`]: https://crates.io/crates/rdrand
//! [`rand_jitter`]: https://crates.io/crates/rand_jitter
//! [`rand_chacha`]: https://crates.io/crates/rand_chacha
//! [`rand_pcg`]: https://crates.io/crates/rand_pcg
//! [`rand_xoshiro`]: https://crates.io/crates/rand_xoshiro
//! [`rng` tag]: https://crates.io/keywords/rng
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg(feature = "std")] pub mod adapter;
pub mod mock; // Public so we don't export `StepRng` directly, making it a bit
// more clear it is intended for testing.
#[cfg(all(feature = "small_rng", target_pointer_width = "64"))]
mod xoshiro256plusplus;
#[cfg(all(feature = "small_rng", not(target_pointer_width = "64")))]
mod xoshiro128plusplus;
#[cfg(feature = "small_rng")] mod small;
#[cfg(feature = "std_rng")] mod std;
#[cfg(all(feature = "std", feature = "std_rng"))] pub(crate) mod thread;
#[cfg(feature = "small_rng")] pub use self::small::SmallRng;
#[cfg(feature = "std_rng")] pub use self::std::StdRng;
#[cfg(all(feature = "std", feature = "std_rng"))] pub use self::thread::ThreadRng;
#[cfg_attr(doc_cfg, doc(cfg(feature = "getrandom")))]
#[cfg(feature = "getrandom")] pub use rand_core::OsRng;

117
vendor/rand/src/rngs/small.rs vendored Normal file
View File

@@ -0,0 +1,117 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A small fast RNG
use rand_core::{Error, RngCore, SeedableRng};
#[cfg(target_pointer_width = "64")]
type Rng = super::xoshiro256plusplus::Xoshiro256PlusPlus;
#[cfg(not(target_pointer_width = "64"))]
type Rng = super::xoshiro128plusplus::Xoshiro128PlusPlus;
/// A small-state, fast non-crypto PRNG
///
/// `SmallRng` may be a good choice when a PRNG with small state, cheap
/// initialization, good statistical quality and good performance are required.
/// Note that depending on the application, [`StdRng`] may be faster on many
/// modern platforms while providing higher-quality randomness. Furthermore,
/// `SmallRng` is **not** a good choice when:
/// - Security against prediction is important. Use [`StdRng`] instead.
/// - Seeds with many zeros are provided. In such cases, it takes `SmallRng`
/// about 10 samples to produce 0 and 1 bits with equal probability. Either
/// provide seeds with an approximately equal number of 0 and 1 (for example
/// by using [`SeedableRng::from_entropy`] or [`SeedableRng::seed_from_u64`]),
/// or use [`StdRng`] instead.
///
/// The algorithm is deterministic but should not be considered reproducible
/// due to dependence on platform and possible replacement in future
/// library versions. For a reproducible generator, use a named PRNG from an
/// external crate, e.g. [rand_xoshiro] or [rand_chacha].
/// Refer also to [The Book](https://rust-random.github.io/book/guide-rngs.html).
///
/// The PRNG algorithm in `SmallRng` is chosen to be efficient on the current
/// platform, without consideration for cryptography or security. The size of
/// its state is much smaller than [`StdRng`]. The current algorithm is
/// `Xoshiro256PlusPlus` on 64-bit platforms and `Xoshiro128PlusPlus` on 32-bit
/// platforms. Both are also implemented by the [rand_xoshiro] crate.
///
/// # Examples
///
/// Initializing `SmallRng` with a random seed can be done using [`SeedableRng::from_entropy`]:
///
/// ```
/// use rand::{Rng, SeedableRng};
/// use rand::rngs::SmallRng;
///
/// // Create small, cheap to initialize and fast RNG with a random seed.
/// // The randomness is supplied by the operating system.
/// let mut small_rng = SmallRng::from_entropy();
/// # let v: u32 = small_rng.gen();
/// ```
///
/// When initializing many `SmallRng` instances, using [`thread_rng`] can be more
/// efficient:
///
/// ```
/// use rand::{SeedableRng, thread_rng};
/// use rand::rngs::SmallRng;
///
/// // Create a big, expensive to initialize and slower, but unpredictable RNG.
/// // This is cached and done only once per thread.
/// let mut thread_rng = thread_rng();
/// // Create small, cheap to initialize and fast RNGs with random seeds.
/// // One can generally assume this won't fail.
/// let rngs: Vec<SmallRng> = (0..10)
/// .map(|_| SmallRng::from_rng(&mut thread_rng).unwrap())
/// .collect();
/// ```
///
/// [`StdRng`]: crate::rngs::StdRng
/// [`thread_rng`]: crate::thread_rng
/// [rand_chacha]: https://crates.io/crates/rand_chacha
/// [rand_xoshiro]: https://crates.io/crates/rand_xoshiro
#[cfg_attr(doc_cfg, doc(cfg(feature = "small_rng")))]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SmallRng(Rng);
impl RngCore for SmallRng {
#[inline(always)]
fn next_u32(&mut self) -> u32 {
self.0.next_u32()
}
#[inline(always)]
fn next_u64(&mut self) -> u64 {
self.0.next_u64()
}
#[inline(always)]
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.0.fill_bytes(dest);
}
#[inline(always)]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.0.try_fill_bytes(dest)
}
}
impl SeedableRng for SmallRng {
type Seed = <Rng as SeedableRng>::Seed;
#[inline(always)]
fn from_seed(seed: Self::Seed) -> Self {
SmallRng(Rng::from_seed(seed))
}
#[inline(always)]
fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
Rng::from_rng(rng).map(SmallRng)
}
}
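Following the seeding advice above, a minimal sketch of seeding `SmallRng` from a plain integer via `seed_from_u64` (assumes the `small_rng` feature):

```
use rand::rngs::SmallRng;
use rand::{Rng, SeedableRng};

fn main() {
    // `seed_from_u64` expands the integer into a well-mixed full seed,
    // avoiding the mostly-zero seeds the documentation above warns about.
    let mut rng = SmallRng::seed_from_u64(1);
    let x: f64 = rng.gen();
    assert!((0.0..1.0).contains(&x));
}
```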

98
vendor/rand/src/rngs/std.rs vendored Normal file
View File

@@ -0,0 +1,98 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The standard RNG
use crate::{CryptoRng, Error, RngCore, SeedableRng};
pub(crate) use rand_chacha::ChaCha12Core as Core;
use rand_chacha::ChaCha12Rng as Rng;
/// The standard RNG. The PRNG algorithm in `StdRng` is chosen to be efficient
/// on the current platform, to be statistically strong and unpredictable
/// (meaning a cryptographically secure PRNG).
///
/// The current algorithm used is the ChaCha block cipher with 12 rounds. Please
/// see this relevant [rand issue] for the discussion. This may change as new
/// evidence of cipher security and performance becomes available.
///
/// The algorithm is deterministic but should not be considered reproducible
/// due to dependence on configuration and possible replacement in future
/// library versions. For a secure reproducible generator, we recommend use of
/// the [rand_chacha] crate directly.
///
/// [rand_chacha]: https://crates.io/crates/rand_chacha
/// [rand issue]: https://github.com/rust-random/rand/issues/932
#[cfg_attr(doc_cfg, doc(cfg(feature = "std_rng")))]
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct StdRng(Rng);
impl RngCore for StdRng {
#[inline(always)]
fn next_u32(&mut self) -> u32 {
self.0.next_u32()
}
#[inline(always)]
fn next_u64(&mut self) -> u64 {
self.0.next_u64()
}
#[inline(always)]
fn fill_bytes(&mut self, dest: &mut [u8]) {
self.0.fill_bytes(dest);
}
#[inline(always)]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.0.try_fill_bytes(dest)
}
}
impl SeedableRng for StdRng {
type Seed = <Rng as SeedableRng>::Seed;
#[inline(always)]
fn from_seed(seed: Self::Seed) -> Self {
StdRng(Rng::from_seed(seed))
}
#[inline(always)]
fn from_rng<R: RngCore>(rng: R) -> Result<Self, Error> {
Rng::from_rng(rng).map(StdRng)
}
}
impl CryptoRng for StdRng {}
#[cfg(test)]
mod test {
use crate::rngs::StdRng;
use crate::{RngCore, SeedableRng};
#[test]
fn test_stdrng_construction() {
// Test value-stability of StdRng. This is expected to break any time
// the algorithm is changed.
#[rustfmt::skip]
let seed = [1,0,0,0, 23,0,0,0, 200,1,0,0, 210,30,0,0,
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0];
let target = [10719222850664546238, 14064965282130556830];
let mut rng0 = StdRng::from_seed(seed);
let x0 = rng0.next_u64();
let mut rng1 = StdRng::from_rng(rng0).unwrap();
let x1 = rng1.next_u64();
assert_eq!([x0, x1], target);
}
}
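The documentation above suggests `rand_chacha` directly when reproducibility matters; a minimal sketch using the same ChaCha12 algorithm `StdRng` currently wraps, but under a stable, named type:

```
use rand_chacha::ChaCha12Rng;
use rand_core::{RngCore, SeedableRng};

fn main() {
    // Two generators with the same seed produce the same stream, and the
    // named type pins the algorithm independently of future StdRng changes.
    let mut a = ChaCha12Rng::seed_from_u64(42);
    let mut b = ChaCha12Rng::seed_from_u64(42);
    assert_eq!(a.next_u64(), b.next_u64());
}
```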

143
vendor/rand/src/rngs/thread.rs vendored Normal file
View File

@@ -0,0 +1,143 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Thread-local random number generator
use core::cell::UnsafeCell;
use std::rc::Rc;
use std::thread_local;
use super::std::Core;
use crate::rngs::adapter::ReseedingRng;
use crate::rngs::OsRng;
use crate::{CryptoRng, Error, RngCore, SeedableRng};
// Rationale for using `UnsafeCell` in `ThreadRng`:
//
// Previously we used a `RefCell`, with an overhead of ~15%. There will only
// ever be one mutable reference to the interior of the `UnsafeCell`, because
// we only have such a reference inside `next_u32`, `next_u64`, etc. Within a
// single thread (which is the definition of `ThreadRng`), there will only ever
// be one of these methods active at a time.
//
// A possible scenario where there could be multiple mutable references is if
// `ThreadRng` is used inside `next_u32` and co. But the implementation is
// completely under our control. We just have to ensure none of them use
// `ThreadRng` internally, which is nonsensical anyway. We should also never run
// `ThreadRng` in destructors of its implementation, which is also nonsensical.
// Number of generated bytes after which to reseed `ThreadRng`.
// According to benchmarks, reseeding has a noticeable impact with thresholds
// of 32 kB and less. We choose 64 kB to avoid significant overhead.
const THREAD_RNG_RESEED_THRESHOLD: u64 = 1024 * 64;
/// A reference to the thread-local generator
///
/// An instance can be obtained via [`thread_rng`] or via `ThreadRng::default()`.
/// This handle is safe to use everywhere (including thread-local destructors),
/// though it is recommended not to use it inside a fork handler.
/// The handle cannot be passed between threads (it is neither `Send` nor `Sync`).
///
/// `ThreadRng` uses the same PRNG as [`StdRng`] for security and performance
/// and is automatically seeded from [`OsRng`].
///
/// Unlike `StdRng`, `ThreadRng` uses the [`ReseedingRng`] wrapper to reseed
/// the PRNG from fresh entropy every 64 kiB of random data as well as after a
/// fork on Unix (though not quite immediately; see documentation of
/// [`ReseedingRng`]).
/// Note that the reseeding is done as an extra precaution against side-channel
/// attacks and mis-use (e.g. if somehow weak entropy were supplied initially).
/// The PRNG algorithms used are assumed to be secure.
///
/// [`ReseedingRng`]: crate::rngs::adapter::ReseedingRng
/// [`StdRng`]: crate::rngs::StdRng
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng"))))]
#[derive(Clone, Debug)]
pub struct ThreadRng {
// Rc is explicitly !Send and !Sync
rng: Rc<UnsafeCell<ReseedingRng<Core, OsRng>>>,
}
thread_local!(
// We require Rc<..> to avoid premature freeing when thread_rng is used
// within thread-local destructors. See #968.
static THREAD_RNG_KEY: Rc<UnsafeCell<ReseedingRng<Core, OsRng>>> = {
let r = Core::from_rng(OsRng).unwrap_or_else(|err|
panic!("could not initialize thread_rng: {}", err));
let rng = ReseedingRng::new(r,
THREAD_RNG_RESEED_THRESHOLD,
OsRng);
Rc::new(UnsafeCell::new(rng))
}
);
/// Retrieve the lazily-initialized thread-local random number generator,
/// seeded by the system. Intended to be used in method chaining style,
/// e.g. `thread_rng().gen::<i32>()`, or cached locally, e.g.
/// `let mut rng = thread_rng();`. Invoked by the `Default` trait, making
/// `ThreadRng::default()` equivalent.
///
/// For more information see [`ThreadRng`].
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", feature = "std_rng"))))]
pub fn thread_rng() -> ThreadRng {
let rng = THREAD_RNG_KEY.with(|t| t.clone());
ThreadRng { rng }
}
impl Default for ThreadRng {
fn default() -> ThreadRng {
crate::prelude::thread_rng()
}
}
impl RngCore for ThreadRng {
#[inline(always)]
fn next_u32(&mut self) -> u32 {
// SAFETY: We must make sure to stop using `rng` before anyone else
// creates another mutable reference
let rng = unsafe { &mut *self.rng.get() };
rng.next_u32()
}
#[inline(always)]
fn next_u64(&mut self) -> u64 {
// SAFETY: We must make sure to stop using `rng` before anyone else
// creates another mutable reference
let rng = unsafe { &mut *self.rng.get() };
rng.next_u64()
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
// SAFETY: We must make sure to stop using `rng` before anyone else
// creates another mutable reference
let rng = unsafe { &mut *self.rng.get() };
rng.fill_bytes(dest)
}
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
// SAFETY: We must make sure to stop using `rng` before anyone else
// creates another mutable reference
let rng = unsafe { &mut *self.rng.get() };
rng.try_fill_bytes(dest)
}
}
impl CryptoRng for ThreadRng {}
#[cfg(test)]
mod test {
#[test]
fn test_thread_rng() {
use crate::Rng;
let mut r = crate::thread_rng();
r.gen::<i32>();
assert_eq!(r.gen_range(0..1), 0);
}
}
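As described above, the handle can be used in method-chaining style or cached locally; a short sketch of both (assumes the `std` and `std_rng` features):

```
use rand::Rng;

fn main() {
    // Method-chaining style: fetch the thread-local handle ad hoc.
    let roll: u8 = rand::thread_rng().gen_range(1..=6);

    // Cached style: cheaper when drawing many values in a row.
    let mut rng = rand::thread_rng();
    let total: u32 = (0..10).map(|_| rng.gen_range(0..100u32)).sum();

    println!("roll = {}, total = {}", roll, total);
}
```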

View File

@@ -0,0 +1,118 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
use rand_core::impls::{next_u64_via_u32, fill_bytes_via_next};
use rand_core::le::read_u32_into;
use rand_core::{SeedableRng, RngCore, Error};
/// A xoshiro128++ random number generator.
///
/// The xoshiro128++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro128plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro128plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct Xoshiro128PlusPlus {
s: [u32; 4],
}
impl SeedableRng for Xoshiro128PlusPlus {
type Seed = [u8; 16];
/// Create a new `Xoshiro128PlusPlus`. If `seed` is entirely 0, it will be
/// mapped to a different seed.
#[inline]
fn from_seed(seed: [u8; 16]) -> Xoshiro128PlusPlus {
if seed.iter().all(|&x| x == 0) {
return Self::seed_from_u64(0);
}
let mut state = [0; 4];
read_u32_into(&seed, &mut state);
Xoshiro128PlusPlus { s: state }
}
/// Create a new `Xoshiro128PlusPlus` from a `u64` seed.
///
/// This uses the SplitMix64 generator internally.
fn seed_from_u64(mut state: u64) -> Self {
const PHI: u64 = 0x9e3779b97f4a7c15;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(8) {
state = state.wrapping_add(PHI);
let mut z = state;
z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
z = z ^ (z >> 31);
chunk.copy_from_slice(&z.to_le_bytes());
}
Self::from_seed(seed)
}
}
impl RngCore for Xoshiro128PlusPlus {
#[inline]
fn next_u32(&mut self) -> u32 {
let result_starstar = self.s[0]
.wrapping_add(self.s[3])
.rotate_left(7)
.wrapping_add(self.s[0]);
let t = self.s[1] << 9;
self.s[2] ^= self.s[0];
self.s[3] ^= self.s[1];
self.s[1] ^= self.s[2];
self.s[0] ^= self.s[3];
self.s[2] ^= t;
self.s[3] = self.s[3].rotate_left(11);
result_starstar
}
#[inline]
fn next_u64(&mut self) -> u64 {
next_u64_via_u32(self)
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
fill_bytes_via_next(self, dest);
}
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reference() {
let mut rng = Xoshiro128PlusPlus::from_seed(
[1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
// These values were produced with the reference implementation:
// http://xoshiro.di.unimi.it/xoshiro128plusplus.c
let expected = [
641, 1573767, 3222811527, 3517856514, 836907274, 4247214768,
3867114732, 1355841295, 495546011, 621204420,
];
for &e in &expected {
assert_eq!(rng.next_u32(), e);
}
}
}

122
vendor/rand/src/rngs/xoshiro256plusplus.rs vendored Normal file
View File

@@ -0,0 +1,122 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature="serde1")] use serde::{Serialize, Deserialize};
use rand_core::impls::fill_bytes_via_next;
use rand_core::le::read_u64_into;
use rand_core::{SeedableRng, RngCore, Error};
/// A xoshiro256++ random number generator.
///
/// The xoshiro256++ algorithm is not suitable for cryptographic purposes, but
/// is very fast and has excellent statistical properties.
///
/// The algorithm used here is translated from [the `xoshiro256plusplus.c`
/// reference source code](http://xoshiro.di.unimi.it/xoshiro256plusplus.c) by
/// David Blackman and Sebastiano Vigna.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature="serde1", derive(Serialize, Deserialize))]
pub struct Xoshiro256PlusPlus {
s: [u64; 4],
}
impl SeedableRng for Xoshiro256PlusPlus {
type Seed = [u8; 32];
/// Create a new `Xoshiro256PlusPlus`. If `seed` is entirely 0, it will be
/// mapped to a different seed.
#[inline]
fn from_seed(seed: [u8; 32]) -> Xoshiro256PlusPlus {
if seed.iter().all(|&x| x == 0) {
return Self::seed_from_u64(0);
}
let mut state = [0; 4];
read_u64_into(&seed, &mut state);
Xoshiro256PlusPlus { s: state }
}
/// Create a new `Xoshiro256PlusPlus` from a `u64` seed.
///
/// This uses the SplitMix64 generator internally.
fn seed_from_u64(mut state: u64) -> Self {
const PHI: u64 = 0x9e3779b97f4a7c15;
let mut seed = Self::Seed::default();
for chunk in seed.as_mut().chunks_mut(8) {
state = state.wrapping_add(PHI);
let mut z = state;
z = (z ^ (z >> 30)).wrapping_mul(0xbf58476d1ce4e5b9);
z = (z ^ (z >> 27)).wrapping_mul(0x94d049bb133111eb);
z = z ^ (z >> 31);
chunk.copy_from_slice(&z.to_le_bytes());
}
Self::from_seed(seed)
}
}
impl RngCore for Xoshiro256PlusPlus {
#[inline]
fn next_u32(&mut self) -> u32 {
// The lowest bits have some linear dependencies, so we use the
// upper bits instead.
(self.next_u64() >> 32) as u32
}
#[inline]
fn next_u64(&mut self) -> u64 {
let result_plusplus = self.s[0]
.wrapping_add(self.s[3])
.rotate_left(23)
.wrapping_add(self.s[0]);
let t = self.s[1] << 17;
self.s[2] ^= self.s[0];
self.s[3] ^= self.s[1];
self.s[1] ^= self.s[2];
self.s[0] ^= self.s[3];
self.s[2] ^= t;
self.s[3] = self.s[3].rotate_left(45);
result_plusplus
}
#[inline]
fn fill_bytes(&mut self, dest: &mut [u8]) {
fill_bytes_via_next(self, dest);
}
#[inline]
fn try_fill_bytes(&mut self, dest: &mut [u8]) -> Result<(), Error> {
self.fill_bytes(dest);
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reference() {
let mut rng = Xoshiro256PlusPlus::from_seed(
[1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0,
3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0]);
// These values were produced with the reference implementation:
// http://xoshiro.di.unimi.it/xoshiro256plusplus.c
let expected = [
41943041, 58720359, 3588806011781223, 3591011842654386,
9228616714210784205, 9973669472204895162, 14011001112246962877,
12406186145184390807, 15849039046786891736, 10450023813501588000,
];
for &e in &expected {
assert_eq!(rng.next_u64(), e);
}
}
}

678
vendor/rand/src/seq/index.rs vendored Normal file
View File

@@ -0,0 +1,678 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Low-level API for sampling indices
#[cfg(feature = "alloc")] use core::slice;
#[cfg(feature = "alloc")] use alloc::vec::{self, Vec};
// BTreeMap is not as fast in tests, but better than nothing.
#[cfg(all(feature = "alloc", not(feature = "std")))]
use alloc::collections::BTreeSet;
#[cfg(feature = "std")] use std::collections::HashSet;
#[cfg(feature = "std")]
use crate::distributions::WeightedError;
#[cfg(feature = "alloc")]
use crate::{Rng, distributions::{uniform::SampleUniform, Distribution, Uniform}};
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// A vector of indices.
///
/// Multiple internal representations are possible.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum IndexVec {
#[doc(hidden)]
U32(Vec<u32>),
#[doc(hidden)]
USize(Vec<usize>),
}
impl IndexVec {
/// Returns the number of indices
#[inline]
pub fn len(&self) -> usize {
match *self {
IndexVec::U32(ref v) => v.len(),
IndexVec::USize(ref v) => v.len(),
}
}
/// Returns `true` if the length is 0.
#[inline]
pub fn is_empty(&self) -> bool {
match *self {
IndexVec::U32(ref v) => v.is_empty(),
IndexVec::USize(ref v) => v.is_empty(),
}
}
/// Return the value at the given `index`.
///
/// (Note: we cannot implement [`std::ops::Index`] because of lifetime
/// restrictions.)
#[inline]
pub fn index(&self, index: usize) -> usize {
match *self {
IndexVec::U32(ref v) => v[index] as usize,
IndexVec::USize(ref v) => v[index],
}
}
/// Return result as a `Vec<usize>`. Conversion may or may not be trivial.
#[inline]
pub fn into_vec(self) -> Vec<usize> {
match self {
IndexVec::U32(v) => v.into_iter().map(|i| i as usize).collect(),
IndexVec::USize(v) => v,
}
}
/// Iterate over the indices as a sequence of `usize` values
#[inline]
pub fn iter(&self) -> IndexVecIter<'_> {
match *self {
IndexVec::U32(ref v) => IndexVecIter::U32(v.iter()),
IndexVec::USize(ref v) => IndexVecIter::USize(v.iter()),
}
}
}
impl IntoIterator for IndexVec {
type Item = usize;
type IntoIter = IndexVecIntoIter;
/// Convert into an iterator over the indices as a sequence of `usize` values
#[inline]
fn into_iter(self) -> IndexVecIntoIter {
match self {
IndexVec::U32(v) => IndexVecIntoIter::U32(v.into_iter()),
IndexVec::USize(v) => IndexVecIntoIter::USize(v.into_iter()),
}
}
}
impl PartialEq for IndexVec {
fn eq(&self, other: &IndexVec) -> bool {
use self::IndexVec::*;
match (self, other) {
(&U32(ref v1), &U32(ref v2)) => v1 == v2,
(&USize(ref v1), &USize(ref v2)) => v1 == v2,
(&U32(ref v1), &USize(ref v2)) => {
(v1.len() == v2.len()) && (v1.iter().zip(v2.iter()).all(|(x, y)| *x as usize == *y))
}
(&USize(ref v1), &U32(ref v2)) => {
(v1.len() == v2.len()) && (v1.iter().zip(v2.iter()).all(|(x, y)| *x == *y as usize))
}
}
}
}
impl From<Vec<u32>> for IndexVec {
#[inline]
fn from(v: Vec<u32>) -> Self {
IndexVec::U32(v)
}
}
impl From<Vec<usize>> for IndexVec {
#[inline]
fn from(v: Vec<usize>) -> Self {
IndexVec::USize(v)
}
}
/// Return type of `IndexVec::iter`.
#[derive(Debug)]
pub enum IndexVecIter<'a> {
#[doc(hidden)]
U32(slice::Iter<'a, u32>),
#[doc(hidden)]
USize(slice::Iter<'a, usize>),
}
impl<'a> Iterator for IndexVecIter<'a> {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<usize> {
use self::IndexVecIter::*;
match *self {
U32(ref mut iter) => iter.next().map(|i| *i as usize),
USize(ref mut iter) => iter.next().cloned(),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
match *self {
IndexVecIter::U32(ref v) => v.size_hint(),
IndexVecIter::USize(ref v) => v.size_hint(),
}
}
}
impl<'a> ExactSizeIterator for IndexVecIter<'a> {}
/// Return type of `IndexVec::into_iter`.
#[derive(Clone, Debug)]
pub enum IndexVecIntoIter {
#[doc(hidden)]
U32(vec::IntoIter<u32>),
#[doc(hidden)]
USize(vec::IntoIter<usize>),
}
impl Iterator for IndexVecIntoIter {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
use self::IndexVecIntoIter::*;
match *self {
U32(ref mut v) => v.next().map(|i| i as usize),
USize(ref mut v) => v.next(),
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
use self::IndexVecIntoIter::*;
match *self {
U32(ref v) => v.size_hint(),
USize(ref v) => v.size_hint(),
}
}
}
impl ExactSizeIterator for IndexVecIntoIter {}
/// Randomly sample exactly `amount` distinct indices from `0..length`, and
/// return them in random order (fully shuffled).
///
/// This method is used internally by the slice sampling methods, but it can
/// sometimes be useful to have the indices themselves so this is provided as
/// an alternative.
///
/// The implementation used is not specified; we automatically select the
/// fastest available algorithm for the `length` and `amount` parameters
/// (based on detailed profiling on an Intel Haswell CPU). Roughly speaking,
/// complexity is `O(amount)`, except that when `amount` is small, performance
/// is closer to `O(amount^2)`, and when `length` is close to `amount` then
/// `O(length)`.
///
/// Note that performance is significantly better over `u32` indices than over
/// `u64` indices. Because of this we hide the underlying type behind an
/// abstraction, `IndexVec`.
///
/// If an allocation-free `no_std` function is required, it is suggested
/// to adapt the internal `sample_floyd` implementation.
///
/// Panics if `amount > length`.
pub fn sample<R>(rng: &mut R, length: usize, amount: usize) -> IndexVec
where R: Rng + ?Sized {
if amount > length {
panic!("`amount` of samples must be less than or equal to `length`");
}
if length > (::core::u32::MAX as usize) {
// We never want to use inplace here, but could use floyd's alg
// Lazy version: always use the cache alg.
return sample_rejection(rng, length, amount);
}
let amount = amount as u32;
let length = length as u32;
// Choice of algorithm here depends on both length and amount. See:
// https://github.com/rust-random/rand/pull/479
// We do some calculations with f32. Accuracy is not very important.
if amount < 163 {
const C: [[f32; 2]; 2] = [[1.6, 8.0 / 45.0], [10.0, 70.0 / 9.0]];
let j = if length < 500_000 { 0 } else { 1 };
let amount_fp = amount as f32;
let m4 = C[0][j] * amount_fp;
// Short-cut: when amount < 12, floyd's is always faster
if amount > 11 && (length as f32) < (C[1][j] + m4) * amount_fp {
sample_inplace(rng, length, amount)
} else {
sample_floyd(rng, length, amount)
}
} else {
const C: [f32; 2] = [270.0, 330.0 / 9.0];
let j = if length < 500_000 { 0 } else { 1 };
if (length as f32) < C[j] * (amount as f32) {
sample_inplace(rng, length, amount)
} else {
sample_rejection(rng, length, amount)
}
}
}
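A minimal usage sketch of the public `sample` entry point described above (assumes default crate features for `thread_rng`):

```
use rand::seq::index::sample;

fn main() {
    let mut rng = rand::thread_rng();
    // Five distinct indices from 0..100, returned fully shuffled.
    let chosen = sample(&mut rng, 100, 5);
    assert_eq!(chosen.len(), 5);
    for i in chosen.iter() {
        assert!(i < 100);
    }
}
```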
/// Randomly sample exactly `amount` distinct indices from `0..length`, and
/// return them in an arbitrary order (there is no guarantee of shuffling or
/// ordering). The weights are to be provided by the input function `weight`,
/// which will be called once for each index.
///
/// This method is used internally by the slice sampling methods, but it can
/// sometimes be useful to have the indices themselves so this is provided as
/// an alternative.
///
/// This implementation uses `O(length + amount)` space and `O(length)` time
/// if the "nightly" feature is enabled, or `O(length)` space and
/// `O(length + amount * log length)` time otherwise.
///
/// Panics if `amount > length`.
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn sample_weighted<R, F, X>(
rng: &mut R, length: usize, weight: F, amount: usize,
) -> Result<IndexVec, WeightedError>
where
R: Rng + ?Sized,
F: Fn(usize) -> X,
X: Into<f64>,
{
if length > (core::u32::MAX as usize) {
sample_efraimidis_spirakis(rng, length, weight, amount)
} else {
assert!(amount <= core::u32::MAX as usize);
let amount = amount as u32;
let length = length as u32;
sample_efraimidis_spirakis(rng, length, weight, amount)
}
}
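A short sketch of `sample_weighted`, where the closure assigns a weight to each index (assumes the `std` feature; the weights are illustrative):

```
use rand::seq::index::sample_weighted;

fn main() {
    let mut rng = rand::thread_rng();
    // Weight index i by i + 1: index 9 carries ten times the weight of index 0.
    let chosen = sample_weighted(&mut rng, 10, |i| (i + 1) as f64, 3).unwrap();
    assert_eq!(chosen.len(), 3);
    for i in chosen.into_iter() {
        assert!(i < 10);
    }
}
```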
/// Randomly sample exactly `amount` distinct indices from `0..length`, and
/// return them in an arbitrary order (there is no guarantee of shuffling or
/// ordering). The weights are to be provided by the input function `weight`,
/// which will be called once for each index.
///
/// This implementation uses the algorithm described by Efraimidis and Spirakis
/// in this paper: https://doi.org/10.1016/j.ipl.2005.11.003
/// It uses `O(length + amount)` space and `O(length)` time if the
/// "nightly" feature is enabled, or `O(length)` space and `O(length
/// + amount * log length)` time otherwise.
///
/// Panics if `amount > length`.
#[cfg(feature = "std")]
fn sample_efraimidis_spirakis<R, F, X, N>(
rng: &mut R, length: N, weight: F, amount: N,
) -> Result<IndexVec, WeightedError>
where
R: Rng + ?Sized,
F: Fn(usize) -> X,
X: Into<f64>,
N: UInt,
IndexVec: From<Vec<N>>,
{
if amount == N::zero() {
return Ok(IndexVec::U32(Vec::new()));
}
if amount > length {
panic!("`amount` of samples must be less than or equal to `length`");
}
struct Element<N> {
index: N,
key: f64,
}
impl<N> PartialOrd for Element<N> {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.key.partial_cmp(&other.key)
}
}
impl<N> Ord for Element<N> {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
// partial_cmp will always produce a value,
// because we check that the weights are not nan
self.partial_cmp(other).unwrap()
}
}
impl<N> PartialEq for Element<N> {
fn eq(&self, other: &Self) -> bool {
self.key == other.key
}
}
impl<N> Eq for Element<N> {}
#[cfg(feature = "nightly")]
{
let mut candidates = Vec::with_capacity(length.as_usize());
let mut index = N::zero();
while index < length {
let weight = weight(index.as_usize()).into();
if !(weight >= 0.) {
return Err(WeightedError::InvalidWeight);
}
let key = rng.gen::<f64>().powf(1.0 / weight);
candidates.push(Element { index, key });
index += N::one();
}
// Partially sort the array to find the `amount` elements with the greatest
// keys. Do this by using `select_nth_unstable` to put the elements with
// the *smallest* keys at the beginning of the list in `O(n)` time, which
// provides equivalent information about the elements with the *greatest* keys.
let (_, mid, greater)
= candidates.select_nth_unstable(length.as_usize() - amount.as_usize());
let mut result: Vec<N> = Vec::with_capacity(amount.as_usize());
result.push(mid.index);
for element in greater {
result.push(element.index);
}
Ok(IndexVec::from(result))
}
#[cfg(not(feature = "nightly"))]
{
use alloc::collections::BinaryHeap;
// Partially sort the array such that the `amount` elements with the largest
// keys are first using a binary max heap.
let mut candidates = BinaryHeap::with_capacity(length.as_usize());
let mut index = N::zero();
while index < length {
let weight = weight(index.as_usize()).into();
if !(weight >= 0.) {
return Err(WeightedError::InvalidWeight);
}
let key = rng.gen::<f64>().powf(1.0 / weight);
candidates.push(Element { index, key });
index += N::one();
}
let mut result: Vec<N> = Vec::with_capacity(amount.as_usize());
while result.len() < amount.as_usize() {
result.push(candidates.pop().unwrap().index);
}
Ok(IndexVec::from(result))
}
}
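To picture the Efraimidis-Spirakis key rule implemented above: each index draws `u ~ Uniform(0,1)` and keeps the key `u^(1/w)`, and the `amount` largest keys win, so heavier items tend to sort to the front. A small free-standing illustration (`keys` is a hypothetical helper, not part of this crate):

```
use rand::Rng;

// Compute one key per weight; larger weights push keys toward 1.0.
fn keys(weights: &[f64]) -> Vec<(usize, f64)> {
    let mut rng = rand::thread_rng();
    weights
        .iter()
        .enumerate()
        .map(|(i, &w)| (i, rng.gen::<f64>().powf(1.0 / w)))
        .collect()
}

fn main() {
    let mut ks = keys(&[1.0, 2.0, 8.0]);
    // Sort descending by key; the heaviest index (2) usually leads.
    ks.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
    println!("indices ordered by key: {:?}", ks);
}
```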
/// Randomly sample exactly `amount` indices from `0..length`, using Floyd's
/// combination algorithm.
///
/// The output values are fully shuffled. (Overhead is under 50%.)
///
/// This implementation uses `O(amount)` memory and `O(amount^2)` time.
fn sample_floyd<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec
where R: Rng + ?Sized {
// For small amount we use Floyd's fully-shuffled variant. For larger
// amounts this is slow due to Vec::insert performance, so we shuffle
// afterwards. Benchmarks show little overhead from extra logic.
let floyd_shuffle = amount < 50;
debug_assert!(amount <= length);
let mut indices = Vec::with_capacity(amount as usize);
for j in length - amount..length {
let t = rng.gen_range(0..=j);
if floyd_shuffle {
if let Some(pos) = indices.iter().position(|&x| x == t) {
indices.insert(pos, j);
continue;
}
} else if indices.contains(&t) {
indices.push(j);
continue;
}
indices.push(t);
}
if !floyd_shuffle {
// Reimplement SliceRandom::shuffle with smaller indices
for i in (1..amount).rev() {
// invariant: elements with index > i have been locked in place.
indices.swap(i as usize, rng.gen_range(0..=i) as usize);
}
}
IndexVec::from(indices)
}
/// Randomly sample exactly `amount` indices from `0..length`, using an inplace
/// partial Fisher-Yates method.
///
/// This allocates the entire `length` of indices and randomizes only the first `amount`.
/// It then truncates to `amount` and returns.
///
/// This method is not appropriate for large `length` and potentially uses a lot
/// of memory; because of this we only implement for `u32` index (which improves
/// performance in all cases).
///
/// Set-up is `O(length)` time and memory and shuffling is `O(amount)` time.
fn sample_inplace<R>(rng: &mut R, length: u32, amount: u32) -> IndexVec
where R: Rng + ?Sized {
debug_assert!(amount <= length);
let mut indices: Vec<u32> = Vec::with_capacity(length as usize);
indices.extend(0..length);
for i in 0..amount {
let j: u32 = rng.gen_range(i..length);
indices.swap(i as usize, j as usize);
}
indices.truncate(amount as usize);
debug_assert_eq!(indices.len(), amount as usize);
IndexVec::from(indices)
}
trait UInt: Copy + PartialOrd + Ord + PartialEq + Eq + SampleUniform
+ core::hash::Hash + core::ops::AddAssign {
fn zero() -> Self;
fn one() -> Self;
fn as_usize(self) -> usize;
}
impl UInt for u32 {
#[inline]
fn zero() -> Self {
0
}
#[inline]
fn one() -> Self {
1
}
#[inline]
fn as_usize(self) -> usize {
self as usize
}
}
impl UInt for usize {
#[inline]
fn zero() -> Self {
0
}
#[inline]
fn one() -> Self {
1
}
#[inline]
fn as_usize(self) -> usize {
self
}
}
/// Randomly sample exactly `amount` indices from `0..length`, using rejection
/// sampling.
///
/// Since `amount` is much smaller than `length`, there is a low chance of a random sample in
/// `0..length` being a duplicate. We test for duplicates and resample where
/// necessary. The algorithm is `O(amount)` time and memory.
///
/// This function is generic over X primarily so that results are value-stable
/// over 32-bit and 64-bit platforms.
fn sample_rejection<X: UInt, R>(rng: &mut R, length: X, amount: X) -> IndexVec
where
R: Rng + ?Sized,
IndexVec: From<Vec<X>>,
{
debug_assert!(amount < length);
#[cfg(feature = "std")]
let mut cache = HashSet::with_capacity(amount.as_usize());
#[cfg(not(feature = "std"))]
let mut cache = BTreeSet::new();
let distr = Uniform::new(X::zero(), length);
let mut indices = Vec::with_capacity(amount.as_usize());
for _ in 0..amount.as_usize() {
let mut pos = distr.sample(rng);
while !cache.insert(pos) {
pos = distr.sample(rng);
}
indices.push(pos);
}
debug_assert_eq!(indices.len(), amount.as_usize());
IndexVec::from(indices)
}
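The rejection strategy above can be pictured with a few free-standing lines: draw uniformly and redraw on duplicates, which stays cheap only while `amount` is far smaller than `length` (`rejection_demo` is a hypothetical name, not part of this crate):

```
use rand::Rng;
use std::collections::HashSet;

fn rejection_demo(length: u32, amount: usize) -> Vec<u32> {
    let mut rng = rand::thread_rng();
    let mut seen = HashSet::with_capacity(amount);
    let mut out = Vec::with_capacity(amount);
    while out.len() < amount {
        // Redraw until we hit a value we have not produced before.
        let x = rng.gen_range(0..length);
        if seen.insert(x) {
            out.push(x);
        }
    }
    out
}

fn main() {
    let v = rejection_demo(1_000_000, 8);
    assert_eq!(v.len(), 8);
}
```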
#[cfg(test)]
mod test {
use super::*;
#[test]
#[cfg(feature = "serde1")]
fn test_serialization_index_vec() {
let some_index_vec = IndexVec::from(vec![254_usize, 234, 2, 1]);
let de_some_index_vec: IndexVec = bincode::deserialize(&bincode::serialize(&some_index_vec).unwrap()).unwrap();
match (some_index_vec, de_some_index_vec) {
(IndexVec::U32(a), IndexVec::U32(b)) => {
assert_eq!(a, b);
},
(IndexVec::USize(a), IndexVec::USize(b)) => {
assert_eq!(a, b);
},
_ => {panic!("failed to seralize/deserialize `IndexVec`")}
}
}
#[cfg(feature = "alloc")] use alloc::vec;
#[test]
fn test_sample_boundaries() {
let mut r = crate::test::rng(404);
assert_eq!(sample_inplace(&mut r, 0, 0).len(), 0);
assert_eq!(sample_inplace(&mut r, 1, 0).len(), 0);
assert_eq!(sample_inplace(&mut r, 1, 1).into_vec(), vec![0]);
assert_eq!(sample_rejection(&mut r, 1u32, 0).len(), 0);
assert_eq!(sample_floyd(&mut r, 0, 0).len(), 0);
assert_eq!(sample_floyd(&mut r, 1, 0).len(), 0);
assert_eq!(sample_floyd(&mut r, 1, 1).into_vec(), vec![0]);
// These algorithms should be fast with big numbers. Test average.
let sum: usize = sample_rejection(&mut r, 1 << 25, 10u32).into_iter().sum();
assert!(1 << 25 < sum && sum < (1 << 25) * 25);
let sum: usize = sample_floyd(&mut r, 1 << 25, 10).into_iter().sum();
assert!(1 << 25 < sum && sum < (1 << 25) * 25);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_sample_alg() {
let seed_rng = crate::test::rng;
// We can't test which algorithm is used directly, but Floyd's alg
// should produce different results from the others. (Also, `inplace`
// and `cached` currently use different sizes thus produce different results.)
// A small length and relatively large amount should use inplace
let (length, amount): (usize, usize) = (100, 50);
let v1 = sample(&mut seed_rng(420), length, amount);
let v2 = sample_inplace(&mut seed_rng(420), length as u32, amount as u32);
assert!(v1.iter().all(|e| e < length));
assert_eq!(v1, v2);
// Test Floyd's alg does produce different results
let v3 = sample_floyd(&mut seed_rng(420), length as u32, amount as u32);
assert!(v1 != v3);
// A large length and small amount should use Floyd
let (length, amount): (usize, usize) = (1 << 20, 50);
let v1 = sample(&mut seed_rng(421), length, amount);
let v2 = sample_floyd(&mut seed_rng(421), length as u32, amount as u32);
assert!(v1.iter().all(|e| e < length));
assert_eq!(v1, v2);
// A large length and larger amount should use cache
let (length, amount): (usize, usize) = (1 << 20, 600);
let v1 = sample(&mut seed_rng(422), length, amount);
let v2 = sample_rejection(&mut seed_rng(422), length as u32, amount as u32);
assert!(v1.iter().all(|e| e < length));
assert_eq!(v1, v2);
}
#[cfg(feature = "std")]
#[test]
fn test_sample_weighted() {
let seed_rng = crate::test::rng;
for &(amount, len) in &[(0, 10), (5, 10), (10, 10)] {
let v = sample_weighted(&mut seed_rng(423), len, |i| i as f64, amount).unwrap();
match v {
IndexVec::U32(mut indices) => {
assert_eq!(indices.len(), amount);
indices.sort_unstable();
indices.dedup();
assert_eq!(indices.len(), amount);
for &i in &indices {
assert!((i as usize) < len);
}
},
IndexVec::USize(_) => panic!("expected `IndexVec::U32`"),
}
}
}
#[test]
fn value_stability_sample() {
let do_test = |length, amount, values: &[u32]| {
let mut buf = [0u32; 8];
let mut rng = crate::test::rng(410);
let res = sample(&mut rng, length, amount);
let len = res.len().min(buf.len());
for (x, y) in res.into_iter().zip(buf.iter_mut()) {
*y = x as u32;
}
assert_eq!(
&buf[0..len],
values,
"failed sampling {}, {}",
length,
amount
);
};
do_test(10, 6, &[8, 0, 3, 5, 9, 6]); // floyd
do_test(25, 10, &[18, 15, 14, 9, 0, 13, 5, 24]); // floyd
do_test(300, 8, &[30, 283, 150, 1, 73, 13, 285, 35]); // floyd
do_test(300, 80, &[31, 289, 248, 154, 5, 78, 19, 286]); // inplace
do_test(300, 180, &[31, 289, 248, 154, 5, 78, 19, 286]); // inplace
do_test(1_000_000, 8, &[
103717, 963485, 826422, 509101, 736394, 807035, 5327, 632573,
]); // floyd
do_test(1_000_000, 180, &[
103718, 963490, 826426, 509103, 736396, 807036, 5327, 632573,
]); // rejection
}
}

1356
vendor/rand/src/seq/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff