Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/rand_distr/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"449c9500edb77dc904f152dbf30a6bb65eb5b7c951f002957b3343d42975abe1","COPYRIGHT":"90eb64f0279b0d9432accfa6023ff803bc4965212383697eee27a0f426d5f8d5","Cargo.toml":"de7ba89ed55ad8ff0256372e909588449ce8bb1f131b615c3e978792ead79e1b","LICENSE-APACHE":"6df43f6f4b5d4587f3d8d71e45532c688fd168afa5fe89d571cb32fa09c4ef51","LICENSE-MIT":"a771e4354f6b3ad4c92da1a5c9a239b6c291527db869632ecea4f20e24ca1135","README.md":"22e28422c783a108e694e94da8f7ec59b79d8f407f4b151474eca0d0f70fafaa","src/binomial.rs":"76ef5862f66b6cbffc9054016bfabc7f15365e64378d896bb209cb9bff548c5e","src/cauchy.rs":"977b7a760b019c231e1a5e3121c39fab7cb4fa9e26492bc73f237b9f34d5c4b6","src/dirichlet.rs":"e3758f261cf6cf409baba14c73c72c2ed2c33ada771c1f14716396bf8ef5e67c","src/exponential.rs":"ac25c657caa53c4f59ee01e5cca48d7eb903c4e1d2b0b94cfad083f82fe14855","src/frechet.rs":"560101581d1d041d5bbf07fe51042f71af9dfdc008517d2c5e2fae83a9d0e717","src/gamma.rs":"576eec0d413e9e0048c11f8a15f9bdbbcc1076e7d4c54d8bcfd24fa592af0318","src/geometric.rs":"145dc2763212478fe688d58251cfa2e9e31e49bc6b8060847f2a509fe1f4d555","src/gumbel.rs":"f414b8d51c8e6f95ef40f1e2a32c0ed922bb51fb313d3c1dd303f315402a4db4","src/hypergeometric.rs":"550dc4a320d6327175254286b51319d3e5efd79ced1edb6a4edfcf5aa4987aa3","src/inverse_gaussian.rs":"572b01570541ddfdc8157490b27b254d0e46d74cc7e00445188358223504d16a","src/lib.rs":"66a585e42a90706d499ec012cf17828cf842a20522b0e00ff2e972b54a13c618","src/normal.rs":"9142e348b56be185d7fd8f004f4e0622025ea1a4feb2c14ba10fbc4b5287a9c6","src/normal_inverse_gaussian.rs":"8a6db6a01beb2e2b2727a313a747cfad3bf08fc956ed876feb0fe914f444e7d1","src/pareto.rs":"b99d0e971824d83ee1c007891dc08463a8878df2a93b76912dae0a9d9502aaaf","src/pert.rs":"59092f32d9fa608d27b93bf9909988477f3e1dcc32f7191d2e68582fc920df89","src/poisson.rs":"1136785aa0b582437ce7e9fdf0f459d502747322367b205a0343440cf1150a38","src/skew_normal.rs":"398d79b91af8ab0d24bd37566eebb1dd9c81bfe2b9deaf300fa45f8d58699720","src/triangular.rs":"01b95aea54ce2e77b5e51f716693286e088e84dc21b685174029682fb4953833","src/unit_ball.rs":"4370f3fad05ce188c94968b67d5a770bec1d3f5a405beb873ad17f4304a1c865","src/unit_circle.rs":"65fa18a5c3c146417d257de8c8a9ae873f6a44d54c8a68a347cdf1684874850d","src/unit_disc.rs":"694fa41ed2f08b1cb0d607bf79411aa25556a3912d7e234424cc892707704246","src/unit_sphere.rs":"cb8ec3841d67f0a7fc3b0294875c0b3dfbc8d6a12c3577ae985e28ad8784e3a2","src/utils.rs":"c6fd8171ff95e4f85c359d0616edcd2bdde20b7f3132a23e3751145a4c573654","src/weibull.rs":"07b56354a5b1013a1b0b14ed64bf74fc7e2eef65e22086c111f13cb511fcee54","src/weighted_alias.rs":"da62d2d10955e2c46806742c0ff5d2a74a91acdef6fffa984c5b5c011083f39d","src/ziggurat_tables.rs":"2994bb821a60681e0d7a2bb45fcdcbea1040aa775c9aab2c80a7161d753d1ad0","src/zipf.rs":"42593a0979230ec380f1d05fe9855db1d078deadc48e8851edb45b275fd98e5a"},"package":"32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"}

65
vendor/rand_distr/CHANGELOG.md vendored Normal file

@@ -0,0 +1,65 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.4.3] - 2021-12-30
- Fix `no_std` build (#1208)
## [0.4.2] - 2021-09-18
- New `Zeta` and `Zipf` distributions (#1136)
- New `SkewNormal` distribution (#1149)
- New `Gumbel` and `Frechet` distributions (#1168, #1171)
## [0.4.1] - 2021-06-15
- Empirically test PDF of normal distribution (#1121)
- Correctly document `no_std` support (#1100)
- Add `std_math` feature to prefer `std` over `libm` for floating point math (#1100)
- Add mean and std_dev accessors to Normal (#1114)
- Make sure all distributions and their error types implement `Error`, `Display`, `Clone`,
`Copy`, `PartialEq` and `Eq` as appropriate (#1126)
- Port benchmarks to use Criterion crate (#1116)
- Support serde for distributions (#1141)
## [0.4.0] - 2020-12-18
- Bump `rand` to v0.8.0
- New `Geometric`, `StandardGeometric` and `Hypergeometric` distributions (#1062)
- New `Beta` sampling algorithm for improved performance and accuracy (#1000)
- `Normal` and `LogNormal` now support `from_mean_cv` and `from_zscore` (#1044)
- Variants of `NormalError` changed (#1044)
## [0.3.0] - 2020-08-25
- Move alias method for `WeightedIndex` from `rand` (#945)
- Rename `WeightedIndex` to `WeightedAliasIndex` (#1008)
- Replace custom `Float` trait with `num-traits::Float` (#987)
- Enable `no_std` support via `num-traits` math functions (#987)
- Remove `Distribution<u64>` impl for `Poisson` (#987)
- Tweak `Dirichlet` and `alias_method` to use boxed slice instead of `Vec` (#987)
- Use whitelist for package contents, reducing size by 5kb (#983)
- Add case `lambda = 0` in the parametrization of `Exp` (#972)
- Implement inverse Gaussian distribution (#954)
- Reformatting and use of `rustfmt::skip` (#926)
- All error types now implement `std::error::Error` (#919)
- Re-exported `rand::distributions::BernoulliError` (#919)
- Add value stability tests for distributions (#891)
## [0.2.2] - 2019-09-10
- Fix version requirement on rand lib (#847)
- Clippy fixes & suppression (#840)
## [0.2.1] - 2019-06-29
- Update dependency to support Rand 0.7
- Doc link fixes
## [0.2.0] - 2019-06-06
- Remove `new` constructors for zero-sized types
- Add Pert distribution
- Fix undefined behavior in `Poisson`
- Make all distributions return `Result`s instead of panicking
- Implement `f32` support for most distributions
- Rename `UnitSphereSurface` to `UnitSphere`
- Implement `UnitBall` and `UnitDisc`
## [0.1.0] - 2019-06-06
Initial release. This is equivalent to the code in `rand` 0.6.5.

12
vendor/rand_distr/COPYRIGHT vendored Normal file

@@ -0,0 +1,12 @@
Copyrights in the Rand project are retained by their contributors. No
copyright assignment is required to contribute to the Rand project.
For full authorship information, see the version control history.
Except as otherwise noted (below and/or in individual files), Rand is
licensed under the Apache License, Version 2.0 <LICENSE-APACHE> or
<http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
<LICENSE-MIT> or <http://opensource.org/licenses/MIT>, at your option.
The Rand project includes code from the Rust project
published under these same licenses.

59
vendor/rand_distr/Cargo.toml vendored Normal file

@@ -0,0 +1,59 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "rand_distr"
version = "0.4.3"
authors = ["The Rand Project Developers"]
include = ["src/", "LICENSE-*", "README.md", "CHANGELOG.md", "COPYRIGHT"]
description = "Sampling from random number distributions\n"
homepage = "https://rust-random.github.io/book"
documentation = "https://docs.rs/rand_distr"
readme = "README.md"
keywords = ["random", "rng", "distribution", "probability"]
categories = ["algorithms", "no-std"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-random/rand"
[dependencies.num-traits]
version = "0.2"
features = ["libm"]
default-features = false
[dependencies.rand]
version = "0.8.0"
default-features = false
[dependencies.serde]
version = "1.0.103"
features = ["derive"]
optional = true
[dev-dependencies.average]
version = "0.13"
features = ["std"]
[dev-dependencies.rand]
version = "0.8.0"
features = ["std_rng", "std", "small_rng"]
default-features = false
[dev-dependencies.rand_pcg]
version = "0.3.0"
[dev-dependencies.special]
version = "0.8.1"
[features]
alloc = ["rand/alloc"]
default = ["std"]
serde1 = ["serde", "rand/serde1"]
std = ["alloc", "rand/std"]
std_math = ["num-traits/std"]

187
vendor/rand_distr/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,187 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

25
vendor/rand_distr/LICENSE-MIT vendored Normal file

@@ -0,0 +1,25 @@
Copyright 2018 Developers of the Rand project
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

57
vendor/rand_distr/README.md vendored Normal file

@@ -0,0 +1,57 @@
# rand_distr
[![Test Status](https://github.com/rust-random/rand/workflows/Tests/badge.svg?event=push)](https://github.com/rust-random/rand/actions)
[![Latest version](https://img.shields.io/crates/v/rand_distr.svg)](https://crates.io/crates/rand_distr)
[![Book](https://img.shields.io/badge/book-master-yellow.svg)](https://rust-random.github.io/book/)
[![API](https://img.shields.io/badge/api-master-yellow.svg)](https://rust-random.github.io/rand/rand_distr)
[![API](https://docs.rs/rand_distr/badge.svg)](https://docs.rs/rand_distr)
[![Minimum rustc version](https://img.shields.io/badge/rustc-1.36+-lightgray.svg)](https://github.com/rust-random/rand#rust-version-requirements)
Implements a full suite of random number distribution sampling routines.
This crate is a superset of the [rand::distributions] module, including support
for sampling from Beta, Binomial, Cauchy, ChiSquared, Dirichlet, Exponential,
FisherF, Gamma, Geometric, Hypergeometric, InverseGaussian, LogNormal, Normal,
Pareto, PERT, Poisson, StudentT, Triangular and Weibull distributions. Sampling
from the unit ball, unit circle, unit disc and unit sphere surfaces is also
supported.
It is worth mentioning the [statrs] crate which provides similar functionality
along with various support functions, including PDF and CDF computation. In
contrast, this `rand_distr` crate focuses on sampling from distributions.
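A minimal sampling sketch (assuming `rand` 0.8 with its default features):
```rust
use rand::thread_rng;
use rand_distr::{Distribution, Normal, Poisson};

fn main() {
    let mut rng = thread_rng();

    // Normal with mean 2.0 and standard deviation 3.0.
    let normal = Normal::new(2.0, 3.0).unwrap();
    let x: f64 = normal.sample(&mut rng);
    println!("{} is from a Normal(2, 3) distribution", x);

    // Poisson with rate 4.0; samples are returned as floating-point values.
    let poisson = Poisson::new(4.0).unwrap();
    let k: f64 = poisson.sample(&mut rng);
    println!("{} is from a Poisson(4) distribution", k);
}
```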
## Portability and libm
The floating point functions from `num_traits` and `libm` are used to support
`no_std` environments and ensure reproducibility. If the floating point
functions from `std` are preferred, which may provide better accuracy and
performance but may produce different random values, the `std_math` feature
can be enabled.
## Crate features
- `std` (enabled by default): `rand_distr` implements the `Error` trait for
its error types. Implies `alloc` and `rand/std`.
- `alloc` (enabled by default): required for some distributions when not using
`std` (in particular, `Dirichlet` and `WeightedAliasIndex`).
- `std_math`: see above on portability and libm
- `serde1`: implement (de)serialization using `serde`
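A downstream crate selects these features in its own `Cargo.toml`. As a sketch of `serde1` in use (assuming the feature is enabled and `serde_json` is available as an extra dependency, which `rand_distr` itself does not provide):
```rust
use rand_distr::Normal;

fn main() {
    // With `serde1`, distributions derive Serialize/Deserialize, so their
    // parameters can round-trip through any serde format.
    let normal = Normal::new(0.0, 1.0).unwrap();
    let json = serde_json::to_string(&normal).unwrap();
    let restored: Normal<f64> = serde_json::from_str(&json).unwrap();
    println!("restored distribution has mean {}", restored.mean());
}
```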
## Links
- [API documentation (master)](https://rust-random.github.io/rand/rand_distr)
- [API documentation (docs.rs)](https://docs.rs/rand_distr)
- [Changelog](CHANGELOG.md)
- [The Rand project](https://github.com/rust-random/rand)
[statrs]: https://github.com/boxtown/statrs
[rand::distributions]: https://rust-random.github.io/rand/rand/distributions/index.html
## License
`rand_distr` is distributed under the terms of both the MIT license and the
Apache License (Version 2.0).
See [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT), and
[COPYRIGHT](COPYRIGHT) for details.

350
vendor/rand_distr/src/binomial.rs vendored Normal file

@@ -0,0 +1,350 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The binomial distribution.
use crate::{Distribution, Uniform};
use rand::Rng;
use core::fmt;
use core::cmp::Ordering;
#[allow(unused_imports)]
use num_traits::Float;
/// The binomial distribution `Binomial(n, p)`.
///
/// This distribution has density function:
/// `f(k) = n!/(k! (n-k)!) p^k (1-p)^(n-k)` for `k >= 0`.
///
/// # Example
///
/// ```
/// use rand_distr::{Binomial, Distribution};
///
/// let bin = Binomial::new(20, 0.3).unwrap();
/// let v = bin.sample(&mut rand::thread_rng());
/// println!("{} is from a binomial distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Binomial {
/// Number of trials.
n: u64,
/// Probability of success.
p: f64,
}
/// Error type returned from `Binomial::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `p < 0` or `nan`.
ProbabilityTooSmall,
/// `p > 1`.
ProbabilityTooLarge,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ProbabilityTooSmall => "p < 0 or is NaN in binomial distribution",
Error::ProbabilityTooLarge => "p > 1 in binomial distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl Binomial {
/// Construct a new `Binomial` with the given shape parameters `n` (number
/// of trials) and `p` (probability of success).
pub fn new(n: u64, p: f64) -> Result<Binomial, Error> {
if !(p >= 0.0) {
return Err(Error::ProbabilityTooSmall);
}
if !(p <= 1.0) {
return Err(Error::ProbabilityTooLarge);
}
Ok(Binomial { n, p })
}
}
/// Convert a `f64` to an `i64`, panicking on overflow.
fn f64_to_i64(x: f64) -> i64 {
assert!(x < (core::i64::MAX as f64));
x as i64
}
impl Distribution<u64> for Binomial {
#[allow(clippy::many_single_char_names)] // Same names as in the reference.
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
// Handle these values directly.
if self.p == 0.0 {
return 0;
} else if self.p == 1.0 {
return self.n;
}
// The binomial distribution is symmetrical with respect to p -> 1-p and
// k -> n-k; switch p so that it is at most 0.5, which allows for lower
// expected values. We invert the result at the end.
let p = if self.p <= 0.5 { self.p } else { 1.0 - self.p };
let result;
let q = 1. - p;
// For small n * min(p, 1 - p), the BINV algorithm based on the inverse
// transformation of the binomial distribution is efficient. Otherwise,
// the BTPE algorithm is used.
//
// Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1988. Binomial
// random variate generation. Commun. ACM 31, 2 (February 1988),
// 216-222. http://dx.doi.org/10.1145/42372.42381
// Threshold for preferring the BINV algorithm. The paper suggests 10,
// Ranlib uses 30, and GSL uses 14.
const BINV_THRESHOLD: f64 = 10.;
if (self.n as f64) * p < BINV_THRESHOLD && self.n <= (core::i32::MAX as u64) {
// Use the BINV algorithm.
let s = p / q;
let a = ((self.n + 1) as f64) * s;
let mut r = q.powi(self.n as i32);
let mut u: f64 = rng.gen();
let mut x = 0;
while u > r as f64 {
u -= r;
x += 1;
r *= a / (x as f64) - s;
}
result = x;
} else {
// Use the BTPE algorithm.
// Threshold for using the squeeze algorithm. This can be freely
// chosen based on performance. Ranlib and GSL use 20.
const SQUEEZE_THRESHOLD: i64 = 20;
// Step 0: Calculate constants as functions of `n` and `p`.
let n = self.n as f64;
let np = n * p;
let npq = np * q;
let f_m = np + p;
let m = f64_to_i64(f_m);
// radius of triangle region, since height=1 also area of region
let p1 = (2.195 * npq.sqrt() - 4.6 * q).floor() + 0.5;
// tip of triangle
let x_m = (m as f64) + 0.5;
// left edge of triangle
let x_l = x_m - p1;
// right edge of triangle
let x_r = x_m + p1;
let c = 0.134 + 20.5 / (15.3 + (m as f64));
// p1 + area of parallelogram region
let p2 = p1 * (1. + 2. * c);
fn lambda(a: f64) -> f64 {
a * (1. + 0.5 * a)
}
let lambda_l = lambda((f_m - x_l) / (f_m - x_l * p));
let lambda_r = lambda((x_r - f_m) / (x_r * q));
// p1 + area of left tail
let p3 = p2 + c / lambda_l;
// p1 + area of right tail
let p4 = p3 + c / lambda_r;
// return value
let mut y: i64;
let gen_u = Uniform::new(0., p4);
let gen_v = Uniform::new(0., 1.);
loop {
// Step 1: Generate `u` for selecting the region. If region 1 is
// selected, generate a triangularly distributed variate.
let u = gen_u.sample(rng);
let mut v = gen_v.sample(rng);
if !(u > p1) {
y = f64_to_i64(x_m - p1 * v + u);
break;
}
if !(u > p2) {
// Step 2: Region 2, parallelograms. Check if region 2 is
// used. If so, generate `y`.
let x = x_l + (u - p1) / c;
v = v * c + 1.0 - (x - x_m).abs() / p1;
if v > 1. {
continue;
} else {
y = f64_to_i64(x);
}
} else if !(u > p3) {
// Step 3: Region 3, left exponential tail.
y = f64_to_i64(x_l + v.ln() / lambda_l);
if y < 0 {
continue;
} else {
v *= (u - p2) * lambda_l;
}
} else {
// Step 4: Region 4, right exponential tail.
y = f64_to_i64(x_r - v.ln() / lambda_r);
if y > 0 && (y as u64) > self.n {
continue;
} else {
v *= (u - p3) * lambda_r;
}
}
// Step 5: Acceptance/rejection comparison.
// Step 5.0: Test for appropriate method of evaluating f(y).
let k = (y - m).abs();
if !(k > SQUEEZE_THRESHOLD && (k as f64) < 0.5 * npq - 1.) {
// Step 5.1: Evaluate f(y) via the recursive relationship. Start the
// search from the mode.
let s = p / q;
let a = s * (n + 1.);
let mut f = 1.0;
match m.cmp(&y) {
Ordering::Less => {
let mut i = m;
loop {
i += 1;
f *= a / (i as f64) - s;
if i == y {
break;
}
}
},
Ordering::Greater => {
let mut i = y;
loop {
i += 1;
f /= a / (i as f64) - s;
if i == m {
break;
}
}
},
Ordering::Equal => {},
}
if v > f {
continue;
} else {
break;
}
}
// Step 5.2: Squeezing. Check the value of ln(v) against upper and
// lower bound of ln(f(y)).
let k = k as f64;
let rho = (k / npq) * ((k * (k / 3. + 0.625) + 1. / 6.) / npq + 0.5);
let t = -0.5 * k * k / npq;
let alpha = v.ln();
if alpha < t - rho {
break;
}
if alpha > t + rho {
continue;
}
// Step 5.3: Final acceptance/rejection test.
let x1 = (y + 1) as f64;
let f1 = (m + 1) as f64;
let z = (f64_to_i64(n) + 1 - m) as f64;
let w = (f64_to_i64(n) - y + 1) as f64;
fn stirling(a: f64) -> f64 {
let a2 = a * a;
(13860. - (462. - (132. - (99. - 140. / a2) / a2) / a2) / a2) / a / 166320.
}
if alpha
> x_m * (f1 / x1).ln()
+ (n - (m as f64) + 0.5) * (z / w).ln()
+ ((y - m) as f64) * (w * p / (x1 * q)).ln()
// We use the signs from the GSL implementation, which are
// different than the ones in the reference. According to
// the GSL authors, the new signs were verified to be
// correct by one of the original designers of the
// algorithm.
+ stirling(f1)
+ stirling(z)
- stirling(x1)
- stirling(w)
{
continue;
}
break;
}
assert!(y >= 0);
result = y as u64;
}
// If p was replaced by 1 - p above (i.e. the original p > 0.5), invert the result.
if p != self.p {
self.n - result
} else {
result
}
}
}
#[cfg(test)]
mod test {
use super::Binomial;
use crate::Distribution;
use rand::Rng;
fn test_binomial_mean_and_variance<R: Rng>(n: u64, p: f64, rng: &mut R) {
let binomial = Binomial::new(n, p).unwrap();
let expected_mean = n as f64 * p;
let expected_variance = n as f64 * p * (1.0 - p);
let mut results = [0.0; 1000];
for i in results.iter_mut() {
*i = binomial.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!((variance - expected_variance).abs() < expected_variance / 10.0);
}
#[test]
fn test_binomial() {
let mut rng = crate::test::rng(351);
test_binomial_mean_and_variance(150, 0.1, &mut rng);
test_binomial_mean_and_variance(70, 0.6, &mut rng);
test_binomial_mean_and_variance(40, 0.5, &mut rng);
test_binomial_mean_and_variance(20, 0.7, &mut rng);
test_binomial_mean_and_variance(20, 0.5, &mut rng);
}
#[test]
fn test_binomial_end_points() {
let mut rng = crate::test::rng(352);
assert_eq!(rng.sample(Binomial::new(20, 0.0).unwrap()), 0);
assert_eq!(rng.sample(Binomial::new(20, 1.0).unwrap()), 20);
}
#[test]
#[should_panic]
fn test_binomial_invalid_lambda_neg() {
Binomial::new(20, -10.0).unwrap();
}
}

167
vendor/rand_distr/src/cauchy.rs vendored Normal file

@@ -0,0 +1,167 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Cauchy distribution.
use num_traits::{Float, FloatConst};
use crate::{Distribution, Standard};
use rand::Rng;
use core::fmt;
/// The Cauchy distribution `Cauchy(median, scale)`.
///
/// This distribution has a density function:
/// `f(x) = 1 / (pi * scale * (1 + ((x - median) / scale)^2))`
///
/// Note that at least for `f32`, results are not fully portable due to minor
/// differences in the target system's *tan* implementation, `tanf`.
///
/// # Example
///
/// ```
/// use rand_distr::{Cauchy, Distribution};
///
/// let cau = Cauchy::new(2.0, 5.0).unwrap();
/// let v = cau.sample(&mut rand::thread_rng());
/// println!("{} is from a Cauchy(2, 5) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
median: F,
scale: F,
}
/// Error type returned from `Cauchy::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `scale <= 0` or `nan`.
ScaleTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => "scale is not positive in Cauchy distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
/// Construct a new `Cauchy` with the given shape parameters
/// `median` the peak location and `scale` the scale factor.
pub fn new(median: F, scale: F) -> Result<Cauchy<F>, Error> {
if !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
Ok(Cauchy { median, scale })
}
}
impl<F> Distribution<F> for Cauchy<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
// sample from [0, 1)
let x = Standard.sample(rng);
// get standard cauchy random number
// note that π/2 is not exactly representable, even if x=0.5 the result is finite
let comp_dev = (F::PI() * x).tan();
// shift and scale according to parameters
self.median + self.scale * comp_dev
}
}
#[cfg(test)]
mod test {
use super::*;
fn median(numbers: &mut [f64]) -> f64 {
sort(numbers);
let mid = numbers.len() / 2;
numbers[mid]
}
fn sort(numbers: &mut [f64]) {
numbers.sort_by(|a, b| a.partial_cmp(b).unwrap());
}
#[test]
fn test_cauchy_averages() {
// NOTE: given that the variance and mean are undefined,
// this test does not have any rigorous statistical meaning.
let cauchy = Cauchy::new(10.0, 5.0).unwrap();
let mut rng = crate::test::rng(123);
let mut numbers: [f64; 1000] = [0.0; 1000];
let mut sum = 0.0;
for number in &mut numbers[..] {
*number = cauchy.sample(&mut rng);
sum += *number;
}
let median = median(&mut numbers);
#[cfg(feature = "std")]
std::println!("Cauchy median: {}", median);
assert!((median - 10.0).abs() < 0.4); // not 100% certain, but probable enough
let mean = sum / 1000.0;
#[cfg(feature = "std")]
std::println!("Cauchy mean: {}", mean);
// for a Cauchy distribution the mean should not converge
assert!((mean - 10.0).abs() > 0.4); // not 100% certain, but probable enough
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_zero() {
Cauchy::new(0.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn test_cauchy_invalid_scale_neg() {
Cauchy::new(0.0, -10.0).unwrap();
}
#[test]
fn value_stability() {
fn gen_samples<F: Float + FloatConst + core::fmt::Debug>(m: F, s: F, buf: &mut [F])
where Standard: Distribution<F> {
let distr = Cauchy::new(m, s).unwrap();
let mut rng = crate::test::rng(353);
for x in buf {
*x = rng.sample(&distr);
}
}
let mut buf = [0.0; 4];
gen_samples(100f64, 10.0, &mut buf);
assert_eq!(&buf, &[
77.93369152808678,
90.1606912098641,
125.31516221323625,
86.10217834773925
]);
// Unfortunately this test is not fully portable due to reliance on the
// system's implementation of tanf (see doc on Cauchy struct).
let mut buf = [0.0; 4];
gen_samples(10f32, 7.0, &mut buf);
let expected = [15.023088, -5.446413, 3.7092876, 3.112482];
for (a, b) in buf.iter().zip(expected.iter()) {
assert_almost_eq!(*a, *b, 1e-5);
}
}
}

186
vendor/rand_distr/src/dirichlet.rs vendored Normal file

@@ -0,0 +1,186 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Dirichlet distribution.
#![cfg(feature = "alloc")]
use num_traits::Float;
use crate::{Distribution, Exp1, Gamma, Open01, StandardNormal};
use rand::Rng;
use core::fmt;
use alloc::{boxed::Box, vec, vec::Vec};
/// The Dirichlet distribution `Dirichlet(alpha)`.
///
/// The Dirichlet distribution is a family of continuous multivariate
/// probability distributions parameterized by a vector alpha of positive reals.
/// It is a multivariate generalization of the beta distribution.
///
/// # Example
///
/// ```
/// use rand::prelude::*;
/// use rand_distr::Dirichlet;
///
/// let dirichlet = Dirichlet::new(&[1.0, 2.0, 3.0]).unwrap();
/// let samples = dirichlet.sample(&mut rand::thread_rng());
/// println!("{:?} is from a Dirichlet([1.0, 2.0, 3.0]) distribution", samples);
/// ```
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Dirichlet<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Concentration parameters (alpha)
alpha: Box<[F]>,
}
/// Error type returned from `Dirichlet::new`.
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `alpha.len() < 2`.
AlphaTooShort,
/// `alpha <= 0.0` or `nan`.
AlphaTooSmall,
/// `size < 2`.
SizeTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::AlphaTooShort | Error::SizeTooSmall => {
"less than 2 dimensions in Dirichlet distribution"
}
Error::AlphaTooSmall => "alpha is not positive in Dirichlet distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Dirichlet<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Construct a new `Dirichlet` with the given alpha parameter `alpha`.
///
/// Requires `alpha.len() >= 2`.
#[inline]
pub fn new(alpha: &[F]) -> Result<Dirichlet<F>, Error> {
if alpha.len() < 2 {
return Err(Error::AlphaTooShort);
}
for &ai in alpha.iter() {
if !(ai > F::zero()) {
return Err(Error::AlphaTooSmall);
}
}
Ok(Dirichlet { alpha: alpha.to_vec().into_boxed_slice() })
}
/// Construct a new `Dirichlet` with the given shape parameter `alpha` and `size`.
///
/// Requires `size >= 2`.
#[inline]
pub fn new_with_size(alpha: F, size: usize) -> Result<Dirichlet<F>, Error> {
if !(alpha > F::zero()) {
return Err(Error::AlphaTooSmall);
}
if size < 2 {
return Err(Error::SizeTooSmall);
}
Ok(Dirichlet {
alpha: vec![alpha; size].into_boxed_slice(),
})
}
}
impl<F> Distribution<Vec<F>> for Dirichlet<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> Vec<F> {
let n = self.alpha.len();
let mut samples = vec![F::zero(); n];
let mut sum = F::zero();
for (s, &a) in samples.iter_mut().zip(self.alpha.iter()) {
let g = Gamma::new(a, F::one()).unwrap();
*s = g.sample(rng);
sum = sum + (*s);
}
let invacc = F::one() / sum;
for s in samples.iter_mut() {
*s = (*s)*invacc;
}
samples
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_dirichlet() {
let d = Dirichlet::new(&[1.0, 2.0, 3.0]).unwrap();
let mut rng = crate::test::rng(221);
let samples = d.sample(&mut rng);
let _: Vec<f64> = samples
.into_iter()
.map(|x| {
assert!(x > 0.0);
x
})
.collect();
}
#[test]
fn test_dirichlet_with_param() {
let alpha = 0.5f64;
let size = 2;
let d = Dirichlet::new_with_size(alpha, size).unwrap();
let mut rng = crate::test::rng(221);
let samples = d.sample(&mut rng);
let _: Vec<f64> = samples
.into_iter()
.map(|x| {
assert!(x > 0.0);
x
})
.collect();
}
#[test]
#[should_panic]
fn test_dirichlet_invalid_length() {
Dirichlet::new_with_size(0.5f64, 1).unwrap();
}
#[test]
#[should_panic]
fn test_dirichlet_invalid_alpha() {
Dirichlet::new_with_size(0.0f64, 2).unwrap();
}
}

181
vendor/rand_distr/src/exponential.rs vendored Normal file

@@ -0,0 +1,181 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The exponential distribution.
use crate::utils::ziggurat;
use num_traits::Float;
use crate::{ziggurat_tables, Distribution};
use rand::Rng;
use core::fmt;
/// Samples floating-point numbers according to the exponential distribution,
/// with rate parameter `λ = 1`. This is equivalent to `Exp::new(1.0)` or
/// sampling with `-rng.gen::<f64>().ln()`, but faster.
///
/// See `Exp` for the general exponential distribution.
///
/// Implemented via the ZIGNOR variant[^1] of the Ziggurat method. The exact
/// description in the paper was adjusted to use tables for the exponential
/// distribution rather than normal.
///
/// [^1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
/// Generate Normal Random Samples*](
/// https://www.doornik.com/research/ziggurat.pdf).
/// Nuffield College, Oxford
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Exp1;
///
/// let val: f64 = thread_rng().sample(Exp1);
/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Exp1;
impl Distribution<f32> for Exp1 {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f32 {
// TODO: use optimal 32-bit implementation
let x: f64 = self.sample(rng);
x as f32
}
}
// This could be done via `-rng.gen::<f64>().ln()` but that is slower.
impl Distribution<f64> for Exp1 {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
#[inline]
fn pdf(x: f64) -> f64 {
(-x).exp()
}
#[inline]
fn zero_case<R: Rng + ?Sized>(rng: &mut R, _u: f64) -> f64 {
ziggurat_tables::ZIG_EXP_R - rng.gen::<f64>().ln()
}
ziggurat(
rng,
false,
&ziggurat_tables::ZIG_EXP_X,
&ziggurat_tables::ZIG_EXP_F,
pdf,
zero_case,
)
}
}
/// The exponential distribution `Exp(lambda)`.
///
/// This distribution has density function: `f(x) = lambda * exp(-lambda * x)`
/// for `x > 0`, when `lambda > 0`. For `lambda = 0`, all samples yield infinity.
///
/// Note that [`Exp1`](crate::Exp1) is an optimised implementation for `lambda = 1`.
///
/// # Example
///
/// ```
/// use rand_distr::{Exp, Distribution};
///
/// let exp = Exp::new(2.0).unwrap();
/// let v = exp.sample(&mut rand::thread_rng());
/// println!("{} is from a Exp(2) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Exp<F>
where F: Float, Exp1: Distribution<F>
{
/// `lambda` stored as `1/lambda`, since this is what we scale by.
lambda_inverse: F,
}
/// Error type returned from `Exp::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `lambda < 0` or `nan`.
LambdaTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::LambdaTooSmall => "lambda is negative or NaN in exponential distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F: Float> Exp<F>
where F: Float, Exp1: Distribution<F>
{
/// Construct a new `Exp` with the given shape parameter
/// `lambda`.
///
/// # Remarks
///
/// For custom types `N` implementing the [`Float`] trait,
/// the case `lambda = 0` is handled as follows: each sample corresponds
/// to a sample from an `Exp1` multiplied by `1 / 0`. Primitive types
/// yield infinity, since `1 / 0 = infinity`.
#[inline]
pub fn new(lambda: F) -> Result<Exp<F>, Error> {
if !(lambda >= F::zero()) {
return Err(Error::LambdaTooSmall);
}
Ok(Exp {
lambda_inverse: F::one() / lambda,
})
}
}
impl<F> Distribution<F> for Exp<F>
where F: Float, Exp1: Distribution<F>
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
rng.sample(Exp1) * self.lambda_inverse
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_exp() {
let exp = Exp::new(10.0).unwrap();
let mut rng = crate::test::rng(221);
for _ in 0..1000 {
assert!(exp.sample(&mut rng) >= 0.0);
}
}
#[test]
fn test_zero() {
let d = Exp::new(0.0).unwrap();
assert_eq!(d.sample(&mut crate::test::rng(21)), f64::infinity());
}
#[test]
#[should_panic]
fn test_exp_invalid_lambda_neg() {
Exp::new(-10.0).unwrap();
}
#[test]
#[should_panic]
fn test_exp_invalid_lambda_nan() {
Exp::new(f64::nan()).unwrap();
}
}

185
vendor/rand_distr/src/frechet.rs vendored Normal file

@@ -0,0 +1,185 @@
// Copyright 2021 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Fréchet distribution.
use crate::{Distribution, OpenClosed01};
use core::fmt;
use num_traits::Float;
use rand::Rng;
/// Samples floating-point numbers according to the Fréchet distribution
///
/// This distribution has density function:
/// `f(x) = [(x - μ) / σ]^(-1 - α) exp[-(x - μ) / σ]^(-α) α / σ`,
/// where `μ` is the location parameter, `σ` the scale parameter, and `α` the shape parameter.
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Frechet;
///
/// let val: f64 = thread_rng().sample(Frechet::new(0.0, 1.0, 1.0).unwrap());
/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Frechet<F>
where
F: Float,
OpenClosed01: Distribution<F>,
{
location: F,
scale: F,
shape: F,
}
/// Error type returned from `Frechet::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// location is infinite or NaN
LocationNotFinite,
/// scale is not finite positive number
ScaleNotPositive,
/// shape is not finite positive number
ShapeNotPositive,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::LocationNotFinite => "location is not finite in Frechet distribution",
Error::ScaleNotPositive => "scale is not positive and finite in Frechet distribution",
Error::ShapeNotPositive => "shape is not positive and finite in Frechet distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Frechet<F>
where
F: Float,
OpenClosed01: Distribution<F>,
{
/// Construct a new `Frechet` distribution with given `location`, `scale`, and `shape`.
pub fn new(location: F, scale: F, shape: F) -> Result<Frechet<F>, Error> {
if scale <= F::zero() || scale.is_infinite() || scale.is_nan() {
return Err(Error::ScaleNotPositive);
}
if shape <= F::zero() || shape.is_infinite() || shape.is_nan() {
return Err(Error::ShapeNotPositive);
}
if location.is_infinite() || location.is_nan() {
return Err(Error::LocationNotFinite);
}
Ok(Frechet {
location,
scale,
shape,
})
}
}
impl<F> Distribution<F> for Frechet<F>
where
F: Float,
OpenClosed01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let x: F = rng.sample(OpenClosed01);
self.location + self.scale * (-x.ln()).powf(-self.shape.recip())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn test_zero_scale() {
Frechet::new(0.0, 0.0, 1.0).unwrap();
}
#[test]
#[should_panic]
fn test_infinite_scale() {
Frechet::new(0.0, core::f64::INFINITY, 1.0).unwrap();
}
#[test]
#[should_panic]
fn test_nan_scale() {
Frechet::new(0.0, core::f64::NAN, 1.0).unwrap();
}
#[test]
#[should_panic]
fn test_zero_shape() {
Frechet::new(0.0, 1.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn test_infinite_shape() {
Frechet::new(0.0, 1.0, core::f64::INFINITY).unwrap();
}
#[test]
#[should_panic]
fn test_nan_shape() {
Frechet::new(0.0, 1.0, core::f64::NAN).unwrap();
}
#[test]
#[should_panic]
fn test_infinite_location() {
Frechet::new(core::f64::INFINITY, 1.0, 1.0).unwrap();
}
#[test]
#[should_panic]
fn test_nan_location() {
Frechet::new(core::f64::NAN, 1.0, 1.0).unwrap();
}
#[test]
fn test_sample_against_cdf() {
fn quantile_function(x: f64) -> f64 {
(-x.ln()).recip()
}
let location = 0.0;
let scale = 1.0;
let shape = 1.0;
let iterations = 100_000;
let increment = 1.0 / iterations as f64;
let probabilities = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9];
let mut quantiles = [0.0; 9];
for (i, p) in probabilities.iter().enumerate() {
quantiles[i] = quantile_function(*p);
}
let mut proportions = [0.0; 9];
let d = Frechet::new(location, scale, shape).unwrap();
let mut rng = crate::test::rng(1);
for _ in 0..iterations {
let replicate = d.sample(&mut rng);
for (i, q) in quantiles.iter().enumerate() {
if replicate < *q {
proportions[i] += increment;
}
}
}
assert!(proportions
.iter()
.zip(&probabilities)
.all(|(p_hat, p)| (p_hat - p).abs() < 0.003))
}
}

814
vendor/rand_distr/src/gamma.rs vendored Normal file

@@ -0,0 +1,814 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Gamma and derived distributions.
// We use the variable names from the published reference, therefore this
// warning is not helpful.
#![allow(clippy::many_single_char_names)]
use self::ChiSquaredRepr::*;
use self::GammaRepr::*;
use crate::normal::StandardNormal;
use num_traits::Float;
use crate::{Distribution, Exp, Exp1, Open01};
use rand::Rng;
use core::fmt;
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// The Gamma distribution `Gamma(shape, scale)`.
///
/// The density function of this distribution is
///
/// ```text
/// f(x) = x^(k - 1) * exp(-x / θ) / (Γ(k) * θ^k)
/// ```
///
/// where `Γ` is the Gamma function, `k` is the shape and `θ` is the
/// scale and both `k` and `θ` are strictly positive.
///
/// The algorithm used is that described by Marsaglia & Tsang 2000[^1],
/// falling back to directly sampling from an Exponential for `shape
/// == 1`, and using the boosting technique described in that paper for
/// `shape < 1`.
///
/// # Example
///
/// ```
/// use rand_distr::{Distribution, Gamma};
///
/// let gamma = Gamma::new(2.0, 5.0).unwrap();
/// let v = gamma.sample(&mut rand::thread_rng());
/// println!("{} is from a Gamma(2, 5) distribution", v);
/// ```
///
/// [^1]: George Marsaglia and Wai Wan Tsang. 2000. "A Simple Method for
/// Generating Gamma Variables" *ACM Trans. Math. Softw.* 26, 3
/// (September 2000), 363-372.
/// DOI:[10.1145/358407.358414](https://doi.acm.org/10.1145/358407.358414)
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Gamma<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
repr: GammaRepr<F>,
}
/// Error type returned from `Gamma::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `shape <= 0` or `nan`.
ShapeTooSmall,
/// `scale <= 0` or `nan`.
ScaleTooSmall,
/// `1 / scale == 0`.
ScaleTooLarge,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ShapeTooSmall => "shape is not positive in gamma distribution",
Error::ScaleTooSmall => "scale is not positive in gamma distribution",
Error::ScaleTooLarge => "scale is infinity in gamma distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
enum GammaRepr<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
Large(GammaLargeShape<F>),
One(Exp<F>),
Small(GammaSmallShape<F>),
}
// These two helpers could be made public, but saving the
// match-on-Gamma-enum branch from using them directly (e.g. if one
// knows that the shape is always > 1) doesn't appear to be much
// faster.
/// Gamma distribution where the shape parameter is less than 1.
///
/// Note, samples from this require a compulsory floating-point `pow`
/// call, which makes it significantly slower than sampling from a
/// gamma distribution where the shape parameter is greater than or
/// equal to 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
struct GammaSmallShape<F>
where
F: Float,
StandardNormal: Distribution<F>,
Open01: Distribution<F>,
{
inv_shape: F,
large_shape: GammaLargeShape<F>,
}
/// Gamma distribution where the shape parameter is larger than 1.
///
/// See `Gamma` for sampling from a Gamma distribution with general
/// shape parameters.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
struct GammaLargeShape<F>
where
F: Float,
StandardNormal: Distribution<F>,
Open01: Distribution<F>,
{
scale: F,
c: F,
d: F,
}
impl<F> Gamma<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Construct an object representing the `Gamma(shape, scale)`
/// distribution.
#[inline]
pub fn new(shape: F, scale: F) -> Result<Gamma<F>, Error> {
if !(shape > F::zero()) {
return Err(Error::ShapeTooSmall);
}
if !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
let repr = if shape == F::one() {
One(Exp::new(F::one() / scale).map_err(|_| Error::ScaleTooLarge)?)
} else if shape < F::one() {
Small(GammaSmallShape::new_raw(shape, scale))
} else {
Large(GammaLargeShape::new_raw(shape, scale))
};
Ok(Gamma { repr })
}
}
impl<F> GammaSmallShape<F>
where
F: Float,
StandardNormal: Distribution<F>,
Open01: Distribution<F>,
{
fn new_raw(shape: F, scale: F) -> GammaSmallShape<F> {
GammaSmallShape {
inv_shape: F::one() / shape,
large_shape: GammaLargeShape::new_raw(shape + F::one(), scale),
}
}
}
impl<F> GammaLargeShape<F>
where
F: Float,
StandardNormal: Distribution<F>,
Open01: Distribution<F>,
{
fn new_raw(shape: F, scale: F) -> GammaLargeShape<F> {
let d = shape - F::from(1. / 3.).unwrap();
GammaLargeShape {
scale,
c: F::one() / (F::from(9.).unwrap() * d).sqrt(),
d,
}
}
}
impl<F> Distribution<F> for Gamma<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
match self.repr {
Small(ref g) => g.sample(rng),
One(ref g) => g.sample(rng),
Large(ref g) => g.sample(rng),
}
}
}
impl<F> Distribution<F> for GammaSmallShape<F>
where
F: Float,
StandardNormal: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let u: F = rng.sample(Open01);
self.large_shape.sample(rng) * u.powf(self.inv_shape)
}
}
impl<F> Distribution<F> for GammaLargeShape<F>
where
F: Float,
StandardNormal: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
// Marsaglia & Tsang method, 2000
loop {
let x: F = rng.sample(StandardNormal);
let v_cbrt = F::one() + self.c * x;
if v_cbrt <= F::zero() {
// a^3 <= 0 iff a <= 0
continue;
}
let v = v_cbrt * v_cbrt * v_cbrt;
let u: F = rng.sample(Open01);
let x_sqr = x * x;
if u < F::one() - F::from(0.0331).unwrap() * x_sqr * x_sqr
|| u.ln() < F::from(0.5).unwrap() * x_sqr + self.d * (F::one() - v + v.ln())
{
return self.d * v * self.scale;
}
}
}
}
/// The chi-squared distribution `χ²(k)`, where `k` is the degrees of
/// freedom.
///
/// For `k > 0` integral, this distribution is the sum of the squares
/// of `k` independent standard normal random variables. For other
/// `k`, this uses the equivalent characterisation
/// `χ²(k) = Gamma(k/2, 2)`.
///
/// # Example
///
/// ```
/// use rand_distr::{ChiSquared, Distribution};
///
/// let chi = ChiSquared::new(11.0).unwrap();
/// let v = chi.sample(&mut rand::thread_rng());
/// println!("{} is from a χ²(11) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct ChiSquared<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
repr: ChiSquaredRepr<F>,
}
/// Error type returned from `ChiSquared::new` and `StudentT::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum ChiSquaredError {
/// `0.5 * k <= 0` or `nan`.
DoFTooSmall,
}
impl fmt::Display for ChiSquaredError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
ChiSquaredError::DoFTooSmall => {
"degrees-of-freedom k is not positive in chi-squared distribution"
}
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for ChiSquaredError {}
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
enum ChiSquaredRepr<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
// k == 1, Gamma(alpha, ..) is particularly slow for alpha < 1,
// e.g. when alpha = 1/2 as it would be for this case, so special-
// casing and using the definition of N(0,1)^2 is faster.
DoFExactlyOne,
DoFAnythingElse(Gamma<F>),
}
impl<F> ChiSquared<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Create a new chi-squared distribution with degrees-of-freedom
/// `k`.
pub fn new(k: F) -> Result<ChiSquared<F>, ChiSquaredError> {
let repr = if k == F::one() {
DoFExactlyOne
} else {
if !(F::from(0.5).unwrap() * k > F::zero()) {
return Err(ChiSquaredError::DoFTooSmall);
}
DoFAnythingElse(Gamma::new(F::from(0.5).unwrap() * k, F::from(2.0).unwrap()).unwrap())
};
Ok(ChiSquared { repr })
}
}
impl<F> Distribution<F> for ChiSquared<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
match self.repr {
DoFExactlyOne => {
// k == 1 => N(0,1)^2
let norm: F = rng.sample(StandardNormal);
norm * norm
}
DoFAnythingElse(ref g) => g.sample(rng),
}
}
}
/// The Fisher F distribution `F(m, n)`.
///
/// This distribution is equivalent to the ratio of two normalised
/// chi-squared distributions, that is, `F(m,n) = (χ²(m)/m) /
/// (χ²(n)/n)`.
///
/// # Example
///
/// ```
/// use rand_distr::{FisherF, Distribution};
///
/// let f = FisherF::new(2.0, 32.0).unwrap();
/// let v = f.sample(&mut rand::thread_rng());
/// println!("{} is from an F(2, 32) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct FisherF<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
numer: ChiSquared<F>,
denom: ChiSquared<F>,
// denom_dof / numer_dof so that this can just be a straight
// multiplication, rather than a division.
dof_ratio: F,
}
/// Error type returned from `FisherF::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum FisherFError {
/// `m <= 0` or `nan`.
MTooSmall,
/// `n <= 0` or `nan`.
NTooSmall,
}
impl fmt::Display for FisherFError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
FisherFError::MTooSmall => "m is not positive in Fisher F distribution",
FisherFError::NTooSmall => "n is not positive in Fisher F distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for FisherFError {}
impl<F> FisherF<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Create a new `FisherF` distribution, with the given parameter.
pub fn new(m: F, n: F) -> Result<FisherF<F>, FisherFError> {
let zero = F::zero();
if !(m > zero) {
return Err(FisherFError::MTooSmall);
}
if !(n > zero) {
return Err(FisherFError::NTooSmall);
}
Ok(FisherF {
numer: ChiSquared::new(m).unwrap(),
denom: ChiSquared::new(n).unwrap(),
dof_ratio: n / m,
})
}
}
impl<F> Distribution<F> for FisherF<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
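// F(m, n) = (chi^2(m) / m) / (chi^2(n) / n); dof_ratio = n / m turns the two
// divisions into a single multiplication.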
self.numer.sample(rng) / self.denom.sample(rng) * self.dof_ratio
}
}
/// The Student t distribution, `t(nu)`, where `nu` is the degrees of
/// freedom.
///
/// # Example
///
/// ```
/// use rand_distr::{StudentT, Distribution};
///
/// let t = StudentT::new(11.0).unwrap();
/// let v = t.sample(&mut rand::thread_rng());
/// println!("{} is from a t(11) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct StudentT<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
chi: ChiSquared<F>,
dof: F,
}
impl<F> StudentT<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Create a new Student t distribution with `n` degrees of
/// freedom.
pub fn new(n: F) -> Result<StudentT<F>, ChiSquaredError> {
Ok(StudentT {
chi: ChiSquared::new(n)?,
dof: n,
})
}
}
impl<F> Distribution<F> for StudentT<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
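// t(nu) can be sampled as Z * sqrt(nu / V) with Z ~ N(0, 1) and V ~ chi^2(nu).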
let norm: F = rng.sample(StandardNormal);
norm * (self.dof / self.chi.sample(rng)).sqrt()
}
}
/// The algorithm used for sampling the Beta distribution.
///
/// Reference:
///
/// R. C. H. Cheng (1978).
/// Generating beta variates with nonintegral shape parameters.
/// Communications of the ACM 21, 317-322.
/// https://doi.org/10.1145/359460.359482
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
enum BetaAlgorithm<N> {
BB(BB<N>),
BC(BC<N>),
}
/// Algorithm BB for `min(alpha, beta) > 1`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
struct BB<N> {
alpha: N,
beta: N,
gamma: N,
}
/// Algorithm BC for `min(alpha, beta) <= 1`.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
struct BC<N> {
alpha: N,
beta: N,
delta: N,
kappa1: N,
kappa2: N,
}
/// The Beta distribution with shape parameters `alpha` and `beta`.
///
/// # Example
///
/// ```
/// use rand_distr::{Distribution, Beta};
///
/// let beta = Beta::new(2.0, 5.0).unwrap();
/// let v = beta.sample(&mut rand::thread_rng());
/// println!("{} is from a Beta(2, 5) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub struct Beta<F>
where
F: Float,
Open01: Distribution<F>,
{
a: F, b: F, switched_params: bool,
algorithm: BetaAlgorithm<F>,
}
/// Error type returned from `Beta::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
pub enum BetaError {
/// `alpha <= 0` or `nan`.
AlphaTooSmall,
/// `beta <= 0` or `nan`.
BetaTooSmall,
}
impl fmt::Display for BetaError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
BetaError::AlphaTooSmall => "alpha is not positive in beta distribution",
BetaError::BetaTooSmall => "beta is not positive in beta distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for BetaError {}
impl<F> Beta<F>
where
F: Float,
Open01: Distribution<F>,
{
/// Construct an object representing the `Beta(alpha, beta)`
/// distribution.
pub fn new(alpha: F, beta: F) -> Result<Beta<F>, BetaError> {
if !(alpha > F::zero()) {
return Err(BetaError::AlphaTooSmall);
}
if !(beta > F::zero()) {
return Err(BetaError::BetaTooSmall);
}
// From now on, we use the notation from the reference,
// i.e. `alpha` and `beta` are renamed to `a0` and `b0`.
let (a0, b0) = (alpha, beta);
let (a, b, switched_params) = if a0 < b0 {
(a0, b0, false)
} else {
(b0, a0, true)
};
if a > F::one() {
// Algorithm BB
let alpha = a + b;
let beta = ((alpha - F::from(2.).unwrap())
/ (F::from(2.).unwrap()*a*b - alpha)).sqrt();
let gamma = a + F::one() / beta;
Ok(Beta {
a, b, switched_params,
algorithm: BetaAlgorithm::BB(BB {
alpha, beta, gamma,
})
})
} else {
// Algorithm BC
//
// Here `a` is the maximum instead of the minimum.
let (a, b, switched_params) = (b, a, !switched_params);
let alpha = a + b;
let beta = F::one() / b;
let delta = F::one() + a - b;
let kappa1 = delta
* (F::from(1. / 18. / 4.).unwrap() + F::from(3. / 18. / 4.).unwrap()*b)
/ (a*beta - F::from(14. / 18.).unwrap());
let kappa2 = F::from(0.25).unwrap()
+ (F::from(0.5).unwrap() + F::from(0.25).unwrap()/delta)*b;
Ok(Beta {
a, b, switched_params,
algorithm: BetaAlgorithm::BC(BC {
alpha, beta, delta, kappa1, kappa2,
})
})
}
}
}
impl<F> Distribution<F> for Beta<F>
where
F: Float,
Open01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let mut w;
match self.algorithm {
BetaAlgorithm::BB(algo) => {
loop {
// 1.
let u1 = rng.sample(Open01);
let u2 = rng.sample(Open01);
let v = algo.beta * (u1 / (F::one() - u1)).ln();
w = self.a * v.exp();
let z = u1*u1 * u2;
let r = algo.gamma * v - F::from(4.).unwrap().ln();
let s = self.a + r - w;
// 2.
if s + F::one() + F::from(5.).unwrap().ln()
>= F::from(5.).unwrap() * z {
break;
}
// 3.
let t = z.ln();
if s >= t {
break;
}
// 4.
if !(r + algo.alpha * (algo.alpha / (self.b + w)).ln() < t) {
break;
}
}
},
BetaAlgorithm::BC(algo) => {
loop {
let z;
// 1.
let u1 = rng.sample(Open01);
let u2 = rng.sample(Open01);
if u1 < F::from(0.5).unwrap() {
// 2.
let y = u1 * u2;
z = u1 * y;
if F::from(0.25).unwrap() * u2 + z - y >= algo.kappa1 {
continue;
}
} else {
// 3.
z = u1 * u1 * u2;
if z <= F::from(0.25).unwrap() {
let v = algo.beta * (u1 / (F::one() - u1)).ln();
w = self.a * v.exp();
break;
}
// 4.
if z >= algo.kappa2 {
continue;
}
}
// 5.
let v = algo.beta * (u1 / (F::one() - u1)).ln();
w = self.a * v.exp();
if !(algo.alpha * ((algo.alpha / (self.b + w)).ln() + v)
- F::from(4.).unwrap().ln() < z.ln()) {
break;
};
}
},
};
// 5. for BB, 6. for BC
if !self.switched_params {
if w == F::infinity() {
// Assuming `b` is finite, for large `w`:
return F::one();
}
w / (self.b + w)
} else {
self.b / (self.b + w)
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_chi_squared_one() {
let chi = ChiSquared::new(1.0).unwrap();
let mut rng = crate::test::rng(201);
for _ in 0..1000 {
chi.sample(&mut rng);
}
}
#[test]
fn test_chi_squared_small() {
let chi = ChiSquared::new(0.5).unwrap();
let mut rng = crate::test::rng(202);
for _ in 0..1000 {
chi.sample(&mut rng);
}
}
#[test]
fn test_chi_squared_large() {
let chi = ChiSquared::new(30.0).unwrap();
let mut rng = crate::test::rng(203);
for _ in 0..1000 {
chi.sample(&mut rng);
}
}
#[test]
#[should_panic]
fn test_chi_squared_invalid_dof() {
ChiSquared::new(-1.0).unwrap();
}
#[test]
fn test_f() {
let f = FisherF::new(2.0, 32.0).unwrap();
let mut rng = crate::test::rng(204);
for _ in 0..1000 {
f.sample(&mut rng);
}
}
#[test]
fn test_t() {
let t = StudentT::new(11.0).unwrap();
let mut rng = crate::test::rng(205);
for _ in 0..1000 {
t.sample(&mut rng);
}
}
#[test]
fn test_beta() {
let beta = Beta::new(1.0, 2.0).unwrap();
let mut rng = crate::test::rng(201);
for _ in 0..1000 {
beta.sample(&mut rng);
}
}
#[test]
#[should_panic]
fn test_beta_invalid_dof() {
Beta::new(0., 0.).unwrap();
}
#[test]
fn test_beta_small_param() {
let beta = Beta::<f64>::new(1e-3, 1e-3).unwrap();
let mut rng = crate::test::rng(206);
for i in 0..1000 {
assert!(!beta.sample(&mut rng).is_nan(), "failed at i={}", i);
}
}
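// A minimal sanity-check sketch for the `Gamma` sampler itself, mirroring the
// construction-only tests above; the shape, scale and seed values here are
// arbitrary choices for this sketch.
#[test]
fn test_gamma_sampling() {
let gamma = Gamma::new(2.0, 5.0).unwrap();
let mut rng = crate::test::rng(200);
for _ in 0..1000 {
gamma.sample(&mut rng);
}
}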
}

238
vendor/rand_distr/src/geometric.rs vendored Normal file

@@ -0,0 +1,238 @@
//! The geometric distribution.
use crate::Distribution;
use rand::Rng;
use core::fmt;
#[allow(unused_imports)]
use num_traits::Float;
/// The geometric distribution `Geometric(p)` bounded to `[0, u64::MAX]`.
///
/// This is the probability distribution of the number of failures before the
/// first success in a series of Bernoulli trials. It has the density function
/// `f(k) = (1 - p)^k p` for `k >= 0`, where `p` is the probability of success
/// on each trial.
///
/// This is the discrete analogue of the [exponential distribution](crate::Exp).
///
/// Note that [`StandardGeometric`](crate::StandardGeometric) is an optimised
/// implementation for `p = 0.5`.
///
/// # Example
///
/// ```
/// use rand_distr::{Geometric, Distribution};
///
/// let geo = Geometric::new(0.25).unwrap();
/// let v = geo.sample(&mut rand::thread_rng());
/// println!("{} is from a Geometric(0.25) distribution", v);
/// ```
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Geometric
{
p: f64,
pi: f64,
k: u64
}
/// Error type returned from `Geometric::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `p < 0 || p > 1` or `nan`
InvalidProbability,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::InvalidProbability => "p is NaN or outside the interval [0, 1] in geometric distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl Geometric {
/// Construct a new `Geometric` with the given shape parameter `p`
/// (probability of success on each trial).
pub fn new(p: f64) -> Result<Self, Error> {
if !p.is_finite() || p < 0.0 || p > 1.0 {
Err(Error::InvalidProbability)
} else if p == 0.0 || p >= 2.0 / 3.0 {
Ok(Geometric { p, pi: p, k: 0 })
} else {
let (pi, k) = {
// choose smallest k such that pi = (1 - p)^(2^k) <= 0.5
let mut k = 1;
let mut pi = (1.0 - p).powi(2);
while pi > 0.5 {
k += 1;
pi = pi * pi;
}
(pi, k)
};
Ok(Geometric { p, pi, k })
}
}
}
impl Distribution<u64> for Geometric
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
if self.p >= 2.0 / 3.0 {
// use the trivial algorithm:
let mut failures = 0;
loop {
let u = rng.gen::<f64>();
if u <= self.p { break; }
failures += 1;
}
return failures;
}
if self.p == 0.0 { return core::u64::MAX; }
let Geometric { p, pi, k } = *self;
// Based on the algorithm presented in section 3 of
// Karl Bringmann and Tobias Friedrich (July 2013) - Exact and Efficient
// Generation of Geometric Random Variates and Random Graphs, published
// in International Colloquium on Automata, Languages and Programming
// (pp.267-278)
// https://people.mpi-inf.mpg.de/~kbringma/paper/2013ICALP-1.pdf
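// Decomposition used below: write X ~ Geo(p) as X = D * 2^k + M, where
// D = floor(X / 2^k) and M = X mod 2^k. D is geometric with "continue"
// probability pi = (1 - p)^(2^k), and M (conditionally on D) is proportional
// to (1 - p)^M on [0, 2^k), which the rejection step below reproduces.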
// Use the trivial algorithm to sample D from Geo(pi) = Geo(p) / 2^k:
let d = {
let mut failures = 0;
while rng.gen::<f64>() < pi {
failures += 1;
}
failures
};
// Use rejection sampling for the remainder M from Geo(p) % 2^k:
// choose M uniformly from [0, 2^k), but reject with probability (1 - p)^M
// NOTE: The paper suggests using bitwise sampling here, which is
// currently unsupported, but should improve performance by requiring
// fewer iterations on average. ~ October 28, 2020
let m = loop {
let m = rng.gen::<u64>() & ((1 << k) - 1);
let p_reject = if m <= core::i32::MAX as u64 {
(1.0 - p).powi(m as i32)
} else {
(1.0 - p).powf(m as f64)
};
let u = rng.gen::<f64>();
if u < p_reject {
break m;
}
};
(d << k) + m
}
}
/// Samples integers according to the geometric distribution with success
/// probability `p = 0.5`. This is equivalent to `Geometric::new(0.5)`,
/// but faster.
///
/// See [`Geometric`](crate::Geometric) for the general geometric distribution.
///
/// Implemented via iterated [Rng::gen::<u64>().leading_zeros()].
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::StandardGeometric;
///
/// let v = StandardGeometric.sample(&mut thread_rng());
/// println!("{} is from a Geometric(0.5) distribution", v);
/// ```
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct StandardGeometric;
impl Distribution<u64> for StandardGeometric {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
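// Each bit of a random u64 is an independent fair coin, so the number of
// leading zeros before the first set bit is Geometric(0.5). A count of 64
// means the whole word was zero; draw another word and keep accumulating.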
let mut result = 0;
loop {
let x = rng.gen::<u64>().leading_zeros() as u64;
result += x;
if x < 64 { break; }
}
result
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_geo_invalid_p() {
assert!(Geometric::new(core::f64::NAN).is_err());
assert!(Geometric::new(core::f64::INFINITY).is_err());
assert!(Geometric::new(core::f64::NEG_INFINITY).is_err());
assert!(Geometric::new(-0.5).is_err());
assert!(Geometric::new(0.0).is_ok());
assert!(Geometric::new(1.0).is_ok());
assert!(Geometric::new(2.0).is_err());
}
fn test_geo_mean_and_variance<R: Rng>(p: f64, rng: &mut R) {
let distr = Geometric::new(p).unwrap();
let expected_mean = (1.0 - p) / p;
let expected_variance = (1.0 - p) / (p * p);
let mut results = [0.0; 10000];
for i in results.iter_mut() {
*i = distr.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!((mean as f64 - expected_mean).abs() < expected_mean / 40.0);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!((variance - expected_variance).abs() < expected_variance / 10.0);
}
#[test]
fn test_geometric() {
let mut rng = crate::test::rng(12345);
test_geo_mean_and_variance(0.10, &mut rng);
test_geo_mean_and_variance(0.25, &mut rng);
test_geo_mean_and_variance(0.50, &mut rng);
test_geo_mean_and_variance(0.75, &mut rng);
test_geo_mean_and_variance(0.90, &mut rng);
}
#[test]
fn test_standard_geometric() {
let mut rng = crate::test::rng(654321);
let distr = StandardGeometric;
let expected_mean = 1.0;
let expected_variance = 2.0;
let mut results = [0.0; 1000];
for i in results.iter_mut() {
*i = distr.sample(&mut rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!((variance - expected_variance).abs() < expected_variance / 10.0);
}
}

155
vendor/rand_distr/src/gumbel.rs vendored Normal file

@@ -0,0 +1,155 @@
// Copyright 2021 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Gumbel distribution.
use crate::{Distribution, OpenClosed01};
use core::fmt;
use num_traits::Float;
use rand::Rng;
/// Samples floating-point numbers according to the Gumbel distribution
///
/// This distribution has density function:
/// `f(x) = exp(-(z + exp(-z))) / σ`, where `z = (x - μ) / σ`,
/// `μ` is the location parameter, and `σ` the scale parameter.
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Gumbel;
///
/// let val: f64 = thread_rng().sample(Gumbel::new(0.0, 1.0).unwrap());
/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Gumbel<F>
where
F: Float,
OpenClosed01: Distribution<F>,
{
location: F,
scale: F,
}
/// Error type returned from `Gumbel::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// location is infinite or NaN
LocationNotFinite,
/// scale is not a positive and finite number
ScaleNotPositive,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleNotPositive => "scale is not positive and finite in Gumbel distribution",
Error::LocationNotFinite => "location is not finite in Gumbel distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Gumbel<F>
where
F: Float,
OpenClosed01: Distribution<F>,
{
/// Construct a new `Gumbel` distribution with given `location` and `scale`.
pub fn new(location: F, scale: F) -> Result<Gumbel<F>, Error> {
if scale <= F::zero() || scale.is_infinite() || scale.is_nan() {
return Err(Error::ScaleNotPositive);
}
if location.is_infinite() || location.is_nan() {
return Err(Error::LocationNotFinite);
}
Ok(Gumbel { location, scale })
}
}
impl<F> Distribution<F> for Gumbel<F>
where
F: Float,
OpenClosed01: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
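// Inverse-transform sampling: for u in (0, 1], location - scale * ln(-ln(u))
// has the Gumbel CDF exp(-exp(-(x - location) / scale)).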
let x: F = rng.sample(OpenClosed01);
self.location - self.scale * (-x.ln()).ln()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn test_zero_scale() {
Gumbel::new(0.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn test_infinite_scale() {
Gumbel::new(0.0, core::f64::INFINITY).unwrap();
}
#[test]
#[should_panic]
fn test_nan_scale() {
Gumbel::new(0.0, core::f64::NAN).unwrap();
}
#[test]
#[should_panic]
fn test_infinite_location() {
Gumbel::new(core::f64::INFINITY, 1.0).unwrap();
}
#[test]
#[should_panic]
fn test_nan_location() {
Gumbel::new(core::f64::NAN, 1.0).unwrap();
}
#[test]
fn test_sample_against_cdf() {
fn neg_log_log(x: f64) -> f64 {
-(-x.ln()).ln()
}
let location = 0.0;
let scale = 1.0;
let iterations = 100_000;
let increment = 1.0 / iterations as f64;
let probabilities = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9];
let mut quantiles = [0.0; 9];
for (i, p) in probabilities.iter().enumerate() {
quantiles[i] = neg_log_log(*p);
}
let mut proportions = [0.0; 9];
let d = Gumbel::new(location, scale).unwrap();
let mut rng = crate::test::rng(1);
for _ in 0..iterations {
let replicate = d.sample(&mut rng);
for (i, q) in quantiles.iter().enumerate() {
if replicate < *q {
proportions[i] += increment;
}
}
}
assert!(proportions
.iter()
.zip(&probabilities)
.all(|(p_hat, p)| (p_hat - p).abs() < 0.003))
}
}

422
vendor/rand_distr/src/hypergeometric.rs vendored Normal file

@@ -0,0 +1,422 @@
//! The hypergeometric distribution.
use crate::Distribution;
use rand::Rng;
use rand::distributions::uniform::Uniform;
use core::fmt;
#[allow(unused_imports)]
use num_traits::Float;
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
enum SamplingMethod {
InverseTransform{ initial_p: f64, initial_x: i64 },
RejectionAcceptance{
m: f64,
a: f64,
lambda_l: f64,
lambda_r: f64,
x_l: f64,
x_r: f64,
p1: f64,
p2: f64,
p3: f64
},
}
/// The hypergeometric distribution `Hypergeometric(N, K, n)`.
///
/// This is the distribution of successes in samples of size `n` drawn without
/// replacement from a population of size `N` containing `K` success states.
/// It has the density function:
/// `f(k) = binomial(K, k) * binomial(N-K, n-k) / binomial(N, n)`,
/// where `binomial(a, b) = a! / (b! * (a - b)!)`.
///
/// The [binomial distribution](crate::Binomial) is the analogous distribution
/// for sampling with replacement. It is a good approximation when the population
/// size is much larger than the sample size.
///
/// # Example
///
/// ```
/// use rand_distr::{Distribution, Hypergeometric};
///
/// let hypergeo = Hypergeometric::new(60, 24, 7).unwrap();
/// let v = hypergeo.sample(&mut rand::thread_rng());
/// println!("{} is from a hypergeometric distribution", v);
/// ```
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Hypergeometric {
n1: u64,
n2: u64,
k: u64,
offset_x: i64,
sign_x: i64,
sampling_method: SamplingMethod,
}
/// Error type returned from `Hypergeometric::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `total_population_size` is too large, causing floating point underflow.
PopulationTooLarge,
/// `population_with_feature > total_population_size`.
ProbabilityTooLarge,
/// `sample_size > total_population_size`.
SampleSizeTooLarge,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::PopulationTooLarge => "total_population_size is too large, causing underflow in hypergeometric distribution",
Error::ProbabilityTooLarge => "population_with_feature > total_population_size in hypergeometric distribution",
Error::SampleSizeTooLarge => "sample_size > total_population_size in hypergeometric distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
// evaluate fact(numerator.0)*fact(numerator.1) / fact(denominator.0)*fact(denominator.1)
fn fraction_of_products_of_factorials(numerator: (u64, u64), denominator: (u64, u64)) -> f64 {
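// Single upward pass from just above the smallest of the four arguments to the
// largest: multiply in numerator factors and divide out denominator factors while
// i is within range of each argument. Every factor at or below min_all appears
// twice in the numerator and twice in the denominator, so it cancels exactly.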
let min_top = u64::min(numerator.0, numerator.1);
let min_bottom = u64::min(denominator.0, denominator.1);
// the factorial of this will cancel out:
let min_all = u64::min(min_top, min_bottom);
let max_top = u64::max(numerator.0, numerator.1);
let max_bottom = u64::max(denominator.0, denominator.1);
let max_all = u64::max(max_top, max_bottom);
let mut result = 1.0;
for i in (min_all + 1)..=max_all {
if i <= min_top {
result *= i as f64;
}
if i <= min_bottom {
result /= i as f64;
}
if i <= max_top {
result *= i as f64;
}
if i <= max_bottom {
result /= i as f64;
}
}
result
}
fn ln_of_factorial(v: f64) -> f64 {
// the paper calls for ln(v!), but also wants to pass in fractions,
// so we need to use Stirling's approximation to fill in the gaps:
v * v.ln() - v
}
impl Hypergeometric {
/// Constructs a new `Hypergeometric` with the shape parameters
/// `N = total_population_size`,
/// `K = population_with_feature`,
/// `n = sample_size`.
#[allow(clippy::many_single_char_names)] // Same names as in the reference.
pub fn new(total_population_size: u64, population_with_feature: u64, sample_size: u64) -> Result<Self, Error> {
if population_with_feature > total_population_size {
return Err(Error::ProbabilityTooLarge);
}
if sample_size > total_population_size {
return Err(Error::SampleSizeTooLarge);
}
// set-up constants as function of original parameters
let n = total_population_size;
let (mut sign_x, mut offset_x) = (1, 0);
let (n1, n2) = {
// switch around success and failure states if necessary to ensure n1 <= n2
let population_without_feature = n - population_with_feature;
if population_with_feature > population_without_feature {
sign_x = -1;
offset_x = sample_size as i64;
(population_without_feature, population_with_feature)
} else {
(population_with_feature, population_without_feature)
}
};
// when sampling more than half the total population, take the smaller
// group as sampled instead (we can then return n1-x instead).
//
// Note: the boundary condition given in the paper is `sample_size < n / 2`;
// we're deviating here, because when n is even, it doesn't matter whether
// we switch here or not, but when n is odd `n/2 < n - n/2`, so switching
// when `k == n/2`, we'd actually be taking the _larger_ group as sampled.
let k = if sample_size <= n / 2 {
sample_size
} else {
offset_x += n1 as i64 * sign_x;
sign_x *= -1;
n - sample_size
};
// Algorithm H2PE has bounded runtime only if `M - max(0, k-n2) >= 10`,
// where `M` is the mode of the distribution.
// Use algorithm HIN for the remaining parameter space.
//
// Voratas Kachitvichyanukul and Bruce W. Schmeiser. 1985. Computer
// generation of hypergeometric random variates.
// J. Statist. Comput. Simul. Vol.22 (August 1985), 127-145
// https://www.researchgate.net/publication/233212638
const HIN_THRESHOLD: f64 = 10.0;
let m = ((k + 1) as f64 * (n1 + 1) as f64 / (n + 2) as f64).floor();
let sampling_method = if m - f64::max(0.0, k as f64 - n2 as f64) < HIN_THRESHOLD {
let (initial_p, initial_x) = if k < n2 {
(fraction_of_products_of_factorials((n2, n - k), (n, n2 - k)), 0)
} else {
(fraction_of_products_of_factorials((n1, k), (n, k - n2)), (k - n2) as i64)
};
if initial_p <= 0.0 || !initial_p.is_finite() {
return Err(Error::PopulationTooLarge);
}
SamplingMethod::InverseTransform { initial_p, initial_x }
} else {
let a = ln_of_factorial(m) +
ln_of_factorial(n1 as f64 - m) +
ln_of_factorial(k as f64 - m) +
ln_of_factorial((n2 - k) as f64 + m);
let numerator = (n - k) as f64 * k as f64 * n1 as f64 * n2 as f64;
let denominator = (n - 1) as f64 * n as f64 * n as f64;
let d = 1.5 * (numerator / denominator).sqrt() + 0.5;
let x_l = m - d + 0.5;
let x_r = m + d + 0.5;
let k_l = f64::exp(a -
ln_of_factorial(x_l) -
ln_of_factorial(n1 as f64 - x_l) -
ln_of_factorial(k as f64 - x_l) -
ln_of_factorial((n2 - k) as f64 + x_l));
let k_r = f64::exp(a -
ln_of_factorial(x_r - 1.0) -
ln_of_factorial(n1 as f64 - x_r + 1.0) -
ln_of_factorial(k as f64 - x_r + 1.0) -
ln_of_factorial((n2 - k) as f64 + x_r - 1.0));
let numerator = x_l * ((n2 - k) as f64 + x_l);
let denominator = (n1 as f64 - x_l + 1.0) * (k as f64 - x_l + 1.0);
let lambda_l = -((numerator / denominator).ln());
let numerator = (n1 as f64 - x_r + 1.0) * (k as f64 - x_r + 1.0);
let denominator = x_r * ((n2 - k) as f64 + x_r);
let lambda_r = -((numerator / denominator).ln());
// the paper literally gives `p2 + kL/lambdaL` where it (probably)
// should have been `p2 <- p1 + kL/lambdaL`; another print error?!
let p1 = 2.0 * d;
let p2 = p1 + k_l / lambda_l;
let p3 = p2 + k_r / lambda_r;
SamplingMethod::RejectionAcceptance {
m, a, lambda_l, lambda_r, x_l, x_r, p1, p2, p3
}
};
Ok(Hypergeometric { n1, n2, k, offset_x, sign_x, sampling_method })
}
}
impl Distribution<u64> for Hypergeometric {
#[allow(clippy::many_single_char_names)] // Same names as in the reference.
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> u64 {
use SamplingMethod::*;
let Hypergeometric { n1, n2, k, sign_x, offset_x, sampling_method } = *self;
let x = match sampling_method {
InverseTransform { initial_p: mut p, initial_x: mut x } => {
let mut u = rng.gen::<f64>();
while u > p && x < k as i64 { // the paper erroneously uses `until n < p`, which doesn't make any sense
u -= p;
p *= ((n1 as i64 - x as i64) * (k as i64 - x as i64)) as f64;
p /= ((x as i64 + 1) * (n2 as i64 - k as i64 + 1 + x as i64)) as f64;
x += 1;
}
x
},
RejectionAcceptance { m, a, lambda_l, lambda_r, x_l, x_r, p1, p2, p3 } => {
let distr_region_select = Uniform::new(0.0, p3);
loop {
let (y, v) = loop {
let u = distr_region_select.sample(rng);
let v = rng.gen::<f64>(); // for the accept/reject decision
if u <= p1 {
// Region 1, central bell
let y = (x_l + u).floor();
break (y, v);
} else if u <= p2 {
// Region 2, left exponential tail
let y = (x_l + v.ln() / lambda_l).floor();
if y as i64 >= i64::max(0, k as i64 - n2 as i64) {
let v = v * (u - p1) * lambda_l;
break (y, v);
}
} else {
// Region 3, right exponential tail
let y = (x_r - v.ln() / lambda_r).floor();
if y as u64 <= u64::min(n1, k) {
let v = v * (u - p2) * lambda_r;
break (y, v);
}
}
};
// Step 4: Acceptance/Rejection Comparison
if m < 100.0 || y <= 50.0 {
// Step 4.1: evaluate f(y) via recursive relationship
let mut f = 1.0;
if m < y {
for i in (m as u64 + 1)..=(y as u64) {
f *= (n1 - i + 1) as f64 * (k - i + 1) as f64;
f /= i as f64 * (n2 - k + i) as f64;
}
} else {
for i in (y as u64 + 1)..=(m as u64) {
f *= i as f64 * (n2 - k + i) as f64;
f /= (n1 - i) as f64 * (k - i) as f64;
}
}
if v <= f { break y as i64; }
} else {
// Step 4.2: Squeezing
let y1 = y + 1.0;
let ym = y - m;
let yn = n1 as f64 - y + 1.0;
let yk = k as f64 - y + 1.0;
let nk = n2 as f64 - k as f64 + y1;
let r = -ym / y1;
let s = ym / yn;
let t = ym / yk;
let e = -ym / nk;
let g = yn * yk / (y1 * nk) - 1.0;
let dg = if g < 0.0 {
1.0 + g
} else {
1.0
};
let gu = g * (1.0 + g * (-0.5 + g / 3.0));
let gl = gu - g.powi(4) / (4.0 * dg);
let xm = m + 0.5;
let xn = n1 as f64 - m + 0.5;
let xk = k as f64 - m + 0.5;
let nm = n2 as f64 - k as f64 + xm;
let ub = xm * r * (1.0 + r * (-0.5 + r / 3.0)) +
xn * s * (1.0 + s * (-0.5 + s / 3.0)) +
xk * t * (1.0 + t * (-0.5 + t / 3.0)) +
nm * e * (1.0 + e * (-0.5 + e / 3.0)) +
y * gu - m * gl + 0.0034;
let av = v.ln();
if av > ub { continue; }
let dr = if r < 0.0 {
xm * r.powi(4) / (1.0 + r)
} else {
xm * r.powi(4)
};
let ds = if s < 0.0 {
xn * s.powi(4) / (1.0 + s)
} else {
xn * s.powi(4)
};
let dt = if t < 0.0 {
xk * t.powi(4) / (1.0 + t)
} else {
xk * t.powi(4)
};
let de = if e < 0.0 {
nm * e.powi(4) / (1.0 + e)
} else {
nm * e.powi(4)
};
if av < ub - 0.25*(dr + ds + dt + de) + (y + m)*(gl - gu) - 0.0078 {
break y as i64;
}
// Step 4.3: Final Acceptance/Rejection Test
let av_critical = a -
ln_of_factorial(y) -
ln_of_factorial(n1 as f64 - y) -
ln_of_factorial(k as f64 - y) -
ln_of_factorial((n2 - k) as f64 + y);
if v.ln() <= av_critical {
break y as i64;
}
}
}
}
};
(offset_x + sign_x * x) as u64
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_hypergeometric_invalid_params() {
assert!(Hypergeometric::new(100, 101, 5).is_err());
assert!(Hypergeometric::new(100, 10, 101).is_err());
assert!(Hypergeometric::new(100, 101, 101).is_err());
assert!(Hypergeometric::new(100, 10, 5).is_ok());
}
fn test_hypergeometric_mean_and_variance<R: Rng>(n: u64, k: u64, s: u64, rng: &mut R)
{
let distr = Hypergeometric::new(n, k, s).unwrap();
let expected_mean = s as f64 * k as f64 / n as f64;
let expected_variance = {
let numerator = (s * k * (n - k) * (n - s)) as f64;
let denominator = (n * n * (n - 1)) as f64;
numerator / denominator
};
let mut results = [0.0; 1000];
for i in results.iter_mut() {
*i = distr.sample(rng) as f64;
}
let mean = results.iter().sum::<f64>() / results.len() as f64;
assert!((mean as f64 - expected_mean).abs() < expected_mean / 50.0);
let variance =
results.iter().map(|x| (x - mean) * (x - mean)).sum::<f64>() / results.len() as f64;
assert!((variance - expected_variance).abs() < expected_variance / 10.0);
}
#[test]
fn test_hypergeometric() {
let mut rng = crate::test::rng(737);
// exercise algorithm HIN:
test_hypergeometric_mean_and_variance(500, 400, 30, &mut rng);
test_hypergeometric_mean_and_variance(250, 200, 230, &mut rng);
test_hypergeometric_mean_and_variance(100, 20, 6, &mut rng);
test_hypergeometric_mean_and_variance(50, 10, 47, &mut rng);
// exercise algorithm H2PE
test_hypergeometric_mean_and_variance(5000, 2500, 500, &mut rng);
test_hypergeometric_mean_and_variance(10100, 10000, 1000, &mut rng);
test_hypergeometric_mean_and_variance(100100, 100, 10000, &mut rng);
}
}

vendor/rand_distr/src/inverse_gaussian.rs vendored Normal file

@@ -0,0 +1,112 @@
use crate::{Distribution, Standard, StandardNormal};
use num_traits::Float;
use rand::Rng;
use core::fmt;
/// Error type returned from `InverseGaussian::new`
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
/// `mean <= 0` or `nan`.
MeanNegativeOrNull,
/// `shape <= 0` or `nan`.
ShapeNegativeOrNull,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::MeanNegativeOrNull => "mean <= 0 or is NaN in inverse Gaussian distribution",
Error::ShapeNegativeOrNull => "shape <= 0 or is NaN in inverse Gaussian distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
/// The [inverse Gaussian distribution](https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution)
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct InverseGaussian<F>
where
F: Float,
StandardNormal: Distribution<F>,
Standard: Distribution<F>,
{
mean: F,
shape: F,
}
impl<F> InverseGaussian<F>
where
F: Float,
StandardNormal: Distribution<F>,
Standard: Distribution<F>,
{
/// Construct a new `InverseGaussian` distribution with the given mean and
/// shape.
pub fn new(mean: F, shape: F) -> Result<InverseGaussian<F>, Error> {
let zero = F::zero();
if !(mean > zero) {
return Err(Error::MeanNegativeOrNull);
}
if !(shape > zero) {
return Err(Error::ShapeNegativeOrNull);
}
Ok(Self { mean, shape })
}
}
impl<F> Distribution<F> for InverseGaussian<F>
where
F: Float,
StandardNormal: Distribution<F>,
Standard: Distribution<F>,
{
#[allow(clippy::many_single_char_names)]
fn sample<R>(&self, rng: &mut R) -> F
where R: Rng + ?Sized {
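// Transformation-with-rejection sampling (commonly attributed to Michael,
// Schucany & Haas, 1976; attribution assumed here, not stated in this file):
// solve for one root x of the transformed quadratic, then return x with
// probability mu / (mu + x) and mu^2 / x otherwise.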
let mu = self.mean;
let l = self.shape;
let v: F = rng.sample(StandardNormal);
let y = mu * v * v;
let mu_2l = mu / (F::from(2.).unwrap() * l);
let x = mu + mu_2l * (y - (F::from(4.).unwrap() * l * y + y * y).sqrt());
let u: F = rng.gen();
if u <= mu / (mu + x) {
return x;
}
mu * mu / x
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_inverse_gaussian() {
let inv_gauss = InverseGaussian::new(1.0, 1.0).unwrap();
let mut rng = crate::test::rng(210);
for _ in 0..1000 {
inv_gauss.sample(&mut rng);
}
}
#[test]
fn test_inverse_gaussian_invalid_param() {
assert!(InverseGaussian::new(-1.0, 1.0).is_err());
assert!(InverseGaussian::new(-1.0, -1.0).is_err());
assert!(InverseGaussian::new(1.0, -1.0).is_err());
assert!(InverseGaussian::new(1.0, 1.0).is_ok());
}
}

213
vendor/rand_distr/src/lib.rs vendored Normal file

@@ -0,0 +1,213 @@
// Copyright 2019 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://rust-random.github.io/rand/"
)]
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![allow(
clippy::excessive_precision,
clippy::float_cmp,
clippy::unreadable_literal
)]
#![allow(clippy::neg_cmp_op_on_partial_ord)] // suggested fix too verbose
#![no_std]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
//! Generating random samples from probability distributions.
//!
//! ## Re-exports
//!
//! This crate is a super-set of the [`rand::distributions`] module. See the
//! [`rand::distributions`] module documentation for an overview of the core
//! [`Distribution`] trait and implementations.
//!
//! The following are re-exported:
//!
//! - The [`Distribution`] trait and [`DistIter`] helper type
//! - The [`Standard`], [`Alphanumeric`], [`Uniform`], [`OpenClosed01`],
//! [`Open01`], [`Bernoulli`], and [`WeightedIndex`] distributions
//!
//! ## Distributions
//!
//! This crate provides the following probability distributions:
//!
//! - Related to real-valued quantities that grow linearly
//! (e.g. errors, offsets):
//! - [`Normal`] distribution, and [`StandardNormal`] as a primitive
//! - [`SkewNormal`] distribution
//! - [`Cauchy`] distribution
//! - Related to Bernoulli trials (yes/no events, with a given probability):
//! - [`Binomial`] distribution
//! - [`Geometric`] distribution
//! - [`Hypergeometric`] distribution
//! - Related to positive real-valued quantities that grow exponentially
//! (e.g. prices, incomes, populations):
//! - [`LogNormal`] distribution
//! - Related to the occurrence of independent events at a given rate:
//! - [`Pareto`] distribution
//! - [`Poisson`] distribution
//! - [`Exp`]onential distribution, and [`Exp1`] as a primitive
//! - [`Weibull`] distribution
//! - [`Gumbel`] distribution
//! - [`Frechet`] distribution
//! - [`Zeta`] distribution
//! - [`Zipf`] distribution
//! - Gamma and derived distributions:
//! - [`Gamma`] distribution
//! - [`ChiSquared`] distribution
//! - [`StudentT`] distribution
//! - [`FisherF`] distribution
//! - Triangular distribution:
//! - [`Beta`] distribution
//! - [`Triangular`] distribution
//! - Multivariate probability distributions
//! - [`Dirichlet`] distribution
//! - [`UnitSphere`] distribution
//! - [`UnitBall`] distribution
//! - [`UnitCircle`] distribution
//! - [`UnitDisc`] distribution
//! - Alternative implementation for weighted index sampling
//! - [`WeightedAliasIndex`] distribution
//! - Misc. distributions
//! - [`InverseGaussian`] distribution
//! - [`NormalInverseGaussian`] distribution
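//!
//! ## Example
//!
//! A minimal sketch of drawing a single sample; every distribution listed above
//! is used the same way through the [`Distribution`] trait (the parameters below
//! are arbitrary):
//!
//! ```
//! use rand_distr::{Distribution, Normal};
//!
//! // mean 2, standard deviation 3
//! let normal = Normal::new(2.0, 3.0).unwrap();
//! let v = normal.sample(&mut rand::thread_rng());
//! println!("{} is from a N(2, 9) distribution", v);
//! ```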
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
// This is used for doc links:
#[allow(unused)]
use rand::Rng;
pub use rand::distributions::{
uniform, Alphanumeric, Bernoulli, BernoulliError, DistIter, Distribution, Open01, OpenClosed01,
Standard, Uniform,
};
pub use self::binomial::{Binomial, Error as BinomialError};
pub use self::cauchy::{Cauchy, Error as CauchyError};
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use self::dirichlet::{Dirichlet, Error as DirichletError};
pub use self::exponential::{Error as ExpError, Exp, Exp1};
pub use self::frechet::{Error as FrechetError, Frechet};
pub use self::gamma::{
Beta, BetaError, ChiSquared, ChiSquaredError, Error as GammaError, FisherF, FisherFError,
Gamma, StudentT,
};
pub use self::geometric::{Error as GeoError, Geometric, StandardGeometric};
pub use self::gumbel::{Error as GumbelError, Gumbel};
pub use self::hypergeometric::{Error as HyperGeoError, Hypergeometric};
pub use self::inverse_gaussian::{Error as InverseGaussianError, InverseGaussian};
pub use self::normal::{Error as NormalError, LogNormal, Normal, StandardNormal};
pub use self::normal_inverse_gaussian::{
Error as NormalInverseGaussianError, NormalInverseGaussian,
};
pub use self::pareto::{Error as ParetoError, Pareto};
pub use self::pert::{Pert, PertError};
pub use self::poisson::{Error as PoissonError, Poisson};
pub use self::skew_normal::{Error as SkewNormalError, SkewNormal};
pub use self::triangular::{Triangular, TriangularError};
pub use self::unit_ball::UnitBall;
pub use self::unit_circle::UnitCircle;
pub use self::unit_disc::UnitDisc;
pub use self::unit_sphere::UnitSphere;
pub use self::weibull::{Error as WeibullError, Weibull};
pub use self::zipf::{Zeta, ZetaError, Zipf, ZipfError};
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use rand::distributions::{WeightedError, WeightedIndex};
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use weighted_alias::WeightedAliasIndex;
pub use num_traits;
#[cfg(test)]
#[macro_use]
mod test {
// Notes on testing
//
// Testing random number distributions correctly is hard. The following
// testing is desired:
//
// - Construction: test initialisation with a few valid parameter sets.
// - Erroneous usage: test that incorrect usage generates an error.
// - Vector: test that usage with fixed inputs (including RNG) generates a
// fixed output sequence on all platforms.
// - Correctness at fixed points (optional): using a specific mock RNG,
// check that specific values are sampled (e.g. end-points and median of
// distribution).
// - Correctness of PDF (extra): generate a histogram of samples within a
// certain range, and check this approximates the PDF. These tests are
// expected to be expensive, and should be behind a feature-gate.
//
// TODO: Vector and correctness tests are largely absent so far.
// NOTE: Some distributions have tests checking only that samples can be
// generated. This is redundant with vector and correctness tests.
/// Construct a deterministic RNG with the given seed
pub fn rng(seed: u64) -> impl rand::RngCore {
// For tests, we want a statistically good, fast, reproducible RNG.
// PCG32 will do fine, and will be easy to embed if we ever need to.
const INC: u64 = 11634580027462260723;
rand_pcg::Pcg32::new(seed, INC)
}
/// Assert that two numbers are almost equal to each other.
///
/// On panic, this macro will print the values of the expressions with their
/// debug representations.
macro_rules! assert_almost_eq {
($a:expr, $b:expr, $prec:expr) => {
let diff = ($a - $b).abs();
assert!(diff <= $prec,
"assertion failed: `abs(left - right) = {:.1e} < {:e}`, \
(left: `{}`, right: `{}`)",
diff, $prec, $a, $b
);
};
}
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod weighted_alias;
mod binomial;
mod cauchy;
mod dirichlet;
mod exponential;
mod frechet;
mod gamma;
mod geometric;
mod gumbel;
mod hypergeometric;
mod inverse_gaussian;
mod normal;
mod normal_inverse_gaussian;
mod pareto;
mod pert;
mod poisson;
mod skew_normal;
mod triangular;
mod unit_ball;
mod unit_circle;
mod unit_disc;
mod unit_sphere;
mod utils;
mod weibull;
mod ziggurat_tables;
mod zipf;

371
vendor/rand_distr/src/normal.rs vendored Normal file

@@ -0,0 +1,371 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The normal and derived distributions.
use crate::utils::ziggurat;
use num_traits::Float;
use crate::{ziggurat_tables, Distribution, Open01};
use rand::Rng;
use core::fmt;
/// Samples floating-point numbers according to the normal distribution
/// `N(0, 1)` (a.k.a. a standard normal, or Gaussian). This is equivalent to
/// `Normal::new(0.0, 1.0)` but faster.
///
/// See `Normal` for the general normal distribution.
///
/// Implemented via the ZIGNOR variant[^1] of the Ziggurat method.
///
/// [^1]: Jurgen A. Doornik (2005). [*An Improved Ziggurat Method to
/// Generate Normal Random Samples*](
/// https://www.doornik.com/research/ziggurat.pdf).
/// Nuffield College, Oxford
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::StandardNormal;
///
/// let val: f64 = thread_rng().sample(StandardNormal);
/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct StandardNormal;
impl Distribution<f32> for StandardNormal {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f32 {
// TODO: use optimal 32-bit implementation
let x: f64 = self.sample(rng);
x as f32
}
}
impl Distribution<f64> for StandardNormal {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> f64 {
#[inline]
fn pdf(x: f64) -> f64 {
(-x * x / 2.0).exp()
}
#[inline]
fn zero_case<R: Rng + ?Sized>(rng: &mut R, u: f64) -> f64 {
// compute a random number in the tail by hand
// strange initial conditions, because the loop is not
// do-while, so the condition should be true on the first
// run, they get overwritten anyway (0 < 1, so these are
// good).
let mut x = 1.0f64;
let mut y = 0.0f64;
while -2.0 * y < x * x {
let x_: f64 = rng.sample(Open01);
let y_: f64 = rng.sample(Open01);
x = x_.ln() / ziggurat_tables::ZIG_NORM_R;
y = y_.ln();
}
if u < 0.0 {
x - ziggurat_tables::ZIG_NORM_R
} else {
ziggurat_tables::ZIG_NORM_R - x
}
}
ziggurat(
rng,
true, // this is symmetric
&ziggurat_tables::ZIG_NORM_X,
&ziggurat_tables::ZIG_NORM_F,
pdf,
zero_case,
)
}
}
/// The normal distribution `N(mean, std_dev**2)`.
///
/// This uses the ZIGNOR variant of the Ziggurat method, see [`StandardNormal`]
/// for more details.
///
/// Note that [`StandardNormal`] is an optimised implementation for mean 0, and
/// standard deviation 1.
///
/// # Example
///
/// ```
/// use rand_distr::{Normal, Distribution};
///
/// // mean 2, standard deviation 3
/// let normal = Normal::new(2.0, 3.0).unwrap();
/// let v = normal.sample(&mut rand::thread_rng());
/// println!("{} is from a N(2, 9) distribution", v)
/// ```
///
/// [`StandardNormal`]: crate::StandardNormal
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Normal<F>
where F: Float, StandardNormal: Distribution<F>
{
mean: F,
std_dev: F,
}
/// Error type returned from `Normal::new` and `LogNormal::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// The mean value is too small (log-normal samples must be positive)
MeanTooSmall,
/// The standard deviation or other dispersion parameter is not finite.
BadVariance,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::MeanTooSmall => "mean < 0 or NaN in log-normal distribution",
Error::BadVariance => "variation parameter is non-finite in (log)normal distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Normal<F>
where F: Float, StandardNormal: Distribution<F>
{
/// Construct, from mean and standard deviation
///
/// Parameters:
///
/// - mean (`μ`, unrestricted)
/// - standard deviation (`σ`, must be finite)
#[inline]
pub fn new(mean: F, std_dev: F) -> Result<Normal<F>, Error> {
if !std_dev.is_finite() {
return Err(Error::BadVariance);
}
Ok(Normal { mean, std_dev })
}
/// Construct, from mean and coefficient of variation
///
/// Parameters:
///
/// - mean (`μ`, unrestricted)
/// - coefficient of variation (`cv = abs(σ / μ)`)
#[inline]
pub fn from_mean_cv(mean: F, cv: F) -> Result<Normal<F>, Error> {
if !cv.is_finite() || cv < F::zero() {
return Err(Error::BadVariance);
}
let std_dev = cv * mean;
Ok(Normal { mean, std_dev })
}
/// Sample from a z-score
///
/// This may be useful for generating correlated samples `x1` and `x2`
/// from two different distributions, as follows.
/// ```
/// # use rand::prelude::*;
/// # use rand_distr::{Normal, StandardNormal};
/// let mut rng = thread_rng();
/// let z = StandardNormal.sample(&mut rng);
/// let x1 = Normal::new(0.0, 1.0).unwrap().from_zscore(z);
/// let x2 = Normal::new(2.0, -3.0).unwrap().from_zscore(z);
/// ```
#[inline]
pub fn from_zscore(&self, zscore: F) -> F {
self.mean + self.std_dev * zscore
}
/// Returns the mean (`μ`) of the distribution.
pub fn mean(&self) -> F {
self.mean
}
/// Returns the standard deviation (`σ`) of the distribution.
pub fn std_dev(&self) -> F {
self.std_dev
}
}
impl<F> Distribution<F> for Normal<F>
where F: Float, StandardNormal: Distribution<F>
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
self.from_zscore(rng.sample(StandardNormal))
}
}
/// The log-normal distribution `ln N(mean, std_dev**2)`.
///
/// If `X` is log-normal distributed, then `ln(X)` is `N(mean, std_dev**2)`
/// distributed.
///
/// # Example
///
/// ```
/// use rand_distr::{LogNormal, Distribution};
///
/// // mean 2, standard deviation 3
/// let log_normal = LogNormal::new(2.0, 3.0).unwrap();
/// let v = log_normal.sample(&mut rand::thread_rng());
/// println!("{} is from an ln N(2, 9) distribution", v)
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct LogNormal<F>
where F: Float, StandardNormal: Distribution<F>
{
norm: Normal<F>,
}
impl<F> LogNormal<F>
where F: Float, StandardNormal: Distribution<F>
{
/// Construct, from (log-space) mean and standard deviation
///
/// Parameters are the "standard" log-space measures (these are the mean
/// and standard deviation of the logarithm of samples):
///
/// - `mu` (`μ`, unrestricted) is the mean of the underlying distribution
/// - `sigma` (`σ`, must be finite) is the standard deviation of the
/// underlying Normal distribution
#[inline]
pub fn new(mu: F, sigma: F) -> Result<LogNormal<F>, Error> {
let norm = Normal::new(mu, sigma)?;
Ok(LogNormal { norm })
}
/// Construct, from (linear-space) mean and coefficient of variation
///
/// Parameters are linear-space measures:
///
/// - mean (`μ > 0`) is the (real) mean of the distribution
/// - coefficient of variation (`cv = σ / μ`, requiring `cv ≥ 0`) is a
/// standardized measure of dispersion
///
/// As a special exception, `μ = 0, cv = 0` is allowed; the underlying normal
/// distribution then has mean `-inf`, so every sample is `0`.
#[inline]
pub fn from_mean_cv(mean: F, cv: F) -> Result<LogNormal<F>, Error> {
if cv == F::zero() {
let mu = mean.ln();
let norm = Normal::new(mu, F::zero()).unwrap();
return Ok(LogNormal { norm });
}
if !(mean > F::zero()) {
return Err(Error::MeanTooSmall);
}
if !(cv >= F::zero()) {
return Err(Error::BadVariance);
}
// Using X ~ lognormal(μ, σ), CV² = Var(X) / E(X)²
// E(X) = exp(μ + σ² / 2) = exp(μ) × exp(σ² / 2)
// Var(X) = exp(2μ + σ²)(exp(σ²) - 1) = E(X)² × (exp(σ²) - 1)
// but Var(X) = (CV × E(X))² so CV² = exp(σ²) - 1
// thus σ² = log(CV² + 1)
// and exp(μ) = E(X) / exp(σ² / 2) = E(X) / sqrt(CV² + 1)
let a = F::one() + cv * cv; // a = exp(σ²) = CV² + 1
let mu = F::from(0.5).unwrap() * (mean * mean / a).ln();
let sigma = a.ln().sqrt();
let norm = Normal::new(mu, sigma)?;
Ok(LogNormal { norm })
}
/// Sample from a z-score
///
/// This may be useful for generating correlated samples `x1` and `x2`
/// from two different distributions, as follows.
/// ```
/// # use rand::prelude::*;
/// # use rand_distr::{LogNormal, StandardNormal};
/// let mut rng = thread_rng();
/// let z = StandardNormal.sample(&mut rng);
/// let x1 = LogNormal::from_mean_cv(3.0, 1.0).unwrap().from_zscore(z);
/// let x2 = LogNormal::from_mean_cv(2.0, 4.0).unwrap().from_zscore(z);
/// ```
#[inline]
pub fn from_zscore(&self, zscore: F) -> F {
self.norm.from_zscore(zscore).exp()
}
}
impl<F> Distribution<F> for LogNormal<F>
where F: Float, StandardNormal: Distribution<F>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
self.norm.sample(rng).exp()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_normal() {
let norm = Normal::new(10.0, 10.0).unwrap();
let mut rng = crate::test::rng(210);
for _ in 0..1000 {
norm.sample(&mut rng);
}
}
#[test]
fn test_normal_cv() {
let norm = Normal::from_mean_cv(1024.0, 1.0 / 256.0).unwrap();
assert_eq!((norm.mean, norm.std_dev), (1024.0, 4.0));
}
#[test]
fn test_normal_invalid_sd() {
assert!(Normal::from_mean_cv(10.0, -1.0).is_err());
}
#[test]
fn test_log_normal() {
let lnorm = LogNormal::new(10.0, 10.0).unwrap();
let mut rng = crate::test::rng(211);
for _ in 0..1000 {
lnorm.sample(&mut rng);
}
}
#[test]
fn test_log_normal_cv() {
let lnorm = LogNormal::from_mean_cv(0.0, 0.0).unwrap();
assert_eq!((lnorm.norm.mean, lnorm.norm.std_dev), (-core::f64::INFINITY, 0.0));
let lnorm = LogNormal::from_mean_cv(1.0, 0.0).unwrap();
assert_eq!((lnorm.norm.mean, lnorm.norm.std_dev), (0.0, 0.0));
let e = core::f64::consts::E;
let lnorm = LogNormal::from_mean_cv(e.sqrt(), (e - 1.0).sqrt()).unwrap();
assert_almost_eq!(lnorm.norm.mean, 0.0, 2e-16);
assert_almost_eq!(lnorm.norm.std_dev, 1.0, 2e-16);
let lnorm = LogNormal::from_mean_cv(e.powf(1.5), (e - 1.0).sqrt()).unwrap();
assert_almost_eq!(lnorm.norm.mean, 1.0, 1e-15);
assert_eq!(lnorm.norm.std_dev, 1.0);
}
#[test]
fn test_log_normal_invalid_sd() {
assert!(LogNormal::from_mean_cv(-1.0, 1.0).is_err());
assert!(LogNormal::from_mean_cv(0.0, 1.0).is_err());
assert!(LogNormal::from_mean_cv(1.0, -1.0).is_err());
}
}

vendor/rand_distr/src/normal_inverse_gaussian.rs vendored Normal file

@@ -0,0 +1,107 @@
use crate::{Distribution, InverseGaussian, Standard, StandardNormal};
use num_traits::Float;
use rand::Rng;
use core::fmt;
/// Error type returned from `NormalInverseGaussian::new`
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Error {
/// `alpha <= 0` or `nan`.
AlphaNegativeOrNull,
/// `|beta| >= alpha` or `nan`.
AbsoluteBetaNotLessThanAlpha,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::AlphaNegativeOrNull => "alpha <= 0 or is NaN in normal inverse Gaussian distribution",
Error::AbsoluteBetaNotLessThanAlpha => "|beta| >= alpha or is NaN in normal inverse Gaussian distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
/// The [normal-inverse Gaussian distribution](https://en.wikipedia.org/wiki/Normal-inverse_Gaussian_distribution)
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct NormalInverseGaussian<F>
where
F: Float,
StandardNormal: Distribution<F>,
Standard: Distribution<F>,
{
alpha: F,
beta: F,
inverse_gaussian: InverseGaussian<F>,
}
impl<F> NormalInverseGaussian<F>
where
F: Float,
StandardNormal: Distribution<F>,
Standard: Distribution<F>,
{
/// Construct a new `NormalInverseGaussian` distribution with the given alpha (tail heaviness) and
/// beta (asymmetry) parameters.
pub fn new(alpha: F, beta: F) -> Result<NormalInverseGaussian<F>, Error> {
if !(alpha > F::zero()) {
return Err(Error::AlphaNegativeOrNull);
}
if !(beta.abs() < alpha) {
return Err(Error::AbsoluteBetaNotLessThanAlpha);
}
let gamma = (alpha * alpha - beta * beta).sqrt();
let mu = F::one() / gamma;
let inverse_gaussian = InverseGaussian::new(mu, F::one()).unwrap();
Ok(Self {
alpha,
beta,
inverse_gaussian,
})
}
}
impl<F> Distribution<F> for NormalInverseGaussian<F>
where
F: Float,
StandardNormal: Distribution<F>,
Standard: Distribution<F>,
{
fn sample<R>(&self, rng: &mut R) -> F
where R: Rng + ?Sized {
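// NIG as a normal variance-mean mixture: with Z ~ InverseGaussian(1/gamma, 1),
// gamma = sqrt(alpha^2 - beta^2), the sample is beta * Z + sqrt(Z) * N(0, 1).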
let inv_gauss = rng.sample(&self.inverse_gaussian);
self.beta * inv_gauss + inv_gauss.sqrt() * rng.sample(StandardNormal)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_normal_inverse_gaussian() {
let norm_inv_gauss = NormalInverseGaussian::new(2.0, 1.0).unwrap();
let mut rng = crate::test::rng(210);
for _ in 0..1000 {
norm_inv_gauss.sample(&mut rng);
}
}
#[test]
fn test_normal_inverse_gaussian_invalid_param() {
assert!(NormalInverseGaussian::new(-1.0, 1.0).is_err());
assert!(NormalInverseGaussian::new(-1.0, -1.0).is_err());
assert!(NormalInverseGaussian::new(1.0, 2.0).is_err());
assert!(NormalInverseGaussian::new(2.0, 1.0).is_ok());
}
}

134
vendor/rand_distr/src/pareto.rs vendored Normal file

@@ -0,0 +1,134 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Pareto distribution.
use num_traits::Float;
use crate::{Distribution, OpenClosed01};
use rand::Rng;
use core::fmt;
/// Samples floating-point numbers according to the Pareto distribution
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Pareto;
///
/// let val: f64 = thread_rng().sample(Pareto::new(1., 2.).unwrap());
/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Pareto<F>
where F: Float, OpenClosed01: Distribution<F>
{
scale: F,
inv_neg_shape: F,
}
/// Error type returned from `Pareto::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `scale <= 0` or `nan`.
ScaleTooSmall,
/// `shape <= 0` or `nan`.
ShapeTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => "scale is not positive in Pareto distribution",
Error::ShapeTooSmall => "shape is not positive in Pareto distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Pareto<F>
where F: Float, OpenClosed01: Distribution<F>
{
/// Construct a new Pareto distribution with given `scale` and `shape`.
///
/// In the literature, `scale` is commonly written as x<sub>m</sub> or k and
/// `shape` is often written as α.
pub fn new(scale: F, shape: F) -> Result<Pareto<F>, Error> {
let zero = F::zero();
if !(scale > zero) {
return Err(Error::ScaleTooSmall);
}
if !(shape > zero) {
return Err(Error::ShapeTooSmall);
}
Ok(Pareto {
scale,
inv_neg_shape: F::from(-1.0).unwrap() / shape,
})
}
}
impl<F> Distribution<F> for Pareto<F>
where F: Float, OpenClosed01: Distribution<F>
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let u: F = OpenClosed01.sample(rng);
self.scale * u.powf(self.inv_neg_shape)
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::fmt::{Debug, Display, LowerExp};
#[test]
#[should_panic]
fn invalid() {
Pareto::new(0., 0.).unwrap();
}
#[test]
fn sample() {
let scale = 1.0;
let shape = 2.0;
let d = Pareto::new(scale, shape).unwrap();
let mut rng = crate::test::rng(1);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= scale);
}
}
#[test]
fn value_stability() {
fn test_samples<F: Float + Debug + Display + LowerExp, D: Distribution<F>>(
distr: D, thresh: F, expected: &[F],
) {
let mut rng = crate::test::rng(213);
for v in expected {
let x = rng.sample(&distr);
assert_almost_eq!(x, *v, thresh);
}
}
test_samples(Pareto::new(1f32, 1.0).unwrap(), 1e-6, &[
1.0423688, 2.1235929, 4.132709, 1.4679428,
]);
test_samples(Pareto::new(2.0, 0.5).unwrap(), 1e-14, &[
9.019295276219136,
4.3097126018270595,
6.837815045397157,
105.8826669383772,
]);
}
}
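
The sampler above is an inverse-CDF transform: with `U` uniform on `(0, 1]`, `X = scale * U^(-1/shape)` is Pareto-distributed and always at least `scale`. A small sketch (ours, not upstream code) making that transform explicit:

```rust
use rand::prelude::*;
use rand_distr::{Distribution, OpenClosed01, Pareto};

fn main() {
    let (scale, shape) = (1.0_f64, 2.0_f64);
    let mut rng = thread_rng();

    // Manual inverse-CDF transform:
    let u: f64 = rng.sample(OpenClosed01);
    let manual = scale * u.powf(-1.0 / shape);

    // The same distribution via the crate type (independent draw):
    let sampled = Pareto::new(scale, shape).unwrap().sample(&mut rng);

    assert!(manual >= scale && sampled >= scale);
}
```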

149
vendor/rand_distr/src/pert.rs vendored Normal file

@@ -0,0 +1,149 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The PERT distribution.
use num_traits::Float;
use crate::{Beta, Distribution, Exp1, Open01, StandardNormal};
use rand::Rng;
use core::fmt;
/// The PERT distribution.
///
/// Similar to the [`Triangular`] distribution, the PERT distribution is
/// parameterised by a range and a mode within that range. Unlike the
/// [`Triangular`] distribution, the probability density function of the PERT
/// distribution is smooth, with a configurable weighting around the mode.
///
/// # Example
///
/// ```rust
/// use rand_distr::{Pert, Distribution};
///
/// let d = Pert::new(0., 5., 2.5).unwrap();
/// let v = d.sample(&mut rand::thread_rng());
/// println!("{} is from a PERT distribution", v);
/// ```
///
/// [`Triangular`]: crate::Triangular
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Pert<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
min: F,
range: F,
beta: Beta<F>,
}
/// Error type returned from [`Pert`] constructors.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PertError {
/// `max < min` or `min` or `max` is NaN.
RangeTooSmall,
/// `mode < min` or `mode > max` or `mode` is NaN.
ModeRange,
/// `shape < 0` or `shape` is NaN.
ShapeTooSmall,
}
impl fmt::Display for PertError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
PertError::RangeTooSmall => "requirement min < max is not met in PERT distribution",
PertError::ModeRange => "mode is outside [min, max] in PERT distribution",
PertError::ShapeTooSmall => "shape < 0 or is NaN in PERT distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for PertError {}
impl<F> Pert<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
/// Set up the PERT distribution with defined `min`, `max` and `mode`.
///
/// This is equivalent to calling `Pert::new_with_shape` with `shape == 4.0`.
#[inline]
pub fn new(min: F, max: F, mode: F) -> Result<Pert<F>, PertError> {
Pert::new_with_shape(min, max, mode, F::from(4.).unwrap())
}
/// Set up the PERT distribution with defined `min`, `max`, `mode` and
/// `shape`.
pub fn new_with_shape(min: F, max: F, mode: F, shape: F) -> Result<Pert<F>, PertError> {
if !(max > min) {
return Err(PertError::RangeTooSmall);
}
if !(mode >= min && max >= mode) {
return Err(PertError::ModeRange);
}
if !(shape >= F::from(0.).unwrap()) {
return Err(PertError::ShapeTooSmall);
}
let range = max - min;
let mu = (min + max + shape * mode) / (shape + F::from(2.).unwrap());
let v = if mu == mode {
shape * F::from(0.5).unwrap() + F::from(1.).unwrap()
} else {
(mu - min) * (F::from(2.).unwrap() * mode - min - max) / ((mode - mu) * (max - min))
};
let w = v * (max - mu) / (mu - min);
let beta = Beta::new(v, w).map_err(|_| PertError::RangeTooSmall)?;
Ok(Pert { min, range, beta })
}
}
impl<F> Distribution<F> for Pert<F>
where
F: Float,
StandardNormal: Distribution<F>,
Exp1: Distribution<F>,
Open01: Distribution<F>,
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
self.beta.sample(rng) * self.range + self.min
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_pert() {
for &(min, max, mode) in &[
(-1., 1., 0.),
(1., 2., 1.),
(5., 25., 25.),
] {
let _distr = Pert::new(min, max, mode).unwrap();
// TODO: test correctness
}
for &(min, max, mode) in &[
(-1., 1., 2.),
(-1., 1., -2.),
(2., 1., 1.),
] {
assert!(Pert::new(min, max, mode).is_err());
}
}
}
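
`new_with_shape` above reduces the PERT distribution to a `Beta(v, w)` rescaled onto `[min, max]`, via `mu = (min + max + shape * mode) / (shape + 2)`. A worked sketch (ours; the helper `pert_beta_params` is hypothetical) with one concrete parameter set:

```rust
// Mirrors the parameter derivation in `Pert::new_with_shape` above.
fn pert_beta_params(min: f64, max: f64, mode: f64, shape: f64) -> (f64, f64) {
    let mu = (min + max + shape * mode) / (shape + 2.0);
    let v = if mu == mode {
        shape * 0.5 + 1.0
    } else {
        (mu - min) * (2.0 * mode - min - max) / ((mode - mu) * (max - min))
    };
    let w = v * (max - mu) / (mu - min);
    (v, w)
}

fn main() {
    // min = 0, max = 10, mode = 2, shape = 4 gives mu = 3, so the PERT sample
    // is a Beta(1.8, 4.2) variate rescaled onto [0, 10].
    let (v, w) = pert_beta_params(0.0, 10.0, 2.0, 4.0);
    assert!((v - 1.8).abs() < 1e-12 && (w - 4.2).abs() < 1e-12);
}
```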

181
vendor/rand_distr/src/poisson.rs vendored Normal file

@@ -0,0 +1,181 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2016-2017 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Poisson distribution.
use num_traits::{Float, FloatConst};
use crate::{Cauchy, Distribution, Standard};
use rand::Rng;
use core::fmt;
/// The Poisson distribution `Poisson(lambda)`.
///
/// This distribution has a density function:
/// `f(k) = lambda^k * exp(-lambda) / k!` for `k >= 0`.
///
/// # Example
///
/// ```
/// use rand_distr::{Poisson, Distribution};
///
/// let poi = Poisson::new(2.0).unwrap();
/// let v = poi.sample(&mut rand::thread_rng());
/// println!("{} is from a Poisson(2) distribution", v);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Poisson<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
lambda: F,
// precalculated values
exp_lambda: F,
log_lambda: F,
sqrt_2lambda: F,
magic_val: F,
}
/// Error type returned from `Poisson::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `lambda <= 0` or `nan`.
ShapeTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ShapeTooSmall => "lambda is not positive in Poisson distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Poisson<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
/// Construct a new `Poisson` with the given shape parameter
/// `lambda`.
pub fn new(lambda: F) -> Result<Poisson<F>, Error> {
if !(lambda > F::zero()) {
return Err(Error::ShapeTooSmall);
}
let log_lambda = lambda.ln();
Ok(Poisson {
lambda,
exp_lambda: (-lambda).exp(),
log_lambda,
sqrt_2lambda: (F::from(2.0).unwrap() * lambda).sqrt(),
magic_val: lambda * log_lambda - crate::utils::log_gamma(F::one() + lambda),
})
}
}
impl<F> Distribution<F> for Poisson<F>
where F: Float + FloatConst, Standard: Distribution<F>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
// using the algorithm from Numerical Recipes in C
// for low expected values use the Knuth method
if self.lambda < F::from(12.0).unwrap() {
let mut result = F::zero();
let mut p = F::one();
while p > self.exp_lambda {
p = p*rng.gen::<F>();
result = result + F::one();
}
result - F::one()
}
// high expected values - rejection method
else {
// we use the Cauchy distribution as the comparison distribution
// f(x) ~ 1/(1+x^2)
let cauchy = Cauchy::new(F::zero(), F::one()).unwrap();
let mut result;
loop {
let mut comp_dev;
loop {
// draw from the Cauchy distribution
comp_dev = rng.sample(cauchy);
// shift the peak of the comparison distribution
result = self.sqrt_2lambda * comp_dev + self.lambda;
// repeat the drawing until we are in the range of possible values
if result >= F::zero() {
break;
}
}
// now the result is a random variable greater than 0 with Cauchy distribution
// the result should be an integer value
result = result.floor();
// this is the ratio of the Poisson distribution to the comparison distribution
// the magic value scales the distribution function to a range of approximately 0-1
// since it is not exact, we multiply the ratio by 0.9 to avoid ratios greater than 1
// this doesn't change the resulting distribution, only increases the rate of failed drawings
let check = F::from(0.9).unwrap()
* (F::one() + comp_dev * comp_dev)
* (result * self.log_lambda
- crate::utils::log_gamma(F::one() + result)
- self.magic_val)
.exp();
// check with uniform random value - if below the threshold, we are within the target distribution
if rng.gen::<F>() <= check {
break;
}
}
result
}
}
}
#[cfg(test)]
mod test {
use super::*;
fn test_poisson_avg_gen<F: Float + FloatConst>(lambda: F, tol: F)
where Standard: Distribution<F>
{
let poisson = Poisson::new(lambda).unwrap();
let mut rng = crate::test::rng(123);
let mut sum = F::zero();
for _ in 0..1000 {
sum = sum + poisson.sample(&mut rng);
}
let avg = sum / F::from(1000.0).unwrap();
assert!((avg - lambda).abs() < tol);
}
#[test]
fn test_poisson_avg() {
test_poisson_avg_gen::<f64>(10.0, 0.5);
test_poisson_avg_gen::<f64>(15.0, 0.5);
test_poisson_avg_gen::<f32>(10.0, 0.5);
test_poisson_avg_gen::<f32>(15.0, 0.5);
}
#[test]
#[should_panic]
fn test_poisson_invalid_lambda_zero() {
Poisson::new(0.0).unwrap();
}
#[test]
#[should_panic]
fn test_poisson_invalid_lambda_neg() {
Poisson::new(-10.0).unwrap();
}
}
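
For `lambda < 12` the sampler above uses Knuth's product-of-uniforms method: keep multiplying uniform draws into a running product and count how many factors fit before the product falls to `exp(-lambda)` or below. A standalone sketch (ours; `knuth_poisson` is a hypothetical helper, not crate API):

```rust
use rand::prelude::*;

// Count the uniform factors multiplied in before the running product drops
// to exp(-lambda); that count is Poisson(lambda) distributed.
fn knuth_poisson<R: Rng + ?Sized>(rng: &mut R, lambda: f64) -> u64 {
    let threshold = (-lambda).exp();
    let mut product = 1.0;
    let mut count = 0u64;
    loop {
        product *= rng.gen::<f64>();
        if product <= threshold {
            return count;
        }
        count += 1;
    }
}

fn main() {
    let mut rng = thread_rng();
    let n = 10_000;
    let mean = (0..n).map(|_| knuth_poisson(&mut rng, 4.0) as f64).sum::<f64>() / n as f64;
    println!("empirical mean {} (should be close to 4.0)", mean);
}
```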

256
vendor/rand_distr/src/skew_normal.rs vendored Normal file

@@ -0,0 +1,256 @@
// Copyright 2021 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Skew Normal distribution.
use crate::{Distribution, StandardNormal};
use core::fmt;
use num_traits::Float;
use rand::Rng;
/// The [skew normal distribution] `SN(location, scale, shape)`.
///
/// The skew normal distribution is a generalization of the
/// [`Normal`] distribution to allow for non-zero skewness.
///
/// It has the density function, for `scale > 0`,
/// `f(x) = 2 / scale * phi((x - location) / scale) * Phi(alpha * (x - location) / scale)`
/// where `phi` and `Phi` are the density and distribution of a standard normal variable.
///
/// # Example
///
/// ```
/// use rand_distr::{SkewNormal, Distribution};
///
/// // location 2, scale 3, shape 1
/// let skew_normal = SkewNormal::new(2.0, 3.0, 1.0).unwrap();
/// let v = skew_normal.sample(&mut rand::thread_rng());
/// println!("{} is from a SN(2, 3, 1) distribution", v)
/// ```
///
/// # Implementation details
///
/// We are using the algorithm from [A Method to Simulate the Skew Normal Distribution].
///
/// [skew normal distribution]: https://en.wikipedia.org/wiki/Skew_normal_distribution
/// [`Normal`]: struct.Normal.html
/// [A Method to Simulate the Skew Normal Distribution]: https://dx.doi.org/10.4236/am.2014.513201
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct SkewNormal<F>
where
F: Float,
StandardNormal: Distribution<F>,
{
location: F,
scale: F,
shape: F,
}
/// Error type returned from `SkewNormal::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// The scale parameter is non-finite or less than or equal to zero.
ScaleTooSmall,
/// The shape parameter is not finite.
BadShape,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => {
"scale parameter is either non-finite or it is less or equal to zero in skew normal distribution"
}
Error::BadShape => "shape parameter is non-finite in skew normal distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> SkewNormal<F>
where
F: Float,
StandardNormal: Distribution<F>,
{
/// Construct, from location, scale and shape.
///
/// Parameters:
///
/// - location (unrestricted)
/// - scale (must be finite and larger than zero)
/// - shape (must be finite)
#[inline]
pub fn new(location: F, scale: F, shape: F) -> Result<SkewNormal<F>, Error> {
if !scale.is_finite() || !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
if !shape.is_finite() {
return Err(Error::BadShape);
}
Ok(SkewNormal {
location,
scale,
shape,
})
}
/// Returns the location of the distribution.
pub fn location(&self) -> F {
self.location
}
/// Returns the scale of the distribution.
pub fn scale(&self) -> F {
self.scale
}
/// Returns the shape of the distribution.
pub fn shape(&self) -> F {
self.shape
}
}
impl<F> Distribution<F> for SkewNormal<F>
where
F: Float,
StandardNormal: Distribution<F>,
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let linear_map = |x: F| -> F { x * self.scale + self.location };
let u_1: F = rng.sample(StandardNormal);
if self.shape == F::zero() {
linear_map(u_1)
} else {
let u_2 = rng.sample(StandardNormal);
let (u, v) = (u_1.max(u_2), u_1.min(u_2));
if self.shape == -F::one() {
linear_map(v)
} else if self.shape == F::one() {
linear_map(u)
} else {
let normalized = ((F::one() + self.shape) * u + (F::one() - self.shape) * v)
/ ((F::one() + self.shape * self.shape).sqrt()
* F::from(core::f64::consts::SQRT_2).unwrap());
linear_map(normalized)
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn test_samples<F: Float + core::fmt::Debug, D: Distribution<F>>(
distr: D, zero: F, expected: &[F],
) {
let mut rng = crate::test::rng(213);
let mut buf = [zero; 4];
for x in &mut buf {
*x = rng.sample(&distr);
}
assert_eq!(buf, expected);
}
#[test]
#[should_panic]
fn invalid_scale_nan() {
SkewNormal::new(0.0, core::f64::NAN, 0.0).unwrap();
}
#[test]
#[should_panic]
fn invalid_scale_zero() {
SkewNormal::new(0.0, 0.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn invalid_scale_negative() {
SkewNormal::new(0.0, -1.0, 0.0).unwrap();
}
#[test]
#[should_panic]
fn invalid_scale_infinite() {
SkewNormal::new(0.0, core::f64::INFINITY, 0.0).unwrap();
}
#[test]
#[should_panic]
fn invalid_shape_nan() {
SkewNormal::new(0.0, 1.0, core::f64::NAN).unwrap();
}
#[test]
#[should_panic]
fn invalid_shape_infinite() {
SkewNormal::new(0.0, 1.0, core::f64::INFINITY).unwrap();
}
#[test]
fn valid_location_nan() {
SkewNormal::new(core::f64::NAN, 1.0, 0.0).unwrap();
}
#[test]
fn skew_normal_value_stability() {
test_samples(
SkewNormal::new(0.0, 1.0, 0.0).unwrap(),
0f32,
&[-0.11844189, 0.781378, 0.06563994, -1.1932899],
);
test_samples(
SkewNormal::new(0.0, 1.0, 0.0).unwrap(),
0f64,
&[
-0.11844188827977231,
0.7813779637772346,
0.06563993969580051,
-1.1932899004186373,
],
);
test_samples(
SkewNormal::new(core::f64::INFINITY, 1.0, 0.0).unwrap(),
0f64,
&[
core::f64::INFINITY,
core::f64::INFINITY,
core::f64::INFINITY,
core::f64::INFINITY,
],
);
test_samples(
SkewNormal::new(core::f64::NEG_INFINITY, 1.0, 0.0).unwrap(),
0f64,
&[
core::f64::NEG_INFINITY,
core::f64::NEG_INFINITY,
core::f64::NEG_INFINITY,
core::f64::NEG_INFINITY,
],
);
}
#[test]
fn skew_normal_value_location_nan() {
let skew_normal = SkewNormal::new(core::f64::NAN, 1.0, 0.0).unwrap();
let mut rng = crate::test::rng(213);
let mut buf = [0.0; 4];
for x in &mut buf {
*x = rng.sample(&skew_normal);
}
for value in buf.iter() {
assert!(value.is_nan());
}
}
}
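
As a quick sanity check on the sampler above: a skew-normal variate has mean `location + scale * delta * sqrt(2/pi)` with `delta = shape / sqrt(1 + shape^2)`. A rough Monte Carlo sketch (ours, not a crate test) comparing the empirical and analytic means:

```rust
use rand::prelude::*;
use rand_distr::{Distribution, SkewNormal};

fn main() {
    let (location, scale, shape) = (2.0_f64, 3.0_f64, 1.0_f64);
    let sn = SkewNormal::new(location, scale, shape).unwrap();
    let mut rng = thread_rng();

    let n = 100_000;
    let empirical = (0..n).map(|_| sn.sample(&mut rng)).sum::<f64>() / n as f64;

    let delta = shape / (1.0 + shape * shape).sqrt();
    let analytic = location + scale * delta * (2.0 / core::f64::consts::PI).sqrt();
    println!("empirical mean {}, analytic mean {}", empirical, analytic);
}
```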

133
vendor/rand_distr/src/triangular.rs vendored Normal file

@@ -0,0 +1,133 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The triangular distribution.
use num_traits::Float;
use crate::{Distribution, Standard};
use rand::Rng;
use core::fmt;
/// The triangular distribution.
///
/// A continuous probability distribution parameterised by a range, and a mode
/// (most likely value) within that range.
///
/// The probability density function is triangular. For a similar distribution
/// with a smooth PDF, see the [`Pert`] distribution.
///
/// # Example
///
/// ```rust
/// use rand_distr::{Triangular, Distribution};
///
/// let d = Triangular::new(0., 5., 2.5).unwrap();
/// let v = d.sample(&mut rand::thread_rng());
/// println!("{} is from a triangular distribution", v);
/// ```
///
/// [`Pert`]: crate::Pert
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Triangular<F>
where F: Float, Standard: Distribution<F>
{
min: F,
max: F,
mode: F,
}
/// Error type returned from [`Triangular::new`].
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TriangularError {
/// `max < min` or `min` or `max` is NaN.
RangeTooSmall,
/// `mode < min` or `mode > max` or `mode` is NaN.
ModeRange,
}
impl fmt::Display for TriangularError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
TriangularError::RangeTooSmall => {
"requirement min <= max is not met in triangular distribution"
}
TriangularError::ModeRange => "mode is outside [min, max] in triangular distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for TriangularError {}
impl<F> Triangular<F>
where F: Float, Standard: Distribution<F>
{
/// Set up the Triangular distribution with defined `min`, `max` and `mode`.
#[inline]
pub fn new(min: F, max: F, mode: F) -> Result<Triangular<F>, TriangularError> {
if !(max >= min) {
return Err(TriangularError::RangeTooSmall);
}
if !(mode >= min && max >= mode) {
return Err(TriangularError::ModeRange);
}
Ok(Triangular { min, max, mode })
}
}
impl<F> Distribution<F> for Triangular<F>
where F: Float, Standard: Distribution<F>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let f: F = rng.sample(Standard);
let diff_mode_min = self.mode - self.min;
let range = self.max - self.min;
let f_range = f * range;
if f_range < diff_mode_min {
self.min + (f_range * diff_mode_min).sqrt()
} else {
self.max - ((range - f_range) * (self.max - self.mode)).sqrt()
}
}
}
#[cfg(test)]
mod test {
use super::*;
use rand::{rngs::mock, Rng};
#[test]
fn test_triangular() {
let mut half_rng = mock::StepRng::new(0x8000_0000_0000_0000, 0);
assert_eq!(half_rng.gen::<f64>(), 0.5);
for &(min, max, mode, median) in &[
(-1., 1., 0., 0.),
(1., 2., 1., 2. - 0.5f64.sqrt()),
(5., 25., 25., 5. + 200f64.sqrt()),
(1e-5, 1e5, 1e-3, 1e5 - 4999999949.5f64.sqrt()),
(0., 1., 0.9, 0.45f64.sqrt()),
(-4., -0.5, -2., -4.0 + 3.5f64.sqrt()),
] {
#[cfg(feature = "std")]
std::println!("{} {} {} {}", min, max, mode, median);
let distr = Triangular::new(min, max, mode).unwrap();
// Test correct value at median:
assert_eq!(distr.sample(&mut half_rng), median);
}
for &(min, max, mode) in &[
(-1., 1., 2.),
(-1., 1., -2.),
(2., 1., 1.),
] {
assert!(Triangular::new(min, max, mode).is_err());
}
}
}
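
The `sample` method above inverts the triangular CDF, which is exactly what the medians in `test_triangular` exercise at `f = 0.5`. A small sketch (ours; `triangular_inv_cdf` is a hypothetical helper) reproducing two of those medians:

```rust
// Same branch structure as `Triangular::sample` above, with `u` supplied directly.
fn triangular_inv_cdf(min: f64, max: f64, mode: f64, u: f64) -> f64 {
    let range = max - min;
    let f_range = u * range;
    if f_range < mode - min {
        min + (f_range * (mode - min)).sqrt()
    } else {
        max - ((range - f_range) * (max - mode)).sqrt()
    }
}

fn main() {
    // (min, max, mode) = (1, 2, 1): median is 2 - sqrt(0.5).
    let m = triangular_inv_cdf(1.0, 2.0, 1.0, 0.5);
    assert!((m - (2.0 - 0.5_f64.sqrt())).abs() < 1e-12);
    // (min, max, mode) = (-1, 1, 0): symmetric, so the median is 0.
    assert!(triangular_inv_cdf(-1.0, 1.0, 0.0, 0.5).abs() < 1e-12);
}
```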

48
vendor/rand_distr/src/unit_ball.rs vendored Normal file

@@ -0,0 +1,48 @@
// Copyright 2019 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use num_traits::Float;
use crate::{uniform::SampleUniform, Distribution, Uniform};
use rand::Rng;
/// Samples uniformly from the unit ball (surface and interior) in three
/// dimensions.
///
/// Implemented via rejection sampling.
///
///
/// # Example
///
/// ```
/// use rand_distr::{UnitBall, Distribution};
///
/// let v: [f64; 3] = UnitBall.sample(&mut rand::thread_rng());
/// println!("{:?} is from the unit ball.", v)
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct UnitBall;
impl<F: Float + SampleUniform> Distribution<[F; 3]> for UnitBall {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [F; 3] {
let uniform = Uniform::new(F::from(-1.).unwrap(), F::from(1.).unwrap());
let mut x1;
let mut x2;
let mut x3;
loop {
x1 = uniform.sample(rng);
x2 = uniform.sample(rng);
x3 = uniform.sample(rng);
if x1 * x1 + x2 * x2 + x3 * x3 <= F::from(1.).unwrap() {
break;
}
}
[x1, x2, x3]
}
}

68
vendor/rand_distr/src/unit_circle.rs vendored Normal file

@@ -0,0 +1,68 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use num_traits::Float;
use crate::{uniform::SampleUniform, Distribution, Uniform};
use rand::Rng;
/// Samples uniformly from the edge of the unit circle in two dimensions.
///
/// Implemented via a method by von Neumann[^1].
///
///
/// # Example
///
/// ```
/// use rand_distr::{UnitCircle, Distribution};
///
/// let v: [f64; 2] = UnitCircle.sample(&mut rand::thread_rng());
/// println!("{:?} is from the unit circle.", v)
/// ```
///
/// [^1]: von Neumann, J. (1951) [*Various Techniques Used in Connection with
/// Random Digits.*](https://mcnp.lanl.gov/pdf_files/nbs_vonneumann.pdf)
/// NBS Appl. Math. Ser., No. 12. Washington, DC: U.S. Government Printing
/// Office, pp. 36-38.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct UnitCircle;
impl<F: Float + SampleUniform> Distribution<[F; 2]> for UnitCircle {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [F; 2] {
let uniform = Uniform::new(F::from(-1.).unwrap(), F::from(1.).unwrap());
let mut x1;
let mut x2;
let mut sum;
loop {
x1 = uniform.sample(rng);
x2 = uniform.sample(rng);
sum = x1 * x1 + x2 * x2;
if sum < F::from(1.).unwrap() {
break;
}
}
let diff = x1 * x1 - x2 * x2;
[diff / sum, F::from(2.).unwrap() * x1 * x2 / sum]
}
}
#[cfg(test)]
mod tests {
use super::UnitCircle;
use crate::Distribution;
#[test]
fn norm() {
let mut rng = crate::test::rng(1);
for _ in 0..1000 {
let x: [f64; 2] = UnitCircle.sample(&mut rng);
assert_almost_eq!(x[0] * x[0] + x[1] * x[1], 1., 1e-15);
}
}
}

45
vendor/rand_distr/src/unit_disc.rs vendored Normal file

@@ -0,0 +1,45 @@
// Copyright 2019 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use num_traits::Float;
use crate::{uniform::SampleUniform, Distribution, Uniform};
use rand::Rng;
/// Samples uniformly from the unit disc in two dimensions.
///
/// Implemented via rejection sampling.
///
///
/// # Example
///
/// ```
/// use rand_distr::{UnitDisc, Distribution};
///
/// let v: [f64; 2] = UnitDisc.sample(&mut rand::thread_rng());
/// println!("{:?} is from the unit Disc.", v)
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct UnitDisc;
impl<F: Float + SampleUniform> Distribution<[F; 2]> for UnitDisc {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [F; 2] {
let uniform = Uniform::new(F::from(-1.).unwrap(), F::from(1.).unwrap());
let mut x1;
let mut x2;
loop {
x1 = uniform.sample(rng);
x2 = uniform.sample(rng);
if x1 * x1 + x2 * x2 <= F::from(1.).unwrap() {
break;
}
}
[x1, x2]
}
}

63
vendor/rand_distr/src/unit_sphere.rs vendored Normal file

@@ -0,0 +1,63 @@
// Copyright 2018-2019 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use num_traits::Float;
use crate::{uniform::SampleUniform, Distribution, Uniform};
use rand::Rng;
/// Samples uniformly from the surface of the unit sphere in three dimensions.
///
/// Implemented via a method by Marsaglia[^1].
///
///
/// # Example
///
/// ```
/// use rand_distr::{UnitSphere, Distribution};
///
/// let v: [f64; 3] = UnitSphere.sample(&mut rand::thread_rng());
/// println!("{:?} is from the unit sphere surface.", v)
/// ```
///
/// [^1]: Marsaglia, George (1972). [*Choosing a Point from the Surface of a
/// Sphere.*](https://doi.org/10.1214/aoms/1177692644)
/// Ann. Math. Statist. 43, no. 2, 645--646.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct UnitSphere;
impl<F: Float + SampleUniform> Distribution<[F; 3]> for UnitSphere {
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> [F; 3] {
let uniform = Uniform::new(F::from(-1.).unwrap(), F::from(1.).unwrap());
loop {
let (x1, x2) = (uniform.sample(rng), uniform.sample(rng));
let sum = x1 * x1 + x2 * x2;
if sum >= F::from(1.).unwrap() {
continue;
}
let factor = F::from(2.).unwrap() * (F::one() - sum).sqrt();
return [x1 * factor, x2 * factor, F::from(1.).unwrap() - F::from(2.).unwrap() * sum];
}
}
}
#[cfg(test)]
mod tests {
use super::UnitSphere;
use crate::Distribution;
#[test]
fn norm() {
let mut rng = crate::test::rng(1);
for _ in 0..1000 {
let x: [f64; 3] = UnitSphere.sample(&mut rng);
assert_almost_eq!(x[0] * x[0] + x[1] * x[1] + x[2] * x[2], 1., 1e-15);
}
}
}
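
The identity behind Marsaglia's transform above: for `s = x1^2 + x2^2 < 1`, the point `[2*x1*sqrt(1-s), 2*x2*sqrt(1-s), 1-2*s]` always has unit length, since `4*s*(1-s) + (1-2*s)^2 = 1`. A tiny sketch (ours) checking that algebra numerically:

```rust
// Applies the Marsaglia transform used in `UnitSphere::sample` above to a fixed point.
fn marsaglia_point(x1: f64, x2: f64) -> [f64; 3] {
    let sum = x1 * x1 + x2 * x2;
    assert!(sum < 1.0);
    let factor = 2.0 * (1.0 - sum).sqrt();
    [x1 * factor, x2 * factor, 1.0 - 2.0 * sum]
}

fn main() {
    let p = marsaglia_point(0.3, -0.4);
    let norm2 = p[0] * p[0] + p[1] * p[1] + p[2] * p[2];
    assert!((norm2 - 1.0).abs() < 1e-12);
}
```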

121
vendor/rand_distr/src/utils.rs vendored Normal file
View File

@@ -0,0 +1,121 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Math helper functions
use crate::ziggurat_tables;
use rand::distributions::hidden_export::IntoFloat;
use rand::Rng;
use num_traits::Float;
/// Calculates ln(gamma(x)) (natural logarithm of the gamma
/// function) using the Lanczos approximation.
///
/// The approximation expresses the gamma function as:
/// `gamma(z+1) = sqrt(2*pi)*(z+g+0.5)^(z+0.5)*exp(-z-g-0.5)*Ag(z)`
/// `g` is an arbitrary constant; we use the approximation with `g=5`.
///
/// Noting that `gamma(z+1) = z*gamma(z)` and applying `ln` to both sides:
/// `ln(gamma(z)) = (z+0.5)*ln(z+g+0.5)-(z+g+0.5) + ln(sqrt(2*pi)*Ag(z)/z)`
///
/// `Ag(z)` is an infinite series with coefficients that can be calculated
/// ahead of time - we use just the first 6 terms, which is good enough
/// for most purposes.
pub(crate) fn log_gamma<F: Float>(x: F) -> F {
// precalculated 6 coefficients for the first 6 terms of the series
let coefficients: [F; 6] = [
F::from(76.18009172947146).unwrap(),
F::from(-86.50532032941677).unwrap(),
F::from(24.01409824083091).unwrap(),
F::from(-1.231739572450155).unwrap(),
F::from(0.1208650973866179e-2).unwrap(),
F::from(-0.5395239384953e-5).unwrap(),
];
// (x+0.5)*ln(x+g+0.5)-(x+g+0.5)
let tmp = x + F::from(5.5).unwrap();
let log = (x + F::from(0.5).unwrap()) * tmp.ln() - tmp;
// the first few terms of the series for Ag(x)
let mut a = F::from(1.000000000190015).unwrap();
let mut denom = x;
for &coeff in &coefficients {
denom = denom + F::one();
a = a + (coeff / denom);
}
// get everything together
// a is Ag(x)
// 2.5066... is sqrt(2pi)
log + (F::from(2.5066282746310005).unwrap() * a / x).ln()
}
/// Sample a random number using the Ziggurat method (specifically the
/// ZIGNOR variant from Doornik 2005). Most of the arguments are
/// directly from the paper:
///
/// * `rng`: source of randomness
/// * `symmetric`: whether this is a symmetric distribution, or one-sided with P(x < 0) = 0.
/// * `X`: the $x_i$ abscissae.
/// * `F`: precomputed values of the PDF at the $x_i$, (i.e. $f(x_i)$)
/// * `F_DIFF`: precomputed values of $f(x_i) - f(x_{i+1})$
/// * `pdf`: the probability density function
/// * `zero_case`: manual sampling from the tail when we chose the
/// bottom box (i.e. i == 0)
// the perf improvement (25-50%) is definitely worth the extra code
// size from force-inlining.
#[inline(always)]
pub(crate) fn ziggurat<R: Rng + ?Sized, P, Z>(
rng: &mut R,
symmetric: bool,
x_tab: ziggurat_tables::ZigTable,
f_tab: ziggurat_tables::ZigTable,
mut pdf: P,
mut zero_case: Z
) -> f64
where
P: FnMut(f64) -> f64,
Z: FnMut(&mut R, f64) -> f64,
{
loop {
// As an optimisation we re-implement the conversion to a f64.
// From the remaining 12 most significant bits we use 8 to construct `i`.
// This saves us generating a whole extra random number, while the added
// precision of using 64 bits for f64 does not buy us much.
let bits = rng.next_u64();
let i = bits as usize & 0xff;
let u = if symmetric {
// Convert to a value in the range [2,4) and subtract to get [-1,1)
// We can't convert to an open range directly, that would require
// subtracting `3.0 - EPSILON`, which is not representable.
// It is possible with an extra step, but an open range does not
// seem necessary for the ziggurat algorithm anyway.
(bits >> 12).into_float_with_exponent(1) - 3.0
} else {
// Convert to a value in the range [1,2) and subtract to get (0,1)
(bits >> 12).into_float_with_exponent(0) - (1.0 - core::f64::EPSILON / 2.0)
};
let x = u * x_tab[i];
let test_x = if symmetric { x.abs() } else { x };
// algebraically equivalent to |u| < x_tab[i+1]/x_tab[i] (or u < x_tab[i+1]/x_tab[i])
if test_x < x_tab[i + 1] {
return x;
}
if i == 0 {
return zero_case(rng, u);
}
// algebraically equivalent to f1 + DRanU()*(f0 - f1) < 1
if f_tab[i + 1] + (f_tab[i] - f_tab[i + 1]) * rng.gen::<f64>() < pdf(x) {
return x;
}
}
}
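
The Lanczos approximation in `log_gamma` above (g = 5, six series terms) is accurate to roughly 2e-10 over its domain. A standalone sketch (ours, duplicating the constants above as plain `f64`) checking it against the exact value `ln(Gamma(5)) = ln(4!) = ln(24)`:

```rust
// Same Lanczos formula as the crate-private `log_gamma` above, specialised to f64.
fn log_gamma(x: f64) -> f64 {
    let coefficients = [
        76.18009172947146,
        -86.50532032941677,
        24.01409824083091,
        -1.231739572450155,
        0.1208650973866179e-2,
        -0.5395239384953e-5,
    ];
    let tmp = x + 5.5;
    let log = (x + 0.5) * tmp.ln() - tmp;
    let mut a = 1.000000000190015;
    let mut denom = x;
    for &coeff in &coefficients {
        denom += 1.0;
        a += coeff / denom;
    }
    log + (2.5066282746310005 * a / x).ln()
}

fn main() {
    let exact = 24.0_f64.ln();
    assert!((log_gamma(5.0) - exact).abs() < 1e-8);
}
```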

132
vendor/rand_distr/src/weibull.rs vendored Normal file

@@ -0,0 +1,132 @@
// Copyright 2018 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Weibull distribution.
use num_traits::Float;
use crate::{Distribution, OpenClosed01};
use rand::Rng;
use core::fmt;
/// Samples floating-point numbers according to the Weibull distribution
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Weibull;
///
/// let val: f64 = thread_rng().sample(Weibull::new(1., 10.).unwrap());
/// println!("{}", val);
/// ```
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
pub struct Weibull<F>
where F: Float, OpenClosed01: Distribution<F>
{
inv_shape: F,
scale: F,
}
/// Error type returned from `Weibull::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Error {
/// `scale <= 0` or `nan`.
ScaleTooSmall,
/// `shape <= 0` or `nan`.
ShapeTooSmall,
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
Error::ScaleTooSmall => "scale is not positive in Weibull distribution",
Error::ShapeTooSmall => "shape is not positive in Weibull distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for Error {}
impl<F> Weibull<F>
where F: Float, OpenClosed01: Distribution<F>
{
/// Construct a new `Weibull` distribution with given `scale` and `shape`.
pub fn new(scale: F, shape: F) -> Result<Weibull<F>, Error> {
if !(scale > F::zero()) {
return Err(Error::ScaleTooSmall);
}
if !(shape > F::zero()) {
return Err(Error::ShapeTooSmall);
}
Ok(Weibull {
inv_shape: F::from(1.).unwrap() / shape,
scale,
})
}
}
impl<F> Distribution<F> for Weibull<F>
where F: Float, OpenClosed01: Distribution<F>
{
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let x: F = rng.sample(OpenClosed01);
self.scale * (-x.ln()).powf(self.inv_shape)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
#[should_panic]
fn invalid() {
Weibull::new(0., 0.).unwrap();
}
#[test]
fn sample() {
let scale = 1.0;
let shape = 2.0;
let d = Weibull::new(scale, shape).unwrap();
let mut rng = crate::test::rng(1);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 0.);
}
}
#[test]
fn value_stability() {
fn test_samples<F: Float + core::fmt::Debug, D: Distribution<F>>(
distr: D, zero: F, expected: &[F],
) {
let mut rng = crate::test::rng(213);
let mut buf = [zero; 4];
for x in &mut buf {
*x = rng.sample(&distr);
}
assert_eq!(buf, expected);
}
test_samples(Weibull::new(1.0, 1.0).unwrap(), 0f32, &[
0.041495778,
0.7531094,
1.4189332,
0.38386202,
]);
test_samples(Weibull::new(2.0, 0.5).unwrap(), 0f64, &[
1.1343478702739669,
0.29470010050655226,
0.7556151370284702,
7.877212340241561,
]);
}
}
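
The Weibull sampler above is likewise an inverse-CDF transform: with `U` on `(0, 1]`, `X = scale * (-ln U)^(1/shape)`, so `U = 0.5` gives the analytic median `scale * ln(2)^(1/shape)`. A short sketch (ours; `weibull_inv_cdf` is a hypothetical helper):

```rust
// Same transform as `Weibull::sample` above, with the uniform value supplied directly.
fn weibull_inv_cdf(scale: f64, shape: f64, u: f64) -> f64 {
    scale * (-u.ln()).powf(1.0 / shape)
}

fn main() {
    let (scale, shape) = (2.0_f64, 0.5_f64);
    let median = weibull_inv_cdf(scale, shape, 0.5);
    let expected = scale * 2.0_f64.ln().powf(1.0 / shape);
    assert!((median - expected).abs() < 1e-12);
}
```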

524
vendor/rand_distr/src/weighted_alias.rs vendored Normal file

@@ -0,0 +1,524 @@
// Copyright 2019 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module contains an implementation of alias method for sampling random
//! indices with probabilities proportional to a collection of weights.
use super::WeightedError;
use crate::{uniform::SampleUniform, Distribution, Uniform};
use core::fmt;
use core::iter::Sum;
use core::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Sub, SubAssign};
use rand::Rng;
use alloc::{boxed::Box, vec, vec::Vec};
#[cfg(feature = "serde1")]
use serde::{Serialize, Deserialize};
/// A distribution using weighted sampling to pick a discretely selected item.
///
/// Sampling a [`WeightedAliasIndex<W>`] distribution returns the index of a randomly
/// selected element from the vector used to create the [`WeightedAliasIndex<W>`].
/// The chance of a given element being picked is proportional to the value of
/// the element. The weights can have any type `W` for which an implementation of
/// [`AliasableWeight`] exists.
///
/// # Performance
///
/// Given that `n` is the number of items in the vector used to create a
/// [`WeightedAliasIndex<W>`], it will require `O(n)` memory.
/// More specifically, it takes up some constant amount of memory plus
/// the vector used to create it and a [`Vec<u32>`] with capacity `n`.
///
/// Time complexity for the creation of a [`WeightedAliasIndex<W>`] is `O(n)`.
/// Sampling is `O(1)`, it makes a call to [`Uniform<u32>::sample`] and a call
/// to [`Uniform<W>::sample`].
///
/// # Example
///
/// ```
/// use rand_distr::WeightedAliasIndex;
/// use rand::prelude::*;
///
/// let choices = vec!['a', 'b', 'c'];
/// let weights = vec![2, 1, 1];
/// let dist = WeightedAliasIndex::new(weights).unwrap();
/// let mut rng = thread_rng();
/// for _ in 0..100 {
/// // 50% chance to print 'a', 25% chance to print 'b', 25% chance to print 'c'
/// println!("{}", choices[dist.sample(&mut rng)]);
/// }
///
/// let items = [('a', 0), ('b', 3), ('c', 7)];
/// let dist2 = WeightedAliasIndex::new(items.iter().map(|item| item.1).collect()).unwrap();
/// for _ in 0..100 {
/// // 0% chance to print 'a', 30% chance to print 'b', 70% chance to print 'c'
/// println!("{}", items[dist2.sample(&mut rng)].0);
/// }
/// ```
///
/// [`WeightedAliasIndex<W>`]: WeightedAliasIndex
/// [`Vec<u32>`]: Vec
/// [`Uniform<u32>::sample`]: Distribution::sample
/// [`Uniform<W>::sample`]: Distribution::sample
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg_attr(feature = "serde1", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde1", serde(bound(serialize = "W: Serialize, W::Sampler: Serialize")))]
#[cfg_attr(feature = "serde1", serde(bound(deserialize = "W: Deserialize<'de>, W::Sampler: Deserialize<'de>")))]
pub struct WeightedAliasIndex<W: AliasableWeight> {
aliases: Box<[u32]>,
no_alias_odds: Box<[W]>,
uniform_index: Uniform<u32>,
uniform_within_weight_sum: Uniform<W>,
}
impl<W: AliasableWeight> WeightedAliasIndex<W> {
/// Creates a new [`WeightedAliasIndex`].
///
/// Returns an error if:
/// - The vector is empty.
/// - The vector is longer than `u32::MAX`.
/// - For any weight `w`: `w < 0` or `w > max` where `max = W::MAX /
/// weights.len()`.
/// - The sum of weights is zero.
pub fn new(weights: Vec<W>) -> Result<Self, WeightedError> {
let n = weights.len();
if n == 0 {
return Err(WeightedError::NoItem);
} else if n > ::core::u32::MAX as usize {
return Err(WeightedError::TooMany);
}
let n = n as u32;
let max_weight_size = W::try_from_u32_lossy(n)
.map(|n| W::MAX / n)
.unwrap_or(W::ZERO);
if !weights
.iter()
.all(|&w| W::ZERO <= w && w <= max_weight_size)
{
return Err(WeightedError::InvalidWeight);
}
// The sum of weights will represent 100% of no alias odds.
let weight_sum = AliasableWeight::sum(weights.as_slice());
// Prevent floating point overflow due to rounding errors.
let weight_sum = if weight_sum > W::MAX {
W::MAX
} else {
weight_sum
};
if weight_sum == W::ZERO {
return Err(WeightedError::AllWeightsZero);
}
// `weight_sum` would have been zero if `try_from_u32_lossy` had caused an error here.
let n_converted = W::try_from_u32_lossy(n).unwrap();
let mut no_alias_odds = weights.into_boxed_slice();
for odds in no_alias_odds.iter_mut() {
*odds *= n_converted;
// Prevent floating point overflow due to rounding errors.
*odds = if *odds > W::MAX { W::MAX } else { *odds };
}
/// This struct is designed to contain three data structures at once,
/// sharing the same memory. More precisely it contains two linked lists
/// and an alias map, which will be the output of this method. To keep
/// the three data structures from getting in each other's way, it must
/// be ensured that a single index is only ever in one of them at the
/// same time.
struct Aliases {
aliases: Box<[u32]>,
smalls_head: u32,
bigs_head: u32,
}
impl Aliases {
fn new(size: u32) -> Self {
Aliases {
aliases: vec![0; size as usize].into_boxed_slice(),
smalls_head: ::core::u32::MAX,
bigs_head: ::core::u32::MAX,
}
}
fn push_small(&mut self, idx: u32) {
self.aliases[idx as usize] = self.smalls_head;
self.smalls_head = idx;
}
fn push_big(&mut self, idx: u32) {
self.aliases[idx as usize] = self.bigs_head;
self.bigs_head = idx;
}
fn pop_small(&mut self) -> u32 {
let popped = self.smalls_head;
self.smalls_head = self.aliases[popped as usize];
popped
}
fn pop_big(&mut self) -> u32 {
let popped = self.bigs_head;
self.bigs_head = self.aliases[popped as usize];
popped
}
fn smalls_is_empty(&self) -> bool {
self.smalls_head == ::core::u32::MAX
}
fn bigs_is_empty(&self) -> bool {
self.bigs_head == ::core::u32::MAX
}
fn set_alias(&mut self, idx: u32, alias: u32) {
self.aliases[idx as usize] = alias;
}
}
let mut aliases = Aliases::new(n);
// Split indices into those with small weights and those with big weights.
for (index, &odds) in no_alias_odds.iter().enumerate() {
if odds < weight_sum {
aliases.push_small(index as u32);
} else {
aliases.push_big(index as u32);
}
}
// Build the alias map by finding an alias with big weight for each index with
// small weight.
while !aliases.smalls_is_empty() && !aliases.bigs_is_empty() {
let s = aliases.pop_small();
let b = aliases.pop_big();
aliases.set_alias(s, b);
no_alias_odds[b as usize] =
no_alias_odds[b as usize] - weight_sum + no_alias_odds[s as usize];
if no_alias_odds[b as usize] < weight_sum {
aliases.push_small(b);
} else {
aliases.push_big(b);
}
}
// The remaining indices should have no alias odds of about 100%. This is due to
// numeric accuracy. Otherwise they would be exactly 100%.
while !aliases.smalls_is_empty() {
no_alias_odds[aliases.pop_small() as usize] = weight_sum;
}
while !aliases.bigs_is_empty() {
no_alias_odds[aliases.pop_big() as usize] = weight_sum;
}
// Prepare distributions for sampling. Creating them beforehand improves
// sampling performance.
let uniform_index = Uniform::new(0, n);
let uniform_within_weight_sum = Uniform::new(W::ZERO, weight_sum);
Ok(Self {
aliases: aliases.aliases,
no_alias_odds,
uniform_index,
uniform_within_weight_sum,
})
}
}
impl<W: AliasableWeight> Distribution<usize> for WeightedAliasIndex<W> {
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> usize {
let candidate = rng.sample(self.uniform_index);
if rng.sample(&self.uniform_within_weight_sum) < self.no_alias_odds[candidate as usize] {
candidate as usize
} else {
self.aliases[candidate as usize] as usize
}
}
}
impl<W: AliasableWeight> fmt::Debug for WeightedAliasIndex<W>
where
W: fmt::Debug,
Uniform<W>: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("WeightedAliasIndex")
.field("aliases", &self.aliases)
.field("no_alias_odds", &self.no_alias_odds)
.field("uniform_index", &self.uniform_index)
.field("uniform_within_weight_sum", &self.uniform_within_weight_sum)
.finish()
}
}
impl<W: AliasableWeight> Clone for WeightedAliasIndex<W>
where Uniform<W>: Clone
{
fn clone(&self) -> Self {
Self {
aliases: self.aliases.clone(),
no_alias_odds: self.no_alias_odds.clone(),
uniform_index: self.uniform_index,
uniform_within_weight_sum: self.uniform_within_weight_sum.clone(),
}
}
}
/// Trait that must be implemented for weights that are used with
/// [`WeightedAliasIndex`]. Currently no correctness guarantees are given for
/// [`WeightedAliasIndex`] used with custom implementations of this trait.
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub trait AliasableWeight:
Sized
+ Copy
+ SampleUniform
+ PartialOrd
+ Add<Output = Self>
+ AddAssign
+ Sub<Output = Self>
+ SubAssign
+ Mul<Output = Self>
+ MulAssign
+ Div<Output = Self>
+ DivAssign
+ Sum
{
/// Maximum number representable by `Self`.
const MAX: Self;
/// Element of `Self` equivalent to 0.
const ZERO: Self;
/// Produce an instance of `Self` from a `u32` value, or return `None` if
/// out of range. Loss of precision (where `Self` is a floating point type)
/// is acceptable.
fn try_from_u32_lossy(n: u32) -> Option<Self>;
/// Sums all values in slice `values`.
fn sum(values: &[Self]) -> Self {
values.iter().copied().sum()
}
}
macro_rules! impl_weight_for_float {
($T: ident) => {
impl AliasableWeight for $T {
const MAX: Self = ::core::$T::MAX;
const ZERO: Self = 0.0;
fn try_from_u32_lossy(n: u32) -> Option<Self> {
Some(n as $T)
}
fn sum(values: &[Self]) -> Self {
pairwise_sum(values)
}
}
};
}
/// In comparison to naive accumulation, the pairwise sum algorithm reduces
/// rounding errors when there are many floating point values.
fn pairwise_sum<T: AliasableWeight>(values: &[T]) -> T {
if values.len() <= 32 {
values.iter().copied().sum()
} else {
let mid = values.len() / 2;
let (a, b) = values.split_at(mid);
pairwise_sum(a) + pairwise_sum(b)
}
}
macro_rules! impl_weight_for_int {
($T: ident) => {
impl AliasableWeight for $T {
const MAX: Self = ::core::$T::MAX;
const ZERO: Self = 0;
fn try_from_u32_lossy(n: u32) -> Option<Self> {
let n_converted = n as Self;
if n_converted >= Self::ZERO && n_converted as u32 == n {
Some(n_converted)
} else {
None
}
}
}
};
}
impl_weight_for_float!(f64);
impl_weight_for_float!(f32);
impl_weight_for_int!(usize);
impl_weight_for_int!(u128);
impl_weight_for_int!(u64);
impl_weight_for_int!(u32);
impl_weight_for_int!(u16);
impl_weight_for_int!(u8);
impl_weight_for_int!(isize);
impl_weight_for_int!(i128);
impl_weight_for_int!(i64);
impl_weight_for_int!(i32);
impl_weight_for_int!(i16);
impl_weight_for_int!(i8);
#[cfg(test)]
mod test {
use super::*;
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weighted_index_f32() {
test_weighted_index(f32::into);
// Floating point special cases
assert_eq!(
WeightedAliasIndex::new(vec![::core::f32::INFINITY]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedAliasIndex::new(vec![-0_f32]).unwrap_err(),
WeightedError::AllWeightsZero
);
assert_eq!(
WeightedAliasIndex::new(vec![-1_f32]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedAliasIndex::new(vec![-::core::f32::INFINITY]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedAliasIndex::new(vec![::core::f32::NAN]).unwrap_err(),
WeightedError::InvalidWeight
);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weighted_index_u128() {
test_weighted_index(|x: u128| x as f64);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weighted_index_i128() {
test_weighted_index(|x: i128| x as f64);
// Signed integer special cases
assert_eq!(
WeightedAliasIndex::new(vec![-1_i128]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedAliasIndex::new(vec![::core::i128::MIN]).unwrap_err(),
WeightedError::InvalidWeight
);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weighted_index_u8() {
test_weighted_index(u8::into);
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_weighted_index_i8() {
test_weighted_index(i8::into);
// Signed integer special cases
assert_eq!(
WeightedAliasIndex::new(vec![-1_i8]).unwrap_err(),
WeightedError::InvalidWeight
);
assert_eq!(
WeightedAliasIndex::new(vec![::core::i8::MIN]).unwrap_err(),
WeightedError::InvalidWeight
);
}
fn test_weighted_index<W: AliasableWeight, F: Fn(W) -> f64>(w_to_f64: F)
where WeightedAliasIndex<W>: fmt::Debug {
const NUM_WEIGHTS: u32 = 10;
const ZERO_WEIGHT_INDEX: u32 = 3;
const NUM_SAMPLES: u32 = 15000;
let mut rng = crate::test::rng(0x9c9fa0b0580a7031);
let weights = {
let mut weights = Vec::with_capacity(NUM_WEIGHTS as usize);
let random_weight_distribution = Uniform::new_inclusive(
W::ZERO,
W::MAX / W::try_from_u32_lossy(NUM_WEIGHTS).unwrap(),
);
for _ in 0..NUM_WEIGHTS {
weights.push(rng.sample(&random_weight_distribution));
}
weights[ZERO_WEIGHT_INDEX as usize] = W::ZERO;
weights
};
let weight_sum = weights.iter().copied().sum::<W>();
let expected_counts = weights
.iter()
.map(|&w| w_to_f64(w) / w_to_f64(weight_sum) * NUM_SAMPLES as f64)
.collect::<Vec<f64>>();
let weight_distribution = WeightedAliasIndex::new(weights).unwrap();
let mut counts = vec![0; NUM_WEIGHTS as usize];
for _ in 0..NUM_SAMPLES {
counts[rng.sample(&weight_distribution)] += 1;
}
assert_eq!(counts[ZERO_WEIGHT_INDEX as usize], 0);
for (count, expected_count) in counts.into_iter().zip(expected_counts) {
let difference = (count as f64 - expected_count).abs();
let max_allowed_difference = NUM_SAMPLES as f64 / NUM_WEIGHTS as f64 * 0.1;
assert!(difference <= max_allowed_difference);
}
assert_eq!(
WeightedAliasIndex::<W>::new(vec![]).unwrap_err(),
WeightedError::NoItem
);
assert_eq!(
WeightedAliasIndex::new(vec![W::ZERO]).unwrap_err(),
WeightedError::AllWeightsZero
);
assert_eq!(
WeightedAliasIndex::new(vec![W::MAX, W::MAX]).unwrap_err(),
WeightedError::InvalidWeight
);
}
#[test]
fn value_stability() {
fn test_samples<W: AliasableWeight>(weights: Vec<W>, buf: &mut [usize], expected: &[usize]) {
assert_eq!(buf.len(), expected.len());
let distr = WeightedAliasIndex::new(weights).unwrap();
let mut rng = crate::test::rng(0x9c9fa0b0580a7031);
for r in buf.iter_mut() {
*r = rng.sample(&distr);
}
assert_eq!(buf, expected);
}
let mut buf = [0; 10];
test_samples(vec![1i32, 1, 1, 1, 1, 1, 1, 1, 1], &mut buf, &[
6, 5, 7, 5, 8, 7, 6, 2, 3, 7,
]);
test_samples(vec![0.7f32, 0.1, 0.1, 0.1], &mut buf, &[
2, 0, 0, 0, 0, 0, 0, 0, 1, 3,
]);
test_samples(vec![1.0f64, 0.999, 0.998, 0.997], &mut buf, &[
2, 1, 2, 3, 2, 1, 3, 2, 1, 1,
]);
}
}
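
Tracing the construction above for weights `[2, 1, 1]`: `weight_sum = 4` and the scaled odds are `[6, 3, 3]`; pairing each small entry with the single big one leaves `no_alias_odds = [4, 3, 3]` and aliases `[_, 0, 0]`. Sampling then draws an index `i` and a uniform `u` in `[0, weight_sum)` and returns `i` if `u < no_alias_odds[i]`, else `aliases[i]`. A small sketch (ours, with that hand-derived table hard-coded) checking the resulting proportions are about 1/2, 1/4, 1/4:

```rust
use rand::prelude::*;

fn main() {
    // Alias table worked out by hand for weights [2, 1, 1] (see the trace above).
    let no_alias_odds = [4.0, 3.0, 3.0];
    let aliases = [0usize, 0, 0];
    let weight_sum = 4.0;

    let mut rng = thread_rng();
    let mut counts = [0u32; 3];
    for _ in 0..100_000 {
        let i = rng.gen_range(0..3);
        let u: f64 = rng.gen_range(0.0..weight_sum);
        let idx = if u < no_alias_odds[i] { i } else { aliases[i] };
        counts[idx] += 1;
    }
    println!("{:?}", counts); // roughly [50_000, 25_000, 25_000]
}
```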

283
vendor/rand_distr/src/ziggurat_tables.rs vendored Normal file

@@ -0,0 +1,283 @@
// Copyright 2018 Developers of the Rand project.
// Copyright 2013 The Rust Project Developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tables for distributions which are sampled using the ziggurat
// algorithm. Autogenerated by `ziggurat_tables.py`.
pub type ZigTable = &'static [f64; 257];
pub const ZIG_NORM_R: f64 = 3.654152885361008796;
#[rustfmt::skip]
pub static ZIG_NORM_X: [f64; 257] =
[3.910757959537090045, 3.654152885361008796, 3.449278298560964462, 3.320244733839166074,
3.224575052047029100, 3.147889289517149969, 3.083526132001233044, 3.027837791768635434,
2.978603279880844834, 2.934366867207854224, 2.894121053612348060, 2.857138730872132548,
2.822877396825325125, 2.790921174000785765, 2.760944005278822555, 2.732685359042827056,
2.705933656121858100, 2.680514643284522158, 2.656283037575502437, 2.633116393630324570,
2.610910518487548515, 2.589575986706995181, 2.569035452680536569, 2.549221550323460761,
2.530075232158516929, 2.511544441625342294, 2.493583041269680667, 2.476149939669143318,
2.459208374333311298, 2.442725318198956774, 2.426670984935725972, 2.411018413899685520,
2.395743119780480601, 2.380822795170626005, 2.366237056715818632, 2.351967227377659952,
2.337996148795031370, 2.324308018869623016, 2.310888250599850036, 2.297723348901329565,
2.284800802722946056, 2.272108990226823888, 2.259637095172217780, 2.247375032945807760,
2.235313384928327984, 2.223443340090905718, 2.211756642882544366, 2.200245546609647995,
2.188902771624720689, 2.177721467738641614, 2.166695180352645966, 2.155817819875063268,
2.145083634046203613, 2.134487182844320152, 2.124023315687815661, 2.113687150684933957,
2.103474055713146829, 2.093379631137050279, 2.083399693996551783, 2.073530263516978778,
2.063767547809956415, 2.054107931648864849, 2.044547965215732788, 2.035084353727808715,
2.025713947862032960, 2.016433734904371722, 2.007240830558684852, 1.998132471356564244,
1.989106007615571325, 1.980158896898598364, 1.971288697931769640, 1.962493064942461896,
1.953769742382734043, 1.945116560006753925, 1.936531428273758904, 1.928012334050718257,
1.919557336591228847, 1.911164563769282232, 1.902832208548446369, 1.894558525668710081,
1.886341828534776388, 1.878180486290977669, 1.870072921069236838, 1.862017605397632281,
1.854013059758148119, 1.846057850283119750, 1.838150586580728607, 1.830289919680666566,
1.822474540091783224, 1.814703175964167636, 1.806974591348693426, 1.799287584547580199,
1.791640986550010028, 1.784033659547276329, 1.776464495522344977, 1.768932414909077933,
1.761436365316706665, 1.753975320315455111, 1.746548278279492994, 1.739154261283669012,
1.731792314050707216, 1.724461502945775715, 1.717160915015540690, 1.709889657069006086,
1.702646854797613907, 1.695431651932238548, 1.688243209434858727, 1.681080704722823338,
1.673943330923760353, 1.666830296159286684, 1.659740822855789499, 1.652674147080648526,
1.645629517902360339, 1.638606196773111146, 1.631603456932422036, 1.624620582830568427,
1.617656869570534228, 1.610711622367333673, 1.603784156023583041, 1.596873794420261339,
1.589979870021648534, 1.583101723393471438, 1.576238702733332886, 1.569390163412534456,
1.562555467528439657, 1.555733983466554893, 1.548925085471535512, 1.542128153226347553,
1.535342571438843118, 1.528567729435024614, 1.521803020758293101, 1.515047842773992404,
1.508301596278571965, 1.501563685112706548, 1.494833515777718391, 1.488110497054654369,
1.481394039625375747, 1.474683555695025516, 1.467978458615230908, 1.461278162507407830,
1.454582081885523293, 1.447889631277669675, 1.441200224845798017, 1.434513276002946425,
1.427828197027290358, 1.421144398672323117, 1.414461289772464658, 1.407778276843371534,
1.401094763676202559, 1.394410150925071257, 1.387723835686884621, 1.381035211072741964,
1.374343665770030531, 1.367648583594317957, 1.360949343030101844, 1.354245316759430606,
1.347535871177359290, 1.340820365893152122, 1.334098153216083604, 1.327368577624624679,
1.320630975217730096, 1.313884673146868964, 1.307128989027353860, 1.300363230327433728,
1.293586693733517645, 1.286798664489786415, 1.279998415710333237, 1.273185207661843732,
1.266358287014688333, 1.259516886060144225, 1.252660221891297887, 1.245787495544997903,
1.238897891102027415, 1.231990574742445110, 1.225064693752808020, 1.218119375481726552,
1.211153726239911244, 1.204166830140560140, 1.197157747875585931, 1.190125515422801650,
1.183069142678760732, 1.175987612011489825, 1.168879876726833800, 1.161744859441574240,
1.154581450355851802, 1.147388505416733873, 1.140164844363995789, 1.132909248648336975,
1.125620459211294389, 1.118297174115062909, 1.110938046009249502, 1.103541679420268151,
1.096106627847603487, 1.088631390649514197, 1.081114409698889389, 1.073554065787871714,
1.065948674757506653, 1.058296483326006454, 1.050595664586207123, 1.042844313139370538,
1.035040439828605274, 1.027181966030751292, 1.019266717460529215, 1.011292417434978441,
1.003256679539591412, 0.995156999629943084, 0.986990747093846266, 0.978755155288937750,
0.970447311058864615, 0.962064143217605250, 0.953602409875572654, 0.945058684462571130,
0.936429340280896860, 0.927710533396234771, 0.918898183643734989, 0.909987953490768997,
0.900975224455174528, 0.891855070726792376, 0.882622229578910122, 0.873271068082494550,
0.863795545546826915, 0.854189171001560554, 0.844444954902423661, 0.834555354079518752,
0.824512208745288633, 0.814306670128064347, 0.803929116982664893, 0.793369058833152785,
0.782615023299588763, 0.771654424216739354, 0.760473406422083165, 0.749056662009581653,
0.737387211425838629, 0.725446140901303549, 0.713212285182022732, 0.700661841097584448,
0.687767892786257717, 0.674499822827436479, 0.660822574234205984, 0.646695714884388928,
0.632072236375024632, 0.616896989996235545, 0.601104617743940417, 0.584616766093722262,
0.567338257040473026, 0.549151702313026790, 0.529909720646495108, 0.509423329585933393,
0.487443966121754335, 0.463634336771763245, 0.437518402186662658, 0.408389134588000746,
0.375121332850465727, 0.335737519180459465, 0.286174591747260509, 0.215241895913273806,
0.000000000000000000];
#[rustfmt::skip]
pub static ZIG_NORM_F: [f64; 257] =
[0.000477467764586655, 0.001260285930498598, 0.002609072746106363, 0.004037972593371872,
0.005522403299264754, 0.007050875471392110, 0.008616582769422917, 0.010214971439731100,
0.011842757857943104, 0.013497450601780807, 0.015177088307982072, 0.016880083152595839,
0.018605121275783350, 0.020351096230109354, 0.022117062707379922, 0.023902203305873237,
0.025705804008632656, 0.027527235669693315, 0.029365939758230111, 0.031221417192023690,
0.033093219458688698, 0.034980941461833073, 0.036884215688691151, 0.038802707404656918,
0.040736110656078753, 0.042684144916619378, 0.044646552251446536, 0.046623094902089664,
0.048613553216035145, 0.050617723861121788, 0.052635418276973649, 0.054666461325077916,
0.056710690106399467, 0.058767952921137984, 0.060838108349751806, 0.062921024437977854,
0.065016577971470438, 0.067124653828023989, 0.069245144397250269, 0.071377949059141965,
0.073522973714240991, 0.075680130359194964, 0.077849336702372207, 0.080030515814947509,
0.082223595813495684, 0.084428509570654661, 0.086645194450867782, 0.088873592068594229,
0.091113648066700734, 0.093365311913026619, 0.095628536713353335, 0.097903279039215627,
0.100189498769172020, 0.102487158942306270, 0.104796225622867056, 0.107116667775072880,
0.109448457147210021, 0.111791568164245583, 0.114145977828255210, 0.116511665626037014,
0.118888613443345698, 0.121276805485235437, 0.123676228202051403, 0.126086870220650349,
0.128508722280473636, 0.130941777174128166, 0.133386029692162844, 0.135841476571757352,
0.138308116449064322, 0.140785949814968309, 0.143274978974047118, 0.145775208006537926,
0.148286642733128721, 0.150809290682410169, 0.153343161060837674, 0.155888264725064563,
0.158444614156520225, 0.161012223438117663, 0.163591108232982951, 0.166181285765110071,
0.168782774801850333, 0.171395595638155623, 0.174019770082499359, 0.176655321444406654,
0.179302274523530397, 0.181960655600216487, 0.184630492427504539, 0.187311814224516926,
0.190004651671193070, 0.192709036904328807, 0.195425003514885592, 0.198152586546538112,
0.200891822495431333, 0.203642749311121501, 0.206405406398679298, 0.209179834621935651,
0.211966076307852941, 0.214764175252008499, 0.217574176725178370, 0.220396127481011589,
0.223230075764789593, 0.226076071323264877, 0.228934165415577484, 0.231804410825248525,
0.234686861873252689, 0.237581574432173676, 0.240488605941449107, 0.243408015423711988,
0.246339863502238771, 0.249284212419516704, 0.252241126056943765, 0.255210669955677150,
0.258192911338648023, 0.261187919133763713, 0.264195763998317568, 0.267216518344631837,
0.270250256366959984, 0.273297054069675804, 0.276356989296781264, 0.279430141762765316,
0.282516593084849388, 0.285616426816658109, 0.288729728483353931, 0.291856585618280984,
0.294997087801162572, 0.298151326697901342, 0.301319396102034120, 0.304501391977896274,
0.307697412505553769, 0.310907558127563710, 0.314131931597630143, 0.317370638031222396,
0.320623784958230129, 0.323891482377732021, 0.327173842814958593, 0.330470981380537099,
0.333783015832108509, 0.337110066638412809, 0.340452257045945450, 0.343809713148291340,
0.347182563958251478, 0.350570941482881204, 0.353974980801569250, 0.357394820147290515,
0.360830600991175754, 0.364282468130549597, 0.367750569780596226, 0.371235057669821344,
0.374736087139491414, 0.378253817247238111, 0.381788410875031348, 0.385340034841733958,
0.388908860020464597, 0.392495061461010764, 0.396098818517547080, 0.399720314981931668,
0.403359739222868885, 0.407017284331247953, 0.410693148271983222, 0.414387534042706784,
0.418100649839684591, 0.421832709231353298, 0.425583931339900579, 0.429354541031341519,
0.433144769114574058, 0.436954852549929273, 0.440785034667769915, 0.444635565397727750,
0.448506701509214067, 0.452398706863882505, 0.456311852680773566, 0.460246417814923481,
0.464202689050278838, 0.468180961407822172, 0.472181538469883255, 0.476204732721683788,
0.480250865911249714, 0.484320269428911598, 0.488413284707712059, 0.492530263646148658,
0.496671569054796314, 0.500837575128482149, 0.505028667945828791, 0.509245245998136142,
0.513487720749743026, 0.517756517232200619, 0.522052074674794864, 0.526374847174186700,
0.530725304406193921, 0.535103932383019565, 0.539511234259544614, 0.543947731192649941,
0.548413963257921133, 0.552910490428519918, 0.557437893621486324, 0.561996775817277916,
0.566587763258951771, 0.571211506738074970, 0.575868682975210544, 0.580559996103683473,
0.585286179266300333, 0.590047996335791969, 0.594846243770991268, 0.599681752622167719,
0.604555390700549533, 0.609468064928895381, 0.614420723892076803, 0.619414360609039205,
0.624450015550274240, 0.629528779928128279, 0.634651799290960050, 0.639820277456438991,
0.645035480824251883, 0.650298743114294586, 0.655611470583224665, 0.660975147780241357,
0.666391343912380640, 0.671861719900766374, 0.677388036222513090, 0.682972161648791376,
0.688616083008527058, 0.694321916130032579, 0.700091918140490099, 0.705928501336797409,
0.711834248882358467, 0.717811932634901395, 0.723864533472881599, 0.729995264565802437,
0.736207598131266683, 0.742505296344636245, 0.748892447223726720, 0.755373506511754500,
0.761953346841546475, 0.768637315803334831, 0.775431304986138326, 0.782341832659861902,
0.789376143571198563, 0.796542330428254619, 0.803849483176389490, 0.811307874318219935,
0.818929191609414797, 0.826726833952094231, 0.834716292992930375, 0.842915653118441077,
0.851346258465123684, 0.860033621203008636, 0.869008688043793165, 0.878309655816146839,
0.887984660763399880, 0.898095921906304051, 0.908726440060562912, 0.919991505048360247,
0.932060075968990209, 0.945198953453078028, 0.959879091812415930, 0.977101701282731328,
1.000000000000000000];
pub const ZIG_EXP_R: f64 = 7.697117470131050077;
#[rustfmt::skip]
pub static ZIG_EXP_X: [f64; 257] =
[8.697117470131052741, 7.697117470131050077, 6.941033629377212577, 6.478378493832569696,
6.144164665772472667, 5.882144315795399869, 5.666410167454033697, 5.482890627526062488,
5.323090505754398016, 5.181487281301500047, 5.054288489981304089, 4.938777085901250530,
4.832939741025112035, 4.735242996601741083, 4.644491885420085175, 4.559737061707351380,
4.480211746528421912, 4.405287693473573185, 4.334443680317273007, 4.267242480277365857,
4.203313713735184365, 4.142340865664051464, 4.084051310408297830, 4.028208544647936762,
3.974606066673788796, 3.923062500135489739, 3.873417670399509127, 3.825529418522336744,
3.779270992411667862, 3.734528894039797375, 3.691201090237418825, 3.649195515760853770,
3.608428813128909507, 3.568825265648337020, 3.530315889129343354, 3.492837654774059608,
3.456332821132760191, 3.420748357251119920, 3.386035442460300970, 3.352149030900109405,
3.319047470970748037, 3.286692171599068679, 3.255047308570449882, 3.224079565286264160,
3.193757903212240290, 3.164053358025972873, 3.134938858084440394, 3.106389062339824481,
3.078380215254090224, 3.050890016615455114, 3.023897504455676621, 2.997382949516130601,
2.971327759921089662, 2.945714394895045718, 2.920526286512740821, 2.895747768600141825,
2.871364012015536371, 2.847360965635188812, 2.823725302450035279, 2.800444370250737780,
2.777506146439756574, 2.754899196562344610, 2.732612636194700073, 2.710636095867928752,
2.688959688741803689, 2.667573980773266573, 2.646469963151809157, 2.625639026797788489,
2.605072938740835564, 2.584763820214140750, 2.564704126316905253, 2.544886627111869970,
2.525304390037828028, 2.505950763528594027, 2.486819361740209455, 2.467904050297364815,
2.449198932978249754, 2.430698339264419694, 2.412396812688870629, 2.394289099921457886,
2.376370140536140596, 2.358635057409337321, 2.341079147703034380, 2.323697874390196372,
2.306486858283579799, 2.289441870532269441, 2.272558825553154804, 2.255833774367219213,
2.239262898312909034, 2.222842503111036816, 2.206569013257663858, 2.190438966723220027,
2.174449009937774679, 2.158595893043885994, 2.142876465399842001, 2.127287671317368289,
2.111826546019042183, 2.096490211801715020, 2.081275874393225145, 2.066180819490575526,
2.051202409468584786, 2.036338080248769611, 2.021585338318926173, 2.006941757894518563,
1.992404978213576650, 1.977972700957360441, 1.963642687789548313, 1.949412758007184943,
1.935280786297051359, 1.921244700591528076, 1.907302480018387536, 1.893452152939308242,
1.879691795072211180, 1.866019527692827973, 1.852433515911175554, 1.838931967018879954,
1.825513128903519799, 1.812175288526390649, 1.798916770460290859, 1.785735935484126014,
1.772631179231305643, 1.759600930889074766, 1.746643651946074405, 1.733757834985571566,
1.720942002521935299, 1.708194705878057773, 1.695514524101537912, 1.682900062917553896,
1.670349953716452118, 1.657862852574172763, 1.645437439303723659, 1.633072416535991334,
1.620766508828257901, 1.608518461798858379, 1.596327041286483395, 1.584191032532688892,
1.572109239386229707, 1.560080483527888084, 1.548103603714513499, 1.536177455041032092,
1.524300908219226258, 1.512472848872117082, 1.500692176842816750, 1.488957805516746058,
1.477268661156133867, 1.465623682245745352, 1.454021818848793446, 1.442462031972012504,
1.430943292938879674, 1.419464582769983219, 1.408024891569535697, 1.396623217917042137,
1.385258568263121992, 1.373929956328490576, 1.362636402505086775, 1.351376933258335189,
1.340150580529504643, 1.328956381137116560, 1.317793376176324749, 1.306660610415174117,
1.295557131686601027, 1.284481990275012642, 1.273434238296241139, 1.262412929069615330,
1.251417116480852521, 1.240445854334406572, 1.229498195693849105, 1.218573192208790124,
1.207669893426761121, 1.196787346088403092, 1.185924593404202199, 1.175080674310911677,
1.164254622705678921, 1.153445466655774743, 1.142652227581672841, 1.131873919411078511,
1.121109547701330200, 1.110358108727411031, 1.099618588532597308, 1.088889961938546813,
1.078171191511372307, 1.067461226479967662, 1.056759001602551429, 1.046063435977044209,
1.035373431790528542, 1.024687873002617211, 1.014005623957096480, 1.003325527915696735,
0.992646405507275897, 0.981967053085062602, 0.971286240983903260, 0.960602711668666509,
0.949915177764075969, 0.939222319955262286, 0.928522784747210395, 0.917815182070044311,
0.907098082715690257, 0.896370015589889935, 0.885629464761751528, 0.874874866291025066,
0.864104604811004484, 0.853317009842373353, 0.842510351810368485, 0.831682837734273206,
0.820832606554411814, 0.809957724057418282, 0.799056177355487174, 0.788125868869492430,
0.777164609759129710, 0.766170112735434672, 0.755139984181982249, 0.744071715500508102,
0.732962673584365398, 0.721810090308756203, 0.710611050909655040, 0.699362481103231959,
0.688061132773747808, 0.676703568029522584, 0.665286141392677943, 0.653804979847664947,
0.642255960424536365, 0.630634684933490286, 0.618936451394876075, 0.607156221620300030,
0.595288584291502887, 0.583327712748769489, 0.571267316532588332, 0.559100585511540626,
0.546820125163310577, 0.534417881237165604, 0.521885051592135052, 0.509211982443654398,
0.496388045518671162, 0.483401491653461857, 0.470239275082169006, 0.456886840931420235,
0.443327866073552401, 0.429543940225410703, 0.415514169600356364, 0.401214678896277765,
0.386617977941119573, 0.371692145329917234, 0.356399760258393816, 0.340696481064849122,
0.324529117016909452, 0.307832954674932158, 0.290527955491230394, 0.272513185478464703,
0.253658363385912022, 0.233790483059674731, 0.212671510630966620, 0.189958689622431842,
0.165127622564187282, 0.137304980940012589, 0.104838507565818778, 0.063852163815001570,
0.000000000000000000];
#[rustfmt::skip]
pub static ZIG_EXP_F: [f64; 257] =
[0.000167066692307963, 0.000454134353841497, 0.000967269282327174, 0.001536299780301573,
0.002145967743718907, 0.002788798793574076, 0.003460264777836904, 0.004157295120833797,
0.004877655983542396, 0.005619642207205489, 0.006381905937319183, 0.007163353183634991,
0.007963077438017043, 0.008780314985808977, 0.009614413642502212, 0.010464810181029981,
0.011331013597834600, 0.012212592426255378, 0.013109164931254991, 0.014020391403181943,
0.014945968011691148, 0.015885621839973156, 0.016839106826039941, 0.017806200410911355,
0.018786700744696024, 0.019780424338009740, 0.020787204072578114, 0.021806887504283581,
0.022839335406385240, 0.023884420511558174, 0.024942026419731787, 0.026012046645134221,
0.027094383780955803, 0.028188948763978646, 0.029295660224637411, 0.030414443910466622,
0.031545232172893622, 0.032687963508959555, 0.033842582150874358, 0.035009037697397431,
0.036187284781931443, 0.037377282772959382, 0.038578995503074871, 0.039792391023374139,
0.041017441380414840, 0.042254122413316254, 0.043502413568888197, 0.044762297732943289,
0.046033761076175184, 0.047316792913181561, 0.048611385573379504, 0.049917534282706379,
0.051235237055126281, 0.052564494593071685, 0.053905310196046080, 0.055257689676697030,
0.056621641283742870, 0.057997175631200659, 0.059384305633420280, 0.060783046445479660,
0.062193415408541036, 0.063615431999807376, 0.065049117786753805, 0.066494496385339816,
0.067951593421936643, 0.069420436498728783, 0.070901055162371843, 0.072393480875708752,
0.073897746992364746, 0.075413888734058410, 0.076941943170480517, 0.078481949201606435,
0.080033947542319905, 0.081597980709237419, 0.083174093009632397, 0.084762330532368146,
0.086362741140756927, 0.087975374467270231, 0.089600281910032886, 0.091237516631040197,
0.092887133556043569, 0.094549189376055873, 0.096223742550432825, 0.097910853311492213,
0.099610583670637132, 0.101322997425953631, 0.103048160171257702, 0.104786139306570145,
0.106537004050001632, 0.108300825451033755, 0.110077676405185357, 0.111867631670056283,
0.113670767882744286, 0.115487163578633506, 0.117316899211555525, 0.119160057175327641,
0.121016721826674792, 0.122886979509545108, 0.124770918580830933, 0.126668629437510671,
0.128580204545228199, 0.130505738468330773, 0.132445327901387494, 0.134399071702213602,
0.136367070926428829, 0.138349428863580176, 0.140346251074862399, 0.142357645432472146,
0.144383722160634720, 0.146424593878344889, 0.148480375643866735, 0.150551185001039839,
0.152637142027442801, 0.154738369384468027, 0.156854992369365148, 0.158987138969314129,
0.161134939917591952, 0.163298528751901734, 0.165478041874935922, 0.167673618617250081,
0.169885401302527550, 0.172113535315319977, 0.174358169171353411, 0.176619454590494829,
0.178897546572478278, 0.181192603475496261, 0.183504787097767436, 0.185834262762197083,
0.188181199404254262, 0.190545769663195363, 0.192928149976771296, 0.195328520679563189,
0.197747066105098818, 0.200183974691911210, 0.202639439093708962, 0.205113656293837654,
0.207606827724221982, 0.210119159388988230, 0.212650861992978224, 0.215202151075378628,
0.217773247148700472, 0.220364375843359439, 0.222975768058120111, 0.225607660116683956,
0.228260293930716618, 0.230933917169627356, 0.233628783437433291, 0.236345152457059560,
0.239083290262449094, 0.241843469398877131, 0.244625969131892024, 0.247431075665327543,
0.250259082368862240, 0.253110290015629402, 0.255985007030415324, 0.258883549749016173,
0.261806242689362922, 0.264753418835062149, 0.267725419932044739, 0.270722596799059967,
0.273745309652802915, 0.276793928448517301, 0.279868833236972869, 0.282970414538780746,
0.286099073737076826, 0.289255223489677693, 0.292439288161892630, 0.295651704281261252,
0.298892921015581847, 0.302163400675693528, 0.305463619244590256, 0.308794066934560185,
0.312155248774179606, 0.315547685227128949, 0.318971912844957239, 0.322428484956089223,
0.325917972393556354, 0.329440964264136438, 0.332998068761809096, 0.336589914028677717,
0.340217149066780189, 0.343880444704502575, 0.347580494621637148, 0.351318016437483449,
0.355093752866787626, 0.358908472948750001, 0.362762973354817997, 0.366658079781514379,
0.370594648435146223, 0.374573567615902381, 0.378595759409581067, 0.382662181496010056,
0.386773829084137932, 0.390931736984797384, 0.395136981833290435, 0.399390684475231350,
0.403694012530530555, 0.408048183152032673, 0.412454465997161457, 0.416914186433003209,
0.421428728997616908, 0.425999541143034677, 0.430628137288459167, 0.435316103215636907,
0.440065100842354173, 0.444876873414548846, 0.449753251162755330, 0.454696157474615836,
0.459707615642138023, 0.464789756250426511, 0.469944825283960310, 0.475175193037377708,
0.480483363930454543, 0.485871987341885248, 0.491343869594032867, 0.496901987241549881,
0.502549501841348056, 0.508289776410643213, 0.514126393814748894, 0.520063177368233931,
0.526104213983620062, 0.532253880263043655, 0.538516872002862246, 0.544898237672440056,
0.551403416540641733, 0.558038282262587892, 0.564809192912400615, 0.571723048664826150,
0.578787358602845359, 0.586010318477268366, 0.593400901691733762, 0.600968966365232560,
0.608725382079622346, 0.616682180915207878, 0.624852738703666200, 0.633251994214366398,
0.641896716427266423, 0.650805833414571433, 0.660000841079000145, 0.669506316731925177,
0.679350572264765806, 0.689566496117078431, 0.700192655082788606, 0.711274760805076456,
0.722867659593572465, 0.735038092431424039, 0.747868621985195658, 0.761463388849896838,
0.775956852040116218, 0.791527636972496285, 0.808421651523009044, 0.826993296643051101,
0.847785500623990496, 0.871704332381204705, 0.900469929925747703, 0.938143680862176477,
1.000000000000000000];

374
vendor/rand_distr/src/zipf.rs vendored Normal file
View File

@@ -0,0 +1,374 @@
// Copyright 2021 Developers of the Rand project.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The Zeta and related distributions.
use num_traits::Float;
use crate::{Distribution, Standard};
use rand::{Rng, distributions::OpenClosed01};
use core::fmt;
/// Samples integers according to the [zeta distribution].
///
/// The zeta distribution is a limit of the [`Zipf`] distribution. Sometimes it
/// is called one of the following: discrete Pareto, Riemann-Zeta, Zipf, or
/// Zipf–Estoup distribution.
///
/// It has the density function `f(k) = k^(-a) / C(a)` for `k >= 1`, where `a`
/// is the parameter and `C(a)` is the Riemann zeta function.
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Zeta;
///
/// let val: f64 = thread_rng().sample(Zeta::new(1.5).unwrap());
/// println!("{}", val);
/// ```
///
/// # Remarks
///
/// The zeta distribution has no upper limit. Sampled values may be infinite.
/// In particular, a value of infinity might be returned for the following
/// reasons:
/// 1. it is the best representation in the type `F` of the actual sample.
/// 2. to prevent infinite loops for very small `a`.
///
/// # Implementation details
///
/// We are using the algorithm from [Non-Uniform Random Variate Generation],
/// Section 6.1, page 551.
///
/// [zeta distribution]: https://en.wikipedia.org/wiki/Zeta_distribution
/// [Non-Uniform Random Variate Generation]: https://doi.org/10.1007/978-1-4613-8643-8
#[derive(Clone, Copy, Debug)]
pub struct Zeta<F>
where F: Float, Standard: Distribution<F>, OpenClosed01: Distribution<F>
{
a_minus_1: F,
b: F,
}
/// Error type returned from `Zeta::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ZetaError {
/// `a <= 1` or `nan`.
ATooSmall,
}
impl fmt::Display for ZetaError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
ZetaError::ATooSmall => "a <= 1 or is NaN in Zeta distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for ZetaError {}
impl<F> Zeta<F>
where F: Float, Standard: Distribution<F>, OpenClosed01: Distribution<F>
{
/// Construct a new `Zeta` distribution with given `a` parameter.
#[inline]
pub fn new(a: F) -> Result<Zeta<F>, ZetaError> {
if !(a > F::one()) {
return Err(ZetaError::ATooSmall);
}
let a_minus_1 = a - F::one();
let two = F::one() + F::one();
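// Precompute b = 2^(a - 1); it is reused in the acceptance test of the
// rejection sampler in `sample` below.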
Ok(Zeta {
a_minus_1,
b: two.powf(a_minus_1),
})
}
}
impl<F> Distribution<F> for Zeta<F>
where F: Float, Standard: Distribution<F>, OpenClosed01: Distribution<F>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
loop {
let u = rng.sample(OpenClosed01);
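// Candidate: x = floor(u^(-1/(a - 1))), the integer part of a draw from the
// continuous Pareto-like envelope.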
let x = u.powf(-F::one() / self.a_minus_1).floor();
debug_assert!(x >= F::one());
if x.is_infinite() {
// For sufficiently small `a`, `x` will always be infinite,
// which is rejected, resulting in an infinite loop. We avoid
// this by always returning infinity instead.
return x;
}
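// Ratio test from Devroye, Section 6.1: accept x when
// v <= t * (b - 1) / (b * x * (t - 1)), with t = (1 + 1/x)^(a - 1).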
let t = (F::one() + F::one() / x).powf(self.a_minus_1);
let v = rng.sample(Standard);
if v * x * (t - F::one()) * self.b <= t * (self.b - F::one()) {
return x;
}
}
}
}
/// Samples integers according to the Zipf distribution.
///
/// The samples follow Zipf's law: The frequency of each sample from a finite
/// set of size `n` is inversely proportional to a power of its frequency rank
/// (with exponent `s`).
///
/// For large `n`, this converges to the [`Zeta`] distribution.
///
/// For `s = 0`, this becomes a uniform distribution.
///
/// # Example
/// ```
/// use rand::prelude::*;
/// use rand_distr::Zipf;
///
/// let val: f64 = thread_rng().sample(Zipf::new(10, 1.5).unwrap());
/// println!("{}", val);
/// ```
///
/// # Implementation details
///
/// Implemented via [rejection sampling](https://en.wikipedia.org/wiki/Rejection_sampling),
/// due to Jason Crease[1].
///
/// [1]: https://jasoncrease.medium.com/rejection-sampling-the-zipf-distribution-6b359792cffa
#[derive(Clone, Copy, Debug)]
pub struct Zipf<F>
where F: Float, Standard: Distribution<F> {
n: F,
s: F,
t: F,
q: F,
}
/// Error type returned from `Zipf::new`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ZipfError {
/// `s < 0` or `nan`.
STooSmall,
/// `n < 1`.
NTooSmall,
}
impl fmt::Display for ZipfError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match self {
ZipfError::STooSmall => "s < 0 or is NaN in Zipf distribution",
ZipfError::NTooSmall => "n < 1 in Zipf distribution",
})
}
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
impl std::error::Error for ZipfError {}
impl<F> Zipf<F>
where F: Float, Standard: Distribution<F> {
/// Construct a new `Zipf` distribution for a set with `n` elements and a
/// frequency rank exponent `s`.
///
/// For large `n`, rounding may occur to fit the number into the float type.
#[inline]
pub fn new(n: u64, s: F) -> Result<Zipf<F>, ZipfError> {
if !(s >= F::zero()) {
return Err(ZipfError::STooSmall);
}
if n < 1 {
return Err(ZipfError::NTooSmall);
}
let n = F::from(n).unwrap(); // This does not fail.
let q = if s != F::one() {
// Make sure to calculate the division only once.
F::one() / (F::one() - s)
} else {
// This value is never used.
F::zero()
};
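// `t` is the total mass of the bounding envelope:
// t = 1 + integral_1^n x^(-s) dx (= 1 + ln(n) when s == 1).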
let t = if s != F::one() {
(n.powf(F::one() - s) - s) * q
} else {
F::one() + n.ln()
};
debug_assert!(t > F::zero());
Ok(Zipf {
n, s, t, q
})
}
/// Inverse cumulative distribution function of the bounding envelope
#[inline]
fn inv_cdf(&self, p: F) -> F {
let one = F::one();
let pt = p * self.t;
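// Three branches: pt <= 1 lands in the flat head of the envelope (uniform on
// [0, 1]); otherwise invert 1 + integral_1^x u^(-s) du = pt for the tail,
// with the logarithmic form when s == 1.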
if pt <= one {
pt
} else if self.s != one {
(pt * (one - self.s) + self.s).powf(self.q)
} else {
(pt - one).exp()
}
}
}
impl<F> Distribution<F> for Zipf<F>
where F: Float, Standard: Distribution<F>
{
#[inline]
fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> F {
let one = F::one();
loop {
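// Rejection sampling after Crease: draw from the continuous envelope via its
// inverse CDF, take the integer candidate x = floor(inv_b + 1), and accept
// with probability x^(-s) / inv_b^(-s) (always 1 when x == 1).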
let inv_b = self.inv_cdf(rng.sample(Standard));
let x = (inv_b + one).floor();
let mut ratio = x.powf(-self.s);
if x > one {
ratio = ratio * inv_b.powf(self.s)
};
let y = rng.sample(Standard);
if y < ratio {
return x;
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn test_samples<F: Float + core::fmt::Debug, D: Distribution<F>>(
distr: D, zero: F, expected: &[F],
) {
let mut rng = crate::test::rng(213);
let mut buf = [zero; 4];
for x in &mut buf {
*x = rng.sample(&distr);
}
assert_eq!(buf, expected);
}
#[test]
#[should_panic]
fn zeta_invalid() {
Zeta::new(1.).unwrap();
}
#[test]
#[should_panic]
fn zeta_nan() {
Zeta::new(core::f64::NAN).unwrap();
}
#[test]
fn zeta_sample() {
let a = 2.0;
let d = Zeta::new(a).unwrap();
let mut rng = crate::test::rng(1);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 1.);
}
}
#[test]
fn zeta_small_a() {
let a = 1. + 1e-15;
let d = Zeta::new(a).unwrap();
let mut rng = crate::test::rng(2);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 1.);
}
}
#[test]
fn zeta_value_stability() {
test_samples(Zeta::new(1.5).unwrap(), 0f32, &[
1.0, 2.0, 1.0, 1.0,
]);
test_samples(Zeta::new(2.0).unwrap(), 0f64, &[
2.0, 1.0, 1.0, 1.0,
]);
}
#[test]
#[should_panic]
fn zipf_s_too_small() {
Zipf::new(10, -1.).unwrap();
}
#[test]
#[should_panic]
fn zipf_n_too_small() {
Zipf::new(0, 1.).unwrap();
}
#[test]
#[should_panic]
fn zipf_nan() {
Zipf::new(10, core::f64::NAN).unwrap();
}
#[test]
fn zipf_sample() {
let d = Zipf::new(10, 0.5).unwrap();
let mut rng = crate::test::rng(2);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 1.);
}
}
#[test]
fn zipf_sample_s_1() {
let d = Zipf::new(10, 1.).unwrap();
let mut rng = crate::test::rng(2);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 1.);
}
}
#[test]
fn zipf_sample_s_0() {
let d = Zipf::new(10, 0.).unwrap();
let mut rng = crate::test::rng(2);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 1.);
}
// TODO: verify that this is a uniform distribution
}
#[test]
fn zipf_sample_large_n() {
let d = Zipf::new(core::u64::MAX, 1.5).unwrap();
let mut rng = crate::test::rng(2);
for _ in 0..1000 {
let r = d.sample(&mut rng);
assert!(r >= 1.);
}
// TODO: verify that this is a zeta distribution
}
#[test]
fn zipf_value_stability() {
test_samples(Zipf::new(10, 0.5).unwrap(), 0f32, &[
10.0, 2.0, 6.0, 7.0
]);
test_samples(Zipf::new(10, 2.0).unwrap(), 0f64, &[
1.0, 2.0, 3.0, 2.0
]);
}
}