Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/heapless/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"b1b8d3cf13b69edaacdd6d49579b5b7726aff21d742539a546778b61d2132640","CODE_OF_CONDUCT.md":"fba7846e321b6ac7f74932cbf5831d89a7116c71763f1b263ba1c49ac7c2f382","Cargo.toml":"98cbfa5f1ea18ccf182e3069c49ecbb74eb0bf7a664dde9bbf238255d95a4f26","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"035e70219855119df4273b3c5b97543ae82e0dd60c520416e759107c602f651b","README.md":"4d2b9295f8df105d7dc0853eb9e554d7ea2cd4b7dcb1c26c6014503f3ed0ceb8","build.rs":"9ab92dc01d7bbf84d7c96c9956c44747a82aed49809765e33053569fdc0581a9","src/binary_heap.rs":"bca69dbeda50579adc7125c0f2e5a8985a8f5645a39a699b5785031e454eaee8","src/de.rs":"52299e1989e48ec2855864ea5cb931f10b6f5d5206f3c55e905ba4c10a7e2f9a","src/defmt.rs":"3e6f55bfd75876eb71b4e7dc41aaaf4e6164a8962de13232f5e27a8a931e449f","src/deque.rs":"a25568cfa4433249f2c4e47c496556550d4e4a25ab7d47e64a574a21d461d14c","src/histbuf.rs":"08459125851dc6cba23487d3691b07397ab029cf1aaac634dc05297b7b957d16","src/indexmap.rs":"6e29fd513991b4062fa782206b0c69d65fe631491cce73c55f8a384654c5cd28","src/indexset.rs":"b9fe3e6c0760fb3c195a8a7b1ac328b59ae447aa6a60e1d85cd5009f15c95583","src/lib.rs":"8cb62c2b9e610641d1af904eea8ac0c1d6af3b1a325277c9568b466e123a60f8","src/linear_map.rs":"578b04a297bd90af7af21387f2c6d4a37bdf0b1ff4704ae6be8982da6a474130","src/mpmc.rs":"271428db2074de2dfcfaa62d40cdef0c4ebbaaf76acd91ab61cdf0b0b3a1c654","src/pool.rs":"9e3048f3fa080fc0fb17eb69e450067236a4596534050ebcd8938455d18b0078","src/pool/arc.rs":"39fb2b89ea8ae5dfd2e3a1cfaf712a5ba2fd445684e50b559d1295faeb824da8","src/pool/boxed.rs":"908646623bbfd2c40152ba3e2702a1665707b07d9bf7e6a31abd02f3d22f3b67","src/pool/object.rs":"8a4dda54b0624bca9322f7df8426b2d16daa6f7f25a7420e4489788fc64e32a8","src/pool/treiber.rs":"72438bd573e701756a8c8d48d86f409ef3b08346480dffc02ae0694702920de6","src/pool/treiber/cas.rs":"bf1a023116b11d3008692b44eb7b3237c78ee7e0c2f78dce23f8f3db8361bb2f","src/pool/treiber/llsc.rs":"5207a17c201dd6fbe097a4ac64c6694e95d407119e113abc5e93a12fba2ba207","src/sealed.rs":"cf8a6e50cd4a10a98d9073fc45172a6eb675e5f1591a4b1c4c561ccf31956180","src/ser.rs":"2d92858c95d207a92eb3f0ad860d7ae5edf1dcbd75679a388218ab5c02587d9c","src/sorted_linked_list.rs":"6c9c4635a7f7961a0411258bd720195a2b9a32e519095b929bbbc190abd4e7f7","src/spsc.rs":"1d0148218cb61368e5099c83d8fcb961f839e67c353dc1531d5dc687786de1ba","src/string.rs":"aff5bbf29159aa3e6c4cde5ef7c798ee6ac4859ae2135a6f78c84ec7c4a0e4d7","src/test_helpers.rs":"61f3293708627508a93ac24bac57220cd9dd5bf57b4fc340f6fc251f9082eec2","src/ufmt.rs":"c2f58e402dad2982d7ca77a1ef07c2b98b3138ee6380e16bb0295e1583686b1c","src/vec.rs":"994866ba6d13daaa4ab2b3f11351075808c376e830d35124e7994988ef832ed2","suppressions.txt":"531ca2293c65b2ced87e2ffff06abce49c60cceeb1a0c1fc4c551e816528a34a","tests/cpass.rs":"f853bb2f1fe5c27a68d0211a44d811d61e6b6aa9eb1de35a5f9889333e1b3ae3","tests/tsan.rs":"a747a5b71fd0af8fa7e0ca1e5801dcf6686d395901dbbac86149977dc9a10b8f"},"package":"0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"}

613
vendor/heapless/CHANGELOG.md vendored Normal file

@@ -0,0 +1,613 @@
# Change Log
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
## [v0.8.0] - 2023-11-07
### Added
- Add `Clone` and `PartialEq` implementations to `HistoryBuffer`.
- Added an object pool API; see the `pool::object` module-level documentation for details
- Add `HistoryBuffer::as_slices()`
- Implemented `retain` for `IndexMap` and `IndexSet`.
- Recover `StableDeref` trait for `pool::object::Object` and `pool::boxed::Box`.
- Add polyfills for ESP32S2
- Added `String::from_utf8` and `String::from_utf8_unchecked`.
### Changed
- updated from edition 2018 to edition 2021
- [breaking-change] `IndexMap` and `IndexSet` now require that keys implement the `core::hash::Hash`
trait instead of the `hash32::Hash` (v0.2.0) trait
- move `pool::singleton::Box` to the `pool::boxed` module
- renamed `pool::singleton::Pool` to `BoxPool` and moved it into the `pool::boxed` module
- move `pool::singleton::arc::Arc` to the `pool::arc` module
- renamed `pool::singleton::arc::Pool` to `ArcPool` and moved it into the `pool::arc` module
- [breaking-change] changed the target support of memory pool API to only support 32-bit x86 and a
subset of ARM targets. See the module level documentation of the `pool` module for details
- relax trait requirements on `IndexMap` and `IndexSet`.
- export `IndexSet` and `IndexMap` iterator types.
- [breaking-change] export `IndexMapKeys`, `IndexMapValues` and
`IndexMapValuesMut` iterator types.
- [breaking-change] this crate now uses `portable-atomic` v1.0 instead of `atomic-polyfill` for emulating
CAS instructions on targets where they're not natively available.
- [breaking-change] `From<&str>` for `String` was replaced with `TryFrom<&str>` because the `From` trait must not fail (see the sketch after this list).
- [breaking-change] Renamed Cargo features
- `defmt-impl` is now `defmt-03`
- `ufmt-impl` is now `ufmt`
- `cas` is removed, atomic polyfilling is now opt-in via the `portable-atomic` feature.
- `Vec::as_mut_slice` is now a public method.
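
A minimal sketch of what the `TryFrom<&str>` change means for callers, assuming the heapless 0.8 `String<N>` API described above (the helper name `demo` is illustrative, not from the crate):

```rust
use core::convert::TryFrom;
use heapless::String;

fn demo() {
    // Capacity is in bytes; 5 bytes is enough for "hello".
    let s = String::<5>::try_from("hello").unwrap();
    // A string that does not fit in the fixed capacity is rejected with an error.
    assert!(String::<5>::try_from("does not fit").is_err());
    let _ = s;
}
```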
### Fixed
- Fixed a `dropping_references` warning in `LinearMap`.
- Fixed IndexMap entry API returning wrong slot after an insert on vacant entry. (#360)
### Removed
- [breaking-change] this crate no longer has a Minimum Supported Rust Version (MSRV) guarantee and
should be used with the latest stable version of the Rust toolchain.
- [breaking-change] removed the `Init` and `Uninit` type states from `pool::singleton::Box`
- [breaking-change] removed the following `pool::singleton::Box` methods: `freeze`, `forget` and `init`
- [breaking-change] removed the `pool::singleton::arc::ArcInner` type
- [breaking-change] removed support for attributes from `pool!` and `arc_pool!`
## [v0.7.16] - 2022-08-09
### Added
- add more `PartialEq` implementations to `Vec` where `Vec` is the RHS
### Changed
### Fixed
- clarify in the docs that the capacity of `heapless::String` is in bytes, not characters
- Fixed some broken links in the documentation.
## [v0.7.15] - 2022-07-05
### Added
- Added `Vec::insert(index, element)` (a usage sketch of these `Vec` methods follows this list)
- Added `Vec::remove(index)`
- Added `Vec::retain(f)`
- Added `Vec::retain_mut(f)`
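
A minimal usage sketch of the methods listed above, assuming the std-like signatures heapless documents for `Vec` (`push` and `insert` hand the element back on overflow; `demo` is an illustrative name):

```rust
use heapless::Vec;

fn demo() {
    let mut v: Vec<u8, 4> = Vec::new();
    v.push(1).unwrap();
    v.push(3).unwrap();
    // Insert 2 at index 1; fails (returning the element) if the Vec is full.
    v.insert(1, 2).unwrap();
    assert_eq!(v.as_slice(), &[1, 2, 3]);
    // Remove by index, shifting later elements left.
    let first = v.remove(0);
    assert_eq!(first, 1);
    // Keep only the elements matching the predicate.
    v.retain(|&x| x % 2 == 0);
    assert_eq!(v.as_slice(), &[2]);
}
```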
## [v0.7.14] - 2022-06-15
### Added
- Added support for AVR architecture.
### Fixed
- `IndexSet` and `IndexMap`'s `default` method now checks at compile time that their capacity is a power of two.
## [v0.7.13] - 2022-05-16
### Added
- Added `into_vec` to `BinaryHeap`
## [v0.7.12] - 2022-05-12
### Added
- Added support for AVR architecture.
- Add `entry` API to `IndexMap`
- Implement `IntoIterator` trait for `IndexMap`
- Implement `FromIterator` for `String`
- Add `first` and `last` methods to `IndexMap` and `IndexSet`
- Add `pop_{front,back}_unchecked` methods to `Deque`
### Changed
- Optimize the codegen of `Vec::clone`
- `riscv32i` and `riscv32imc` targets unconditionally (e.g. `build --no-default-features`) depend on `atomic-polyfill`
### Fixed
- Inserting an item that replaces an already present item will no longer
fail with an error
## [v0.7.11] - 2022-05-09
### Fixed
- Fixed `pool` example in docstring.
- Fixed undefined behavior in `Vec::truncate()`, `Vec::swap_remove_unchecked()`,
and `Hole::move_to()` (internal to the binary heap implementation).
- Fixed `BinaryHeap` elements being dropped twice
## [v0.7.10] - 2022-01-21
### Fixed
- `cargo test` can now run on non-`x86` hosts
### Added
- Added `OldestOrdered` iterator for `HistoryBuffer`
### Changed
- `atomic-polyfill` is now enabled and used for `cas` atomic emulation on `riscv` targets
## [v0.7.9] - 2021-12-16
### Fixed
- Fix `IndexMap` and `IndexSet` bounds
- Make `IndexSet::new()` a `const fn`
## [v0.7.8] - 2021-11-11
### Added
- A span of `defmt` versions is now supported (`0.2` and `0.3`)
## [v0.7.7] - 2021-09-22
### Fixed
- Fixed `Pool` so that it is `Sync` on ARMv6
## [v0.7.6] - 2021-09-21
### Added
- Added `ArcPool`
- Added `Debug` impl for `Deque`
### Fixed
- ZSTs in `Pool` now work correctly
- Some MIRI errors were resolved
- Allow `pool!` on thumbv6
- Fixed possible UB in `Pool` on x86
## [v0.7.5] - 2021-08-16
### Added
- Added `SortedLinkedList`
- Added `Vec::is_empty`; one no longer needs to go through a slice
### Changed
- `Vec::pop_unchecked` is now public
## [v0.7.4] - 2021-08-06
### Added
- Implement `Default` for `MpMcQueue`, `Queue` and `HistoryBuffer`
- Implement `PartialOrd` and `Ord` for `Vec` and `String`
### Fixed
- Fixed comments in SPSC
## [v0.7.3] - 2021-07-01
### Added
- Added `Deque`
### Changed
- `Box::freeze` is deprecated due to possibility of undefined behavior.
## [v0.7.2] - 2021-06-30
### Added
- Added new `Vec::into_array` method
- Added const-asserts to all data structures
## [v0.7.1] - 2021-05-23
### Changed
- MPMC is now more generic
### Added
- `defmt` for `Vec` and `String`
## [v0.7.0] - 2021-04-23
### Changed
- [breaking-change] Converted all data structures to use the `const generics` MVP
- [breaking-change] `HistoryBuffer` is now working with const constructors and non-`Copy` data
- [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values
- Added missing `Deref`, `AsRef` and `Debug` for `HistoryBuffer`
- [breaking-change] `MultiCore`/`SingleCore` and `Uxx` are now removed from `spsc::Queue`
- [breaking-change] `spsc::Queue` is now `usize` only
- [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207), i.e. it creates an `N - 1` sized queue instead of the old behavior that generated a size `N` queue (see the sketch after this list)
- [breaking-change] `String` has had `utf8` related methods removed as this can be done via `str`
- [breaking-change] No data structures implement `AsSlice` traits any more, now using `AsRef` and `AsMut` as they work with any size of array now
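
A minimal sketch of the `N - 1` capacity behavior noted above; the concrete numbers are assumed from that note rather than verified against a specific release, and `demo` is an illustrative name:

```rust
use heapless::spsc::Queue;

fn demo() {
    // A Queue<_, 4> reserves one slot for correctness, so it holds at most 3 items.
    let mut q: Queue<u8, 4> = Queue::new();
    q.enqueue(1).unwrap();
    q.enqueue(2).unwrap();
    q.enqueue(3).unwrap();
    // The fourth enqueue fails and hands the element back.
    assert!(q.enqueue(4).is_err());
    assert_eq!(q.dequeue(), Some(1));
}
```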
### Fixed
- `Pool` and `MPMC` now work on `thumbv6m`
- `IndexMap::new()` is now a `const-fn`
## [v0.6.1] - 2021-03-02
### Fixed
- Security issue.
## [v0.6.0] - 2021-02-02
### Changed
- [breaking-change] The version of the `generic-array` dependency has been
bumped to v0.14.2.
## [v0.5.6] - 2020-09-18
### Added
- Added `as_mut_vec` for `String`
- Added `set_len` for `Vec`
- Performance improvements in `histbuf`
### Fixed
- `Producer` was made `Send` for single-core applications
## [v0.5.5] - 2020-05-04
### Added
- Added `HistoryBuffer`
- Added extra methods to `Vec`: `from_slice`, `starts_with`, `ends_with`
- Optional `ufmt` support for `String` and `Vec`
- Added `pool` support for bare-metal `armebv7r-` targets
- Added Sync to `pool` for `x86`
## [v0.5.4] - 2020-04-06
### Added
- Added `StableDeref` implementation for `pool::Box` and `pool::singleton::Box`.
## [v0.5.3] - 2020-01-27
### Added
- Extend the ARMv7-A `Pool` support to the bare-metal `armv7a-` targets.
## [v0.5.2] - 2020-01-15
### Fixed
- Fixed incorrect overflow behavior in computation of capacities
- Fixed edge case in `mpmc::Queue::dequeue` that led to an infinite loop
- `IndexMap` and `LinearMap` are now deserialized as maps, rather than as sequences
- Fixed compilation of this crate on built-in targets that don't have CAS instructions
### Changed
- `spsc::Queue` iterators now implement the double ended iterator trait
### Added
- opt-out `cas` feature to disable parts of the API that use CAS instructions.
Useful if using a custom (i.e. not built-in) rustc target that does not have CAS
instructions.
- singleton `Pool` support on ARMv7-A devices
## [v0.5.1] - 2019-08-29
### Added
- Added armv8 support
- Added `Queue::peek`
- Added `BinaryHeap::peek_mut`
## [v0.5.0] - 2019-07-12
### Added
- `Pool` now implements the `Sync` trait when targeting ARMv7-R.
- Most data structures can now be constructed in "const context" (e.g. `static
[mut]` variables) using a newtype in `heapless::i`.
- `Pool` has gained a `grow_exact` method to more efficiently use statically
allocated memory.
- The `pool!` macro now accepts attributes.
- `mpmc::Q*` a family of fixed capacity multiple-producer multiple-consumer
lock-free queues.
### Changed
- [breaking-change] `binary_heap::Kind` is now a sealed trait.
### Removed
- [breaking-change] The "smaller-atomics" feature has been removed. It is now
always enabled.
- [breaking-change] The "min-const-fn" feature has been removed. It is now
always enabled.
- [breaking-change] The MSRV has been bumped to Rust 1.36.0.
- [breaking-change] The version of the `generic-array` dependency has been
bumped to v0.13.0.
## [v0.4.4] - 2019-05-02
### Added
- Implemented `PartialEq`, `PartialOrd`, `Eq`, `Ord` and `Hash` for `pool::Box`
and `pool::singleton::Box`.
### Fixed
- Fixed UB in our internal, stable re-implementation of `core::mem::MaybeUninit`
that occurred when using some of our data structures with types that implement
`Drop`.
## [v0.4.3] - 2019-04-22
### Added
- Added a memory pool that's lock-free and interrupt-safe on the ARMv7-M
architecture.
- `IndexMap` has gained `Eq` and `PartialEq` implementations.
## [v0.4.2] - 2019-02-12
### Added
- All containers now implement `Clone`
- `spsc::Queue` now implements `Debug`, `Hash`, `PartialEq` and `Eq`
- `LinearMap` now implements `Debug`, `FromIterator`, `IntoIter`, `PartialEq`,
`Eq` and `Default`
- `BinaryHeap` now implements `Debug` and `Default`
- `String` now implements `FromStr`, `Hash`, `From<uxx>` and `Default`
- `Vec` now implements `Hash` and `Default`
- A "serde" Cargo feature that when enabled adds a `serde::Serialize` and
`serde::Deserialize` implementations to each collection.
## [v0.4.1] - 2018-12-16
### Changed
- Add a new type parameter to `spsc::Queue` that indicates whether the queue is
only single-core safe, or multi-core safe. By default the queue is multi-core
safe; this preserves the current semantics. New `unsafe` constructors have
been added to create the single-core variant.
## [v0.4.0] - 2018-10-19
### Changed
- [breaking-change] All Cargo features are disabled by default. This crate now
compiles on stable by default.
- [breaking-change] `RingBuffer` has been renamed to `spsc::Queue`. The `ring_buffer` module has been renamed to `spsc`.
- [breaking-change] The bounds on `spsc::Queue` have changed.
### Removed
- [breaking-change] The sealed `Uxx` trait has been removed from the public API.
## [v0.3.7] - 2018-08-19
### Added
- Implemented `IntoIterator` and `FromIterator` for `Vec`
- `ready` methods to `ring_buffer::{Consumer,Producer}`
- An opt-out "const-fn" Cargo feature that turns `const` functions into normal functions when
disabled.
- An opt-out "smaller-atomics" Cargo feature that removes the ability to shrink the size of
`RingBuffer` when disabled.
### Changed
- This crate now compiles on stable when both the "const-fn" and "smaller-atomics" features are
disabled.
### Fixed
- The `RingBuffer.len` function
- Compilation on recent nightlies
## [v0.3.6] - 2018-05-04
### Fixed
- The capacity of `RingBuffer`. It should be the requested capacity plus one, not twice that plus one.
## [v0.3.5] - 2018-05-03
### Added
- `RingBuffer.enqueue_unchecked` an unchecked version of `RingBuffer.enqueue`
## [v0.3.4] - 2018-04-28
### Added
- `BinaryHeap.pop_unchecked` an unchecked version of `BinaryHeap.pop`
## [v0.3.3] - 2018-04-28
### Added
- `BinaryHeap.push_unchecked` an unchecked version of `BinaryHeap.push`
## [v0.3.2] - 2018-04-27
### Added
- A re-export of `generic_array::ArrayLength`, for convenience.
## [v0.3.1] - 2018-04-23
### Added
- Fixed capacity implementations of `IndexMap` and `IndexSet`.
- A `Extend` implementation to `Vec`
- More `PartialEq` implementations to `Vec`
## [v0.3.0] - 2018-04-22
### Changed
- [breaking-change] The capacity of all data structures must now be specified using type level
integers (cf. `typenum`). See documentation for details.
- [breaking-change] `BufferFullError` has been removed in favor of (a) returning ownership of the
item that couldn't be added to the collection (cf. `Vec.push`), or (b) returning the unit type
when the argument was not consumed (cf. `Vec.extend_from_slice`).
## [v0.2.7] - 2018-04-20
### Added
- Unchecked methods to dequeue and enqueue items into a `RingBuffer` via the `Consumer` and
`Producer` end points.
### Changed
- `RingBuffer` now has a generic index type, which defaults to `usize` for backward compatibility.
Changing the index type to `u8` or `u16` reduces the footprint of the `RingBuffer` but limits its
maximum capacity (254 and 65534, respectively).
## [v0.2.6] - 2018-04-18
### Added
- A `BinaryHeap` implementation. `BinaryHeap` is a priority queue implemented with a binary heap.
## [v0.2.5] - 2018-04-13
### Fixed
- Dereferencing `heapless::Vec` no longer incurs a bounds check.
## [v0.2.4] - 2018-03-12
### Fixed
- `LinearMap::new` is now a const fn
## [v0.2.3] - 2018-03-11
### Added
- A `swap_remove` method to `Vec`
- A `LinearMap` implementation. `LinearMap` is a map / dict backed by an array that performs
lookups via linear search.
## [v0.2.2] - 2018-03-01
### Added
- Fixed-size version of `std::String`
## [v0.2.1] - 2017-12-21
### Added
- `Vec` now implements `fmt::Debug`, `PartialEq` and `Eq`.
- `resize` and `resize_default` methods to `Vec`.
## [v0.2.0] - 2017-11-22
### Added
- A single producer single consumer mode to `RingBuffer`.
- A `truncate` method to `Vec`.
### Changed
- [breaking-change] Both `Vec::new` and `RingBuffer::new` no longer require an initial value. The
signature of `new` is now `const fn() -> Self`.
- [breaking-change] The error type of all operations that may fail has changed from `()` to
`BufferFullError`.
- Both `RingBuffer` and `Vec` now support arrays of _any_ size for their backing storage.
## [v0.1.0] - 2017-04-27
- Initial release
[Unreleased]: https://github.com/rust-embedded/heapless/compare/v0.8.0...HEAD
[v0.8.0]: https://github.com/rust-embedded/heapless/compare/v0.7.16...v0.8.0
[v0.7.16]: https://github.com/rust-embedded/heapless/compare/v0.7.15...v0.7.16
[v0.7.15]: https://github.com/rust-embedded/heapless/compare/v0.7.14...v0.7.15
[v0.7.14]: https://github.com/rust-embedded/heapless/compare/v0.7.13...v0.7.14
[v0.7.13]: https://github.com/rust-embedded/heapless/compare/v0.7.12...v0.7.13
[v0.7.12]: https://github.com/rust-embedded/heapless/compare/v0.7.11...v0.7.12
[v0.7.11]: https://github.com/rust-embedded/heapless/compare/v0.7.10...v0.7.11
[v0.7.10]: https://github.com/rust-embedded/heapless/compare/v0.7.9...v0.7.10
[v0.7.9]: https://github.com/rust-embedded/heapless/compare/v0.7.8...v0.7.9
[v0.7.8]: https://github.com/rust-embedded/heapless/compare/v0.7.7...v0.7.8
[v0.7.7]: https://github.com/rust-embedded/heapless/compare/v0.7.6...v0.7.7
[v0.7.6]: https://github.com/rust-embedded/heapless/compare/v0.7.5...v0.7.6
[v0.7.5]: https://github.com/rust-embedded/heapless/compare/v0.7.4...v0.7.5
[v0.7.4]: https://github.com/rust-embedded/heapless/compare/v0.7.3...v0.7.4
[v0.7.3]: https://github.com/rust-embedded/heapless/compare/v0.7.2...v0.7.3
[v0.7.2]: https://github.com/rust-embedded/heapless/compare/v0.7.1...v0.7.2
[v0.7.1]: https://github.com/rust-embedded/heapless/compare/v0.7.0...v0.7.1
[v0.7.0]: https://github.com/rust-embedded/heapless/compare/v0.6.1...v0.7.0
[v0.6.1]: https://github.com/rust-embedded/heapless/compare/v0.6.0...v0.6.1
[v0.6.0]: https://github.com/rust-embedded/heapless/compare/v0.5.5...v0.6.0
[v0.5.5]: https://github.com/rust-embedded/heapless/compare/v0.5.4...v0.5.5
[v0.5.4]: https://github.com/rust-embedded/heapless/compare/v0.5.3...v0.5.4
[v0.5.3]: https://github.com/rust-embedded/heapless/compare/v0.5.2...v0.5.3
[v0.5.2]: https://github.com/rust-embedded/heapless/compare/v0.5.1...v0.5.2
[v0.5.1]: https://github.com/rust-embedded/heapless/compare/v0.5.0...v0.5.1
[v0.5.0]: https://github.com/rust-embedded/heapless/compare/v0.4.4...v0.5.0
[v0.4.4]: https://github.com/rust-embedded/heapless/compare/v0.4.3...v0.4.4
[v0.4.3]: https://github.com/rust-embedded/heapless/compare/v0.4.2...v0.4.3
[v0.4.2]: https://github.com/rust-embedded/heapless/compare/v0.4.1...v0.4.2
[v0.4.1]: https://github.com/rust-embedded/heapless/compare/v0.4.0...v0.4.1
[v0.4.0]: https://github.com/rust-embedded/heapless/compare/v0.3.7...v0.4.0
[v0.3.7]: https://github.com/rust-embedded/heapless/compare/v0.3.6...v0.3.7
[v0.3.6]: https://github.com/rust-embedded/heapless/compare/v0.3.5...v0.3.6
[v0.3.5]: https://github.com/rust-embedded/heapless/compare/v0.3.4...v0.3.5
[v0.3.4]: https://github.com/rust-embedded/heapless/compare/v0.3.3...v0.3.4
[v0.3.3]: https://github.com/rust-embedded/heapless/compare/v0.3.2...v0.3.3
[v0.3.2]: https://github.com/rust-embedded/heapless/compare/v0.3.1...v0.3.2
[v0.3.1]: https://github.com/rust-embedded/heapless/compare/v0.3.0...v0.3.1
[v0.3.0]: https://github.com/rust-embedded/heapless/compare/v0.2.7...v0.3.0
[v0.2.7]: https://github.com/rust-embedded/heapless/compare/v0.2.6...v0.2.7
[v0.2.6]: https://github.com/rust-embedded/heapless/compare/v0.2.5...v0.2.6
[v0.2.5]: https://github.com/rust-embedded/heapless/compare/v0.2.4...v0.2.5
[v0.2.4]: https://github.com/rust-embedded/heapless/compare/v0.2.3...v0.2.4
[v0.2.3]: https://github.com/rust-embedded/heapless/compare/v0.2.2...v0.2.3
[v0.2.2]: https://github.com/rust-embedded/heapless/compare/v0.2.1...v0.2.2
[v0.2.1]: https://github.com/rust-embedded/heapless/compare/v0.2.0...v0.2.1
[v0.2.0]: https://github.com/rust-embedded/heapless/compare/v0.1.0...v0.2.0

37
vendor/heapless/CODE_OF_CONDUCT.md vendored Normal file

@@ -0,0 +1,37 @@
# The Rust Code of Conduct
## Conduct
**Contact**: [Libs team](https://github.com/rust-embedded/wg#the-libs-team)
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
* On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
* Please be kind and courteous. There's no need to be mean or rude.
* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Libs team][team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome.
## Moderation
These are the policies for upholding our community's standards of conduct.
1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
3. Moderators will first respond to such remarks with a warning.
4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off.
5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed.
8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
The enforcement policies listed above apply to all official embedded WG venues; including official IRC channels (#rust-embedded); GitHub repositories under rust-embedded; and all forums under rust-embedded.org (forum.rust-embedded.org).
*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).*
[team]: https://github.com/rust-embedded/wg#the-libs-team

91
vendor/heapless/Cargo.toml vendored Normal file

@@ -0,0 +1,91 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "heapless"
version = "0.8.0"
authors = [
"Jorge Aparicio <jorge@japaric.io>",
"Per Lindgren <per.lindgren@ltu.se>",
"Emil Fresk <emil.fresk@gmail.com>",
]
description = "`static` friendly data structures that don't require dynamic memory allocation"
documentation = "https://docs.rs/heapless"
readme = "README.md"
keywords = [
"static",
"no-heap",
]
categories = [
"data-structures",
"no-std",
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-embedded/heapless"
[package.metadata.docs.rs]
features = [
"ufmt",
"serde",
"defmt-03",
"mpmc_large",
"portable-atomic-critical-section",
]
rustdoc-args = [
"--cfg",
"docsrs",
]
targets = ["i686-unknown-linux-gnu"]
[dependencies.defmt]
version = ">=0.2.0,<0.4"
optional = true
[dependencies.hash32]
version = "0.3.0"
[dependencies.portable-atomic]
version = "1.0"
optional = true
[dependencies.serde]
version = "1"
optional = true
default-features = false
[dependencies.stable_deref_trait]
version = "1"
default-features = false
[dependencies.ufmt-write]
version = "0.1"
optional = true
[dev-dependencies.ufmt]
version = "0.2"
[features]
defmt-03 = ["dep:defmt"]
mpmc_large = []
portable-atomic = ["dep:portable-atomic"]
portable-atomic-critical-section = [
"dep:portable-atomic",
"portable-atomic",
"portable-atomic?/critical-section",
]
portable-atomic-unsafe-assume-single-core = [
"dep:portable-atomic",
"portable-atomic",
"portable-atomic?/unsafe-assume-single-core",
]
serde = ["dep:serde"]
ufmt = ["dep:ufmt-write"]

201
vendor/heapless/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
vendor/heapless/LICENSE-MIT vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2017 Jorge Aparicio
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

41
vendor/heapless/README.md vendored Normal file

@@ -0,0 +1,41 @@
[![crates.io](https://img.shields.io/crates/v/heapless.svg)](https://crates.io/crates/heapless)
[![crates.io](https://img.shields.io/crates/d/heapless.svg)](https://crates.io/crates/heapless)
# `heapless`
> `static` friendly data structures that don't require dynamic memory allocation
This project is developed and maintained by the [libs team].
## [Documentation](https://docs.rs/heapless/latest/heapless)
## [Change log](CHANGELOG.md)
## Tests
``` console
$ # run all
$ cargo test --features serde
$ # run only for example histbuf tests
$ cargo test histbuf --features serde
```
## License
Licensed under either of
- Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
## Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
[libs team]: https://github.com/rust-embedded/wg#the-libs-team

92
vendor/heapless/build.rs vendored Normal file

@@ -0,0 +1,92 @@
#![deny(warnings)]
use std::{
env,
error::Error,
fs,
path::Path,
process::{Command, ExitStatus, Stdio},
};
fn main() -> Result<(), Box<dyn Error>> {
let target = env::var("TARGET")?;
// Manually list targets that have atomic load/store, but no CAS.
// Remove when `cfg(target_has_atomic_load_store)` is stable.
// last updated nightly-2023-10-28
match &target[..] {
"armv4t-none-eabi"
| "armv5te-none-eabi"
| "avr-unknown-gnu-atmega328"
| "bpfeb-unknown-none"
| "bpfel-unknown-none"
| "thumbv4t-none-eabi"
| "thumbv5te-none-eabi"
| "thumbv6m-none-eabi" => println!("cargo:rustc-cfg=has_atomic_load_store"),
_ => {}
};
// AArch64 instruction set contains `clrex` but not `ldrex` or `strex`; the
// probe will succeed when we already know to deny this target from LLSC.
if !target.starts_with("aarch64") {
match compile_probe(ARM_LLSC_PROBE) {
Some(status) if status.success() => println!("cargo:rustc-cfg=arm_llsc"),
_ => {}
}
}
Ok(())
}
const ARM_LLSC_PROBE: &str = r#"
#![no_std]
// `no_mangle` forces codegen, which makes llvm check the contents of the `asm!` macro
#[no_mangle]
unsafe fn asm() {
core::arch::asm!("clrex");
}
"#;
// this function was taken from anyhow v1.0.63 build script
// https://crates.io/crates/anyhow/1.0.63 (last visited 2022-09-02)
// the code is licensed under 'MIT or APACHE-2.0'
fn compile_probe(source: &str) -> Option<ExitStatus> {
let rustc = env::var_os("RUSTC")?;
let out_dir = env::var_os("OUT_DIR")?;
let probefile = Path::new(&out_dir).join("probe.rs");
fs::write(&probefile, source).ok()?;
// Make sure to pick up Cargo rustc configuration.
let mut cmd = if let Some(wrapper) = env::var_os("RUSTC_WRAPPER") {
let mut cmd = Command::new(wrapper);
// The wrapper's first argument is supposed to be the path to rustc.
cmd.arg(rustc);
cmd
} else {
Command::new(rustc)
};
cmd.stderr(Stdio::null())
.arg("--edition=2018")
.arg("--crate-name=probe")
.arg("--crate-type=lib")
.arg("--out-dir")
.arg(out_dir)
.arg(probefile);
if let Some(target) = env::var_os("TARGET") {
cmd.arg("--target").arg(target);
}
// If Cargo wants to set RUSTFLAGS, use that.
if let Ok(rustflags) = env::var("CARGO_ENCODED_RUSTFLAGS") {
if !rustflags.is_empty() {
for arg in rustflags.split('\x1f') {
cmd.arg(arg);
}
}
}
cmd.status().ok()
}

738
vendor/heapless/src/binary_heap.rs vendored Normal file

@@ -0,0 +1,738 @@
//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the largest element have `O(log n)` time complexity. Checking the largest
//! / smallest element is `O(1)`.
// TODO not yet implemented
// Converting a vector to a binary heap can be done in-place, and has `O(n)` complexity. A binary
// heap can also be converted to a sorted vector in-place, allowing it to be used for an `O(n log
// n)` in-place heapsort.
use core::{
cmp::Ordering,
fmt,
marker::PhantomData,
mem::{self, ManuallyDrop},
ops::{Deref, DerefMut},
ptr, slice,
};
use crate::vec::Vec;
/// Min-heap
pub enum Min {}
/// Max-heap
pub enum Max {}
/// The binary heap kind: min-heap or max-heap
pub trait Kind: private::Sealed {
#[doc(hidden)]
fn ordering() -> Ordering;
}
impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
}
}
impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
/// Sealed traits
mod private {
pub trait Sealed {}
}
impl private::Sealed for Max {}
impl private::Sealed for Min {}
/// A priority queue implemented with a binary heap.
///
/// This can be either a min-heap or a max-heap.
///
/// It is a logic error for an item to be modified in such a way that the item's ordering relative
/// to any other item, as determined by the `Ord` trait, changes while it is in the heap. This is
/// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
///
/// // We can use peek to look at the next item in the heap. In this case,
/// // there's no items in there yet so we get None.
/// assert_eq!(heap.peek(), None);
///
/// // Let's add some scores...
/// heap.push(1).unwrap();
/// heap.push(5).unwrap();
/// heap.push(2).unwrap();
///
/// // Now peek shows the most important item in the heap.
/// assert_eq!(heap.peek(), Some(&5));
///
/// // We can check the length of a heap.
/// assert_eq!(heap.len(), 3);
///
/// // We can iterate over the items in the heap, although they are returned in
/// // a random order.
/// for x in &heap {
/// println!("{}", x);
/// }
///
/// // If we instead pop these scores, they should come back in order.
/// assert_eq!(heap.pop(), Some(5));
/// assert_eq!(heap.pop(), Some(2));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
///
/// // We can clear the heap of any remaining items.
/// heap.clear();
///
/// // The heap should now be empty.
/// assert!(heap.is_empty())
/// ```
pub struct BinaryHeap<T, K, const N: usize> {
pub(crate) _kind: PhantomData<K>,
pub(crate) data: Vec<T, N>,
}
impl<T, K, const N: usize> BinaryHeap<T, K, N> {
/* Constructors */
/// Creates an empty `BinaryHeap` as a min-heap or max-heap, depending on `K`.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// // allocate the binary heap on the stack
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(4).unwrap();
///
/// // allocate the binary heap in a static variable
/// static mut HEAP: BinaryHeap<i32, Max, 8> = BinaryHeap::new();
/// ```
pub const fn new() -> Self {
Self {
_kind: PhantomData,
data: Vec::new(),
}
}
}
impl<T, K, const N: usize> BinaryHeap<T, K, N>
where
T: Ord,
K: Kind,
{
/* Public API */
/// Returns the capacity of the binary heap.
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Drops all items from the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert!(!heap.is_empty());
///
/// heap.clear();
///
/// assert!(heap.is_empty());
/// ```
pub fn clear(&mut self) {
self.data.clear()
}
/// Returns the length of the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert_eq!(heap.len(), 2);
/// ```
pub fn len(&self) -> usize {
self.data.len()
}
/// Checks if the binary heap is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
///
/// assert!(heap.is_empty());
///
/// heap.push(3).unwrap();
/// heap.push(5).unwrap();
/// heap.push(1).unwrap();
///
/// assert!(!heap.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns an iterator visiting all values in the underlying vector, in arbitrary order.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(2).unwrap();
/// heap.push(3).unwrap();
/// heap.push(4).unwrap();
///
/// // Print 1, 2, 3, 4 in arbitrary order
/// for x in heap.iter() {
/// println!("{}", x);
///
/// }
/// ```
pub fn iter(&self) -> slice::Iter<'_, T> {
self.data.as_slice().iter()
}
/// Returns a mutable iterator visiting all values in the underlying vector, in arbitrary order.
///
/// **WARNING** Mutating the items in the binary heap can leave the heap in an inconsistent
/// state.
pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
self.data.as_mut_slice().iter_mut()
}
/// Returns the *top* (greatest if max-heap, smallest if min-heap) item in the binary heap, or
/// None if it is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// assert_eq!(heap.peek(), None);
///
/// heap.push(1).unwrap();
/// heap.push(5).unwrap();
/// heap.push(2).unwrap();
/// assert_eq!(heap.peek(), Some(&5));
/// ```
pub fn peek(&self) -> Option<&T> {
self.data.as_slice().get(0)
}
/// Returns a mutable reference to the greatest item in the binary heap, or
/// `None` if it is empty.
///
/// Note: If the `PeekMut` value is leaked, the heap may be in an
/// inconsistent state.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// assert!(heap.peek_mut().is_none());
///
/// heap.push(1);
/// heap.push(5);
/// heap.push(2);
/// {
/// let mut val = heap.peek_mut().unwrap();
/// *val = 0;
/// }
///
/// assert_eq!(heap.peek(), Some(&2));
/// ```
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, K, N>> {
if self.is_empty() {
None
} else {
Some(PeekMut {
heap: self,
sift: true,
})
}
}
/// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and
/// returns it, or None if it is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert_eq!(heap.pop(), Some(3));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
/// ```
pub fn pop(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
Some(unsafe { self.pop_unchecked() })
}
}
/// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and
/// returns it, without checking if the binary heap is empty.
pub unsafe fn pop_unchecked(&mut self) -> T {
let mut item = self.data.pop_unchecked();
if !self.is_empty() {
mem::swap(&mut item, self.data.as_mut_slice().get_unchecked_mut(0));
self.sift_down_to_bottom(0);
}
item
}
/// Pushes an item onto the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(3).unwrap();
/// heap.push(5).unwrap();
/// heap.push(1).unwrap();
///
/// assert_eq!(heap.len(), 3);
/// assert_eq!(heap.peek(), Some(&5));
/// ```
pub fn push(&mut self, item: T) -> Result<(), T> {
if self.data.is_full() {
return Err(item);
}
unsafe { self.push_unchecked(item) }
Ok(())
}
/// Pushes an item onto the binary heap without first checking if it's full.
pub unsafe fn push_unchecked(&mut self, item: T) {
let old_len = self.len();
self.data.push_unchecked(item);
self.sift_up(0, old_len);
}
/// Returns the underlying `Vec<T, N>`. Order is arbitrary and time is O(1).
pub fn into_vec(self) -> Vec<T, N> {
self.data
}
/* Private API */
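/// Moves the element at `pos` down to a leaf, at each level following the child
/// that belongs nearer the top for this heap kind (the greater child for a
/// max-heap, the smaller for a min-heap), then sifts it back up to its final
/// position. This bottom-up strategy performs fewer comparisons than a plain
/// sift-down when the moved element is likely to end up near the bottom.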
fn sift_down_to_bottom(&mut self, mut pos: usize) {
let end = self.len();
let start = pos;
unsafe {
let mut hole = Hole::new(self.data.as_mut_slice(), pos);
let mut child = 2 * pos + 1;
while child < end {
let right = child + 1;
// compare with the greater of the two children
if right < end && hole.get(child).cmp(hole.get(right)) != K::ordering() {
child = right;
}
hole.move_to(child);
child = 2 * hole.pos() + 1;
}
pos = hole.pos;
}
self.sift_up(start, pos);
}
fn sift_up(&mut self, start: usize, pos: usize) -> usize {
unsafe {
// Take out the value at `pos` and create a hole.
let mut hole = Hole::new(self.data.as_mut_slice(), pos);
while hole.pos() > start {
let parent = (hole.pos() - 1) / 2;
if hole.element().cmp(hole.get(parent)) != K::ordering() {
break;
}
hole.move_to(parent);
}
hole.pos()
}
}
}
/// Hole represents a hole in a slice i.e. an index without valid value
/// (because it was moved from or duplicated).
/// In drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T> {
data: &'a mut [T],
/// `elt` is always `Some` from new until drop.
elt: ManuallyDrop<T>,
pos: usize,
}
impl<'a, T> Hole<'a, T> {
/// Create a new Hole at index `pos`.
///
/// Unsafe because pos must be within the data slice.
#[inline]
unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
debug_assert!(pos < data.len());
let elt = ptr::read(data.get_unchecked(pos));
Hole {
data,
elt: ManuallyDrop::new(elt),
pos,
}
}
#[inline]
fn pos(&self) -> usize {
self.pos
}
/// Returns a reference to the element removed.
#[inline]
fn element(&self) -> &T {
&self.elt
}
/// Returns a reference to the element at `index`.
///
/// Unsafe because index must be within the data slice and not equal to pos.
#[inline]
unsafe fn get(&self, index: usize) -> &T {
debug_assert!(index != self.pos);
debug_assert!(index < self.data.len());
self.data.get_unchecked(index)
}
/// Move hole to new location
///
/// Unsafe because index must be within the data slice and not equal to pos.
#[inline]
unsafe fn move_to(&mut self, index: usize) {
debug_assert!(index != self.pos);
debug_assert!(index < self.data.len());
let ptr = self.data.as_mut_ptr();
let index_ptr: *const _ = ptr.add(index);
let hole_ptr = ptr.add(self.pos);
ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
self.pos = index;
}
}
/// Structure wrapping a mutable reference to the greatest item on a
/// `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::peek_mut`].
/// See its documentation for more.
pub struct PeekMut<'a, T, K, const N: usize>
where
T: Ord,
K: Kind,
{
heap: &'a mut BinaryHeap<T, K, N>,
sift: bool,
}
impl<T, K, const N: usize> Drop for PeekMut<'_, T, K, N>
where
T: Ord,
K: Kind,
{
fn drop(&mut self) {
if self.sift {
self.heap.sift_down_to_bottom(0);
}
}
}
impl<T, K, const N: usize> Deref for PeekMut<'_, T, K, N>
where
T: Ord,
K: Kind,
{
type Target = T;
fn deref(&self) -> &T {
debug_assert!(!self.heap.is_empty());
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.data.as_slice().get_unchecked(0) }
}
}
impl<T, K, const N: usize> DerefMut for PeekMut<'_, T, K, N>
where
T: Ord,
K: Kind,
{
fn deref_mut(&mut self) -> &mut T {
debug_assert!(!self.heap.is_empty());
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.data.as_mut_slice().get_unchecked_mut(0) }
}
}
impl<'a, T, K, const N: usize> PeekMut<'a, T, K, N>
where
T: Ord,
K: Kind,
{
/// Removes the peeked value from the heap and returns it.
pub fn pop(mut this: PeekMut<'a, T, K, N>) -> T {
let value = this.heap.pop().unwrap();
this.sift = false;
value
}
}
impl<'a, T> Drop for Hole<'a, T> {
#[inline]
fn drop(&mut self) {
// fill the hole again
unsafe {
let pos = self.pos;
ptr::write(self.data.get_unchecked_mut(pos), ptr::read(&*self.elt));
}
}
}
impl<T, K, const N: usize> Default for BinaryHeap<T, K, N>
where
T: Ord,
K: Kind,
{
fn default() -> Self {
Self::new()
}
}
impl<T, K, const N: usize> Clone for BinaryHeap<T, K, N>
where
K: Kind,
T: Ord + Clone,
{
fn clone(&self) -> Self {
Self {
_kind: self._kind,
data: self.data.clone(),
}
}
}
impl<T, K, const N: usize> fmt::Debug for BinaryHeap<T, K, N>
where
K: Kind,
T: Ord + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<'a, T, K, const N: usize> IntoIterator for &'a BinaryHeap<T, K, N>
where
K: Kind,
T: Ord,
{
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
#[cfg(test)]
mod tests {
use std::vec::Vec;
use crate::binary_heap::{BinaryHeap, Max, Min};
#[test]
fn static_new() {
static mut _B: BinaryHeap<i32, Min, 16> = BinaryHeap::new();
}
#[test]
fn drop() {
droppable!();
{
let mut v: BinaryHeap<Droppable, Max, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
v.pop().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: BinaryHeap<Droppable, Max, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: BinaryHeap<Droppable, Min, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
v.pop().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: BinaryHeap<Droppable, Min, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
}
#[test]
fn into_vec() {
droppable!();
let mut h: BinaryHeap<Droppable, Max, 2> = BinaryHeap::new();
h.push(Droppable::new()).ok().unwrap();
h.push(Droppable::new()).ok().unwrap();
h.pop().unwrap();
assert_eq!(Droppable::count(), 1);
let v = h.into_vec();
assert_eq!(Droppable::count(), 1);
core::mem::drop(v);
assert_eq!(Droppable::count(), 0);
}
#[test]
fn min() {
let mut heap = BinaryHeap::<_, Min, 16>::new();
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(3).unwrap();
heap.push(17).unwrap();
heap.push(19).unwrap();
heap.push(36).unwrap();
heap.push(7).unwrap();
heap.push(25).unwrap();
heap.push(100).unwrap();
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[1, 2, 3, 17, 19, 36, 7, 25, 100]
);
assert_eq!(heap.pop(), Some(1));
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[2, 17, 3, 25, 19, 36, 7, 100]
);
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(17));
assert_eq!(heap.pop(), Some(19));
assert_eq!(heap.pop(), Some(25));
assert_eq!(heap.pop(), Some(36));
assert_eq!(heap.pop(), Some(100));
assert_eq!(heap.pop(), None);
assert!(heap.peek_mut().is_none());
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(10).unwrap();
{
let mut val = heap.peek_mut().unwrap();
*val = 7;
}
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(10));
assert_eq!(heap.pop(), None);
}
#[test]
fn max() {
let mut heap = BinaryHeap::<_, Max, 16>::new();
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(3).unwrap();
heap.push(17).unwrap();
heap.push(19).unwrap();
heap.push(36).unwrap();
heap.push(7).unwrap();
heap.push(25).unwrap();
heap.push(100).unwrap();
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[100, 36, 19, 25, 3, 2, 7, 1, 17]
);
assert_eq!(heap.pop(), Some(100));
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[36, 25, 19, 17, 3, 2, 7, 1]
);
assert_eq!(heap.pop(), Some(36));
assert_eq!(heap.pop(), Some(25));
assert_eq!(heap.pop(), Some(19));
assert_eq!(heap.pop(), Some(17));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), None);
assert!(heap.peek_mut().is_none());
heap.push(1).unwrap();
heap.push(9).unwrap();
heap.push(10).unwrap();
{
let mut val = heap.peek_mut().unwrap();
*val = 7;
}
assert_eq!(heap.pop(), Some(9));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), None);
}
}

306
vendor/heapless/src/de.rs vendored Normal file

@@ -0,0 +1,306 @@
use crate::{
binary_heap::Kind as BinaryHeapKind, BinaryHeap, Deque, IndexMap, IndexSet, LinearMap, String,
Vec,
};
use core::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use hash32::BuildHasherDefault;
use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess};
// Sequential containers
impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap<T, KIND, N>
where
T: Ord + Deserialize<'de>,
KIND: BinaryHeapKind,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>);
impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N>
where
T: Ord + Deserialize<'de>,
KIND: BinaryHeapKind,
{
type Value = BinaryHeap<T, KIND, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = BinaryHeap::new();
while let Some(value) = seq.next_element()? {
if values.push(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self))?;
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet<T, BuildHasherDefault<S>, N>
where
T: Eq + Hash + Deserialize<'de>,
S: Hasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>);
impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N>
where
T: Eq + Hash + Deserialize<'de>,
S: Hasher + Default,
{
type Value = IndexSet<T, BuildHasherDefault<S>, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = IndexSet::new();
while let Some(value) = seq.next_element()? {
if values.insert(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self))?;
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
impl<'de, T, const N: usize> Deserialize<'de> for Vec<T, N>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>);
impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N>
where
T: Deserialize<'de>,
{
type Value = Vec<T, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = Vec::new();
while let Some(value) = seq.next_element()? {
if values.push(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self))?;
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
impl<'de, T, const N: usize> Deserialize<'de> for Deque<T, N>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>);
impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N>
where
T: Deserialize<'de>,
{
type Value = Deque<T, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = Deque::new();
while let Some(value) = seq.next_element()? {
if values.push_back(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self))?;
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
// Dictionaries
impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap<K, V, BuildHasherDefault<S>, N>
where
K: Eq + Hash + Deserialize<'de>,
V: Deserialize<'de>,
S: Default + Hasher,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>);
impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N>
where
K: Eq + Hash + Deserialize<'de>,
V: Deserialize<'de>,
S: Default + Hasher,
{
type Value = IndexMap<K, V, BuildHasherDefault<S>, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = IndexMap::new();
while let Some((key, value)) = map.next_entry()? {
if values.insert(key, value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self))?;
}
}
Ok(values)
}
}
deserializer.deserialize_map(ValueVisitor(PhantomData))
}
}
impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap<K, V, N>
where
K: Eq + Deserialize<'de>,
V: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>);
impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N>
where
K: Eq + Deserialize<'de>,
V: Deserialize<'de>,
{
type Value = LinearMap<K, V, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = LinearMap::new();
while let Some((key, value)) = map.next_entry()? {
if values.insert(key, value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self))?;
}
}
Ok(values)
}
}
deserializer.deserialize_map(ValueVisitor(PhantomData))
}
}
// String containers
impl<'de, const N: usize> Deserialize<'de> for String<N> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, const N: usize>(PhantomData<&'de ()>);
impl<'de, const N: usize> de::Visitor<'de> for ValueVisitor<'de, N> {
type Value = String<N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a string no more than {} bytes long", N as u64)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
let mut s = String::new();
s.push_str(v)
.map_err(|_| E::invalid_length(v.len(), &self))?;
Ok(s)
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
let mut s = String::new();
s.push_str(
core::str::from_utf8(v)
.map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?,
)
.map_err(|_| E::invalid_length(v.len(), &self))?;
Ok(s)
}
}
deserializer.deserialize_str(ValueVisitor::<'de, N>(PhantomData))
}
}
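// A minimal usage sketch (not part of this module), assuming `serde_json` is
// available as a (dev-)dependency; any other serde data format works the same way:
//
//     use heapless::{String, Vec};
//
//     // Fits within the fixed capacity, so deserialization succeeds.
//     let v: Vec<u8, 4> = serde_json::from_str("[1, 2, 3]").unwrap();
//     assert_eq!(v.as_slice(), [1, 2, 3]);
//
//     // Exceeds the capacity: the visitor reports an `invalid_length` error
//     // instead of allocating.
//     let s: Result<String<4>, _> = serde_json::from_str("\"too long!\"");
//     assert!(s.is_err());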

23
vendor/heapless/src/defmt.rs vendored Normal file

@@ -0,0 +1,23 @@
//! Defmt implementations for heapless types
//!
use crate::Vec;
use defmt::Formatter;
impl<T, const N: usize> defmt::Format for Vec<T, N>
where
T: defmt::Format,
{
fn format(&self, fmt: Formatter<'_>) {
defmt::write!(fmt, "{=[?]}", self.as_slice())
}
}
impl<const N: usize> defmt::Format for crate::String<N>
where
u8: defmt::Format,
{
fn format(&self, fmt: Formatter<'_>) {
defmt::write!(fmt, "{=str}", self.as_str());
}
}
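// A minimal usage sketch (not part of this module), assuming a `defmt` global logger
// and transport are set up elsewhere (e.g. `defmt-rtt` on an embedded target):
//
//     let mut v: heapless::Vec<u8, 4> = heapless::Vec::new();
//     v.push(42).ok();
//     defmt::info!("vec = {}", v); // formatted as a slice via the impl above
//
//     let mut s: heapless::String<8> = heapless::String::new();
//     s.push_str("hi").ok();
//     defmt::info!("s = {}", s); // formatted as a string slice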

831
vendor/heapless/src/deque.rs vendored Normal file

@@ -0,0 +1,831 @@
use core::fmt;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::{ptr, slice};
/// A fixed capacity double-ended queue.
///
/// # Examples
///
/// ```
/// use heapless::Deque;
///
/// // A deque with a fixed capacity of 8 elements allocated on the stack
/// let mut deque = Deque::<_, 8>::new();
///
/// // You can use it as a good old FIFO queue.
/// deque.push_back(1);
/// deque.push_back(2);
/// assert_eq!(deque.len(), 2);
///
/// assert_eq!(deque.pop_front(), Some(1));
/// assert_eq!(deque.pop_front(), Some(2));
/// assert_eq!(deque.len(), 0);
///
/// // Deque is double-ended, you can push and pop from the front and back.
/// deque.push_back(1);
/// deque.push_front(2);
/// deque.push_back(3);
/// deque.push_front(4);
/// assert_eq!(deque.pop_front(), Some(4));
/// assert_eq!(deque.pop_front(), Some(2));
/// assert_eq!(deque.pop_front(), Some(1));
/// assert_eq!(deque.pop_front(), Some(3));
///
/// // You can iterate it, yielding all the elements front-to-back.
/// for x in &deque {
/// println!("{}", x);
/// }
/// ```
pub struct Deque<T, const N: usize> {
buffer: [MaybeUninit<T>; N],
/// Front index. Always 0..=(N-1)
front: usize,
/// Back index. Always 0..=(N-1).
back: usize,
/// Used to distinguish "empty" and "full" cases when `front == back`.
/// May only be `true` if `front == back`, always `false` otherwise.
full: bool,
}
impl<T, const N: usize> Deque<T, N> {
const INIT: MaybeUninit<T> = MaybeUninit::uninit();
/// Constructs a new, empty deque with a fixed capacity of `N`
///
/// # Examples
///
/// ```
/// use heapless::Deque;
///
/// // allocate the deque on the stack
/// let mut x: Deque<u8, 16> = Deque::new();
///
/// // allocate the deque in a static variable
/// static mut X: Deque<u8, 16> = Deque::new();
/// ```
pub const fn new() -> Self {
// Const assert N > 0
crate::sealed::greater_than_0::<N>();
Self {
buffer: [Self::INIT; N],
front: 0,
back: 0,
full: false,
}
}
fn increment(i: usize) -> usize {
if i + 1 == N {
0
} else {
i + 1
}
}
fn decrement(i: usize) -> usize {
if i == 0 {
N - 1
} else {
i - 1
}
}
/// Returns the maximum number of elements the deque can hold.
pub const fn capacity(&self) -> usize {
N
}
/// Returns the number of elements currently in the deque.
pub const fn len(&self) -> usize {
if self.full {
N
} else if self.back < self.front {
self.back + N - self.front
} else {
self.back - self.front
}
}
/// Clears the deque, removing all values.
pub fn clear(&mut self) {
// safety: we're immediately setting a consistent empty state.
unsafe { self.drop_contents() }
self.front = 0;
self.back = 0;
self.full = false;
}
/// Drop all items in the `Deque`, leaving the state `back/front/full` unmodified.
///
/// safety: leaves the `Deque` in an inconsistent state, so can cause duplicate drops.
unsafe fn drop_contents(&mut self) {
// We drop each element used in the deque by turning into a &mut[T]
let (a, b) = self.as_mut_slices();
ptr::drop_in_place(a);
ptr::drop_in_place(b);
}
/// Returns whether the deque is empty.
pub fn is_empty(&self) -> bool {
self.front == self.back && !self.full
}
/// Returns whether the deque is full (i.e. if `len() == capacity()`).
pub fn is_full(&self) -> bool {
self.full
}
/// Returns a pair of slices which contain, in order, the contents of the `Deque`.
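///
/// An illustrative sketch (not from the upstream docs):
///
/// ```
/// use heapless::Deque;
///
/// let mut deque: Deque<u8, 4> = Deque::new();
/// deque.push_back(1).unwrap();
/// deque.push_back(2).unwrap();
/// deque.push_front(0).unwrap();
///
/// // Pushing at the front wrapped around the internal ring buffer,
/// // so the contents are split across two slices.
/// assert_eq!(deque.as_slices(), (&[0][..], &[1, 2][..]));
/// ```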
pub fn as_slices(&self) -> (&[T], &[T]) {
// NOTE(unsafe) avoid bound checks in the slicing operation
unsafe {
if self.is_empty() {
(&[], &[])
} else if self.back <= self.front {
(
slice::from_raw_parts(
self.buffer.as_ptr().add(self.front) as *const T,
N - self.front,
),
slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.back),
)
} else {
(
slice::from_raw_parts(
self.buffer.as_ptr().add(self.front) as *const T,
self.back - self.front,
),
&[],
)
}
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the `Deque`.
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
let ptr = self.buffer.as_mut_ptr();
// NOTE(unsafe) avoid bound checks in the slicing operation
unsafe {
if self.is_empty() {
(&mut [], &mut [])
} else if self.back <= self.front {
(
slice::from_raw_parts_mut(ptr.add(self.front) as *mut T, N - self.front),
slice::from_raw_parts_mut(ptr as *mut T, self.back),
)
} else {
(
slice::from_raw_parts_mut(
ptr.add(self.front) as *mut T,
self.back - self.front,
),
&mut [],
)
}
}
}
/// Provides a reference to the front element, or None if the `Deque` is empty.
pub fn front(&self) -> Option<&T> {
if self.is_empty() {
None
} else {
Some(unsafe { &*self.buffer.get_unchecked(self.front).as_ptr() })
}
}
/// Provides a mutable reference to the front element, or None if the `Deque` is empty.
pub fn front_mut(&mut self) -> Option<&mut T> {
if self.is_empty() {
None
} else {
Some(unsafe { &mut *self.buffer.get_unchecked_mut(self.front).as_mut_ptr() })
}
}
/// Provides a reference to the back element, or None if the `Deque` is empty.
pub fn back(&self) -> Option<&T> {
if self.is_empty() {
None
} else {
let index = Self::decrement(self.back);
Some(unsafe { &*self.buffer.get_unchecked(index).as_ptr() })
}
}
/// Provides a mutable reference to the back element, or None if the `Deque` is empty.
pub fn back_mut(&mut self) -> Option<&mut T> {
if self.is_empty() {
None
} else {
let index = Self::decrement(self.back);
Some(unsafe { &mut *self.buffer.get_unchecked_mut(index).as_mut_ptr() })
}
}
/// Removes the item from the front of the deque and returns it, or `None` if it's empty
pub fn pop_front(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
Some(unsafe { self.pop_front_unchecked() })
}
}
/// Removes the item from the back of the deque and returns it, or `None` if it's empty
pub fn pop_back(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
Some(unsafe { self.pop_back_unchecked() })
}
}
/// Appends an `item` to the front of the deque
///
/// Returns `Err(item)` if the deque is full, handing the element back to the caller
pub fn push_front(&mut self, item: T) -> Result<(), T> {
if self.is_full() {
Err(item)
} else {
unsafe { self.push_front_unchecked(item) }
Ok(())
}
}
/// Appends an `item` to the back of the deque
///
/// Returns `Err(item)` if the deque is full, handing the element back to the caller
pub fn push_back(&mut self, item: T) -> Result<(), T> {
if self.is_full() {
Err(item)
} else {
unsafe { self.push_back_unchecked(item) }
Ok(())
}
}
/// Removes an item from the front of the deque and returns it, without checking that the deque
/// is not empty
///
/// # Safety
///
/// It's undefined behavior to call this on an empty deque
pub unsafe fn pop_front_unchecked(&mut self) -> T {
debug_assert!(!self.is_empty());
let index = self.front;
self.full = false;
self.front = Self::increment(self.front);
(self.buffer.get_unchecked_mut(index).as_ptr() as *const T).read()
}
/// Removes an item from the back of the deque and returns it, without checking that the deque
/// is not empty
///
/// # Safety
///
/// It's undefined behavior to call this on an empty deque
pub unsafe fn pop_back_unchecked(&mut self) -> T {
debug_assert!(!self.is_empty());
self.full = false;
self.back = Self::decrement(self.back);
(self.buffer.get_unchecked_mut(self.back).as_ptr() as *const T).read()
}
/// Appends an `item` to the front of the deque
///
/// # Safety
///
/// This assumes the deque is not full.
pub unsafe fn push_front_unchecked(&mut self, item: T) {
debug_assert!(!self.is_full());
let index = Self::decrement(self.front);
// NOTE: the memory slot that we are about to write to is uninitialized. We assign
// a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory
*self.buffer.get_unchecked_mut(index) = MaybeUninit::new(item);
self.front = index;
if self.front == self.back {
self.full = true;
}
}
/// Appends an `item` to the back of the deque
///
/// # Safety
///
/// This assumes the deque is not full.
pub unsafe fn push_back_unchecked(&mut self, item: T) {
debug_assert!(!self.is_full());
// NOTE: the memory slot that we are about to write to is uninitialized. We assign
// a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory
*self.buffer.get_unchecked_mut(self.back) = MaybeUninit::new(item);
self.back = Self::increment(self.back);
if self.front == self.back {
self.full = true;
}
}
/// Returns an iterator over the deque.
pub fn iter(&self) -> Iter<'_, T, N> {
let done = self.is_empty();
Iter {
_phantom: PhantomData,
buffer: &self.buffer as *const MaybeUninit<T>,
front: self.front,
back: self.back,
done,
}
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&mut self) -> IterMut<'_, T, N> {
let done = self.is_empty();
IterMut {
_phantom: PhantomData,
buffer: &mut self.buffer as *mut _ as *mut MaybeUninit<T>,
front: self.front,
back: self.back,
done,
}
}
}
// Trait implementations
impl<T, const N: usize> Default for Deque<T, N> {
fn default() -> Self {
Self::new()
}
}
impl<T, const N: usize> Drop for Deque<T, N> {
fn drop(&mut self) {
// safety: `self` is left in an inconsistent state but it doesn't matter since
// it's getting dropped. Nothing should be able to observe `self` after drop.
unsafe { self.drop_contents() }
}
}
impl<T: fmt::Debug, const N: usize> fmt::Debug for Deque<T, N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self).finish()
}
}
/// An iterator that moves out of a [`Deque`].
///
/// This struct is created by calling the `into_iter` method.
///
#[derive(Clone)]
pub struct IntoIter<T, const N: usize> {
deque: Deque<T, N>,
}
impl<T, const N: usize> Iterator for IntoIter<T, N> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.deque.pop_front()
}
}
impl<T, const N: usize> IntoIterator for Deque<T, N> {
type Item = T;
type IntoIter = IntoIter<T, N>;
fn into_iter(self) -> Self::IntoIter {
IntoIter { deque: self }
}
}
/// An iterator over the elements of a [`Deque`].
///
/// This struct is created by calling the `iter` method.
#[derive(Clone)]
pub struct Iter<'a, T, const N: usize> {
buffer: *const MaybeUninit<T>,
_phantom: PhantomData<&'a T>,
front: usize,
back: usize,
done: bool,
}
impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
let index = self.front;
self.front = Deque::<T, N>::increment(self.front);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &*(self.buffer.add(index) as *const T) })
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = if self.done {
0
} else if self.back <= self.front {
self.back + N - self.front
} else {
self.back - self.front
};
(len, Some(len))
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
self.back = Deque::<T, N>::decrement(self.back);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &*(self.buffer.add(self.back) as *const T) })
}
}
}
impl<'a, T, const N: usize> ExactSizeIterator for Iter<'a, T, N> {}
impl<'a, T, const N: usize> FusedIterator for Iter<'a, T, N> {}
/// An iterator over the elements of a [`Deque`].
///
/// This struct is created by calling the `iter_mut` method.
pub struct IterMut<'a, T, const N: usize> {
buffer: *mut MaybeUninit<T>,
_phantom: PhantomData<&'a mut T>,
front: usize,
back: usize,
done: bool,
}
impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> {
type Item = &'a mut T;
fn next(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
let index = self.front;
self.front = Deque::<T, N>::increment(self.front);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &mut *(self.buffer.add(index) as *mut T) })
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = if self.done {
0
} else if self.back <= self.front {
self.back + N - self.front
} else {
self.back - self.front
};
(len, Some(len))
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
self.back = Deque::<T, N>::decrement(self.back);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &mut *(self.buffer.add(self.back) as *mut T) })
}
}
}
impl<'a, T, const N: usize> ExactSizeIterator for IterMut<'a, T, N> {}
impl<'a, T, const N: usize> FusedIterator for IterMut<'a, T, N> {}
impl<'a, T, const N: usize> IntoIterator for &'a Deque<T, N> {
type Item = &'a T;
type IntoIter = Iter<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, const N: usize> IntoIterator for &'a mut Deque<T, N> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<T, const N: usize> Clone for Deque<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut res = Deque::new();
for i in self {
// safety: the original and new deques have the same capacity, so it can
// not become full.
unsafe { res.push_back_unchecked(i.clone()) }
}
res
}
}
#[cfg(test)]
mod tests {
use crate::Deque;
#[test]
fn static_new() {
static mut _V: Deque<i32, 4> = Deque::new();
}
#[test]
fn stack_new() {
let mut _v: Deque<i32, 4> = Deque::new();
}
#[test]
fn drop() {
droppable!();
{
let mut v: Deque<Droppable, 2> = Deque::new();
v.push_back(Droppable::new()).ok().unwrap();
v.push_back(Droppable::new()).ok().unwrap();
v.pop_front().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: Deque<Droppable, 2> = Deque::new();
v.push_back(Droppable::new()).ok().unwrap();
v.push_back(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: Deque<Droppable, 2> = Deque::new();
v.push_front(Droppable::new()).ok().unwrap();
v.push_front(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
}
#[test]
fn full() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_front(1).unwrap();
v.push_back(2).unwrap();
v.push_back(3).unwrap();
assert!(v.push_front(4).is_err());
assert!(v.push_back(4).is_err());
assert!(v.is_full());
}
#[test]
fn empty() {
let mut v: Deque<i32, 4> = Deque::new();
assert!(v.is_empty());
v.push_back(0).unwrap();
assert!(!v.is_empty());
v.push_front(1).unwrap();
assert!(!v.is_empty());
v.pop_front().unwrap();
v.pop_front().unwrap();
assert!(v.pop_front().is_none());
assert!(v.pop_back().is_none());
assert!(v.is_empty());
}
#[test]
fn front_back() {
let mut v: Deque<i32, 4> = Deque::new();
assert_eq!(v.front(), None);
assert_eq!(v.front_mut(), None);
assert_eq!(v.back(), None);
assert_eq!(v.back_mut(), None);
v.push_back(4).unwrap();
assert_eq!(v.front(), Some(&4));
assert_eq!(v.front_mut(), Some(&mut 4));
assert_eq!(v.back(), Some(&4));
assert_eq!(v.back_mut(), Some(&mut 4));
v.push_front(3).unwrap();
assert_eq!(v.front(), Some(&3));
assert_eq!(v.front_mut(), Some(&mut 3));
assert_eq!(v.back(), Some(&4));
assert_eq!(v.back_mut(), Some(&mut 4));
v.pop_back().unwrap();
assert_eq!(v.front(), Some(&3));
assert_eq!(v.front_mut(), Some(&mut 3));
assert_eq!(v.back(), Some(&3));
assert_eq!(v.back_mut(), Some(&mut 3));
v.pop_front().unwrap();
assert_eq!(v.front(), None);
assert_eq!(v.front_mut(), None);
assert_eq!(v.back(), None);
assert_eq!(v.back_mut(), None);
}
#[test]
fn iter() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_back(1).unwrap();
v.push_front(2).unwrap();
v.push_front(3).unwrap();
v.pop_back().unwrap();
v.push_front(4).unwrap();
let mut items = v.iter();
assert_eq!(items.next(), Some(&4));
assert_eq!(items.next(), Some(&3));
assert_eq!(items.next(), Some(&2));
assert_eq!(items.next(), Some(&0));
assert_eq!(items.next(), None);
}
#[test]
fn iter_mut() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_back(1).unwrap();
v.push_front(2).unwrap();
v.push_front(3).unwrap();
v.pop_back().unwrap();
v.push_front(4).unwrap();
let mut items = v.iter_mut();
assert_eq!(items.next(), Some(&mut 4));
assert_eq!(items.next(), Some(&mut 3));
assert_eq!(items.next(), Some(&mut 2));
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next(), None);
}
#[test]
fn iter_move() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_back(1).unwrap();
v.push_back(2).unwrap();
v.push_back(3).unwrap();
let mut items = v.into_iter();
assert_eq!(items.next(), Some(0));
assert_eq!(items.next(), Some(1));
assert_eq!(items.next(), Some(2));
assert_eq!(items.next(), Some(3));
assert_eq!(items.next(), None);
}
#[test]
fn iter_move_drop() {
droppable!();
{
let mut deque: Deque<Droppable, 2> = Deque::new();
deque.push_back(Droppable::new()).ok().unwrap();
deque.push_back(Droppable::new()).ok().unwrap();
let mut items = deque.into_iter();
// Move all
let _ = items.next();
let _ = items.next();
}
assert_eq!(Droppable::count(), 0);
{
let mut deque: Deque<Droppable, 2> = Deque::new();
deque.push_back(Droppable::new()).ok().unwrap();
deque.push_back(Droppable::new()).ok().unwrap();
let _items = deque.into_iter();
// Move none
}
assert_eq!(Droppable::count(), 0);
{
let mut deque: Deque<Droppable, 2> = Deque::new();
deque.push_back(Droppable::new()).ok().unwrap();
deque.push_back(Droppable::new()).ok().unwrap();
let mut items = deque.into_iter();
let _ = items.next(); // Move partly
}
assert_eq!(Droppable::count(), 0);
}
#[test]
fn push_and_pop() {
let mut q: Deque<i32, 4> = Deque::new();
assert_eq!(q.len(), 0);
assert_eq!(q.pop_front(), None);
assert_eq!(q.pop_back(), None);
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.pop_back(), Some(0));
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
q.push_back(1).unwrap();
q.push_front(2).unwrap();
q.push_front(3).unwrap();
assert_eq!(q.len(), 4);
// deque contains: 3 2 0 1
assert_eq!(q.pop_front(), Some(3));
assert_eq!(q.len(), 3);
assert_eq!(q.pop_front(), Some(2));
assert_eq!(q.len(), 2);
assert_eq!(q.pop_back(), Some(1));
assert_eq!(q.len(), 1);
assert_eq!(q.pop_front(), Some(0));
assert_eq!(q.len(), 0);
// deque is now empty
assert_eq!(q.pop_front(), None);
assert_eq!(q.pop_back(), None);
assert_eq!(q.len(), 0);
}
#[test]
fn as_slices() {
let mut q: Deque<i32, 4> = Deque::new();
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
q.push_back(1).unwrap();
q.push_back(2).unwrap();
q.push_back(3).unwrap();
assert_eq!(q.as_slices(), (&[0, 1, 2, 3][..], &[][..]));
q.pop_front().unwrap();
assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[][..]));
q.push_back(4).unwrap();
assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[4][..]));
}
#[test]
fn clear() {
let mut q: Deque<i32, 4> = Deque::new();
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
q.push_back(1).unwrap();
q.push_back(2).unwrap();
q.push_back(3).unwrap();
assert_eq!(q.len(), 4);
q.clear();
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
assert_eq!(q.len(), 1);
}
}

578
vendor/heapless/src/histbuf.rs vendored Normal file

@@ -0,0 +1,578 @@
use core::fmt;
use core::mem::MaybeUninit;
use core::ops::Deref;
use core::ptr;
use core::slice;
/// A "history buffer", similar to a write-only ring buffer of fixed length.
///
/// This buffer keeps a fixed number of elements. On write, the oldest element
/// is overwritten. Thus, the buffer is useful to keep a history of values with
/// some desired depth, and for example calculate a rolling average.
///
/// # Examples
/// ```
/// use heapless::HistoryBuffer;
///
/// // Initialize a new buffer with 8 elements.
/// let mut buf = HistoryBuffer::<_, 8>::new();
///
/// // Starts with no data
/// assert_eq!(buf.recent(), None);
///
/// buf.write(3);
/// buf.write(5);
/// buf.extend(&[4, 4]);
///
/// // The most recently written element is a four.
/// assert_eq!(buf.recent(), Some(&4));
///
/// // To access all elements in an unspecified order, use `as_slice()`.
/// for el in buf.as_slice() { println!("{:?}", el); }
///
/// // Now we can prepare an average of all values, which comes out to 4.
/// let avg = buf.as_slice().iter().sum::<usize>() / buf.len();
/// assert_eq!(avg, 4);
/// ```
pub struct HistoryBuffer<T, const N: usize> {
data: [MaybeUninit<T>; N],
write_at: usize,
filled: bool,
}
impl<T, const N: usize> HistoryBuffer<T, N> {
const INIT: MaybeUninit<T> = MaybeUninit::uninit();
/// Constructs a new history buffer.
///
/// The construction of a `HistoryBuffer` works in `const` contexts.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// // Allocate a 16-element buffer on the stack
/// let x: HistoryBuffer<u8, 16> = HistoryBuffer::new();
/// assert_eq!(x.len(), 0);
/// ```
#[inline]
pub const fn new() -> Self {
// Const assert
crate::sealed::greater_than_0::<N>();
Self {
data: [Self::INIT; N],
write_at: 0,
filled: false,
}
}
/// Clears the buffer, removing all values.
pub fn clear(&mut self) {
*self = Self::new();
}
}
impl<T, const N: usize> HistoryBuffer<T, N>
where
T: Copy + Clone,
{
/// Constructs a new history buffer, where every element is the given value.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// // Allocate a 16-element buffer on the stack
/// let mut x: HistoryBuffer<u8, 16> = HistoryBuffer::new_with(4);
/// // All elements are four
/// assert_eq!(x.as_slice(), [4; 16]);
/// ```
#[inline]
pub fn new_with(t: T) -> Self {
Self {
data: [MaybeUninit::new(t); N],
write_at: 0,
filled: true,
}
}
/// Clears the buffer, replacing every element with the given value.
pub fn clear_with(&mut self, t: T) {
*self = Self::new_with(t);
}
}
impl<T, const N: usize> HistoryBuffer<T, N> {
/// Returns the current fill level of the buffer.
#[inline]
pub fn len(&self) -> usize {
if self.filled {
N
} else {
self.write_at
}
}
/// Returns the capacity of the buffer, which is the length of the
/// underlying backing array.
#[inline]
pub fn capacity(&self) -> usize {
N
}
/// Writes an element to the buffer, overwriting the oldest value.
pub fn write(&mut self, t: T) {
if self.filled {
// Drop the old before we overwrite it.
unsafe { ptr::drop_in_place(self.data[self.write_at].as_mut_ptr()) }
}
self.data[self.write_at] = MaybeUninit::new(t);
self.write_at += 1;
if self.write_at == self.capacity() {
self.write_at = 0;
self.filled = true;
}
}
/// Clones and writes all elements in a slice to the buffer.
///
/// If the slice is longer than the buffer, only the last `self.len()`
/// elements will actually be stored.
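///
/// An illustrative sketch (not from the upstream docs):
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut buf: HistoryBuffer<u8, 4> = HistoryBuffer::new();
/// buf.extend_from_slice(&[1, 2, 3, 4, 5, 6]);
///
/// // Only the last 4 writes survive; oldest to newest they are 3, 4, 5, 6.
/// assert_eq!(buf.recent(), Some(&6));
/// assert_eq!(buf.as_slices(), (&[3, 4][..], &[5, 6][..]));
/// ```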
pub fn extend_from_slice(&mut self, other: &[T])
where
T: Clone,
{
for item in other {
self.write(item.clone());
}
}
/// Returns a reference to the most recently written value.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut x: HistoryBuffer<u8, 16> = HistoryBuffer::new();
/// x.write(4);
/// x.write(10);
/// assert_eq!(x.recent(), Some(&10));
/// ```
pub fn recent(&self) -> Option<&T> {
if self.write_at == 0 {
if self.filled {
Some(unsafe { &*self.data[self.capacity() - 1].as_ptr() })
} else {
None
}
} else {
Some(unsafe { &*self.data[self.write_at - 1].as_ptr() })
}
}
/// Returns the array slice backing the buffer, without keeping track
/// of the write position. Therefore, the element order is unspecified.
pub fn as_slice(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.len()) }
}
/// Returns a pair of slices which contain, in order, the contents of the buffer.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
/// buffer.extend([0, 0, 0]);
/// buffer.extend([1, 2, 3, 4, 5, 6]);
/// assert_eq!(buffer.as_slices(), (&[1, 2, 3][..], &[4, 5, 6][..]));
/// ```
pub fn as_slices(&self) -> (&[T], &[T]) {
let buffer = self.as_slice();
if !self.filled {
(buffer, &[])
} else {
(&buffer[self.write_at..], &buffer[..self.write_at])
}
}
/// Returns an iterator for iterating over the buffer from oldest to newest.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
/// buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]);
/// let expected = [1, 2, 3, 4, 5, 6];
/// for (x, y) in buffer.oldest_ordered().zip(expected.iter()) {
/// assert_eq!(x, y)
/// }
///
/// ```
pub fn oldest_ordered<'a>(&'a self) -> OldestOrdered<'a, T, N> {
if self.filled {
OldestOrdered {
buf: self,
cur: self.write_at,
wrapped: false,
}
} else {
// special case: act like we wrapped already to handle empty buffer.
OldestOrdered {
buf: self,
cur: 0,
wrapped: true,
}
}
}
}
impl<T, const N: usize> Extend<T> for HistoryBuffer<T, N> {
fn extend<I>(&mut self, iter: I)
where
I: IntoIterator<Item = T>,
{
for item in iter.into_iter() {
self.write(item);
}
}
}
impl<'a, T, const N: usize> Extend<&'a T> for HistoryBuffer<T, N>
where
T: 'a + Clone,
{
fn extend<I>(&mut self, iter: I)
where
I: IntoIterator<Item = &'a T>,
{
self.extend(iter.into_iter().cloned())
}
}
impl<T, const N: usize> Clone for HistoryBuffer<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut ret = Self::new();
for (new, old) in ret.data.iter_mut().zip(self.as_slice()) {
new.write(old.clone());
}
ret.filled = self.filled;
ret.write_at = self.write_at;
ret
}
}
impl<T, const N: usize> Drop for HistoryBuffer<T, N> {
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(ptr::slice_from_raw_parts_mut(
self.data.as_mut_ptr() as *mut T,
self.len(),
))
}
}
}
impl<T, const N: usize> Deref for HistoryBuffer<T, N> {
type Target = [T];
fn deref(&self) -> &[T] {
self.as_slice()
}
}
impl<T, const N: usize> AsRef<[T]> for HistoryBuffer<T, N> {
#[inline]
fn as_ref(&self) -> &[T] {
self
}
}
impl<T, const N: usize> fmt::Debug for HistoryBuffer<T, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<[T] as fmt::Debug>::fmt(self, f)
}
}
impl<T, const N: usize> Default for HistoryBuffer<T, N> {
fn default() -> Self {
Self::new()
}
}
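// Equality is defined over the logical (oldest-to-newest) contents, so two buffers
// holding the same history compare equal even if their internal write positions
// differ. An illustrative sketch (not part of the upstream tests):
//
//     let mut a: heapless::HistoryBuffer<u8, 4> = heapless::HistoryBuffer::new();
//     let mut b: heapless::HistoryBuffer<u8, 4> = heapless::HistoryBuffer::new();
//     a.extend([9, 1, 2, 3, 4]); // wraps around; logical contents are [1, 2, 3, 4]
//     b.extend([1, 2, 3, 4]);
//     assert_eq!(a, b);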
impl<T, const N: usize> PartialEq for HistoryBuffer<T, N>
where
T: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.oldest_ordered().eq(other.oldest_ordered())
}
}
/// An iterator on the underlying buffer ordered from oldest data to newest
#[derive(Clone)]
pub struct OldestOrdered<'a, T, const N: usize> {
buf: &'a HistoryBuffer<T, N>,
cur: usize,
wrapped: bool,
}
impl<'a, T, const N: usize> Iterator for OldestOrdered<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<&'a T> {
if self.cur == self.buf.len() && self.buf.filled {
// roll-over
self.cur = 0;
self.wrapped = true;
}
if self.cur == self.buf.write_at && self.wrapped {
return None;
}
let item = &self.buf[self.cur];
self.cur += 1;
Some(item)
}
}
#[cfg(test)]
mod tests {
use crate::HistoryBuffer;
use core::fmt::Debug;
use core::sync::atomic::{AtomicUsize, Ordering};
#[test]
fn new() {
let x: HistoryBuffer<u8, 4> = HistoryBuffer::new_with(1);
assert_eq!(x.len(), 4);
assert_eq!(x.as_slice(), [1; 4]);
assert_eq!(*x, [1; 4]);
let x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), []);
}
#[test]
fn write() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
x.write(1);
x.write(4);
assert_eq!(x.as_slice(), [1, 4]);
x.write(5);
x.write(6);
x.write(10);
assert_eq!(x.as_slice(), [10, 4, 5, 6]);
x.extend([11, 12].iter());
assert_eq!(x.as_slice(), [10, 11, 12, 6]);
}
#[test]
fn clear() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new_with(1);
x.clear();
assert_eq!(x.as_slice(), []);
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
x.clear_with(1);
assert_eq!(x.as_slice(), [1; 4]);
}
#[test]
fn clone() {
let mut x: HistoryBuffer<u8, 3> = HistoryBuffer::new();
for i in 0..10 {
assert_eq!(x.as_slice(), x.clone().as_slice());
x.write(i);
}
// Records number of clones locally and globally.
static GLOBAL: AtomicUsize = AtomicUsize::new(0);
#[derive(Default, PartialEq, Debug)]
struct InstrumentedClone(usize);
impl Clone for InstrumentedClone {
fn clone(&self) -> Self {
GLOBAL.fetch_add(1, Ordering::Relaxed);
Self(self.0 + 1)
}
}
let mut y: HistoryBuffer<InstrumentedClone, 2> = HistoryBuffer::new();
let _ = y.clone();
assert_eq!(GLOBAL.load(Ordering::Relaxed), 0);
y.write(InstrumentedClone(0));
assert_eq!(GLOBAL.load(Ordering::Relaxed), 0);
assert_eq!(y.clone().as_slice(), [InstrumentedClone(1)]);
assert_eq!(GLOBAL.load(Ordering::Relaxed), 1);
y.write(InstrumentedClone(0));
assert_eq!(GLOBAL.load(Ordering::Relaxed), 1);
assert_eq!(
y.clone().as_slice(),
[InstrumentedClone(1), InstrumentedClone(1)]
);
assert_eq!(GLOBAL.load(Ordering::Relaxed), 3);
assert_eq!(
y.clone().clone().clone().as_slice(),
[InstrumentedClone(3), InstrumentedClone(3)]
);
assert_eq!(GLOBAL.load(Ordering::Relaxed), 9);
}
#[test]
fn recent() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.recent(), None);
x.write(1);
x.write(4);
assert_eq!(x.recent(), Some(&4));
x.write(5);
x.write(6);
x.write(10);
assert_eq!(x.recent(), Some(&10));
}
#[test]
fn as_slice() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), []);
x.extend([1, 2, 3, 4, 5].iter());
assert_eq!(x.as_slice(), [5, 2, 3, 4]);
}
/// Test whether .as_slices() behaves as expected.
#[test]
fn as_slices() {
let mut buffer: HistoryBuffer<u8, 4> = HistoryBuffer::new();
let mut extend_then_assert = |extend: &[u8], assert: (&[u8], &[u8])| {
buffer.extend(extend);
assert_eq!(buffer.as_slices(), assert);
};
extend_then_assert(b"a", (b"a", b""));
extend_then_assert(b"bcd", (b"abcd", b""));
extend_then_assert(b"efg", (b"d", b"efg"));
extend_then_assert(b"h", (b"efgh", b""));
extend_then_assert(b"123456", (b"34", b"56"));
}
/// Test whether .as_slices() and .oldest_ordered() produce elements in the same order.
#[test]
fn as_slices_equals_ordered() {
let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
for n in 0..20 {
buffer.write(n);
let (head, tail) = buffer.as_slices();
assert_eq_iter(
[head, tail].iter().copied().flatten(),
buffer.oldest_ordered(),
)
}
}
#[test]
fn ordered() {
// test on an empty buffer
let buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
let mut iter = buffer.oldest_ordered();
assert_eq!(iter.next(), None);
assert_eq!(iter.next(), None);
// test on an unfilled buffer
let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
buffer.extend([1, 2, 3]);
assert_eq!(buffer.len(), 3);
assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3]);
// test on a filled buffer
let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]);
assert_eq!(buffer.len(), 6);
assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3, 4, 5, 6]);
// comprehensive test all cases
for n in 0..50 {
const N: usize = 7;
let mut buffer: HistoryBuffer<u8, N> = HistoryBuffer::new();
buffer.extend(0..n);
assert_eq_iter(
buffer.oldest_ordered().copied(),
n.saturating_sub(N as u8)..n,
);
}
}
/// Compares two iterators item by item, making sure they stop at the same time.
fn assert_eq_iter<I: Eq + Debug>(
a: impl IntoIterator<Item = I>,
b: impl IntoIterator<Item = I>,
) {
let mut a = a.into_iter();
let mut b = b.into_iter();
let mut i = 0;
loop {
let a_item = a.next();
let b_item = b.next();
assert_eq!(a_item, b_item, "{}", i);
i += 1;
if b_item.is_none() {
break;
}
}
}
#[test]
fn partial_eq() {
let mut x: HistoryBuffer<u8, 3> = HistoryBuffer::new();
let mut y: HistoryBuffer<u8, 3> = HistoryBuffer::new();
assert_eq!(x, y);
x.write(1);
assert_ne!(x, y);
y.write(1);
assert_eq!(x, y);
for _ in 0..4 {
x.write(2);
assert_ne!(x, y);
for i in 0..5 {
x.write(i);
y.write(i);
}
assert_eq!(
x,
y,
"{:?} {:?}",
x.iter().collect::<Vec<_>>(),
y.iter().collect::<Vec<_>>()
);
}
}
}

1555
vendor/heapless/src/indexmap.rs vendored Normal file

File diff suppressed because it is too large

658
vendor/heapless/src/indexset.rs vendored Normal file

@@ -0,0 +1,658 @@
use crate::indexmap::{self, IndexMap};
use core::{
borrow::Borrow,
fmt,
hash::{BuildHasher, Hash},
iter::FromIterator,
};
use hash32::{BuildHasherDefault, FnvHasher};
/// An [`IndexSet`] using the default FNV hasher.
///
/// A list of all methods and traits available for `FnvIndexSet` can be found in
/// the [`IndexSet`] documentation.
///
/// # Examples
/// ```
/// use heapless::FnvIndexSet;
///
/// // A hash set with a capacity of 16 elements allocated on the stack
/// let mut books = FnvIndexSet::<_, 16>::new();
///
/// // Add some books.
/// books.insert("A Dance With Dragons").unwrap();
/// books.insert("To Kill a Mockingbird").unwrap();
/// books.insert("The Odyssey").unwrap();
/// books.insert("The Great Gatsby").unwrap();
///
/// // Check for a specific one.
/// if !books.contains("The Winds of Winter") {
/// println!("We have {} books, but The Winds of Winter ain't one.",
/// books.len());
/// }
///
/// // Remove a book.
/// books.remove("The Odyssey");
///
/// // Iterate over everything.
/// for book in &books {
/// println!("{}", book);
/// }
/// ```
pub type FnvIndexSet<T, const N: usize> = IndexSet<T, BuildHasherDefault<FnvHasher>, N>;
/// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/2/indexmap/set/struct.IndexSet.html).
///
/// Note that you cannot use `IndexSet` directly, since it is generic around the hashing algorithm
/// in use. Pick a concrete instantiation like [`FnvIndexSet`] instead
/// or create your own.
///
/// Note that the capacity of the `IndexSet` must be a power of 2.
///
/// # Examples
/// Since `IndexSet` cannot be used directly, we're using its `FnvIndexSet` instantiation
/// for this example.
///
/// ```
/// use heapless::FnvIndexSet;
///
/// // A hash set with a capacity of 16 elements allocated on the stack
/// let mut books = FnvIndexSet::<_, 16>::new();
///
/// // Add some books.
/// books.insert("A Dance With Dragons").unwrap();
/// books.insert("To Kill a Mockingbird").unwrap();
/// books.insert("The Odyssey").unwrap();
/// books.insert("The Great Gatsby").unwrap();
///
/// // Check for a specific one.
/// if !books.contains("The Winds of Winter") {
/// println!("We have {} books, but The Winds of Winter ain't one.",
/// books.len());
/// }
///
/// // Remove a book.
/// books.remove("The Odyssey");
///
/// // Iterate over everything.
/// for book in &books {
/// println!("{}", book);
/// }
/// ```
pub struct IndexSet<T, S, const N: usize> {
map: IndexMap<T, (), S, N>,
}
impl<T, S, const N: usize> IndexSet<T, BuildHasherDefault<S>, N> {
/// Creates an empty `IndexSet`
pub const fn new() -> Self {
IndexSet {
map: IndexMap::new(),
}
}
}
impl<T, S, const N: usize> IndexSet<T, S, N> {
/// Returns the number of elements the set can hold
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set = FnvIndexSet::<i32, 16>::new();
/// assert_eq!(set.capacity(), 16);
/// ```
pub fn capacity(&self) -> usize {
self.map.capacity()
}
/// Return an iterator over the values of the set, in insertion order
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set = FnvIndexSet::<_, 16>::new();
/// set.insert("a").unwrap();
/// set.insert("b").unwrap();
///
/// // Will print in insertion order: a, b
/// for x in set.iter() {
/// println!("{}", x);
/// }
/// ```
pub fn iter(&self) -> Iter<'_, T> {
Iter {
iter: self.map.iter(),
}
}
/// Get the first value
///
/// Computes in **O(1)** time
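///
/// An illustrative sketch (not from the upstream docs); insertion order is preserved:
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// assert_eq!(set.first(), Some(&1));
/// assert_eq!(set.last(), Some(&3));
/// ```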
pub fn first(&self) -> Option<&T> {
self.map.first().map(|(k, _v)| k)
}
/// Get the last value
///
/// Computes in **O(1)** time
pub fn last(&self) -> Option<&T> {
self.map.last().map(|(k, _v)| k)
}
/// Returns the number of elements in the set.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// assert_eq!(v.len(), 0);
/// v.insert(1).unwrap();
/// assert_eq!(v.len(), 1);
/// ```
pub fn len(&self) -> usize {
self.map.len()
}
/// Returns `true` if the set contains no elements.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// assert!(v.is_empty());
/// v.insert(1).unwrap();
/// assert!(!v.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Clears the set, removing all values.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// v.insert(1).unwrap();
/// v.clear();
/// assert!(v.is_empty());
/// ```
pub fn clear(&mut self) {
self.map.clear()
}
}
impl<T, S, const N: usize> IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
{
/// Visits the values representing the difference, i.e. the values that are in `self` but not in
/// `other`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Can be seen as `a - b`.
/// for x in a.difference(&b) {
/// println!("{}", x); // Print 1
/// }
///
/// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect();
/// assert_eq!(diff, [1].iter().collect::<FnvIndexSet<_, 16>>());
///
/// // Note that difference is not symmetric,
/// // and `b - a` means something else:
/// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect();
/// assert_eq!(diff, [4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn difference<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> Difference<'a, T, S2, N2>
where
S2: BuildHasher,
{
Difference {
iter: self.iter(),
other,
}
}
/// Visits the values representing the symmetric difference, i.e. the values that are in `self`
/// or in `other` but not in both.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 1, 4 in that order.
/// for x in a.symmetric_difference(&b) {
/// println!("{}", x);
/// }
///
/// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect();
/// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect();
///
/// assert_eq!(diff1, diff2);
/// assert_eq!(diff1, [1, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn symmetric_difference<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> impl Iterator<Item = &'a T>
where
S2: BuildHasher,
{
self.difference(other).chain(other.difference(self))
}
/// Visits the values representing the intersection, i.e. the values that are both in `self` and
/// `other`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 2, 3 in that order.
/// for x in a.intersection(&b) {
/// println!("{}", x);
/// }
///
/// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect();
/// assert_eq!(intersection, [2, 3].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn intersection<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> Intersection<'a, T, S2, N2>
where
S2: BuildHasher,
{
Intersection {
iter: self.iter(),
other,
}
}
/// Visits the values representing the union, i.e. all the values in `self` or `other`, without
/// duplicates.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 1, 2, 3, 4 in that order.
/// for x in a.union(&b) {
/// println!("{}", x);
/// }
///
/// let union: FnvIndexSet<_, 16> = a.union(&b).collect();
/// assert_eq!(union, [1, 2, 3, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn union<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> impl Iterator<Item = &'a T>
where
S2: BuildHasher,
{
self.iter().chain(other.difference(self))
}
/// Returns `true` if the set contains a value.
///
/// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the
/// borrowed form must match those for the value type.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// assert_eq!(set.contains(&1), true);
/// assert_eq!(set.contains(&4), false);
/// ```
pub fn contains<Q>(&self, value: &Q) -> bool
where
T: Borrow<Q>,
Q: ?Sized + Eq + Hash,
{
self.map.contains_key(value)
}
/// Returns `true` if `self` has no elements in common with `other`. This is equivalent to
/// checking for an empty intersection.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(a.is_disjoint(&b), true);
/// b.insert(4).unwrap();
/// assert_eq!(a.is_disjoint(&b), true);
/// b.insert(1).unwrap();
/// assert_eq!(a.is_disjoint(&b), false);
/// ```
pub fn is_disjoint<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
S2: BuildHasher,
{
self.iter().all(|v| !other.contains(v))
}
/// Returns `true` if the set is a subset of another, i.e. `other` contains at least all the
/// values in `self`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.is_subset(&sup), true);
/// set.insert(2).unwrap();
/// assert_eq!(set.is_subset(&sup), true);
/// set.insert(4).unwrap();
/// assert_eq!(set.is_subset(&sup), false);
/// ```
pub fn is_subset<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
S2: BuildHasher,
{
self.iter().all(|v| other.contains(v))
}
/// Returns `true` if the set is a superset of another, i.e. `self` contains at least all the
/// values in `other`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.is_superset(&sub), false);
///
/// set.insert(0).unwrap();
/// set.insert(1).unwrap();
/// assert_eq!(set.is_superset(&sub), false);
///
/// set.insert(2).unwrap();
/// assert_eq!(set.is_superset(&sub), true);
/// ```
pub fn is_superset<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
S2: BuildHasher,
{
other.is_subset(self)
}
/// Adds a value to the set.
///
/// If the set did not have this value present, `true` is returned.
///
/// If the set did have this value present, `false` is returned.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.insert(2).unwrap(), true);
/// assert_eq!(set.insert(2).unwrap(), false);
/// assert_eq!(set.len(), 1);
/// ```
pub fn insert(&mut self, value: T) -> Result<bool, T> {
self.map
.insert(value, ())
.map(|old| old.is_none())
.map_err(|(k, _)| k)
}
/// Removes a value from the set. Returns `true` if the value was present in the set.
///
/// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the
/// borrowed form must match those for the value type.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// set.insert(2).unwrap();
/// assert_eq!(set.remove(&2), true);
/// assert_eq!(set.remove(&2), false);
/// ```
pub fn remove<Q>(&mut self, value: &Q) -> bool
where
T: Borrow<Q>,
Q: ?Sized + Eq + Hash,
{
self.map.remove(value).is_some()
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` for which `f(&e)` returns `false`.
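///
/// An illustrative sketch (not from the upstream docs):
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set: FnvIndexSet<i32, 16> = [1, 2, 3, 4, 5, 6].iter().cloned().collect();
/// set.retain(|&x| x % 2 == 0);
/// assert_eq!(set.len(), 3);
/// assert!(set.contains(&2) && set.contains(&4) && set.contains(&6));
/// ```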
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
self.map.retain(move |k, _| f(k));
}
}
impl<T, S, const N: usize> Clone for IndexSet<T, S, N>
where
T: Clone,
S: Clone,
{
fn clone(&self) -> Self {
Self {
map: self.map.clone(),
}
}
}
impl<T, S, const N: usize> fmt::Debug for IndexSet<T, S, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_set().entries(self.iter()).finish()
}
}
impl<T, S, const N: usize> Default for IndexSet<T, S, N>
where
S: Default,
{
fn default() -> Self {
IndexSet {
map: <_>::default(),
}
}
}
impl<T, S1, S2, const N1: usize, const N2: usize> PartialEq<IndexSet<T, S2, N2>>
for IndexSet<T, S1, N1>
where
T: Eq + Hash,
S1: BuildHasher,
S2: BuildHasher,
{
fn eq(&self, other: &IndexSet<T, S2, N2>) -> bool {
self.len() == other.len() && self.is_subset(other)
}
}
impl<T, S, const N: usize> Extend<T> for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
{
fn extend<I>(&mut self, iterable: I)
where
I: IntoIterator<Item = T>,
{
self.map.extend(iterable.into_iter().map(|k| (k, ())))
}
}
impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet<T, S, N>
where
T: 'a + Eq + Hash + Copy,
S: BuildHasher,
{
fn extend<I>(&mut self, iterable: I)
where
I: IntoIterator<Item = &'a T>,
{
self.extend(iterable.into_iter().cloned())
}
}
impl<T, S, const N: usize> FromIterator<T> for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher + Default,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let mut set = IndexSet::default();
set.extend(iter);
set
}
}
impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
{
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// An iterator over the items of a [`IndexSet`].
///
/// This `struct` is created by the [`iter`](IndexSet::iter) method on [`IndexSet`]. See its
/// documentation for more.
pub struct Iter<'a, T> {
iter: indexmap::Iter<'a, T, ()>,
}
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|(k, _)| k)
}
}
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
}
}
}
pub struct Difference<'a, T, S, const N: usize>
where
S: BuildHasher,
T: Eq + Hash,
{
iter: Iter<'a, T>,
other: &'a IndexSet<T, S, N>,
}
impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N>
where
S: BuildHasher,
T: Eq + Hash,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
loop {
let elt = self.iter.next()?;
if !self.other.contains(elt) {
return Some(elt);
}
}
}
}
pub struct Intersection<'a, T, S, const N: usize>
where
S: BuildHasher,
T: Eq + Hash,
{
iter: Iter<'a, T>,
other: &'a IndexSet<T, S, N>,
}
impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N>
where
S: BuildHasher,
T: Eq + Hash,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
loop {
let elt = self.iter.next()?;
if self.other.contains(elt) {
return Some(elt);
}
}
}
}

145
vendor/heapless/src/lib.rs vendored Normal file

@@ -0,0 +1,145 @@
//! `static` friendly data structures that don't require dynamic memory allocation
//!
//! The core principle behind `heapless` is that its data structures are backed by a *static* memory
//! allocation. For example, you can think of `heapless::Vec` as an alternative version of
//! `std::Vec` with a fixed capacity that can't be re-allocated on the fly (e.g. via `push`).
//!
//! All `heapless` data structures store their memory allocation *inline* and specify their capacity
//! via their type parameter `N`. This means that you can instantiate a `heapless` data structure on
//! the stack, in a `static` variable, or even in the heap.
//!
//! ```
//! use heapless::Vec; // fixed capacity `std::Vec`
//!
//! // on the stack
//! let mut xs: Vec<u8, 8> = Vec::new(); // can hold up to 8 elements
//! xs.push(42).unwrap();
//! assert_eq!(xs.pop(), Some(42));
//!
//! // in a `static` variable
//! static mut XS: Vec<u8, 8> = Vec::new();
//!
//! let xs = unsafe { &mut XS };
//!
//! xs.push(42);
//! assert_eq!(xs.pop(), Some(42));
//!
//! // in the heap (though kind of pointless because no reallocation)
//! let mut ys: Box<Vec<u8, 8>> = Box::new(Vec::new());
//! ys.push(42).unwrap();
//! assert_eq!(ys.pop(), Some(42));
//! ```
//!
//! Because they have fixed capacity, `heapless` data structures don't implicitly reallocate. This
//! means that operations like `heapless::Vec.push` are *truly* constant time rather than amortized
//! constant time with potentially unbounded (depends on the allocator) worst case execution time
//! (which is bad / unacceptable for hard real time applications).
//!
//! `heapless` data structures don't use a memory allocator which means no risk of an uncatchable
//! Out Of Memory (OOM) condition while performing operations on them. It's certainly possible to
//! run out of capacity while growing `heapless` data structures, but the API lets you handle this
//! possibility by returning a `Result` on operations that may exhaust the capacity of the data
//! structure.
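//!
//! For example, a full `Vec` hands the rejected element back instead of aborting; a minimal
//! sketch:
//!
//! ```
//! use heapless::Vec;
//!
//! let mut xs: Vec<u8, 2> = Vec::new();
//! xs.push(1).unwrap();
//! xs.push(2).unwrap();
//!
//! // the vector is at capacity; the element is returned to the caller instead of panicking
//! assert_eq!(xs.push(3), Err(3));
//! ```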
//!
//! List of currently implemented data structures:
//!
#![cfg_attr(
any(arm_llsc, target_arch = "x86"),
doc = "- [`Arc`](pool::arc::Arc) -- like `std::sync::Arc` but backed by a lock-free memory pool rather than `#[global_allocator]`"
)]
#![cfg_attr(
any(arm_llsc, target_arch = "x86"),
doc = "- [`Box`](pool::boxed::Box) -- like `std::boxed::Box` but backed by a lock-free memory pool rather than `#[global_allocator]`"
)]
//! - [`BinaryHeap`] -- priority queue
//! - [`IndexMap`] -- hash table
//! - [`IndexSet`] -- hash set
//! - [`LinearMap`]
#![cfg_attr(
any(arm_llsc, target_arch = "x86"),
doc = "- [`Object`](pool::object::Object) -- objects managed by an object pool"
)]
//! - [`String`]
//! - [`Vec`]
//! - [`mpmc::Q*`](mpmc) -- multiple producer multiple consumer lock-free queue
//! - [`spsc::Queue`] -- single producer single consumer lock-free queue
//!
//! # Optional Features
//!
//! The `heapless` crate provides the following optional Cargo features:
//!
//! - `ufmt`: Implement [`ufmt_write::uWrite`] for `String<N>` and `Vec<u8, N>`
//!
//! [`ufmt_write::uWrite`]: https://docs.rs/ufmt-write/
//!
//! # Minimum Supported Rust Version (MSRV)
//!
//! This crate does *not* have a Minimum Supported Rust Version (MSRV) and may make use of language
//! features and APIs in the standard library available in the latest stable Rust version.
//!
//! In other words, changes in the Rust version requirement of this crate are not considered semver
//! breaking changes and may occur in patch version releases.
#![cfg_attr(docsrs, feature(doc_cfg), feature(doc_auto_cfg))]
#![cfg_attr(not(test), no_std)]
#![deny(missing_docs)]
#![deny(warnings)]
pub use binary_heap::BinaryHeap;
pub use deque::Deque;
pub use histbuf::{HistoryBuffer, OldestOrdered};
pub use indexmap::{
Bucket, Entry, FnvIndexMap, IndexMap, Iter as IndexMapIter, IterMut as IndexMapIterMut,
Keys as IndexMapKeys, OccupiedEntry, Pos, VacantEntry, Values as IndexMapValues,
ValuesMut as IndexMapValuesMut,
};
pub use indexset::{FnvIndexSet, IndexSet, Iter as IndexSetIter};
pub use linear_map::LinearMap;
pub use string::String;
pub use vec::Vec;
#[macro_use]
#[cfg(test)]
mod test_helpers;
mod deque;
mod histbuf;
mod indexmap;
mod indexset;
mod linear_map;
mod string;
mod vec;
#[cfg(feature = "serde")]
mod de;
#[cfg(feature = "serde")]
mod ser;
pub mod binary_heap;
#[cfg(feature = "defmt-03")]
mod defmt;
#[cfg(any(
// assume we have all atomics available if we're using portable-atomic
feature = "portable-atomic",
// target has native atomic CAS (mpmc_large requires usize, otherwise just u8)
all(feature = "mpmc_large", target_has_atomic = "ptr"),
all(not(feature = "mpmc_large"), target_has_atomic = "8")
))]
pub mod mpmc;
#[cfg(any(arm_llsc, target_arch = "x86"))]
pub mod pool;
pub mod sorted_linked_list;
#[cfg(any(
// assume we have all atomics available if we're using portable-atomic
feature = "portable-atomic",
// target has native atomic CAS. Note this is too restrictive, spsc requires load/store only, not CAS.
// This should be `cfg(target_has_atomic_load_store)`, but that's not stable yet.
target_has_atomic = "ptr",
// or the current target is in a list in build.rs of targets known to have load/store but no CAS.
has_atomic_load_store
))]
pub mod spsc;
#[cfg(feature = "ufmt")]
mod ufmt;
mod sealed;

555
vendor/heapless/src/linear_map.rs vendored Normal file

@@ -0,0 +1,555 @@
use crate::Vec;
use core::{borrow::Borrow, fmt, iter::FromIterator, mem, ops, slice};
/// A fixed capacity map / dictionary that performs lookups via linear search
///
/// Note that since this map doesn't use hashing, most operations are **O(N)** instead of **O(1)**
pub struct LinearMap<K, V, const N: usize> {
pub(crate) buffer: Vec<(K, V), N>,
}
impl<K, V, const N: usize> LinearMap<K, V, N> {
/// Creates an empty `LinearMap`
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// // allocate the map on the stack
/// let mut map: LinearMap<&str, isize, 8> = LinearMap::new();
///
/// // allocate the map in a static variable
/// static mut MAP: LinearMap<&str, isize, 8> = LinearMap::new();
/// ```
pub const fn new() -> Self {
Self { buffer: Vec::new() }
}
}
impl<K, V, const N: usize> LinearMap<K, V, N>
where
K: Eq,
{
/// Returns the number of elements that the map can hold
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let map: LinearMap<&str, isize, 8> = LinearMap::new();
/// assert_eq!(map.capacity(), 8);
/// ```
pub fn capacity(&self) -> usize {
N
}
/// Clears the map, removing all key-value pairs
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// map.clear();
/// assert!(map.is_empty());
/// ```
pub fn clear(&mut self) {
self.buffer.clear()
}
/// Returns true if the map contains a value for the specified key.
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key(&self, key: &K) -> bool {
self.get(key).is_some()
}
/// Returns a reference to the value corresponding to the key
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Eq + ?Sized,
{
self.iter()
.find(|&(k, _)| k.borrow() == key)
.map(|(_, v)| v)
}
/// Returns a mutable reference to the value corresponding to the key
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Eq + ?Sized,
{
self.iter_mut()
.find(|&(k, _)| k.borrow() == key)
.map(|(_, v)| v)
}
/// Returns the number of elements in this map
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut a: LinearMap<_, _, 8> = LinearMap::new();
/// assert_eq!(a.len(), 0);
/// a.insert(1, "a").unwrap();
/// assert_eq!(a.len(), 1);
/// ```
pub fn len(&self) -> usize {
self.buffer.len()
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old value is returned.
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// assert_eq!(map.insert(37, "a").unwrap(), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b").unwrap();
/// assert_eq!(map.insert(37, "c").unwrap(), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, mut value: V) -> Result<Option<V>, (K, V)> {
if let Some((_, v)) = self.iter_mut().find(|&(k, _)| *k == key) {
mem::swap(v, &mut value);
return Ok(Some(value));
}
self.buffer.push((key, value))?;
Ok(None)
}
/// Returns true if the map contains no elements
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut a: LinearMap<_, _, 8> = LinearMap::new();
/// assert!(a.is_empty());
/// a.insert(1, "a").unwrap();
/// assert!(!a.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// An iterator visiting all key-value pairs in arbitrary order.
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for (key, val) in map.iter() {
/// println!("key: {} val: {}", key, val);
/// }
/// ```
pub fn iter(&self) -> Iter<'_, K, V> {
Iter {
iter: self.buffer.as_slice().iter(),
}
}
/// An iterator visiting all key-value pairs in arbitrary order, with mutable references to the
/// values
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val = 2;
/// }
///
/// for (key, val) in &map {
/// println!("key: {} val: {}", key, val);
/// }
/// ```
pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
IterMut {
iter: self.buffer.as_mut_slice().iter_mut(),
}
}
/// An iterator visiting all keys in arbitrary order
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for key in map.keys() {
/// println!("{}", key);
/// }
/// ```
pub fn keys(&self) -> impl Iterator<Item = &K> {
self.iter().map(|(k, _)| k)
}
/// Removes a key from the map, returning the value at the key if the key was previously in the
/// map
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Eq + ?Sized,
{
let idx = self
.keys()
.enumerate()
.find(|&(_, k)| k.borrow() == key)
.map(|(idx, _)| idx);
idx.map(|idx| self.buffer.swap_remove(idx).1)
}
/// An iterator visiting all values in arbitrary order
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for val in map.values() {
/// println!("{}", val);
/// }
/// ```
pub fn values(&self) -> impl Iterator<Item = &V> {
self.iter().map(|(_, v)| v)
}
/// An iterator visiting all values mutably in arbitrary order
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for val in map.values_mut() {
/// *val += 10;
/// }
///
/// for val in map.values() {
/// println!("{}", val);
/// }
/// ```
pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> {
self.iter_mut().map(|(_, v)| v)
}
}
impl<'a, K, V, Q, const N: usize> ops::Index<&'a Q> for LinearMap<K, V, N>
where
K: Borrow<Q> + Eq,
Q: Eq + ?Sized,
{
type Output = V;
fn index(&self, key: &Q) -> &V {
self.get(key).expect("no entry found for key")
}
}
impl<'a, K, V, Q, const N: usize> ops::IndexMut<&'a Q> for LinearMap<K, V, N>
where
K: Borrow<Q> + Eq,
Q: Eq + ?Sized,
{
fn index_mut(&mut self, key: &Q) -> &mut V {
self.get_mut(key).expect("no entry found for key")
}
}
impl<K, V, const N: usize> Default for LinearMap<K, V, N>
where
K: Eq,
{
fn default() -> Self {
Self::new()
}
}
impl<K, V, const N: usize> Clone for LinearMap<K, V, N>
where
K: Eq + Clone,
V: Clone,
{
fn clone(&self) -> Self {
Self {
buffer: self.buffer.clone(),
}
}
}
impl<K, V, const N: usize> fmt::Debug for LinearMap<K, V, N>
where
K: Eq + fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<K, V, const N: usize> FromIterator<(K, V)> for LinearMap<K, V, N>
where
K: Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
{
let mut out = Self::new();
out.buffer.extend(iter);
out
}
}
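/// An owning iterator over the entries of a [`LinearMap`].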
pub struct IntoIter<K, V, const N: usize>
where
K: Eq,
{
inner: <Vec<(K, V), N> as IntoIterator>::IntoIter,
}
impl<K, V, const N: usize> Iterator for IntoIter<K, V, N>
where
K: Eq,
{
type Item = (K, V);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
}
impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap<K, V, N>
where
K: Eq,
{
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
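/// An iterator over the entries of a [`LinearMap`].
///
/// This `struct` is created by the [`iter`](LinearMap::iter) method on [`LinearMap`].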
pub struct Iter<'a, K, V> {
iter: slice::Iter<'a, (K, V)>,
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|&(ref k, ref v)| (k, v))
}
}
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
}
}
}
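/// A mutable iterator over the entries of a [`LinearMap`].
///
/// This `struct` is created by the [`iter_mut`](LinearMap::iter_mut) method on [`LinearMap`].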
pub struct IterMut<'a, K, V> {
iter: slice::IterMut<'a, (K, V)>,
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|&mut (ref k, ref mut v)| (k, v))
}
}
impl<K, V, const N: usize, const N2: usize> PartialEq<LinearMap<K, V, N2>> for LinearMap<K, V, N>
where
K: Eq,
V: PartialEq,
{
fn eq(&self, other: &LinearMap<K, V, N2>) -> bool {
self.len() == other.len()
&& self
.iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
impl<K, V, const N: usize> Eq for LinearMap<K, V, N>
where
K: Eq,
V: PartialEq,
{
}
#[cfg(test)]
mod test {
use crate::LinearMap;
#[test]
fn static_new() {
static mut _L: LinearMap<i32, i32, 8> = LinearMap::new();
}
#[test]
fn partial_eq() {
{
let mut a = LinearMap::<_, _, 1>::new();
a.insert("k1", "v1").unwrap();
let mut b = LinearMap::<_, _, 2>::new();
b.insert("k1", "v1").unwrap();
assert!(a == b);
b.insert("k2", "v2").unwrap();
assert!(a != b);
}
{
let mut a = LinearMap::<_, _, 2>::new();
a.insert("k1", "v1").unwrap();
a.insert("k2", "v2").unwrap();
let mut b = LinearMap::<_, _, 2>::new();
b.insert("k2", "v2").unwrap();
b.insert("k1", "v1").unwrap();
assert!(a == b);
}
}
#[test]
fn drop() {
droppable!();
{
let mut v: LinearMap<i32, Droppable, 2> = LinearMap::new();
v.insert(0, Droppable::new()).ok().unwrap();
v.insert(1, Droppable::new()).ok().unwrap();
v.remove(&1).unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: LinearMap<i32, Droppable, 2> = LinearMap::new();
v.insert(0, Droppable::new()).ok().unwrap();
v.insert(1, Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
}
}

325
vendor/heapless/src/mpmc.rs vendored Normal file

@@ -0,0 +1,325 @@
//! A fixed capacity Multiple-Producer Multiple-Consumer (MPMC) lock-free queue
//!
//! NOTE: This module requires atomic CAS operations. On targets where they're not natively available,
//! they are emulated by the [`portable-atomic`](https://crates.io/crates/portable-atomic) crate.
//!
//! # Example
//!
//! This queue can be constructed in "const context". Placing it in a `static` variable lets *all*
//! contexts (interrupts / threads / `main`) safely enqueue and dequeue items from it.
//!
//! ``` ignore
//! #![no_main]
//! #![no_std]
//!
//! use panic_semihosting as _;
//!
//! use cortex_m::{asm, peripheral::syst::SystClkSource};
//! use cortex_m_rt::{entry, exception};
//! use cortex_m_semihosting::hprintln;
//! use heapless::mpmc::Q2;
//!
//! static Q: Q2<u8> = Q2::new();
//!
//! #[entry]
//! fn main() -> ! {
//! if let Some(p) = cortex_m::Peripherals::take() {
//! let mut syst = p.SYST;
//!
//! // configures the system timer to trigger a SysTick exception every second
//! syst.set_clock_source(SystClkSource::Core);
//! syst.set_reload(12_000_000);
//! syst.enable_counter();
//! syst.enable_interrupt();
//! }
//!
//! loop {
//! if let Some(x) = Q.dequeue() {
//! hprintln!("{}", x).ok();
//! } else {
//! asm::wfi();
//! }
//! }
//! }
//!
//! #[exception]
//! fn SysTick() {
//! static mut COUNT: u8 = 0;
//!
//! Q.enqueue(*COUNT).ok();
//! *COUNT += 1;
//! }
//! ```
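//!
//! On a hosted target the same API can be exercised directly; a minimal sketch:
//!
//! ```
//! use heapless::mpmc::Q4;
//!
//! let q: Q4<u8> = Q4::new();
//! assert!(q.enqueue(1).is_ok());
//! assert!(q.enqueue(2).is_ok());
//! assert_eq!(q.dequeue(), Some(1));
//! assert_eq!(q.dequeue(), Some(2));
//! assert_eq!(q.dequeue(), None);
//! ```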
//!
//! # Benchmark
//!
//! Measured on an ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles
//!
//! N| `Q8::<u8>::enqueue().ok()` (`z`) | `Q8::<u8>::dequeue()` (`z`) |
//! -|----------------------------------|-----------------------------|
//! 0|34 |35 |
//! 1|52 |53 |
//! 2|69 |71 |
//!
//! - `N` denotes the number of *interruptions*. On Cortex-M, an interruption consists of an
//! interrupt handler preempting the would-be atomic section of the `enqueue` / `dequeue`
//! operation. Note that it does *not* matter if the higher priority handler uses the queue or
//! not.
//! - All execution times are in clock cycles. 1 clock cycle = 125 ns.
//! - Execution time is *dependent* on `mem::size_of::<T>()`. Both operations include one
//! `memcpy(T)` in their successful path.
//! - The optimization level is indicated in parentheses.
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
//!
//! # Portability
//!
//! This module requires CAS atomic instructions which are not available on all architectures
//! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can,
//! however, be emulated with [`portable-atomic`](https://crates.io/crates/portable-atomic), which is
//! enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32`
//! targets.
//!
//! # References
//!
//! This is an implementation of Dmitry Vyukov's ["Bounded MPMC queue"][0] minus the cache padding.
//!
//! [0]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
use core::{cell::UnsafeCell, mem::MaybeUninit};
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;
use atomic::Ordering;
#[cfg(feature = "mpmc_large")]
type AtomicTargetSize = atomic::AtomicUsize;
#[cfg(not(feature = "mpmc_large"))]
type AtomicTargetSize = atomic::AtomicU8;
#[cfg(feature = "mpmc_large")]
type IntSize = usize;
#[cfg(not(feature = "mpmc_large"))]
type IntSize = u8;
/// MPMC queue with a capacity for 2 elements.
pub type Q2<T> = MpMcQueue<T, 2>;
/// MPMC queue with a capacity for 4 elements.
pub type Q4<T> = MpMcQueue<T, 4>;
/// MPMC queue with a capacity for 8 elements.
pub type Q8<T> = MpMcQueue<T, 8>;
/// MPMC queue with a capacity for 16 elements.
pub type Q16<T> = MpMcQueue<T, 16>;
/// MPMC queue with a capacity for 32 elements.
pub type Q32<T> = MpMcQueue<T, 32>;
/// MPMC queue with a capacity for 64 elements.
pub type Q64<T> = MpMcQueue<T, 64>;
/// MPMC queue with a capacity for N elements.
///
/// N must be a power of 2. The maximum value of N is `u8::MAX - 1` unless the
/// `mpmc_large` feature is enabled.
pub struct MpMcQueue<T, const N: usize> {
buffer: UnsafeCell<[Cell<T>; N]>,
dequeue_pos: AtomicTargetSize,
enqueue_pos: AtomicTargetSize,
}
impl<T, const N: usize> MpMcQueue<T, N> {
const MASK: IntSize = (N - 1) as IntSize;
const EMPTY_CELL: Cell<T> = Cell::new(0);
const ASSERT: [(); 1] = [()];
/// Creates an empty queue
pub const fn new() -> Self {
// Const assert
crate::sealed::greater_than_1::<N>();
crate::sealed::power_of_two::<N>();
// Const assert on size: this indexes out of bounds if `N` does not fit in `IntSize`.
Self::ASSERT[!(N < (IntSize::MAX as usize)) as usize];
let mut cell_count = 0;
let mut result_cells: [Cell<T>; N] = [Self::EMPTY_CELL; N];
while cell_count != N {
result_cells[cell_count] = Cell::new(cell_count);
cell_count += 1;
}
Self {
buffer: UnsafeCell::new(result_cells),
dequeue_pos: AtomicTargetSize::new(0),
enqueue_pos: AtomicTargetSize::new(0),
}
}
/// Returns the item in the front of the queue, or `None` if the queue is empty
pub fn dequeue(&self) -> Option<T> {
unsafe { dequeue(self.buffer.get() as *mut _, &self.dequeue_pos, Self::MASK) }
}
/// Adds an `item` to the end of the queue
///
/// Returns back the `item` if the queue is full
pub fn enqueue(&self, item: T) -> Result<(), T> {
unsafe {
enqueue(
self.buffer.get() as *mut _,
&self.enqueue_pos,
Self::MASK,
item,
)
}
}
}
impl<T, const N: usize> Default for MpMcQueue<T, N> {
fn default() -> Self {
Self::new()
}
}
unsafe impl<T, const N: usize> Sync for MpMcQueue<T, N> where T: Send {}
struct Cell<T> {
data: MaybeUninit<T>,
sequence: AtomicTargetSize,
}
impl<T> Cell<T> {
const fn new(seq: usize) -> Self {
Self {
data: MaybeUninit::uninit(),
sequence: AtomicTargetSize::new(seq as IntSize),
}
}
}
unsafe fn dequeue<T>(
buffer: *mut Cell<T>,
dequeue_pos: &AtomicTargetSize,
mask: IntSize,
) -> Option<T> {
let mut pos = dequeue_pos.load(Ordering::Relaxed);
let mut cell;
loop {
cell = buffer.add(usize::from(pos & mask));
let seq = (*cell).sequence.load(Ordering::Acquire);
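// Vyukov's algorithm: the cell can be dequeued only when its sequence number
// equals `pos + 1`; a smaller value means the queue is empty, a larger one means
// another consumer already claimed this slot and `pos` must be reloaded.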
let dif = (seq as i8).wrapping_sub((pos.wrapping_add(1)) as i8);
if dif == 0 {
if dequeue_pos
.compare_exchange_weak(
pos,
pos.wrapping_add(1),
Ordering::Relaxed,
Ordering::Relaxed,
)
.is_ok()
{
break;
}
} else if dif < 0 {
return None;
} else {
pos = dequeue_pos.load(Ordering::Relaxed);
}
}
let data = (*cell).data.as_ptr().read();
(*cell)
.sequence
.store(pos.wrapping_add(mask).wrapping_add(1), Ordering::Release);
Some(data)
}
unsafe fn enqueue<T>(
buffer: *mut Cell<T>,
enqueue_pos: &AtomicTargetSize,
mask: IntSize,
item: T,
) -> Result<(), T> {
let mut pos = enqueue_pos.load(Ordering::Relaxed);
let mut cell;
loop {
cell = buffer.add(usize::from(pos & mask));
let seq = (*cell).sequence.load(Ordering::Acquire);
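// The cell is free for enqueueing only when its sequence number equals `pos`;
// a smaller value means the queue is full, a larger one means another producer
// already claimed this slot and `pos` must be reloaded.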
let dif = (seq as i8).wrapping_sub(pos as i8);
if dif == 0 {
if enqueue_pos
.compare_exchange_weak(
pos,
pos.wrapping_add(1),
Ordering::Relaxed,
Ordering::Relaxed,
)
.is_ok()
{
break;
}
} else if dif < 0 {
return Err(item);
} else {
pos = enqueue_pos.load(Ordering::Relaxed);
}
}
(*cell).data.as_mut_ptr().write(item);
(*cell)
.sequence
.store(pos.wrapping_add(1), Ordering::Release);
Ok(())
}
#[cfg(test)]
mod tests {
use super::Q2;
#[test]
fn sanity() {
let q = Q2::new();
q.enqueue(0).unwrap();
q.enqueue(1).unwrap();
assert!(q.enqueue(2).is_err());
assert_eq!(q.dequeue(), Some(0));
assert_eq!(q.dequeue(), Some(1));
assert_eq!(q.dequeue(), None);
}
#[test]
fn drain_at_pos255() {
let q = Q2::new();
for _ in 0..255 {
assert!(q.enqueue(0).is_ok());
assert_eq!(q.dequeue(), Some(0));
}
// this should not block forever
assert_eq!(q.dequeue(), None);
}
#[test]
fn full_at_wrapped_pos0() {
let q = Q2::new();
for _ in 0..254 {
assert!(q.enqueue(0).is_ok());
assert_eq!(q.dequeue(), Some(0));
}
assert!(q.enqueue(0).is_ok());
assert!(q.enqueue(0).is_ok());
// this should not block forever
assert!(q.enqueue(0).is_err());
}
}

59
vendor/heapless/src/pool.rs vendored Normal file

@@ -0,0 +1,59 @@
//! Memory and object pools
//!
//! # Target support
//!
//! This module / API is only available on these compilation targets:
//!
//! - ARM architectures whose instruction set includes the LDREX, CLREX and STREX instructions, e.g.
//! `thumbv7m-none-eabi` but not `thumbv6m-none-eabi`
//! - 32-bit x86, e.g. `i686-unknown-linux-gnu`
//!
//! # Benchmarks
//!
//! - compilation settings
//! - `codegen-units = 1`
//! - `lto = 'fat'`
//! - `opt-level = 'z'`
//! - compilation target: `thumbv7em-none-eabihf`
//! - CPU: ARM Cortex-M4F
//!
//! - test program:
//!
//! ``` no_run
//! use heapless::box_pool;
//!
//! box_pool!(P: ()); // or `arc_pool!` or `object_pool!`
//!
//! bkpt();
//! let res = P.alloc(());
//! bkpt();
//!
//! if let Ok(boxed) = res {
//! bkpt();
//! drop(boxed);
//! bkpt();
//! }
//! # fn bkpt() {}
//! ```
//!
//! - measurement method: the cycle counter (CYCCNT) register was sampled each time a breakpoint
//! (`bkpt`) was hit. The difference between the "after" and the "before" value of CYCCNT yields the
//! execution time in clock cycles.
//!
//! | API | clock cycles |
//! |------------------------------|--------------|
//! | `BoxPool::alloc` | 23 |
//! | `pool::boxed::Box::drop` | 23 |
//! | `ArcPool::alloc` | 28 |
//! | `pool::arc::Arc::drop` | 59 |
//! | `ObjectPool::request` | 23 |
//! | `pool::object::Object::drop` | 23 |
//!
//! Note that the execution time doesn't include `T`'s initialization or `T`'s destructor, which will
//! be present in the general case for `Box` and `Arc`.
mod treiber;
pub mod arc;
pub mod boxed;
pub mod object;

526
vendor/heapless/src/pool/arc.rs vendored Normal file

@@ -0,0 +1,526 @@
//! `std::sync::Arc`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
//!
//! arc_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ArcBlock<u128> = unsafe {
//! static mut B: ArcBlock<u128> = ArcBlock::new();
//! &mut B
//! };
//!
//! P.manage(block);
//!
//! let arc = P.alloc(1).unwrap();
//!
//! // number of smart pointers is limited to the number of blocks managed by the pool
//! let res = P.alloc(2);
//! assert!(res.is_err());
//!
//! // but cloning does not consume an `ArcBlock`
//! let arc2 = arc.clone();
//!
//! assert_eq!(1, *arc2);
//!
//! // `arc`'s destructor returns the memory block to the pool
//! drop(arc2); // decrease reference counter
//! drop(arc); // release memory
//!
//! // it's now possible to allocate a new `Arc` smart pointer
//! let res = P.alloc(3);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ArcPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{arc_pool, pool::arc::ArcBlock};
//!
//! arc_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ArcBlock<u128>] = {
//! const BLOCK: ArcBlock<u128> = ArcBlock::new(); // <=
//! static mut BLOCKS: [ArcBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
// reference counting logic is based on version 1.63.0 of the Rust standard library (`alloc` crate)
// which is licensed under 'MIT or APACHE-2.0'
// https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited
// 2022-09-05)
use core::{
fmt,
hash::{Hash, Hasher},
mem::{ManuallyDrop, MaybeUninit},
ops, ptr,
sync::atomic::{self, AtomicUsize, Ordering},
};
use super::treiber::{NonNullPtr, Stack, UnionNode};
/// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::arc)
#[macro_export]
macro_rules! arc_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::arc::ArcPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
$crate::pool::arc::ArcPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `ArcPool::alloc`
#[allow(dead_code)]
pub fn alloc(
&self,
value: $data_type,
) -> Result<$crate::pool::arc::Arc<$name>, $data_type> {
<$name as $crate::pool::arc::ArcPool>::alloc(value)
}
/// Inherent method version of `ArcPool::manage`
#[allow(dead_code)]
pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) {
<$name as $crate::pool::arc::ArcPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::arc::Arc` smart pointers
pub trait ArcPool: Sized {
/// The data type managed by the memory pool
type Data: 'static;
/// `arc_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static ArcPoolImpl<Self::Data>;
/// Allocate a new `Arc` smart pointer initialized to the given `value`
///
/// `manage` should be called at least once before calling `alloc`
///
/// # Errors
///
/// The `Err` variant is returned when the memory pool has run out of memory blocks
fn alloc(value: Self::Data) -> Result<Arc<Self>, Self::Data> {
Ok(Arc {
node_ptr: Self::singleton().alloc(value)?,
})
}
/// Add a statically allocated memory block to the memory pool
fn manage(block: &'static mut ArcBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// `arc_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct ArcPoolImpl<T> {
stack: Stack<UnionNode<MaybeUninit<ArcInner<T>>>>,
}
impl<T> ArcPoolImpl<T> {
/// `arc_pool!` implementation detail
#[doc(hidden)]
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<ArcInner<T>>>>, T> {
if let Some(node_ptr) = self.stack.try_pop() {
let inner = ArcInner {
data: value,
strong: AtomicUsize::new(1),
};
unsafe { node_ptr.as_ptr().cast::<ArcInner<T>>().write(inner) }
Ok(node_ptr)
} else {
Err(value)
}
}
fn manage(&self, block: &'static mut ArcBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
unsafe impl<T> Sync for ArcPoolImpl<T> {}
/// Like `std::sync::Arc` but managed by memory pool `P`
pub struct Arc<P>
where
P: ArcPool,
{
node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>,
}
impl<P> Arc<P>
where
P: ArcPool,
{
fn inner(&self) -> &ArcInner<P::Data> {
unsafe { &*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>() }
}
fn from_inner(node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>) -> Self {
Self { node_ptr }
}
unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
&mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data)
}
#[inline(never)]
unsafe fn drop_slow(&mut self) {
// run `P::Data`'s destructor
ptr::drop_in_place(Self::get_mut_unchecked(self));
// return memory to pool
P::singleton().stack.push(self.node_ptr);
}
}
impl<P> AsRef<P::Data> for Arc<P>
where
P: ArcPool,
{
fn as_ref(&self) -> &P::Data {
&**self
}
}
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
impl<P> Clone for Arc<P>
where
P: ArcPool,
{
fn clone(&self) -> Self {
let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);
if old_size > MAX_REFCOUNT {
// XXX original code calls `intrinsics::abort` which is unstable API
panic!();
}
Self::from_inner(self.node_ptr)
}
}
impl<A> fmt::Debug for Arc<A>
where
A: ArcPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> ops::Deref for Arc<P>
where
P: ArcPool,
{
type Target = P::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data) }
}
}
impl<A> fmt::Display for Arc<A>
where
A: ArcPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<A> Drop for Arc<A>
where
A: ArcPool,
{
fn drop(&mut self) {
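// Decrement the strong count; only the thread that drops the last reference runs
// the destructor. The `Release` decrement paired with the `Acquire` fence below
// ensures all uses of the data happen before it is dropped and the memory block
// is returned to the pool.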
if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
return;
}
atomic::fence(Ordering::Acquire);
unsafe { self.drop_slow() }
}
}
impl<A> Eq for Arc<A>
where
A: ArcPool,
A::Data: Eq,
{
}
impl<A> Hash for Arc<A>
where
A: ArcPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Arc<A>
where
A: ArcPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Arc<B>> for Arc<A>
where
A: ArcPool,
B: ArcPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Arc<B>) -> bool {
A::Data::eq(self, &**other)
}
}
impl<A, B> PartialOrd<Arc<B>> for Arc<A>
where
A: ArcPool,
B: ArcPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Arc<B>) -> Option<core::cmp::Ordering> {
A::Data::partial_cmp(self, &**other)
}
}
unsafe impl<A> Send for Arc<A>
where
A: ArcPool,
A::Data: Sync + Send,
{
}
unsafe impl<A> Sync for Arc<A>
where
A: ArcPool,
A::Data: Sync + Send,
{
}
impl<A> Unpin for Arc<A> where A: ArcPool {}
struct ArcInner<T> {
data: T,
strong: AtomicUsize,
}
/// A chunk of memory that an `ArcPool` can manage
pub struct ArcBlock<T> {
node: UnionNode<MaybeUninit<ArcInner<T>>>,
}
impl<T> ArcBlock<T> {
/// Creates a new memory block
pub const fn new() -> Self {
Self {
node: UnionNode {
data: ManuallyDrop::new(MaybeUninit::uninit()),
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn cannot_alloc_if_empty() {
arc_pool!(P: i32);
assert_eq!(Err(42), P.alloc(42),);
}
#[test]
fn can_alloc_if_manages_one_block() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
assert_eq!(42, *P.alloc(42).unwrap());
}
#[test]
fn alloc_drop_alloc() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).unwrap();
drop(arc);
assert_eq!(2, *P.alloc(2).unwrap());
}
#[test]
fn strong_count_starts_at_one() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn clone_increases_strong_count() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
let before = arc.inner().strong.load(Ordering::Relaxed);
let arc2 = arc.clone();
let expected = before + 1;
assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed));
assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn drop_decreases_strong_count() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
let arc2 = arc.clone();
let before = arc.inner().strong.load(Ordering::Relaxed);
drop(arc);
let expected = before - 1;
assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed);
}
}
arc_pool!(P: S);
let block = unsafe {
static mut B: ArcBlock<S> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(S).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed));
drop(arc);
assert_eq!(1, COUNT.load(Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
arc_pool!(P: Zst4096);
let block = unsafe {
static mut B: ArcBlock<Zst4096> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(Zst4096).ok().unwrap();
let raw = &*arc as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
}

560
vendor/heapless/src/pool/boxed.rs vendored Normal file

@@ -0,0 +1,560 @@
//! `std::boxed::Box`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//!
//! box_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new();
//! &mut B
//! };
//!
//! // give block of memory to the pool
//! P.manage(block);
//!
//! // it's now possible to allocate
//! let mut boxed = P.alloc(1).unwrap();
//!
//! // mutation is possible
//! *boxed += 1;
//! assert_eq!(2, *boxed);
//!
//! // number of boxes is limited to the number of blocks managed by the pool
//! let res = P.alloc(3);
//! assert!(res.is_err());
//!
//! // give another memory block to the pool
//! P.manage(unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new();
//! &mut B
//! });
//!
//! // cloning also consumes a memory block from the pool
//! let mut separate_box = boxed.clone();
//! *separate_box += 1;
//! assert_eq!(3, *separate_box);
//!
//! // after the clone it's not possible to allocate again
//! let res = P.alloc(4);
//! assert!(res.is_err());
//!
//! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed);
//!
//! // it's possible to allocate again
//! let res = P.alloc(5);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `BoxPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [BoxBlock<u128>] = {
//! const BLOCK: BoxBlock<u128> = BoxBlock::new(); // <=
//! static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
use core::{
fmt,
hash::{Hash, Hasher},
mem::{ManuallyDrop, MaybeUninit},
ops, ptr,
};
use stable_deref_trait::StableDeref;
use super::treiber::{NonNullPtr, Stack, UnionNode};
/// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::boxed)
#[macro_export]
macro_rules! box_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::boxed::BoxPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
$crate::pool::boxed::BoxPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `BoxPool::alloc`
#[allow(dead_code)]
pub fn alloc(
&self,
value: $data_type,
) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
<$name as $crate::pool::boxed::BoxPool>::alloc(value)
}
/// Inherent method version of `BoxPool::manage`
#[allow(dead_code)]
pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
<$name as $crate::pool::boxed::BoxPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::boxed::Box`-es
///
/// # Usage
///
/// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that
/// implements this trait.
///
/// # Semver guarantees
///
/// *Implementing* this trait is exempt from semver guarantees.
/// i.e. a new patch release is allowed to break downstream `BoxPool` implementations.
///
/// *Using* the trait, e.g. in generic code, does fall under semver guarantees.
pub trait BoxPool: Sized {
/// The data type managed by the memory pool
type Data: 'static;
/// `box_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static BoxPoolImpl<Self::Data>;
/// Allocate a new `Box` initialized to the given `value`
///
/// `manage` should be called at least once before calling `alloc`
///
/// # Errors
///
/// The `Err` variant is returned when the memory pool has run out of memory blocks
fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
Ok(Box {
node_ptr: Self::singleton().alloc(value)?,
})
}
/// Add a statically allocated memory block to the memory pool
fn manage(block: &'static mut BoxBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]`
pub struct Box<P>
where
P: BoxPool,
{
node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}
impl<A> Clone for Box<A>
where
A: BoxPool,
A::Data: Clone,
{
fn clone(&self) -> Self {
A::alloc((**self).clone()).ok().expect("OOM")
}
}
impl<A> fmt::Debug for Box<A>
where
A: BoxPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> ops::Deref for Box<P>
where
P: BoxPool,
{
type Target = P::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
}
}
impl<P> ops::DerefMut for Box<P>
where
P: BoxPool,
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
}
}
unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}
impl<A> fmt::Display for Box<A>
where
A: BoxPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> Drop for Box<P>
where
P: BoxPool,
{
fn drop(&mut self) {
let node = self.node_ptr;
unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }
unsafe { P::singleton().stack.push(node) }
}
}
impl<A> Eq for Box<A>
where
A: BoxPool,
A::Data: Eq,
{
}
impl<A> Hash for Box<A>
where
A: BoxPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Box<A>
where
A: BoxPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Box<B>> for Box<A>
where
A: BoxPool,
B: BoxPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Box<B>) -> bool {
A::Data::eq(self, other)
}
}
impl<A, B> PartialOrd<Box<B>> for Box<A>
where
A: BoxPool,
B: BoxPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
A::Data::partial_cmp(self, other)
}
}
unsafe impl<P> Send for Box<P>
where
P: BoxPool,
P::Data: Send,
{
}
unsafe impl<P> Sync for Box<P>
where
P: BoxPool,
P::Data: Sync,
{
}
/// `box_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
stack: Stack<UnionNode<MaybeUninit<T>>>,
}
impl<T> BoxPoolImpl<T> {
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
if let Some(node_ptr) = self.stack.try_pop() {
unsafe { node_ptr.as_ptr().cast::<T>().write(value) }
Ok(node_ptr)
} else {
Err(value)
}
}
fn manage(&self, block: &'static mut BoxBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
unsafe impl<T> Sync for BoxPoolImpl<T> {}
/// A chunk of memory that a `BoxPool` singleton can manage
pub struct BoxBlock<T> {
node: UnionNode<MaybeUninit<T>>,
}
impl<T> BoxBlock<T> {
/// Creates a new memory block
pub const fn new() -> Self {
Self {
node: UnionNode {
data: ManuallyDrop::new(MaybeUninit::uninit()),
},
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;
use super::*;
#[test]
fn cannot_alloc_if_empty() {
box_pool!(P: i32);
assert_eq!(Err(42), P.alloc(42));
}
#[test]
fn can_alloc_if_pool_manages_one_block() {
box_pool!(P: i32);
let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new();
&mut B
};
P.manage(block);
assert_eq!(42, *P.alloc(42).unwrap());
}
#[test]
fn alloc_drop_alloc() {
box_pool!(P: i32);
let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(1).unwrap();
drop(boxed);
assert_eq!(2, *P.alloc(2).unwrap());
}
#[test]
fn runs_destructor_exactly_once_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed);
}
}
box_pool!(P: S);
let block = unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(S).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed));
drop(boxed);
assert_eq!(1, COUNT.load(Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
box_pool!(P: Zst4096);
let block = unsafe {
static mut B: BoxBlock<Zst4096> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(Zst4096).ok().unwrap();
let raw = &*boxed as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
#[allow(clippy::redundant_clone)]
#[test]
fn can_clone_if_pool_is_not_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let first = P.alloc(S).ok().unwrap();
let _second = first.clone();
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let is_oom = P.alloc(S).is_err();
assert!(is_oom);
}
#[allow(clippy::redundant_clone)]
#[test]
fn clone_panics_if_pool_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let first = P.alloc(S).ok().unwrap();
let thread = thread::spawn(move || {
let _second = first.clone();
});
let thread_panicked = thread.join().is_err();
assert!(thread_panicked);
// we diverge from `alloc::Box<T>` in that we call `T::clone` first and then request
// memory from the allocator whereas `alloc::Box<T>` does it the other way around
// assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
}
#[allow(clippy::redundant_clone)]
#[test]
fn panicking_clone_does_not_leak_memory() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
panic!()
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let boxed = P.alloc(S).ok().unwrap();
let thread = thread::spawn(move || {
let _boxed = boxed.clone();
});
let thread_panicked = thread.join().is_err();
assert!(thread_panicked);
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let once = P.alloc(S);
let twice = P.alloc(S);
assert!(once.is_ok());
assert!(twice.is_ok());
}
}

420
vendor/heapless/src/pool/object.rs vendored Normal file
View File

@@ -0,0 +1,420 @@
//! Object pool API
//!
//! # Example usage
//!
//! ```
//! use heapless::{object_pool, pool::object::{Object, ObjectBlock}};
//!
//! object_pool!(P: [u8; 128]);
//!
//! // cannot request objects without first giving object blocks to the pool
//! assert!(P.request().is_none());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe {
//! // unlike the memory pool APIs, an initial value must be specified here
//! static mut B: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]);
//! &mut B
//! };
//!
//! // give object block to the pool
//! P.manage(block);
//!
//! // it's now possible to request objects
//! // unlike the memory pool APIs, no initial value is required here
//! let mut object = P.request().unwrap();
//!
//! // mutation is possible
//! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1));
//!
//! // the number of live objects is limited to the number of blocks managed by the pool
//! let res = P.request();
//! assert!(res.is_none());
//!
//! // `object`'s destructor returns the object to the pool
//! drop(object);
//!
//! // it's possible to request an `Object` again
//! let res = P.request();
//!
//! assert!(res.is_some());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ObjectPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{object_pool, pool::object::ObjectBlock};
//!
//! object_pool!(P: [u8; 128]);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ObjectBlock<[u8; 128]>] = {
//! const BLOCK: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]); // <=
//! static mut BLOCKS: [ObjectBlock<[u8; 128]>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
use core::{
cmp::Ordering,
fmt,
hash::{Hash, Hasher},
mem::ManuallyDrop,
ops, ptr,
};
use stable_deref_trait::StableDeref;
use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode};
/// Creates a new `ObjectPool` singleton with the given `$name` that manages the specified
/// `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::object)
#[macro_export]
macro_rules! object_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::object::ObjectPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> {
static $name: $crate::pool::object::ObjectPoolImpl<$data_type> =
$crate::pool::object::ObjectPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `ObjectPool::request`
#[allow(dead_code)]
pub fn request(&self) -> Option<$crate::pool::object::Object<$name>> {
<$name as $crate::pool::object::ObjectPool>::request()
}
/// Inherent method version of `ObjectPool::manage`
#[allow(dead_code)]
pub fn manage(
&self,
block: &'static mut $crate::pool::object::ObjectBlock<$data_type>,
) {
<$name as $crate::pool::object::ObjectPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::object::Object`s
pub trait ObjectPool: Sized {
/// The data type of the objects managed by the object pool
type Data: 'static;
/// `object_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static ObjectPoolImpl<Self::Data>;
/// Request a new object from the pool
fn request() -> Option<Object<Self>> {
Self::singleton()
.request()
.map(|node_ptr| Object { node_ptr })
}
/// Adds a statically allocated object to the pool
fn manage(block: &'static mut ObjectBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// `object_pool!` implementation detail
#[doc(hidden)]
pub struct ObjectPoolImpl<T> {
stack: Stack<StructNode<T>>,
}
impl<T> ObjectPoolImpl<T> {
/// `object_pool!` implementation detail
#[doc(hidden)]
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn request(&self) -> Option<NonNullPtr<StructNode<T>>> {
self.stack.try_pop()
}
fn manage(&self, block: &'static mut ObjectBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
// `T` needs to be `Send` because returning an object from a thread and then
// requesting it from another is effectively a cross-thread 'send' operation
unsafe impl<T> Sync for ObjectPoolImpl<T> where T: Send {}
/// An object managed by object pool `P`
pub struct Object<P>
where
P: ObjectPool,
{
node_ptr: NonNullPtr<StructNode<P::Data>>,
}
impl<A, T, const N: usize> AsMut<[T]> for Object<A>
where
A: ObjectPool<Data = [T; N]>,
{
fn as_mut(&mut self) -> &mut [T] {
&mut **self
}
}
impl<A, T, const N: usize> AsRef<[T]> for Object<A>
where
A: ObjectPool<Data = [T; N]>,
{
fn as_ref(&self) -> &[T] {
&**self
}
}
impl<A> fmt::Debug for Object<A>
where
A: ObjectPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<A> ops::Deref for Object<A>
where
A: ObjectPool,
{
type Target = A::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr()).data) }
}
}
impl<A> ops::DerefMut for Object<A>
where
A: ObjectPool,
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *ptr::addr_of_mut!((*self.node_ptr.as_ptr()).data) }
}
}
unsafe impl<A> StableDeref for Object<A> where A: ObjectPool {}
impl<A> fmt::Display for Object<A>
where
A: ObjectPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> Drop for Object<P>
where
P: ObjectPool,
{
fn drop(&mut self) {
unsafe { P::singleton().stack.push(self.node_ptr) }
}
}
impl<A> Eq for Object<A>
where
A: ObjectPool,
A::Data: Eq,
{
}
impl<A> Hash for Object<A>
where
A: ObjectPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Object<A>
where
A: ObjectPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Object<B>> for Object<A>
where
A: ObjectPool,
B: ObjectPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Object<B>) -> bool {
A::Data::eq(self, other)
}
}
impl<A, B> PartialOrd<Object<B>> for Object<A>
where
A: ObjectPool,
B: ObjectPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Object<B>) -> Option<Ordering> {
A::Data::partial_cmp(self, other)
}
}
unsafe impl<P> Send for Object<P>
where
P: ObjectPool,
P::Data: Send,
{
}
unsafe impl<P> Sync for Object<P>
where
P: ObjectPool,
P::Data: Sync,
{
}
/// An object "block" of data type `T` that has not yet been associated to an `ObjectPool`
pub struct ObjectBlock<T> {
node: StructNode<T>,
}
impl<T> ObjectBlock<T> {
/// Creates a new object block with the given `initial_value`
pub const fn new(initial_value: T) -> Self {
Self {
node: StructNode {
next: ManuallyDrop::new(AtomicPtr::null()),
data: ManuallyDrop::new(initial_value),
},
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::{self, AtomicUsize};
use super::*;
#[test]
fn cannot_request_if_empty() {
object_pool!(P: i32);
assert_eq!(None, P.request());
}
#[test]
fn can_request_if_manages_one_block() {
object_pool!(P: i32);
let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B
};
P.manage(block);
assert_eq!(1, *P.request().unwrap());
}
#[test]
fn request_drop_request() {
object_pool!(P: i32);
let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B
};
P.manage(block);
let mut object = P.request().unwrap();
*object = 2;
drop(object);
assert_eq!(2, *P.request().unwrap());
}
#[test]
fn destructor_does_not_run_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, atomic::Ordering::Relaxed);
}
}
object_pool!(P: S);
let block = unsafe {
static mut B: ObjectBlock<S> = ObjectBlock::new(S);
&mut B
};
P.manage(block);
let object = P.request().unwrap();
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
drop(object);
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
object_pool!(P: Zst4096);
let block = unsafe {
static mut B: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096);
&mut B
};
P.manage(block);
let object = P.request().unwrap();
let raw = &*object as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
}

91
vendor/heapless/src/pool/treiber.rs vendored Normal file

@@ -0,0 +1,91 @@
use core::mem::ManuallyDrop;
#[cfg_attr(target_arch = "x86", path = "treiber/cas.rs")]
#[cfg_attr(arm_llsc, path = "treiber/llsc.rs")]
mod impl_;
pub use impl_::{AtomicPtr, NonNullPtr};
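/// A lock-free Treiber stack of intrusive nodes.
///
/// The platform-specific `impl_` module (CAS on x86, LL/SC on ARM) provides the actual
/// `push` / `try_pop` primitives.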
pub struct Stack<N>
where
N: Node,
{
top: AtomicPtr<N>,
}
impl<N> Stack<N>
where
N: Node,
{
pub const fn new() -> Self {
Self {
top: AtomicPtr::null(),
}
}
/// # Safety
/// - `node` must be a valid pointer
/// - aliasing rules must be enforced by the caller, e.g. the same `node` may not be pushed more than once
pub unsafe fn push(&self, node: NonNullPtr<N>) {
impl_::push(self, node)
}
pub fn try_pop(&self) -> Option<NonNullPtr<N>> {
impl_::try_pop(self)
}
}
pub trait Node: Sized {
type Data;
fn next(&self) -> &AtomicPtr<Self>;
fn next_mut(&mut self) -> &mut AtomicPtr<Self>;
}
pub union UnionNode<T> {
next: ManuallyDrop<AtomicPtr<UnionNode<T>>>,
pub data: ManuallyDrop<T>,
}
impl<T> Node for UnionNode<T> {
type Data = T;
fn next(&self) -> &AtomicPtr<Self> {
unsafe { &self.next }
}
fn next_mut(&mut self) -> &mut AtomicPtr<Self> {
unsafe { &mut self.next }
}
}
pub struct StructNode<T> {
pub next: ManuallyDrop<AtomicPtr<StructNode<T>>>,
pub data: ManuallyDrop<T>,
}
impl<T> Node for StructNode<T> {
type Data = T;
fn next(&self) -> &AtomicPtr<Self> {
&self.next
}
fn next_mut(&mut self) -> &mut AtomicPtr<Self> {
&mut self.next
}
}
#[cfg(test)]
mod tests {
use core::mem;
use super::*;
#[test]
fn node_is_never_zero_sized() {
struct Zst;
assert_ne!(mem::size_of::<UnionNode<Zst>>(), 0);
}
}

196
vendor/heapless/src/pool/treiber/cas.rs vendored Normal file
View File

@@ -0,0 +1,196 @@
use core::{
marker::PhantomData,
num::{NonZeroU32, NonZeroU64},
ptr::NonNull,
sync::atomic::{AtomicU64, Ordering},
};
use super::{Node, Stack};
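// This compare-and-swap implementation packs a 32-bit node address and a 32-bit generation
// tag into a single `AtomicU64`. The tag is bumped each time a node is popped (see
// `NonNullPtr::increase_tag`), so a stale pointer held by a concurrent `try_pop` fails its
// compare-exchange instead of silently succeeding on a recycled node (the ABA problem).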
pub struct AtomicPtr<N>
where
N: Node,
{
inner: AtomicU64,
_marker: PhantomData<*mut N>,
}
impl<N> AtomicPtr<N>
where
N: Node,
{
pub const fn null() -> Self {
Self {
inner: AtomicU64::new(0),
_marker: PhantomData,
}
}
fn compare_and_exchange_weak(
&self,
current: Option<NonNullPtr<N>>,
new: Option<NonNullPtr<N>>,
success: Ordering,
failure: Ordering,
) -> Result<(), Option<NonNullPtr<N>>> {
self.inner
.compare_exchange_weak(
current
.map(|pointer| pointer.into_u64())
.unwrap_or_default(),
new.map(|pointer| pointer.into_u64()).unwrap_or_default(),
success,
failure,
)
.map(drop)
.map_err(NonNullPtr::from_u64)
}
fn load(&self, order: Ordering) -> Option<NonNullPtr<N>> {
NonZeroU64::new(self.inner.load(order)).map(|inner| NonNullPtr {
inner,
_marker: PhantomData,
})
}
fn store(&self, value: Option<NonNullPtr<N>>, order: Ordering) {
self.inner.store(
value.map(|pointer| pointer.into_u64()).unwrap_or_default(),
order,
)
}
}
pub struct NonNullPtr<N>
where
N: Node,
{
inner: NonZeroU64,
_marker: PhantomData<*mut N>,
}
impl<N> Clone for NonNullPtr<N>
where
N: Node,
{
fn clone(&self) -> Self {
*self
}
}
impl<N> Copy for NonNullPtr<N> where N: Node {}
impl<N> NonNullPtr<N>
where
N: Node,
{
pub fn as_ptr(&self) -> *mut N {
self.inner.get() as *mut N
}
pub fn from_static_mut_ref(ref_: &'static mut N) -> NonNullPtr<N> {
let non_null = NonNull::from(ref_);
Self::from_non_null(non_null)
}
fn from_non_null(ptr: NonNull<N>) -> Self {
let address = ptr.as_ptr() as u32;
let tag = initial_tag().get();
let value = (u64::from(tag) << 32) | u64::from(address);
Self {
inner: unsafe { NonZeroU64::new_unchecked(value) },
_marker: PhantomData,
}
}
fn from_u64(value: u64) -> Option<Self> {
NonZeroU64::new(value).map(|inner| Self {
inner,
_marker: PhantomData,
})
}
fn non_null(&self) -> NonNull<N> {
unsafe { NonNull::new_unchecked(self.inner.get() as *mut N) }
}
fn tag(&self) -> NonZeroU32 {
unsafe { NonZeroU32::new_unchecked((self.inner.get() >> 32) as u32) }
}
fn into_u64(self) -> u64 {
self.inner.get()
}
fn increase_tag(&mut self) {
let address = self.as_ptr() as u32;
let new_tag = self
.tag()
.get()
.checked_add(1)
.map(|val| unsafe { NonZeroU32::new_unchecked(val) })
.unwrap_or_else(initial_tag)
.get();
let value = (u64::from(new_tag) << 32) | u64::from(address);
self.inner = unsafe { NonZeroU64::new_unchecked(value) };
}
}
fn initial_tag() -> NonZeroU32 {
unsafe { NonZeroU32::new_unchecked(1) }
}
pub unsafe fn push<N>(stack: &Stack<N>, new_top: NonNullPtr<N>)
where
N: Node,
{
let mut top = stack.top.load(Ordering::Relaxed);
loop {
new_top
.non_null()
.as_ref()
.next()
.store(top, Ordering::Relaxed);
if let Err(p) = stack.top.compare_and_exchange_weak(
top,
Some(new_top),
Ordering::Release,
Ordering::Relaxed,
) {
top = p;
} else {
return;
}
}
}
pub fn try_pop<N>(stack: &Stack<N>) -> Option<NonNullPtr<N>>
where
N: Node,
{
loop {
if let Some(mut top) = stack.top.load(Ordering::Acquire) {
let next = unsafe { top.non_null().as_ref().next().load(Ordering::Relaxed) };
if stack
.top
.compare_and_exchange_weak(Some(top), next, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
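// Bump the generation tag so that any stale copy of this pointer still held elsewhere can
// no longer pass a later compare-exchange (ABA prevention).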
top.increase_tag();
return Some(top);
}
} else {
// stack observed as empty
return None;
}
}
}

145
vendor/heapless/src/pool/treiber/llsc.rs vendored Normal file
View File

@@ -0,0 +1,145 @@
use core::{
cell::UnsafeCell,
ptr::{self, NonNull},
};
use super::{Node, Stack};
pub struct AtomicPtr<N>
where
N: Node,
{
inner: UnsafeCell<Option<NonNull<N>>>,
}
impl<N> AtomicPtr<N>
where
N: Node,
{
pub const fn null() -> Self {
Self {
inner: UnsafeCell::new(None),
}
}
}
pub struct NonNullPtr<N>
where
N: Node,
{
inner: NonNull<N>,
}
impl<N> NonNullPtr<N>
where
N: Node,
{
pub fn as_ptr(&self) -> *mut N {
self.inner.as_ptr().cast()
}
pub fn from_static_mut_ref(ref_: &'static mut N) -> Self {
Self {
inner: NonNull::from(ref_),
}
}
}
impl<N> Clone for NonNullPtr<N>
where
N: Node,
{
fn clone(&self) -> Self {
Self { inner: self.inner }
}
}
impl<N> Copy for NonNullPtr<N> where N: Node {}
pub unsafe fn push<N>(stack: &Stack<N>, mut node: NonNullPtr<N>)
where
N: Node,
{
let top_addr = ptr::addr_of!(stack.top) as *mut usize;
loop {
let top = arch::load_link(top_addr);
node.inner
.as_mut()
.next_mut()
.inner
.get()
.write(NonNull::new(top as *mut _));
if arch::store_conditional(node.inner.as_ptr() as usize, top_addr).is_ok() {
break;
}
}
}
pub fn try_pop<N>(stack: &Stack<N>) -> Option<NonNullPtr<N>>
where
N: Node,
{
unsafe {
let top_addr = ptr::addr_of!(stack.top) as *mut usize;
loop {
let top = arch::load_link(top_addr);
if let Some(top) = NonNull::new(top as *mut N) {
let next = &top.as_ref().next();
if arch::store_conditional(
next.inner
.get()
.read()
.map(|non_null| non_null.as_ptr() as usize)
.unwrap_or_default(),
top_addr,
)
.is_ok()
{
break Some(NonNullPtr { inner: top });
}
} else {
arch::clear_load_link();
break None;
}
}
}
}
#[cfg(arm_llsc)]
mod arch {
use core::arch::asm;
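// `ldrex` registers the address with the core's exclusive monitor; the matching `strex`
// only succeeds (writes `0` to its status register) if no other store to that address has
// intervened. The retry loops in `push` and `try_pop` above rely on exactly this to stay
// lock-free without a wide compare-and-swap.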
#[inline(always)]
pub fn clear_load_link() {
unsafe { asm!("clrex", options(nomem, nostack)) }
}
/// # Safety
/// - `addr` must be a valid pointer
#[inline(always)]
pub unsafe fn load_link(addr: *const usize) -> usize {
let value;
asm!("ldrex {}, [{}]", out(reg) value, in(reg) addr, options(nostack));
value
}
/// # Safety
/// - `addr` must be a valid pointer
#[inline(always)]
pub unsafe fn store_conditional(value: usize, addr: *mut usize) -> Result<(), ()> {
let outcome: usize;
asm!("strex {}, {}, [{}]", out(reg) outcome, in(reg) value, in(reg) addr, options(nostack));
if outcome == 0 {
Ok(())
} else {
Err(())
}
}
}

58
vendor/heapless/src/sealed.rs vendored Normal file
View File

@@ -0,0 +1,58 @@
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn smaller_than<const N: usize, const MAX: usize>() {
Assert::<N, MAX>::LESS;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn greater_than_eq_0<const N: usize>() {
Assert::<N, 0>::GREATER_EQ;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn greater_than_0<const N: usize>() {
Assert::<N, 0>::GREATER;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn greater_than_1<const N: usize>() {
Assert::<N, 1>::GREATER;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn power_of_two<const N: usize>() {
Assert::<N, 0>::GREATER;
Assert::<N, 0>::POWER_OF_TWO;
}
#[allow(dead_code)]
/// Const assert hack
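/// Each associated constant evaluates to an arithmetic error (integer underflow or division
/// by zero) whenever the asserted relation between `L` and `R` does not hold, so merely
/// referencing the constant from one of the `const fn`s above turns an invalid parameter
/// into a compile-time error.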
pub struct Assert<const L: usize, const R: usize>;
#[allow(dead_code)]
impl<const L: usize, const R: usize> Assert<L, R> {
/// Const assert hack
pub const GREATER_EQ: usize = L - R;
/// Const assert hack
pub const LESS_EQ: usize = R - L;
/// Const assert hack
pub const NOT_EQ: isize = 0 / (R as isize - L as isize);
/// Const assert hack
pub const EQ: usize = (R - L) + (L - R);
/// Const assert hack
pub const GREATER: usize = L - R - 1;
/// Const assert hack
pub const LESS: usize = R - L - 1;
/// Const assert hack
pub const POWER_OF_TWO: usize = 0 - (L & (L - 1));
}

123
vendor/heapless/src/ser.rs vendored Normal file
View File

@@ -0,0 +1,123 @@
use core::hash::{BuildHasher, Hash};
use crate::{
binary_heap::Kind as BinaryHeapKind, BinaryHeap, Deque, IndexMap, IndexSet, LinearMap, String,
Vec,
};
use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer};
// Sequential containers
impl<T, KIND, const N: usize> Serialize for BinaryHeap<T, KIND, N>
where
T: Ord + Serialize,
KIND: BinaryHeapKind,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
impl<T, S, const N: usize> Serialize for IndexSet<T, S, N>
where
T: Eq + Hash + Serialize,
S: BuildHasher,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
SER: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
impl<T, const N: usize> Serialize for Vec<T, N>
where
T: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
impl<T, const N: usize> Serialize for Deque<T, N>
where
T: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
// Dictionaries
impl<K, V, S, const N: usize> Serialize for IndexMap<K, V, S, N>
where
K: Eq + Hash + Serialize,
S: BuildHasher,
V: Serialize,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
SER: Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
for (k, v) in self {
map.serialize_entry(k, v)?;
}
map.end()
}
}
impl<K, V, const N: usize> Serialize for LinearMap<K, V, N>
where
K: Eq + Serialize,
V: Serialize,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
SER: Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
for (k, v) in self {
map.serialize_entry(k, v)?;
}
map.end()
}
}
// String containers
impl<const N: usize> Serialize for String<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&*self)
}
}

866
vendor/heapless/src/sorted_linked_list.rs vendored Normal file
View File

@@ -0,0 +1,866 @@
//! A fixed sorted priority linked list, similar to [`BinaryHeap`] but with different properties
//! on `push`, `pop`, etc.
//! For example, the sorting of the list will never `memcpy` the underlying value, so having large
//! objects in the list will not cause a performance hit.
//!
//! # Examples
//!
//! ```
//! use heapless::sorted_linked_list::{SortedLinkedList, Max};
//! let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
//!
//! // The largest value will always be first
//! ll.push(1).unwrap();
//! assert_eq!(ll.peek(), Some(&1));
//!
//! ll.push(2).unwrap();
//! assert_eq!(ll.peek(), Some(&2));
//!
//! ll.push(3).unwrap();
//! assert_eq!(ll.peek(), Some(&3));
//!
//! // This will not fit in the queue.
//! assert_eq!(ll.push(4), Err(4));
//! ```
//!
//! [`BinaryHeap`]: `crate::binary_heap::BinaryHeap`
use core::cmp::Ordering;
use core::fmt;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::ptr;
/// Trait for defining an index for the linked list, never implemented by users.
pub trait SortedLinkedListIndex: Copy {
#[doc(hidden)]
unsafe fn new_unchecked(val: usize) -> Self;
#[doc(hidden)]
unsafe fn get_unchecked(self) -> usize;
#[doc(hidden)]
fn option(self) -> Option<usize>;
#[doc(hidden)]
fn none() -> Self;
}
/// Marker for Min sorted [`SortedLinkedList`].
pub struct Min;
/// Marker for Max sorted [`SortedLinkedList`].
pub struct Max;
/// The linked list kind: min-list or max-list
pub trait Kind: private::Sealed {
#[doc(hidden)]
fn ordering() -> Ordering;
}
impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
}
}
impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
/// Sealed traits
mod private {
pub trait Sealed {}
}
impl private::Sealed for Max {}
impl private::Sealed for Min {}
/// A node in the [`SortedLinkedList`].
pub struct Node<T, Idx> {
val: MaybeUninit<T>,
next: Idx,
}
/// The linked list.
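///
/// Storage is a fixed array of `N` nodes: `head` is the index of the first element in
/// sorted order and `free` heads an intrusive list of unused slots, with the index type's
/// sentinel value standing in for "none".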
pub struct SortedLinkedList<T, Idx, K, const N: usize>
where
Idx: SortedLinkedListIndex,
{
list: [Node<T, Idx>; N],
head: Idx,
free: Idx,
_kind: PhantomData<K>,
}
// Internal macro for generating the index types for the linked list and a const constructor for it
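// Each generated index type uses `<$ty>::MAX` as its `None` sentinel, which is why `N` is
// asserted to be smaller than `$max_val` so that every valid slot index stays representable.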
macro_rules! impl_index_and_const_new {
($name:ident, $ty:ty, $new_name:ident, $max_val:expr) => {
/// Index for the [`SortedLinkedList`] with specific backing storage.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct $name($ty);
impl SortedLinkedListIndex for $name {
#[inline(always)]
unsafe fn new_unchecked(val: usize) -> Self {
Self::new_unchecked(val as $ty)
}
/// This is only valid if `self.option()` is not `None`.
#[inline(always)]
unsafe fn get_unchecked(self) -> usize {
self.0 as usize
}
#[inline(always)]
fn option(self) -> Option<usize> {
if self.0 == <$ty>::MAX {
None
} else {
Some(self.0 as usize)
}
}
#[inline(always)]
fn none() -> Self {
Self::none()
}
}
impl $name {
/// Needed for a `const fn new()`.
#[inline]
const unsafe fn new_unchecked(value: $ty) -> Self {
$name(value)
}
/// Needed for a `const fn new()`.
#[inline]
const fn none() -> Self {
$name(<$ty>::MAX)
}
}
impl<T, K, const N: usize> SortedLinkedList<T, $name, K, N> {
const UNINIT: Node<T, $name> = Node {
val: MaybeUninit::uninit(),
next: $name::none(),
};
/// Create a new linked list.
pub const fn $new_name() -> Self {
// Const assert N < MAX
crate::sealed::smaller_than::<N, $max_val>();
let mut list = SortedLinkedList {
list: [Self::UNINIT; N],
head: $name::none(),
free: unsafe { $name::new_unchecked(0) },
_kind: PhantomData,
};
if N == 0 {
list.free = $name::none();
return list;
}
let mut free = 0;
// Initialize indexes
while free < N - 1 {
list.list[free].next = unsafe { $name::new_unchecked(free as $ty + 1) };
free += 1;
}
list
}
}
};
}
impl_index_and_const_new!(LinkedIndexU8, u8, new_u8, { u8::MAX as usize - 1 });
impl_index_and_const_new!(LinkedIndexU16, u16, new_u16, { u16::MAX as usize - 1 });
impl_index_and_const_new!(LinkedIndexUsize, usize, new_usize, { usize::MAX - 1 });
impl<T, Idx, K, const N: usize> SortedLinkedList<T, Idx, K, N>
where
Idx: SortedLinkedListIndex,
{
/// Internal access helper
#[inline(always)]
fn node_at(&self, index: usize) -> &Node<T, Idx> {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { self.list.get_unchecked(index) }
}
/// Internal access helper
#[inline(always)]
fn node_at_mut(&mut self, index: usize) -> &mut Node<T, Idx> {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { self.list.get_unchecked_mut(index) }
}
/// Internal access helper
#[inline(always)]
fn write_data_in_node_at(&mut self, index: usize, data: T) {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe {
self.node_at_mut(index).val.as_mut_ptr().write(data);
}
}
/// Internal access helper
#[inline(always)]
fn read_data_in_node_at(&self, index: usize) -> &T {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { &*self.node_at(index).val.as_ptr() }
}
/// Internal access helper
#[inline(always)]
fn read_mut_data_in_node_at(&mut self, index: usize) -> &mut T {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { &mut *self.node_at_mut(index).val.as_mut_ptr() }
}
/// Internal access helper
#[inline(always)]
fn extract_data_in_node_at(&mut self, index: usize) -> T {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { self.node_at(index).val.as_ptr().read() }
}
}
impl<T, Idx, K, const N: usize> SortedLinkedList<T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
/// Pushes a value onto the list without checking if the list is full.
///
/// Complexity is worst-case `O(N)`.
///
/// # Safety
///
/// Assumes that the list is not full.
pub unsafe fn push_unchecked(&mut self, value: T) {
let new = self.free.get_unchecked();
// Store the data and update the next free spot
self.write_data_in_node_at(new, value);
self.free = self.node_at(new).next;
if let Some(head) = self.head.option() {
// Check if we need to replace head
if self
.read_data_in_node_at(head)
.cmp(self.read_data_in_node_at(new))
!= K::ordering()
{
self.node_at_mut(new).next = self.head;
self.head = Idx::new_unchecked(new);
} else {
// It's not head, search the list for the correct placement
let mut current = head;
while let Some(next) = self.node_at(current).next.option() {
if self
.read_data_in_node_at(next)
.cmp(self.read_data_in_node_at(new))
!= K::ordering()
{
break;
}
current = next;
}
self.node_at_mut(new).next = self.node_at(current).next;
self.node_at_mut(current).next = Idx::new_unchecked(new);
}
} else {
self.node_at_mut(new).next = self.head;
self.head = Idx::new_unchecked(new);
}
}
/// Pushes an element to the linked list and sorts it into place.
///
/// Complexity is worst-case `O(N)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// // The largest value will always be first
/// ll.push(1).unwrap();
/// assert_eq!(ll.peek(), Some(&1));
///
/// ll.push(2).unwrap();
/// assert_eq!(ll.peek(), Some(&2));
///
/// ll.push(3).unwrap();
/// assert_eq!(ll.peek(), Some(&3));
///
/// // This will not fit in the queue.
/// assert_eq!(ll.push(4), Err(4));
/// ```
pub fn push(&mut self, value: T) -> Result<(), T> {
if !self.is_full() {
Ok(unsafe { self.push_unchecked(value) })
} else {
Err(value)
}
}
/// Get an iterator over the sorted list.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
///
/// let mut iter = ll.iter();
///
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), None);
/// ```
pub fn iter(&self) -> Iter<'_, T, Idx, K, N> {
Iter {
list: self,
index: self.head,
}
}
/// Find an element in the list that can be changed and resorted.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
/// ll.push(3).unwrap();
///
/// // Find a value and update it
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// *find += 1000;
/// find.finish();
///
/// assert_eq!(ll.pop(), Ok(1002));
/// assert_eq!(ll.pop(), Ok(3));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
pub fn find_mut<F>(&mut self, mut f: F) -> Option<FindMut<'_, T, Idx, K, N>>
where
F: FnMut(&T) -> bool,
{
let head = self.head.option()?;
// Special-case, first element
if f(self.read_data_in_node_at(head)) {
return Some(FindMut {
is_head: true,
prev_index: Idx::none(),
index: self.head,
list: self,
maybe_changed: false,
});
}
let mut current = head;
while let Some(next) = self.node_at(current).next.option() {
if f(self.read_data_in_node_at(next)) {
return Some(FindMut {
is_head: false,
prev_index: unsafe { Idx::new_unchecked(current) },
index: unsafe { Idx::new_unchecked(next) },
list: self,
maybe_changed: false,
});
}
current = next;
}
None
}
/// Peek at the first element.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max, Min};
/// let mut ll_max: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// // The largest value will always be first
/// ll_max.push(1).unwrap();
/// assert_eq!(ll_max.peek(), Some(&1));
/// ll_max.push(2).unwrap();
/// assert_eq!(ll_max.peek(), Some(&2));
/// ll_max.push(3).unwrap();
/// assert_eq!(ll_max.peek(), Some(&3));
///
/// let mut ll_min: SortedLinkedList<_, _, Min, 3> = SortedLinkedList::new_usize();
///
/// // The smallest value will always be first
/// ll_min.push(3).unwrap();
/// assert_eq!(ll_min.peek(), Some(&3));
/// ll_min.push(2).unwrap();
/// assert_eq!(ll_min.peek(), Some(&2));
/// ll_min.push(1).unwrap();
/// assert_eq!(ll_min.peek(), Some(&1));
/// ```
pub fn peek(&self) -> Option<&T> {
self.head
.option()
.map(|head| self.read_data_in_node_at(head))
}
/// Pop an element from the list without checking that the list is not empty.
///
/// # Safety
///
/// Assumes that the list is not empty.
pub unsafe fn pop_unchecked(&mut self) -> T {
let head = self.head.get_unchecked();
let current = head;
self.head = self.node_at(head).next;
self.node_at_mut(current).next = self.free;
self.free = Idx::new_unchecked(current);
self.extract_data_in_node_at(current)
}
/// Pops the first element in the list.
///
/// Complexity is worst-case `O(1)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
///
/// assert_eq!(ll.pop(), Ok(2));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
pub fn pop(&mut self) -> Result<T, ()> {
if !self.is_empty() {
Ok(unsafe { self.pop_unchecked() })
} else {
Err(())
}
}
/// Checks if the linked list is full.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// assert_eq!(ll.is_full(), false);
///
/// ll.push(1).unwrap();
/// assert_eq!(ll.is_full(), false);
/// ll.push(2).unwrap();
/// assert_eq!(ll.is_full(), false);
/// ll.push(3).unwrap();
/// assert_eq!(ll.is_full(), true);
/// ```
#[inline]
pub fn is_full(&self) -> bool {
self.free.option().is_none()
}
/// Checks if the linked list is empty.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// assert_eq!(ll.is_empty(), true);
///
/// ll.push(1).unwrap();
/// assert_eq!(ll.is_empty(), false);
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
self.head.option().is_none()
}
}
/// Iterator for the linked list.
pub struct Iter<'a, T, Idx, K, const N: usize>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
list: &'a SortedLinkedList<T, Idx, K, N>,
index: Idx,
}
impl<'a, T, Idx, K, const N: usize> Iterator for Iter<'a, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
let index = self.index.option()?;
let node = self.list.node_at(index);
self.index = node.next;
Some(self.list.read_data_in_node_at(index))
}
}
/// Comes from [`SortedLinkedList::find_mut`].
pub struct FindMut<'a, T, Idx, K, const N: usize>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
list: &'a mut SortedLinkedList<T, Idx, K, N>,
is_head: bool,
prev_index: Idx,
index: Idx,
maybe_changed: bool,
}
impl<'a, T, Idx, K, const N: usize> FindMut<'a, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn pop_internal(&mut self) -> T {
if self.is_head {
// If it is the head element, we can do a normal pop
unsafe { self.list.pop_unchecked() }
} else {
// Somewhere in the list
let prev = unsafe { self.prev_index.get_unchecked() };
let curr = unsafe { self.index.get_unchecked() };
// Re-point the previous index
self.list.node_at_mut(prev).next = self.list.node_at_mut(curr).next;
// Release the index into the free queue
self.list.node_at_mut(curr).next = self.list.free;
self.list.free = self.index;
self.list.extract_data_in_node_at(curr)
}
}
/// This will pop the element from the list.
///
/// Complexity is worst-case `O(1)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
/// ll.push(3).unwrap();
///
/// // Find a value and update it
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// find.pop();
///
/// assert_eq!(ll.pop(), Ok(3));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
#[inline]
pub fn pop(mut self) -> T {
self.pop_internal()
}
/// This will resort the element into the correct position in the list if needed. The resorting
/// will only happen if the element has been accessed mutably.
///
/// Same as calling `drop`.
///
/// Complexity is worst-case `O(N)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
/// ll.push(3).unwrap();
///
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// find.finish(); // No resort, we did not access the value.
///
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// *find += 1000;
/// find.finish(); // Will resort, we accessed (and updated) the value.
///
/// assert_eq!(ll.pop(), Ok(1002));
/// assert_eq!(ll.pop(), Ok(3));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
#[inline]
pub fn finish(self) {
drop(self)
}
}
impl<T, Idx, K, const N: usize> Drop for FindMut<'_, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn drop(&mut self) {
// Only resort the list if the element has changed
if self.maybe_changed {
let val = self.pop_internal();
unsafe { self.list.push_unchecked(val) };
}
}
}
impl<T, Idx, K, const N: usize> Deref for FindMut<'_, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
type Target = T;
fn deref(&self) -> &Self::Target {
self.list
.read_data_in_node_at(unsafe { self.index.get_unchecked() })
}
}
impl<T, Idx, K, const N: usize> DerefMut for FindMut<'_, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.maybe_changed = true;
self.list
.read_mut_data_in_node_at(unsafe { self.index.get_unchecked() })
}
}
// /// Useful for debug during development.
// impl<T, Idx, K, const N: usize> fmt::Debug for FindMut<'_, T, Idx, K, N>
// where
// T: Ord + core::fmt::Debug,
// Idx: SortedLinkedListIndex,
// K: Kind,
// {
// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// f.debug_struct("FindMut")
// .field("prev_index", &self.prev_index.option())
// .field("index", &self.index.option())
// .field(
// "prev_value",
// &self
// .list
// .read_data_in_node_at(self.prev_index.option().unwrap()),
// )
// .field(
// "value",
// &self.list.read_data_in_node_at(self.index.option().unwrap()),
// )
// .finish()
// }
// }
impl<T, Idx, K, const N: usize> fmt::Debug for SortedLinkedList<T, Idx, K, N>
where
T: Ord + core::fmt::Debug,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, Idx, K, const N: usize> Drop for SortedLinkedList<T, Idx, K, N>
where
Idx: SortedLinkedListIndex,
{
fn drop(&mut self) {
let mut index = self.head;
while let Some(i) = index.option() {
let node = self.node_at_mut(i);
index = node.next;
unsafe {
ptr::drop_in_place(node.val.as_mut_ptr());
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn const_new() {
static mut _V1: SortedLinkedList<u32, LinkedIndexU8, Max, 100> = SortedLinkedList::new_u8();
static mut _V2: SortedLinkedList<u32, LinkedIndexU16, Max, 10_000> =
SortedLinkedList::new_u16();
static mut _V3: SortedLinkedList<u32, LinkedIndexUsize, Max, 100_000> =
SortedLinkedList::new_usize();
}
#[test]
fn test_peek() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
assert_eq!(ll.peek().unwrap(), &1);
ll.push(2).unwrap();
assert_eq!(ll.peek().unwrap(), &2);
ll.push(3).unwrap();
assert_eq!(ll.peek().unwrap(), &3);
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Min, 3> = SortedLinkedList::new_usize();
ll.push(2).unwrap();
assert_eq!(ll.peek().unwrap(), &2);
ll.push(1).unwrap();
assert_eq!(ll.peek().unwrap(), &1);
ll.push(3).unwrap();
assert_eq!(ll.peek().unwrap(), &1);
}
#[test]
fn test_full() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
ll.push(2).unwrap();
ll.push(3).unwrap();
assert!(ll.is_full())
}
#[test]
fn test_empty() {
let ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
assert!(ll.is_empty())
}
#[test]
fn test_zero_size() {
let ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 0> = SortedLinkedList::new_usize();
assert!(ll.is_empty());
assert!(ll.is_full());
}
#[test]
fn test_rejected_push() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
ll.push(2).unwrap();
ll.push(3).unwrap();
// This won't fit
let r = ll.push(4);
assert_eq!(r, Err(4));
}
#[test]
fn test_updating() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
ll.push(2).unwrap();
ll.push(3).unwrap();
let mut find = ll.find_mut(|v| *v == 2).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1002);
let mut find = ll.find_mut(|v| *v == 3).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1003);
// Remove largest element
ll.find_mut(|v| *v == 1003).unwrap().pop();
assert_eq!(ll.peek().unwrap(), &1002);
}
#[test]
fn test_updating_1() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
let v = ll.pop().unwrap();
assert_eq!(v, 1);
}
#[test]
fn test_updating_2() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
let mut find = ll.find_mut(|v| *v == 1).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1001);
}
}

908
vendor/heapless/src/spsc.rs vendored Normal file
View File

@@ -0,0 +1,908 @@
//! Fixed capacity Single Producer Single Consumer (SPSC) queue
//!
//! Implementation based on <https://www.codeproject.com/Articles/43510/Lock-Free-Single-Producer-Single-Consumer-Circular>
//!
//! # Portability
//!
//! This module requires CAS atomic instructions which are not available on all architectures
//! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can be
//! emulated however with [`portable-atomic`](https://crates.io/crates/portable-atomic), which is
//! enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32`
//! targets.
//!
//! # Examples
//!
//! - `Queue` can be used as a plain queue
//!
//! ```
//! use heapless::spsc::Queue;
//!
//! let mut rb: Queue<u8, 4> = Queue::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
//! assert!(rb.enqueue(2).is_ok());
//! assert!(rb.enqueue(3).is_err()); // full
//!
//! assert_eq!(rb.dequeue(), Some(0));
//! ```
//!
//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode.
//!
//! "no alloc" applications can create a `&'static mut` reference to a `Queue` -- using a static
//! variable -- and then `split` it: this consumes the static reference. The resulting `Consumer`
//! and `Producer` can then be moved into different execution contexts (threads, interrupt handlers,
//! etc.)
//!
//! ```
//! use heapless::spsc::{Producer, Queue};
//!
//! enum Event { A, B }
//!
//! fn main() {
//! let queue: &'static mut Queue<Event, 4> = {
//! static mut Q: Queue<Event, 4> = Queue::new();
//! unsafe { &mut Q }
//! };
//!
//! let (producer, mut consumer) = queue.split();
//!
//! // `producer` can be moved into `interrupt_handler` using a static mutex or the mechanism
//! // provided by the concurrency framework you are using (e.g. a resource in RTIC)
//!
//! loop {
//! match consumer.dequeue() {
//! Some(Event::A) => { /* .. */ },
//! Some(Event::B) => { /* .. */ },
//! None => { /* sleep */ },
//! }
//! # break
//! }
//! }
//!
//! // this is a different execution context that can preempt `main`
//! fn interrupt_handler(producer: &mut Producer<'static, Event, 4>) {
//! # let condition = true;
//!
//! // ..
//!
//! if condition {
//! producer.enqueue(Event::A).ok().unwrap();
//! } else {
//! producer.enqueue(Event::B).ok().unwrap();
//! }
//!
//! // ..
//! }
//! ```
//!
//! # Benchmarks
//!
//! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles
//!
//! `-C opt-level` |`3`|
//! -----------------------|---|
//! `Consumer<u8>::dequeue`| 15|
//! `Queue<u8>::dequeue` | 12|
//! `Producer<u8>::enqueue`| 16|
//! `Queue<u8>::enqueue` | 14|
//!
//! - All execution times are in clock cycles. 1 clock cycle = 125 ns.
//! - Execution time is *dependent* on `mem::size_of::<T>()`. Both operations include one
//! `memcpy(T)` in their successful path.
//! - The optimization level is indicated in the first row.
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr};
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;
use atomic::{AtomicUsize, Ordering};
/// A statically allocated single producer single consumer queue with a capacity of `N - 1` elements
///
/// *IMPORTANT*: To get better performance use a value for `N` that is a power of 2 (e.g. `16`, `32`,
/// etc.).
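///
/// One slot is always kept empty so that a full queue can be distinguished from an empty
/// one, which is why the usable capacity is `N - 1`. A power-of-two `N` also lets the
/// internal `% N` index arithmetic compile down to a simple bit mask.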
pub struct Queue<T, const N: usize> {
// this is from where we dequeue items
pub(crate) head: AtomicUsize,
// this is where we enqueue new items
pub(crate) tail: AtomicUsize,
pub(crate) buffer: [UnsafeCell<MaybeUninit<T>>; N],
}
impl<T, const N: usize> Queue<T, N> {
const INIT: UnsafeCell<MaybeUninit<T>> = UnsafeCell::new(MaybeUninit::uninit());
#[inline]
fn increment(val: usize) -> usize {
(val + 1) % N
}
/// Creates an empty queue with a fixed capacity of `N - 1`
pub const fn new() -> Self {
// Const assert N > 1
crate::sealed::greater_than_1::<N>();
Queue {
head: AtomicUsize::new(0),
tail: AtomicUsize::new(0),
buffer: [Self::INIT; N],
}
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub const fn capacity(&self) -> usize {
N - 1
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
let current_head = self.head.load(Ordering::Relaxed);
let current_tail = self.tail.load(Ordering::Relaxed);
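// Both indices are always kept in `0..N`, so adding `N` before the final modulo corrects
// for the case where the tail has wrapped around behind the head.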
current_tail.wrapping_sub(current_head).wrapping_add(N) % N
}
/// Returns `true` if the queue is empty
#[inline]
pub fn is_empty(&self) -> bool {
self.head.load(Ordering::Relaxed) == self.tail.load(Ordering::Relaxed)
}
/// Returns `true` if the queue is full
#[inline]
pub fn is_full(&self) -> bool {
Self::increment(self.tail.load(Ordering::Relaxed)) == self.head.load(Ordering::Relaxed)
}
/// Iterates from the front of the queue to the back
pub fn iter(&self) -> Iter<'_, T, N> {
Iter {
rb: self,
index: 0,
len: self.len(),
}
}
/// Returns an iterator that allows modifying each value
pub fn iter_mut(&mut self) -> IterMut<'_, T, N> {
let len = self.len();
IterMut {
rb: self,
index: 0,
len,
}
}
/// Adds an `item` to the end of the queue
///
/// Returns back the `item` if the queue is full
#[inline]
pub fn enqueue(&mut self, val: T) -> Result<(), T> {
unsafe { self.inner_enqueue(val) }
}
/// Returns the item in the front of the queue, or `None` if the queue is empty
#[inline]
pub fn dequeue(&mut self) -> Option<T> {
unsafe { self.inner_dequeue() }
}
/// Returns a reference to the item in the front of the queue without dequeuing, or
/// `None` if the queue is empty.
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
/// assert_eq!(Some(&1), consumer.peek());
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.peek());
/// ```
pub fn peek(&self) -> Option<&T> {
if !self.is_empty() {
let head = self.head.load(Ordering::Relaxed);
Some(unsafe { &*(self.buffer.get_unchecked(head).get() as *const T) })
} else {
None
}
}
// The memory for enqueueing is "owned" by the tail pointer.
// NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue
// items without doing pointer arithmetic and accessing internal fields of this type.
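// The element is written into its slot first and the new tail is then published with a
// `Release` store; the consumer's `Acquire` load of `tail` in `inner_dequeue` therefore
// never observes the slot before its contents have been fully written.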
unsafe fn inner_enqueue(&self, val: T) -> Result<(), T> {
let current_tail = self.tail.load(Ordering::Relaxed);
let next_tail = Self::increment(current_tail);
if next_tail != self.head.load(Ordering::Acquire) {
(self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val));
self.tail.store(next_tail, Ordering::Release);
Ok(())
} else {
Err(val)
}
}
// The memory for enqueueing is "owned" by the tail pointer.
// NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_enqueue_unchecked(&self, val: T) {
let current_tail = self.tail.load(Ordering::Relaxed);
(self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val));
self.tail
.store(Self::increment(current_tail), Ordering::Release);
}
/// Adds an `item` to the end of the queue, without checking if it's full
///
/// # Unsafety
///
/// If the queue is full this operation will leak a value (T's destructor won't run on
/// the value that got overwritten by `item`), *and* will allow the `dequeue` operation
/// to create a copy of `item`, which could result in `T`'s destructor running on `item`
/// twice.
pub unsafe fn enqueue_unchecked(&mut self, val: T) {
self.inner_enqueue_unchecked(val)
}
// The memory for dequeuing is "owned" by the head pointer.
// NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_dequeue(&self) -> Option<T> {
let current_head = self.head.load(Ordering::Relaxed);
if current_head == self.tail.load(Ordering::Acquire) {
None
} else {
let v = (self.buffer.get_unchecked(current_head).get() as *const T).read();
self.head
.store(Self::increment(current_head), Ordering::Release);
Some(v)
}
}
// The memory for dequeuing is "owned" by the head pointer.
// NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_dequeue_unchecked(&self) -> T {
let current_head = self.head.load(Ordering::Relaxed);
let v = (self.buffer.get_unchecked(current_head).get() as *const T).read();
self.head
.store(Self::increment(current_head), Ordering::Release);
v
}
/// Returns the item in the front of the queue, without checking if there is something in the
/// queue
///
/// # Unsafety
///
/// If the queue is empty this operation will return uninitialized memory.
pub unsafe fn dequeue_unchecked(&mut self) -> T {
self.inner_dequeue_unchecked()
}
/// Splits a queue into producer and consumer endpoints
pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) {
(Producer { rb: self }, Consumer { rb: self })
}
}
impl<T, const N: usize> Default for Queue<T, N> {
fn default() -> Self {
Self::new()
}
}
impl<T, const N: usize> Clone for Queue<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut new: Queue<T, N> = Queue::new();
for s in self.iter() {
unsafe {
// NOTE(unsafe) new.capacity() == self.capacity() >= self.len()
// no overflow possible
new.enqueue_unchecked(s.clone());
}
}
new
}
}
impl<T, const N: usize, const N2: usize> PartialEq<Queue<T, N2>> for Queue<T, N>
where
T: PartialEq,
{
fn eq(&self, other: &Queue<T, N2>) -> bool {
self.len() == other.len() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2)
}
}
impl<T, const N: usize> Eq for Queue<T, N> where T: Eq {}
/// An iterator over the items of a queue
pub struct Iter<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
index: usize,
len: usize,
}
impl<'a, T, const N: usize> Clone for Iter<'a, T, N> {
fn clone(&self) -> Self {
Self {
rb: self.rb,
index: self.index,
len: self.len,
}
}
}
/// A mutable iterator over the items of a queue
pub struct IterMut<'a, T, const N: usize> {
rb: &'a mut Queue<T, N>,
index: usize,
len: usize,
}
impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
let i = (head + self.index) % N;
self.index += 1;
Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> {
type Item = &'a mut T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
let i = (head + self.index) % N;
self.index += 1;
Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
// self.len > 0, since it is strictly greater than self.index, which is >= 0
let i = (head + self.len - 1) % N;
self.len -= 1;
Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
// self.len > 0, since it is strictly greater than self.index, which is >= 0
let i = (head + self.len - 1) % N;
self.len -= 1;
Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) })
} else {
None
}
}
}
impl<T, const N: usize> Drop for Queue<T, N> {
fn drop(&mut self) {
for item in self {
unsafe {
ptr::drop_in_place(item);
}
}
}
}
impl<T, const N: usize> fmt::Debug for Queue<T, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, const N: usize> hash::Hash for Queue<T, N>
where
T: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
// iterate over self in order
for t in self.iter() {
hash::Hash::hash(t, state);
}
}
}
impl<'a, T, const N: usize> IntoIterator for &'a Queue<T, N> {
type Item = &'a T;
type IntoIter = Iter<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, const N: usize> IntoIterator for &'a mut Queue<T, N> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
/// A queue "consumer"; it can dequeue items from the queue
/// NOTE the consumer semantically owns the `head` pointer of the queue
pub struct Consumer<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
}
unsafe impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where T: Send {}
/// A queue "producer"; it can enqueue items into the queue
/// NOTE the producer semantically owns the `tail` pointer of the queue
pub struct Producer<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
}
unsafe impl<'a, T, const N: usize> Send for Producer<'a, T, N> where T: Send {}
impl<'a, T, const N: usize> Consumer<'a, T, N> {
/// Returns the item in the front of the queue, or `None` if the queue is empty
#[inline]
pub fn dequeue(&mut self) -> Option<T> {
unsafe { self.rb.inner_dequeue() }
}
/// Returns the item in the front of the queue, without checking if there are elements in the
/// queue
///
/// See [`Queue::dequeue_unchecked`] for safety
#[inline]
pub unsafe fn dequeue_unchecked(&mut self) -> T {
self.rb.inner_dequeue_unchecked()
}
/// Returns whether there are any items to dequeue. When this returns `true`, at least the
/// first subsequent dequeue will succeed.
#[inline]
pub fn ready(&self) -> bool {
!self.rb.is_empty()
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
self.rb.len()
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub fn capacity(&self) -> usize {
self.rb.capacity()
}
/// Returns the item in the front of the queue without dequeuing, or `None` if the queue is
/// empty
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
/// assert_eq!(Some(&1), consumer.peek());
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.peek());
/// ```
#[inline]
pub fn peek(&self) -> Option<&T> {
self.rb.peek()
}
}
impl<'a, T, const N: usize> Producer<'a, T, N> {
/// Adds an `item` to the end of the queue, returns back the `item` if the queue is full
#[inline]
pub fn enqueue(&mut self, val: T) -> Result<(), T> {
unsafe { self.rb.inner_enqueue(val) }
}
/// Adds an `item` to the end of the queue, without checking if the queue is full
///
/// See [`Queue::enqueue_unchecked`] for safety
#[inline]
pub unsafe fn enqueue_unchecked(&mut self, val: T) {
self.rb.inner_enqueue_unchecked(val)
}
/// Returns whether there is any space to enqueue a new item. When this returns `true`, at
/// least the first subsequent enqueue will succeed.
#[inline]
pub fn ready(&self) -> bool {
!self.rb.is_full()
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
self.rb.len()
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub fn capacity(&self) -> usize {
self.rb.capacity()
}
}
#[cfg(test)]
mod tests {
use std::hash::{Hash, Hasher};
use crate::spsc::Queue;
#[test]
fn full() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.is_full(), false);
rb.enqueue(1).unwrap();
assert_eq!(rb.is_full(), false);
rb.enqueue(2).unwrap();
assert_eq!(rb.is_full(), true);
}
#[test]
fn empty() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.is_empty(), true);
rb.enqueue(1).unwrap();
assert_eq!(rb.is_empty(), false);
rb.enqueue(2).unwrap();
assert_eq!(rb.is_empty(), false);
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn len() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.len(), 0);
rb.enqueue(1).unwrap();
assert_eq!(rb.len(), 1);
rb.enqueue(2).unwrap();
assert_eq!(rb.len(), 2);
for _ in 0..1_000_000 {
let v = rb.dequeue().unwrap();
println!("{}", v);
rb.enqueue(v).unwrap();
assert_eq!(rb.len(), 2);
}
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn try_overflow() {
const N: usize = 23;
let mut rb: Queue<i32, N> = Queue::new();
for i in 0..N as i32 - 1 {
rb.enqueue(i).unwrap();
}
for _ in 0..1_000_000 {
for i in 0..N as i32 - 1 {
let d = rb.dequeue().unwrap();
assert_eq!(d, i);
rb.enqueue(i).unwrap();
}
}
}
#[test]
fn sanity() {
let mut rb: Queue<i32, 10> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(p.ready(), true);
assert_eq!(c.ready(), false);
assert_eq!(c.dequeue(), None);
p.enqueue(0).unwrap();
assert_eq!(c.dequeue(), Some(0));
}
#[test]
fn static_new() {
static mut _Q: Queue<i32, 4> = Queue::new();
}
#[test]
fn drop() {
struct Droppable;
impl Droppable {
fn new() -> Self {
unsafe {
COUNT += 1;
}
Droppable
}
}
impl Drop for Droppable {
fn drop(&mut self) {
unsafe {
COUNT -= 1;
}
}
}
static mut COUNT: i32 = 0;
{
let mut v: Queue<Droppable, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
v.dequeue().unwrap();
}
assert_eq!(unsafe { COUNT }, 0);
{
let mut v: Queue<Droppable, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
}
assert_eq!(unsafe { COUNT }, 0);
}
#[test]
fn iter() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.dequeue().unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
rb.enqueue(3).unwrap();
let mut items = rb.iter();
// assert_eq!(items.next(), Some(&0));
assert_eq!(items.next(), Some(&1));
assert_eq!(items.next(), Some(&2));
assert_eq!(items.next(), Some(&3));
assert_eq!(items.next(), None);
}
#[test]
fn iter_double_ended() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter();
assert_eq!(items.next(), Some(&0));
assert_eq!(items.next_back(), Some(&2));
assert_eq!(items.next(), Some(&1));
assert_eq!(items.next(), None);
assert_eq!(items.next_back(), None);
}
#[test]
fn iter_mut() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter_mut();
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next(), Some(&mut 1));
assert_eq!(items.next(), Some(&mut 2));
assert_eq!(items.next(), None);
}
#[test]
fn iter_mut_double_ended() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter_mut();
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next_back(), Some(&mut 2));
assert_eq!(items.next(), Some(&mut 1));
assert_eq!(items.next(), None);
assert_eq!(items.next_back(), None);
}
#[test]
fn wrap_around() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
rb.dequeue().unwrap();
rb.dequeue().unwrap();
rb.dequeue().unwrap();
rb.enqueue(3).unwrap();
rb.enqueue(4).unwrap();
assert_eq!(rb.len(), 2);
}
#[test]
fn ready_flag() {
let mut rb: Queue<i32, 3> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
p.enqueue(0).unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), true);
p.enqueue(1).unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), false);
c.dequeue().unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), true);
c.dequeue().unwrap();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
}
#[test]
fn clone() {
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let rb2 = rb1.clone();
assert_eq!(rb1.capacity(), rb2.capacity());
assert_eq!(rb1.len(), rb2.len());
assert!(rb1.iter().zip(rb2.iter()).all(|(v1, v2)| v1 == v2));
}
#[test]
fn eq() {
// generate two queues with same content
// but different buffer alignment
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let mut rb2: Queue<i32, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
assert!(rb1 == rb2);
// test for symmetry
assert!(rb2 == rb1);
// test for changes in content
rb1.enqueue(0).unwrap();
assert!(rb1 != rb2);
rb2.enqueue(1).unwrap();
assert!(rb1 != rb2);
// test for reflexive relation
assert!(rb1 == rb1);
assert!(rb2 == rb2);
}
#[test]
fn hash_equality() {
// generate two queues with same content
// but different buffer alignment
let rb1 = {
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
rb1
};
let rb2 = {
let mut rb2: Queue<i32, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
rb2
};
let hash1 = {
let mut hasher1 = hash32::FnvHasher::default();
rb1.hash(&mut hasher1);
let hash1 = hasher1.finish();
hash1
};
let hash2 = {
let mut hasher2 = hash32::FnvHasher::default();
rb2.hash(&mut hasher2);
let hash2 = hasher2.finish();
hash2
};
assert_eq!(hash1, hash2);
}
}

856
vendor/heapless/src/string.rs vendored Normal file
View File

@@ -0,0 +1,856 @@
use core::{
cmp::Ordering,
fmt,
fmt::Write,
hash, iter, ops,
str::{self, Utf8Error},
};
use crate::Vec;
/// A fixed capacity [`String`](https://doc.rust-lang.org/std/string/struct.String.html)
pub struct String<const N: usize> {
vec: Vec<u8, N>,
}
impl<const N: usize> String<N> {
/// Constructs a new, empty `String` with a fixed capacity of `N` bytes
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// // allocate the string on the stack
/// let mut s: String<4> = String::new();
///
/// // allocate the string in a static variable
/// static mut S: String<4> = String::new();
/// ```
#[inline]
pub const fn new() -> Self {
Self { vec: Vec::new() }
}
/// Convert UTF-8 bytes into a `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::{String, Vec};
///
/// let mut sparkle_heart = Vec::<u8, 4>::new();
/// sparkle_heart.extend_from_slice(&[240, 159, 146, 150]);
///
/// let sparkle_heart: String<4> = String::from_utf8(sparkle_heart)?;
/// assert_eq!("💖", sparkle_heart);
/// # Ok::<(), core::str::Utf8Error>(())
/// ```
///
/// Invalid UTF-8:
///
/// ```
/// use core::str::Utf8Error;
/// use heapless::{String, Vec};
///
/// let mut vec = Vec::<u8, 4>::new();
/// vec.extend_from_slice(&[0, 159, 146, 150]);
///
/// let e: Utf8Error = String::from_utf8(vec).unwrap_err();
/// assert_eq!(e.valid_up_to(), 1);
/// # Ok::<(), core::str::Utf8Error>(())
/// ```
#[inline]
pub fn from_utf8(vec: Vec<u8, N>) -> Result<Self, Utf8Error> {
core::str::from_utf8(&vec)?;
Ok(Self { vec })
}
/// Convert UTF-8 bytes into a `String`, without checking that the string
/// contains valid UTF-8.
///
/// # Safety
///
/// The bytes passed in must be valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::{String, Vec};
///
/// let mut sparkle_heart = Vec::<u8, 4>::new();
/// sparkle_heart.extend_from_slice(&[240, 159, 146, 150]);
///
/// // Safety: `sparkle_heart` Vec is known to contain valid UTF-8
/// let sparkle_heart: String<4> = unsafe { String::from_utf8_unchecked(sparkle_heart) };
/// assert_eq!("💖", sparkle_heart);
/// ```
#[inline]
pub unsafe fn from_utf8_unchecked(vec: Vec<u8, N>) -> Self {
Self { vec }
}
/// Converts a `String` into a byte vector.
///
/// This consumes the `String`, so we do not need to copy its contents.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let s: String<4> = String::try_from("ab")?;
/// let b = s.into_bytes();
/// assert!(b.len() == 2);
///
/// assert_eq!(&['a' as u8, 'b' as u8], &b[..]);
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn into_bytes(self) -> Vec<u8, N> {
self.vec
}
/// Extracts a string slice containing the entire string.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<4> = String::try_from("ab")?;
/// assert!(s.as_str() == "ab");
///
/// let _s = s.as_str();
/// // s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn as_str(&self) -> &str {
unsafe { str::from_utf8_unchecked(self.vec.as_slice()) }
}
/// Converts a `String` into a mutable string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<4> = String::try_from("ab")?;
/// let s = s.as_mut_str();
/// s.make_ascii_uppercase();
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn as_mut_str(&mut self) -> &mut str {
unsafe { str::from_utf8_unchecked_mut(self.vec.as_mut_slice()) }
}
/// Returns a mutable reference to the contents of this `String`.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed
/// to it are valid UTF-8. If this constraint is violated, it may cause
/// memory unsafety issues with future users of the `String`, as the rest of
/// the library assumes that `String`s are valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("hello")?;
///
/// unsafe {
/// let vec = s.as_mut_vec();
/// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
///
/// vec.reverse();
/// }
/// assert_eq!(s, "olleh");
/// # Ok::<(), ()>(())
/// ```
pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8, N> {
&mut self.vec
}
/// Appends a given string slice onto the end of this `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo")?;
///
/// assert!(s.push_str("bar").is_ok());
///
/// assert_eq!("foobar", s);
///
/// assert!(s.push_str("tender").is_err());
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn push_str(&mut self, string: &str) -> Result<(), ()> {
self.vec.extend_from_slice(string.as_bytes())
}
/// Returns the maximum number of elements the String can hold
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<4> = String::new();
/// assert!(s.capacity() == 4);
/// ```
#[inline]
pub fn capacity(&self) -> usize {
self.vec.capacity()
}
/// Appends the given [`char`] to the end of this `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("abc")?;
///
/// s.push('1').unwrap();
/// s.push('2').unwrap();
/// s.push('3').unwrap();
///
/// assert!("abc123" == s.as_str());
///
/// assert_eq!("abc123", s);
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn push(&mut self, c: char) -> Result<(), ()> {
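// A character whose UTF-8 encoding is one byte is pushed directly; wider characters are encoded into a small stack buffer first and appended as a slice.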
match c.len_utf8() {
1 => self.vec.push(c as u8).map_err(|_| {}),
_ => self
.vec
.extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()),
}
}
/// Shortens this `String` to the specified length.
///
/// If `new_len` is greater than the string's current length, this has no
/// effect.
///
/// Note that this method has no effect on the capacity
/// of the string.
///
/// # Panics
///
/// Panics if `new_len` does not lie on a [`char`] boundary.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("hello")?;
///
/// s.truncate(2);
///
/// assert_eq!("he", s);
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn truncate(&mut self, new_len: usize) {
if new_len <= self.len() {
assert!(self.is_char_boundary(new_len));
self.vec.truncate(new_len)
}
}
/// Removes the last character from the string buffer and returns it.
///
/// Returns [`None`] if this `String` is empty.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo")?;
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('f'));
///
/// assert_eq!(s.pop(), None);
/// # Ok::<(), ()>(())
/// ```
pub fn pop(&mut self) -> Option<char> {
let ch = self.chars().rev().next()?;
// pop bytes that correspond to `ch`
for _ in 0..ch.len_utf8() {
unsafe {
self.vec.pop_unchecked();
}
}
Some(ch)
}
/// Removes a [`char`] from this `String` at a byte position and returns it.
///
/// Note: Because this shifts over the remaining elements, it has a
/// worst-case performance of *O*(*n*).
///
/// # Panics
///
/// Panics if `idx` is larger than or equal to the `String`'s length,
/// or if it does not lie on a [`char`] boundary.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo").unwrap();
///
/// assert_eq!(s.remove(0), 'f');
/// assert_eq!(s.remove(1), 'o');
/// assert_eq!(s.remove(0), 'o');
/// ```
#[inline]
pub fn remove(&mut self, index: usize) -> char {
let ch = match self[index..].chars().next() {
Some(ch) => ch,
None => panic!("cannot remove a char from the end of a string"),
};
let next = index + ch.len_utf8();
let len = self.len();
let ptr = self.vec.as_mut_ptr();
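// Shift the bytes that follow the removed character down over it, then shrink the length by the character's UTF-8 width.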
unsafe {
core::ptr::copy(ptr.add(next), ptr.add(index), len - next);
self.vec.set_len(len - (next - index));
}
ch
}
/// Truncates this `String`, removing all contents.
///
/// While this means the `String` will have a length of zero, it does not
/// touch its capacity.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo")?;
///
/// s.clear();
///
/// assert!(s.is_empty());
/// assert_eq!(0, s.len());
/// assert_eq!(8, s.capacity());
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn clear(&mut self) {
self.vec.clear()
}
}
impl<const N: usize> Default for String<N> {
fn default() -> Self {
Self::new()
}
}
impl<'a, const N: usize> TryFrom<&'a str> for String<N> {
type Error = ();
fn try_from(s: &'a str) -> Result<Self, Self::Error> {
let mut new = String::new();
new.push_str(s)?;
Ok(new)
}
}
impl<const N: usize> str::FromStr for String<N> {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut new = String::new();
new.push_str(s)?;
Ok(new)
}
}
impl<const N: usize> iter::FromIterator<char> for String<N> {
fn from_iter<T: IntoIterator<Item = char>>(iter: T) -> Self {
let mut new = String::new();
for c in iter {
new.push(c).unwrap();
}
new
}
}
impl<'a, const N: usize> iter::FromIterator<&'a char> for String<N> {
fn from_iter<T: IntoIterator<Item = &'a char>>(iter: T) -> Self {
let mut new = String::new();
for c in iter {
new.push(*c).unwrap();
}
new
}
}
impl<'a, const N: usize> iter::FromIterator<&'a str> for String<N> {
fn from_iter<T: IntoIterator<Item = &'a str>>(iter: T) -> Self {
let mut new = String::new();
for c in iter {
new.push_str(c).unwrap();
}
new
}
}
impl<const N: usize> Clone for String<N> {
fn clone(&self) -> Self {
Self {
vec: self.vec.clone(),
}
}
}
impl<const N: usize> fmt::Debug for String<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Debug>::fmt(self, f)
}
}
impl<const N: usize> fmt::Display for String<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Display>::fmt(self, f)
}
}
impl<const N: usize> hash::Hash for String<N> {
#[inline]
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
<str as hash::Hash>::hash(self, hasher)
}
}
impl<const N: usize> fmt::Write for String<N> {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s).map_err(|_| fmt::Error)
}
fn write_char(&mut self, c: char) -> Result<(), fmt::Error> {
self.push(c).map_err(|_| fmt::Error)
}
}
impl<const N: usize> ops::Deref for String<N> {
type Target = str;
fn deref(&self) -> &str {
self.as_str()
}
}
impl<const N: usize> ops::DerefMut for String<N> {
fn deref_mut(&mut self) -> &mut str {
self.as_mut_str()
}
}
impl<const N: usize> AsRef<str> for String<N> {
#[inline]
fn as_ref(&self) -> &str {
self
}
}
impl<const N: usize> AsRef<[u8]> for String<N> {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<const N1: usize, const N2: usize> PartialEq<String<N2>> for String<N1> {
fn eq(&self, rhs: &String<N2>) -> bool {
str::eq(&**self, &**rhs)
}
fn ne(&self, rhs: &String<N2>) -> bool {
str::ne(&**self, &**rhs)
}
}
// String<N> == str
impl<const N: usize> PartialEq<str> for String<N> {
#[inline]
fn eq(&self, other: &str) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &str) -> bool {
str::ne(&self[..], &other[..])
}
}
// String<N> == &str
impl<const N: usize> PartialEq<&str> for String<N> {
#[inline]
fn eq(&self, other: &&str) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &&str) -> bool {
str::ne(&self[..], &other[..])
}
}
// str == String<N>
impl<const N: usize> PartialEq<String<N>> for str {
#[inline]
fn eq(&self, other: &String<N>) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String<N>) -> bool {
str::ne(&self[..], &other[..])
}
}
// &str == String<N>
impl<const N: usize> PartialEq<String<N>> for &str {
#[inline]
fn eq(&self, other: &String<N>) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String<N>) -> bool {
str::ne(&self[..], &other[..])
}
}
impl<const N: usize> Eq for String<N> {}
impl<const N1: usize, const N2: usize> PartialOrd<String<N2>> for String<N1> {
#[inline]
fn partial_cmp(&self, other: &String<N2>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
}
impl<const N: usize> Ord for String<N> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
macro_rules! impl_try_from_num {
($num:ty, $size:expr) => {
impl<const N: usize> core::convert::TryFrom<$num> for String<N> {
type Error = ();
fn try_from(s: $num) -> Result<Self, Self::Error> {
let mut new = String::new();
write!(&mut new, "{}", s).map_err(|_| ())?;
Ok(new)
}
}
};
}
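// The second macro argument is not used by the expansion; the values below match the maximum number of decimal characters (including a possible sign) each integer type can produce.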
impl_try_from_num!(i8, 4);
impl_try_from_num!(i16, 6);
impl_try_from_num!(i32, 11);
impl_try_from_num!(i64, 20);
impl_try_from_num!(u8, 3);
impl_try_from_num!(u16, 5);
impl_try_from_num!(u32, 10);
impl_try_from_num!(u64, 20);
#[cfg(test)]
mod tests {
use crate::{String, Vec};
use core::convert::TryFrom;
#[test]
fn static_new() {
static mut _S: String<8> = String::new();
}
#[test]
fn clone() {
let s1: String<20> = String::try_from("abcd").unwrap();
let mut s2 = s1.clone();
s2.push_str(" efgh").unwrap();
assert_eq!(s1, "abcd");
assert_eq!(s2, "abcd efgh");
}
#[test]
fn cmp() {
let s1: String<4> = String::try_from("abcd").unwrap();
let s2: String<4> = String::try_from("zzzz").unwrap();
assert!(s1 < s2);
}
#[test]
fn cmp_heterogenous_size() {
let s1: String<4> = String::try_from("abcd").unwrap();
let s2: String<8> = String::try_from("zzzz").unwrap();
assert!(s1 < s2);
}
#[test]
fn debug() {
use core::fmt::Write;
let s: String<8> = String::try_from("abcd").unwrap();
let mut std_s = std::string::String::new();
write!(std_s, "{:?}", s).unwrap();
assert_eq!("\"abcd\"", std_s);
}
#[test]
fn display() {
use core::fmt::Write;
let s: String<8> = String::try_from("abcd").unwrap();
let mut std_s = std::string::String::new();
write!(std_s, "{}", s).unwrap();
assert_eq!("abcd", std_s);
}
#[test]
fn empty() {
let s: String<4> = String::new();
assert!(s.capacity() == 4);
assert_eq!(s, "");
assert_eq!(s.len(), 0);
assert_ne!(s.len(), 4);
}
#[test]
fn try_from() {
let s: String<4> = String::try_from("123").unwrap();
assert!(s.len() == 3);
assert_eq!(s, "123");
let e: () = String::<2>::try_from("123").unwrap_err();
assert_eq!(e, ());
}
#[test]
fn from_str() {
use core::str::FromStr;
let s: String<4> = String::<4>::from_str("123").unwrap();
assert!(s.len() == 3);
assert_eq!(s, "123");
let e: () = String::<2>::from_str("123").unwrap_err();
assert_eq!(e, ());
}
#[test]
fn from_iter() {
let mut v: Vec<char, 5> = Vec::new();
v.push('h').unwrap();
v.push('e').unwrap();
v.push('l').unwrap();
v.push('l').unwrap();
v.push('o').unwrap();
let string1: String<5> = v.iter().collect(); //&char
let string2: String<5> = "hello".chars().collect(); //char
assert_eq!(string1, "hello");
assert_eq!(string2, "hello");
}
#[test]
#[should_panic]
fn from_panic() {
let _: String<4> = String::try_from("12345").unwrap();
}
#[test]
fn try_from_num() {
let v: String<20> = String::try_from(18446744073709551615 as u64).unwrap();
assert_eq!(v, "18446744073709551615");
let e: () = String::<2>::try_from(18446744073709551615 as u64).unwrap_err();
assert_eq!(e, ());
}
#[test]
fn into_bytes() {
let s: String<4> = String::try_from("ab").unwrap();
let b: Vec<u8, 4> = s.into_bytes();
assert_eq!(b.len(), 2);
assert_eq!(&['a' as u8, 'b' as u8], &b[..]);
}
#[test]
fn as_str() {
let s: String<4> = String::try_from("ab").unwrap();
assert_eq!(s.as_str(), "ab");
// should be moved to fail test
// let _s = s.as_str();
// s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable
}
#[test]
fn as_mut_str() {
let mut s: String<4> = String::try_from("ab").unwrap();
let s = s.as_mut_str();
s.make_ascii_uppercase();
assert_eq!(s, "AB");
}
#[test]
fn push_str() {
let mut s: String<8> = String::try_from("foo").unwrap();
assert!(s.push_str("bar").is_ok());
assert_eq!("foobar", s);
assert_eq!(s, "foobar");
assert!(s.push_str("tender").is_err());
assert_eq!("foobar", s);
assert_eq!(s, "foobar");
}
#[test]
fn push() {
let mut s: String<6> = String::try_from("abc").unwrap();
assert!(s.push('1').is_ok());
assert!(s.push('2').is_ok());
assert!(s.push('3').is_ok());
assert!(s.push('4').is_err());
assert!("abc123" == s.as_str());
}
#[test]
fn as_bytes() {
let s: String<8> = String::try_from("hello").unwrap();
assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
}
#[test]
fn truncate() {
let mut s: String<8> = String::try_from("hello").unwrap();
s.truncate(6);
assert_eq!(s.len(), 5);
s.truncate(2);
assert_eq!(s.len(), 2);
assert_eq!("he", s);
assert_eq!(s, "he");
}
#[test]
fn pop() {
let mut s: String<8> = String::try_from("foo").unwrap();
assert_eq!(s.pop(), Some('o'));
assert_eq!(s.pop(), Some('o'));
assert_eq!(s.pop(), Some('f'));
assert_eq!(s.pop(), None);
}
#[test]
fn pop_uenc() {
let mut s: String<8> = String::try_from("").unwrap();
assert_eq!(s.len(), 3);
match s.pop() {
Some(c) => {
assert_eq!(s.len(), 1);
assert_eq!(c, '\u{0301}'); // combining acute accent of e
()
}
None => panic!(),
};
}
#[test]
fn is_empty() {
let mut v: String<8> = String::new();
assert!(v.is_empty());
let _ = v.push('a');
assert!(!v.is_empty());
}
#[test]
fn clear() {
let mut s: String<8> = String::try_from("foo").unwrap();
s.clear();
assert!(s.is_empty());
assert_eq!(0, s.len());
assert_eq!(8, s.capacity());
}
#[test]
fn remove() {
let mut s: String<8> = String::try_from("foo").unwrap();
assert_eq!(s.remove(0), 'f');
assert_eq!(s.as_str(), "oo");
}
#[test]
fn remove_uenc() {
let mut s: String<8> = String::try_from("ĝėēƶ").unwrap();
assert_eq!(s.remove(2), 'ė');
assert_eq!(s.remove(2), 'ē');
assert_eq!(s.remove(2), 'ƶ');
assert_eq!(s.as_str(), "ĝ");
}
#[test]
fn remove_uenc_combo_characters() {
let mut s: String<8> = String::try_from("héy").unwrap();
assert_eq!(s.remove(2), '\u{0301}');
assert_eq!(s.as_str(), "hey");
}
}

23
vendor/heapless/src/test_helpers.rs vendored Normal file
View File

@@ -0,0 +1,23 @@
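// Test helper: expands to a `Droppable` type whose constructor increments, and whose `Drop` impl decrements, a shared atomic counter, so tests can assert that collections drop their elements.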
macro_rules! droppable {
() => {
static COUNT: core::sync::atomic::AtomicI32 = core::sync::atomic::AtomicI32::new(0);
#[derive(Eq, Ord, PartialEq, PartialOrd)]
struct Droppable(i32);
impl Droppable {
fn new() -> Self {
COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
Droppable(Self::count())
}
fn count() -> i32 {
COUNT.load(core::sync::atomic::Ordering::Relaxed)
}
}
impl Drop for Droppable {
fn drop(&mut self) {
COUNT.fetch_sub(1, core::sync::atomic::Ordering::Relaxed);
}
}
};
}

58
vendor/heapless/src/ufmt.rs vendored Normal file
View File

@@ -0,0 +1,58 @@
use crate::{string::String, vec::Vec};
use ufmt_write::uWrite;
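// Implementing `uWrite` lets `ufmt`'s `uwrite!`/`uwriteln!` macros format directly into these fixed-capacity containers; the write fails once the backing buffer is full.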
impl<const N: usize> uWrite for String<N> {
type Error = ();
fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
self.push_str(s)
}
}
impl<const N: usize> uWrite for Vec<u8, N> {
type Error = ();
fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
self.extend_from_slice(s.as_bytes())
}
}
#[cfg(test)]
mod tests {
use super::*;
use ufmt::{derive::uDebug, uwrite};
#[derive(uDebug)]
struct Pair {
x: u32,
y: u32,
}
#[test]
fn test_string() {
let a = 123;
let b = Pair { x: 0, y: 1234 };
let mut s = String::<32>::new();
uwrite!(s, "{} -> {:?}", a, b).unwrap();
assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }");
}
#[test]
fn test_string_err() {
let p = Pair { x: 0, y: 1234 };
let mut s = String::<4>::new();
assert!(uwrite!(s, "{:?}", p).is_err());
}
#[test]
fn test_vec() {
let a = 123;
let b = Pair { x: 0, y: 1234 };
let mut v = Vec::<u8, 32>::new();
uwrite!(v, "{} -> {:?}", a, b).unwrap();
assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }");
}
}

1581
vendor/heapless/src/vec.rs vendored Normal file

File diff suppressed because it is too large Load Diff

10
vendor/heapless/suppressions.txt vendored Normal file
View File

@@ -0,0 +1,10 @@
race:std::panic::catch_unwind
race:std::thread::scope
# std::thread::spawn false positive; seen on Ubuntu 20.04 but not on Arch Linux (2022-04-29)
race:drop_in_place*JoinHandle
race:alloc::sync::Arc<*>::drop_slow
race:__call_tls_dtors
# false positives in memcpy (?)
race:*memcpy*

25
vendor/heapless/tests/cpass.rs vendored Normal file
View File

@@ -0,0 +1,25 @@
//! Collections of `Send`-able things are `Send`
use heapless::{
spsc::{Consumer, Producer, Queue},
HistoryBuffer, Vec,
};
#[test]
fn send() {
struct IsSend;
unsafe impl Send for IsSend {}
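// Compile-time assertion: each `is_send::<T>()` call below only type-checks if `T: Send`.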
fn is_send<T>()
where
T: Send,
{
}
is_send::<Consumer<IsSend, 4>>();
is_send::<Producer<IsSend, 4>>();
is_send::<Queue<IsSend, 4>>();
is_send::<Vec<IsSend, 4>>();
is_send::<HistoryBuffer<IsSend, 4>>();
}

239
vendor/heapless/tests/tsan.rs vendored Normal file
View File

@@ -0,0 +1,239 @@
#![deny(rust_2018_compatibility)]
#![deny(rust_2018_idioms)]
#![deny(warnings)]
use std::thread;
use heapless::spsc;
#[test]
fn once() {
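// Split the queue into producer and consumer halves and move each to its own thread; run under ThreadSanitizer this must report no data races.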
static mut RB: spsc::Queue<i32, 4> = spsc::Queue::new();
let rb = unsafe { &mut RB };
rb.enqueue(0).unwrap();
let (mut p, mut c) = rb.split();
p.enqueue(1).unwrap();
thread::spawn(move || {
p.enqueue(1).unwrap();
});
thread::spawn(move || {
c.dequeue().unwrap();
});
}
#[test]
fn twice() {
static mut RB: spsc::Queue<i32, 5> = spsc::Queue::new();
let rb = unsafe { &mut RB };
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
let (mut p, mut c) = rb.split();
thread::spawn(move || {
p.enqueue(2).unwrap();
p.enqueue(3).unwrap();
});
thread::spawn(move || {
c.dequeue().unwrap();
c.dequeue().unwrap();
});
}
#[test]
fn scoped() {
let mut rb: spsc::Queue<i32, 5> = spsc::Queue::new();
rb.enqueue(0).unwrap();
{
let (mut p, mut c) = rb.split();
thread::scope(move |scope| {
scope.spawn(move || {
p.enqueue(1).unwrap();
});
scope.spawn(move || {
c.dequeue().unwrap();
});
});
}
rb.dequeue().unwrap();
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn contention() {
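// A producer thread pushes 2 * N values through a 4-slot queue while a consumer drains it; the queue must be empty once both finish.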
const N: usize = 1024;
let mut rb: spsc::Queue<u8, 4> = spsc::Queue::new();
{
let (mut p, mut c) = rb.split();
thread::scope(move |scope| {
scope.spawn(move || {
let mut sum: u32 = 0;
for i in 0..(2 * N) {
sum = sum.wrapping_add(i as u32);
while let Err(_) = p.enqueue(i as u8) {}
}
println!("producer: {}", sum);
});
scope.spawn(move || {
let mut sum: u32 = 0;
for _ in 0..(2 * N) {
loop {
match c.dequeue() {
Some(v) => {
sum = sum.wrapping_add(v as u32);
break;
}
_ => {}
}
}
}
println!("consumer: {}", sum);
});
});
}
assert!(rb.is_empty());
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn mpmc_contention() {
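// One thread enqueues and another dequeues 16 * N values through the lock-free MPMC queue; both report their running sums over a std channel, and the sums must match.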
use std::sync::mpsc;
use heapless::mpmc::Q64;
const N: u32 = 64;
static Q: Q64<u32> = Q64::new();
let (s, r) = mpsc::channel();
thread::scope(|scope| {
let s1 = s.clone();
scope.spawn(move || {
let mut sum: u32 = 0;
for i in 0..(16 * N) {
sum = sum.wrapping_add(i);
println!("enqueue {}", i);
while let Err(_) = Q.enqueue(i) {}
}
s1.send(sum).unwrap();
});
let s2 = s.clone();
scope.spawn(move || {
let mut sum: u32 = 0;
for _ in 0..(16 * N) {
loop {
match Q.dequeue() {
Some(v) => {
sum = sum.wrapping_add(v);
println!("dequeue {}", v);
break;
}
_ => {}
}
}
}
s2.send(sum).unwrap();
});
});
assert_eq!(r.recv().unwrap(), r.recv().unwrap());
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn unchecked() {
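// Pre-fill the queue with N / 2 - 1 items, then concurrently enqueue and dequeue the same number; the final length must equal the pre-filled count.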
const N: usize = 1024;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
for _ in 0..N / 2 - 1 {
rb.enqueue(1).unwrap();
}
{
let (mut p, mut c) = rb.split();
thread::scope(move |scope| {
scope.spawn(move || {
for _ in 0..N / 2 - 1 {
p.enqueue(2).unwrap();
}
});
scope.spawn(move || {
let mut sum: usize = 0;
for _ in 0..N / 2 - 1 {
sum = sum.wrapping_add(usize::from(c.dequeue().unwrap()));
}
assert_eq!(sum, N / 2 - 1);
});
});
}
assert_eq!(rb.len(), N / 2 - 1);
}
#[test]
fn len_properly_wraps() {
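// Dequeue and keep enqueueing so the internal indices wrap around the 4-slot buffer; `len()` must keep reporting the number of live elements.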
const N: usize = 4;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
rb.enqueue(1).unwrap();
assert_eq!(rb.len(), 1);
rb.dequeue();
assert_eq!(rb.len(), 0);
rb.enqueue(2).unwrap();
assert_eq!(rb.len(), 1);
rb.enqueue(3).unwrap();
assert_eq!(rb.len(), 2);
rb.enqueue(4).unwrap();
assert_eq!(rb.len(), 3);
}
#[test]
fn iterator_properly_wraps() {
const N: usize = 4;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
rb.enqueue(1).unwrap();
rb.dequeue();
rb.enqueue(2).unwrap();
rb.enqueue(3).unwrap();
rb.enqueue(4).unwrap();
let expected = [2, 3, 4];
let mut actual = [0, 0, 0];
for (idx, el) in rb.iter().enumerate() {
actual[idx] = *el;
}
assert_eq!(expected, actual)
}