Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

738
vendor/heapless/src/binary_heap.rs vendored Normal file

@@ -0,0 +1,738 @@
//! A priority queue implemented with a binary heap.
//!
//! Insertion and popping the top element have `O(log n)` time complexity. Checking the largest
//! / smallest element is `O(1)`.
// TODO not yet implemented
// Converting a vector to a binary heap can be done in-place, and has `O(n)` complexity. A binary
// heap can also be converted to a sorted vector in-place, allowing it to be used for an `O(n log
// n)` in-place heapsort.
use core::{
cmp::Ordering,
fmt,
marker::PhantomData,
mem::{self, ManuallyDrop},
ops::{Deref, DerefMut},
ptr, slice,
};
use crate::vec::Vec;
/// Min-heap
pub enum Min {}
/// Max-heap
pub enum Max {}
/// The binary heap kind: min-heap or max-heap
pub trait Kind: private::Sealed {
#[doc(hidden)]
fn ordering() -> Ordering;
}
impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
}
}
impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
/// Sealed traits
mod private {
pub trait Sealed {}
}
impl private::Sealed for Max {}
impl private::Sealed for Min {}
/// A priority queue implemented with a binary heap.
///
/// This can be either a min-heap or a max-heap.
///
/// It is a logic error for an item to be modified in such a way that the item's ordering relative
/// to any other item, as determined by the `Ord` trait, changes while it is in the heap. This is
/// normally only possible through `Cell`, `RefCell`, global state, I/O, or unsafe code.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
///
/// // We can use peek to look at the next item in the heap. In this case,
/// // there are no items in there yet so we get None.
/// assert_eq!(heap.peek(), None);
///
/// // Let's add some scores...
/// heap.push(1).unwrap();
/// heap.push(5).unwrap();
/// heap.push(2).unwrap();
///
/// // Now peek shows the most important item in the heap.
/// assert_eq!(heap.peek(), Some(&5));
///
/// // We can check the length of a heap.
/// assert_eq!(heap.len(), 3);
///
/// // We can iterate over the items in the heap, although they are returned in
/// // a random order.
/// for x in &heap {
/// println!("{}", x);
/// }
///
/// // If we instead pop these scores, they should come back in order.
/// assert_eq!(heap.pop(), Some(5));
/// assert_eq!(heap.pop(), Some(2));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
///
/// // We can clear the heap of any remaining items.
/// heap.clear();
///
/// // The heap should now be empty.
/// assert!(heap.is_empty())
/// ```
pub struct BinaryHeap<T, K, const N: usize> {
pub(crate) _kind: PhantomData<K>,
pub(crate) data: Vec<T, N>,
}
impl<T, K, const N: usize> BinaryHeap<T, K, N> {
/* Constructors */
/// Creates an empty `BinaryHeap` as a min-heap or max-heap, depending on `K`.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// // allocate the binary heap on the stack
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(4).unwrap();
///
/// // allocate the binary heap in a static variable
/// static mut HEAP: BinaryHeap<i32, Max, 8> = BinaryHeap::new();
/// ```
pub const fn new() -> Self {
Self {
_kind: PhantomData,
data: Vec::new(),
}
}
}
impl<T, K, const N: usize> BinaryHeap<T, K, N>
where
T: Ord,
K: Kind,
{
/* Public API */
/// Returns the capacity of the binary heap.
pub fn capacity(&self) -> usize {
self.data.capacity()
}
/// Drops all items from the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert!(!heap.is_empty());
///
/// heap.clear();
///
/// assert!(heap.is_empty());
/// ```
pub fn clear(&mut self) {
self.data.clear()
}
/// Returns the length of the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert_eq!(heap.len(), 2);
/// ```
pub fn len(&self) -> usize {
self.data.len()
}
/// Checks if the binary heap is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
///
/// assert!(heap.is_empty());
///
/// heap.push(3).unwrap();
/// heap.push(5).unwrap();
/// heap.push(1).unwrap();
///
/// assert!(!heap.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// Returns an iterator visiting all values in the underlying vector, in arbitrary order.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(2).unwrap();
/// heap.push(3).unwrap();
/// heap.push(4).unwrap();
///
/// // Print 1, 2, 3, 4 in arbitrary order
/// for x in heap.iter() {
/// println!("{}", x);
///
/// }
/// ```
pub fn iter(&self) -> slice::Iter<'_, T> {
self.data.as_slice().iter()
}
/// Returns a mutable iterator visiting all values in the underlying vector, in arbitrary order.
///
/// **WARNING** Mutating the items in the binary heap can leave the heap in an inconsistent
/// state.
pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
self.data.as_mut_slice().iter_mut()
}
/// Returns the *top* (greatest if max-heap, smallest if min-heap) item in the binary heap, or
/// None if it is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// assert_eq!(heap.peek(), None);
///
/// heap.push(1).unwrap();
/// heap.push(5).unwrap();
/// heap.push(2).unwrap();
/// assert_eq!(heap.peek(), Some(&5));
/// ```
pub fn peek(&self) -> Option<&T> {
self.data.as_slice().get(0)
}
/// Returns a mutable reference to the *top* (greatest if max-heap, smallest if min-heap)
/// item in the binary heap, or `None` if it is empty.
///
/// Note: If the `PeekMut` value is leaked, the heap may be in an
/// inconsistent state.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// assert!(heap.peek_mut().is_none());
///
/// heap.push(1).unwrap();
/// heap.push(5).unwrap();
/// heap.push(2).unwrap();
/// {
/// let mut val = heap.peek_mut().unwrap();
/// *val = 0;
/// }
///
/// assert_eq!(heap.peek(), Some(&2));
/// ```
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, K, N>> {
if self.is_empty() {
None
} else {
Some(PeekMut {
heap: self,
sift: true,
})
}
}
/// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and
/// returns it, or None if it is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert_eq!(heap.pop(), Some(3));
/// assert_eq!(heap.pop(), Some(1));
/// assert_eq!(heap.pop(), None);
/// ```
pub fn pop(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
Some(unsafe { self.pop_unchecked() })
}
}
/// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and
/// returns it, without checking if the binary heap is empty.
pub unsafe fn pop_unchecked(&mut self) -> T {
let mut item = self.data.pop_unchecked();
if !self.is_empty() {
mem::swap(&mut item, self.data.as_mut_slice().get_unchecked_mut(0));
self.sift_down_to_bottom(0);
}
item
}
/// Pushes an item onto the binary heap.
///
/// Returns `Err(item)` if the binary heap is full.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
///
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(3).unwrap();
/// heap.push(5).unwrap();
/// heap.push(1).unwrap();
///
/// assert_eq!(heap.len(), 3);
/// assert_eq!(heap.peek(), Some(&5));
/// ```
pub fn push(&mut self, item: T) -> Result<(), T> {
if self.data.is_full() {
return Err(item);
}
unsafe { self.push_unchecked(item) }
Ok(())
}
/// Pushes an item onto the binary heap without first checking if it's full.
pub unsafe fn push_unchecked(&mut self, item: T) {
let old_len = self.len();
self.data.push_unchecked(item);
self.sift_up(0, old_len);
}
/// Returns the underlying `Vec<T, N>`. Order is arbitrary and time is `O(1)`.
pub fn into_vec(self) -> Vec<T, N> {
self.data
}
/* Private API */
fn sift_down_to_bottom(&mut self, mut pos: usize) {
let end = self.len();
let start = pos;
unsafe {
let mut hole = Hole::new(self.data.as_mut_slice(), pos);
let mut child = 2 * pos + 1;
while child < end {
let right = child + 1;
// compare with the greater of the two children
if right < end && hole.get(child).cmp(hole.get(right)) != K::ordering() {
child = right;
}
hole.move_to(child);
child = 2 * hole.pos() + 1;
}
pos = hole.pos;
}
self.sift_up(start, pos);
}
fn sift_up(&mut self, start: usize, pos: usize) -> usize {
unsafe {
// Take out the value at `pos` and create a hole.
let mut hole = Hole::new(self.data.as_mut_slice(), pos);
while hole.pos() > start {
let parent = (hole.pos() - 1) / 2;
if hole.element().cmp(hole.get(parent)) != K::ordering() {
break;
}
hole.move_to(parent);
}
hole.pos()
}
}
}
/// Hole represents a hole in a slice, i.e. an index without a valid value
/// (because it was moved from or duplicated).
/// On drop, `Hole` will restore the slice by filling the hole
/// position with the value that was originally removed.
struct Hole<'a, T> {
data: &'a mut [T],
/// `elt` is always `Some` from new until drop.
elt: ManuallyDrop<T>,
pos: usize,
}
impl<'a, T> Hole<'a, T> {
/// Create a new Hole at index `pos`.
///
/// Unsafe because pos must be within the data slice.
#[inline]
unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
debug_assert!(pos < data.len());
let elt = ptr::read(data.get_unchecked(pos));
Hole {
data,
elt: ManuallyDrop::new(elt),
pos,
}
}
#[inline]
fn pos(&self) -> usize {
self.pos
}
/// Returns a reference to the element removed.
#[inline]
fn element(&self) -> &T {
&self.elt
}
/// Returns a reference to the element at `index`.
///
/// Unsafe because index must be within the data slice and not equal to pos.
#[inline]
unsafe fn get(&self, index: usize) -> &T {
debug_assert!(index != self.pos);
debug_assert!(index < self.data.len());
self.data.get_unchecked(index)
}
/// Move hole to new location
///
/// Unsafe because index must be within the data slice and not equal to pos.
#[inline]
unsafe fn move_to(&mut self, index: usize) {
debug_assert!(index != self.pos);
debug_assert!(index < self.data.len());
let ptr = self.data.as_mut_ptr();
let index_ptr: *const _ = ptr.add(index);
let hole_ptr = ptr.add(self.pos);
ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
self.pos = index;
}
}
/// Structure wrapping a mutable reference to the *top* (greatest if max-heap,
/// smallest if min-heap) item on a `BinaryHeap`.
///
/// This `struct` is created by [`BinaryHeap::peek_mut`].
/// See its documentation for more.
pub struct PeekMut<'a, T, K, const N: usize>
where
T: Ord,
K: Kind,
{
heap: &'a mut BinaryHeap<T, K, N>,
sift: bool,
}
impl<T, K, const N: usize> Drop for PeekMut<'_, T, K, N>
where
T: Ord,
K: Kind,
{
fn drop(&mut self) {
if self.sift {
self.heap.sift_down_to_bottom(0);
}
}
}
impl<T, K, const N: usize> Deref for PeekMut<'_, T, K, N>
where
T: Ord,
K: Kind,
{
type Target = T;
fn deref(&self) -> &T {
debug_assert!(!self.heap.is_empty());
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.data.as_slice().get_unchecked(0) }
}
}
impl<T, K, const N: usize> DerefMut for PeekMut<'_, T, K, N>
where
T: Ord,
K: Kind,
{
fn deref_mut(&mut self) -> &mut T {
debug_assert!(!self.heap.is_empty());
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.data.as_mut_slice().get_unchecked_mut(0) }
}
}
impl<'a, T, K, const N: usize> PeekMut<'a, T, K, N>
where
T: Ord,
K: Kind,
{
/// Removes the peeked value from the heap and returns it.
pub fn pop(mut this: PeekMut<'a, T, K, N>) -> T {
let value = this.heap.pop().unwrap();
this.sift = false;
value
}
}
impl<'a, T> Drop for Hole<'a, T> {
#[inline]
fn drop(&mut self) {
// fill the hole again
unsafe {
let pos = self.pos;
ptr::write(self.data.get_unchecked_mut(pos), ptr::read(&*self.elt));
}
}
}
impl<T, K, const N: usize> Default for BinaryHeap<T, K, N>
where
T: Ord,
K: Kind,
{
fn default() -> Self {
Self::new()
}
}
impl<T, K, const N: usize> Clone for BinaryHeap<T, K, N>
where
K: Kind,
T: Ord + Clone,
{
fn clone(&self) -> Self {
Self {
_kind: self._kind,
data: self.data.clone(),
}
}
}
impl<T, K, const N: usize> fmt::Debug for BinaryHeap<T, K, N>
where
K: Kind,
T: Ord + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<'a, T, K, const N: usize> IntoIterator for &'a BinaryHeap<T, K, N>
where
K: Kind,
T: Ord,
{
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
#[cfg(test)]
mod tests {
use std::vec::Vec;
use crate::binary_heap::{BinaryHeap, Max, Min};
#[test]
fn static_new() {
static mut _B: BinaryHeap<i32, Min, 16> = BinaryHeap::new();
}
#[test]
fn drop() {
droppable!();
{
let mut v: BinaryHeap<Droppable, Max, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
v.pop().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: BinaryHeap<Droppable, Max, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: BinaryHeap<Droppable, Min, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
v.pop().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: BinaryHeap<Droppable, Min, 2> = BinaryHeap::new();
v.push(Droppable::new()).ok().unwrap();
v.push(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
}
#[test]
fn into_vec() {
droppable!();
let mut h: BinaryHeap<Droppable, Max, 2> = BinaryHeap::new();
h.push(Droppable::new()).ok().unwrap();
h.push(Droppable::new()).ok().unwrap();
h.pop().unwrap();
assert_eq!(Droppable::count(), 1);
let v = h.into_vec();
assert_eq!(Droppable::count(), 1);
core::mem::drop(v);
assert_eq!(Droppable::count(), 0);
}
#[test]
fn min() {
let mut heap = BinaryHeap::<_, Min, 16>::new();
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(3).unwrap();
heap.push(17).unwrap();
heap.push(19).unwrap();
heap.push(36).unwrap();
heap.push(7).unwrap();
heap.push(25).unwrap();
heap.push(100).unwrap();
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[1, 2, 3, 17, 19, 36, 7, 25, 100]
);
assert_eq!(heap.pop(), Some(1));
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[2, 17, 3, 25, 19, 36, 7, 100]
);
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(17));
assert_eq!(heap.pop(), Some(19));
assert_eq!(heap.pop(), Some(25));
assert_eq!(heap.pop(), Some(36));
assert_eq!(heap.pop(), Some(100));
assert_eq!(heap.pop(), None);
assert!(heap.peek_mut().is_none());
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(10).unwrap();
{
let mut val = heap.peek_mut().unwrap();
*val = 7;
}
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(10));
assert_eq!(heap.pop(), None);
}
#[test]
fn max() {
let mut heap = BinaryHeap::<_, Max, 16>::new();
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(3).unwrap();
heap.push(17).unwrap();
heap.push(19).unwrap();
heap.push(36).unwrap();
heap.push(7).unwrap();
heap.push(25).unwrap();
heap.push(100).unwrap();
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[100, 36, 19, 25, 3, 2, 7, 1, 17]
);
assert_eq!(heap.pop(), Some(100));
assert_eq!(
heap.iter().cloned().collect::<Vec<_>>(),
[36, 25, 19, 17, 3, 2, 7, 1]
);
assert_eq!(heap.pop(), Some(36));
assert_eq!(heap.pop(), Some(25));
assert_eq!(heap.pop(), Some(19));
assert_eq!(heap.pop(), Some(17));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), None);
assert!(heap.peek_mut().is_none());
heap.push(1).unwrap();
heap.push(9).unwrap();
heap.push(10).unwrap();
{
let mut val = heap.peek_mut().unwrap();
*val = 7;
}
assert_eq!(heap.pop(), Some(9));
assert_eq!(heap.pop(), Some(7));
assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), None);
}
}
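A minimal usage sketch of the API above, with illustrative capacities and values: the `Kind` parameter fixes min- versus max-ordering at the type level, so both flavors coexist over the same element type.

use heapless::binary_heap::{BinaryHeap, Max, Min};

// Two heaps over the same scores; the ordering is chosen by the type parameter.
let mut best: BinaryHeap<u32, Max, 8> = BinaryHeap::new();
let mut worst: BinaryHeap<u32, Min, 8> = BinaryHeap::new();
for score in [3, 1, 4, 1, 5] {
    // `push` returns `Err(item)` instead of allocating when the heap is full.
    best.push(score).unwrap();
    worst.push(score).unwrap();
}
assert_eq!(best.peek(), Some(&5));  // max-heap: greatest on top
assert_eq!(worst.peek(), Some(&1)); // min-heap: smallest on top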

306
vendor/heapless/src/de.rs vendored Normal file

@@ -0,0 +1,306 @@
use crate::{
binary_heap::Kind as BinaryHeapKind, BinaryHeap, Deque, IndexMap, IndexSet, LinearMap, String,
Vec,
};
use core::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
};
use hash32::BuildHasherDefault;
use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess};
// Sequential containers
impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap<T, KIND, N>
where
T: Ord + Deserialize<'de>,
KIND: BinaryHeapKind,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>);
impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N>
where
T: Ord + Deserialize<'de>,
KIND: BinaryHeapKind,
{
type Value = BinaryHeap<T, KIND, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = BinaryHeap::new();
while let Some(value) = seq.next_element()? {
if values.push(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self));
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet<T, BuildHasherDefault<S>, N>
where
T: Eq + Hash + Deserialize<'de>,
S: Hasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>);
impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N>
where
T: Eq + Hash + Deserialize<'de>,
S: Hasher + Default,
{
type Value = IndexSet<T, BuildHasherDefault<S>, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = IndexSet::new();
while let Some(value) = seq.next_element()? {
if values.insert(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self));
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
impl<'de, T, const N: usize> Deserialize<'de> for Vec<T, N>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>);
impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N>
where
T: Deserialize<'de>,
{
type Value = Vec<T, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = Vec::new();
while let Some(value) = seq.next_element()? {
if values.push(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self));
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
impl<'de, T, const N: usize> Deserialize<'de> for Deque<T, N>
where
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>);
impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N>
where
T: Deserialize<'de>,
{
type Value = Deque<T, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = Deque::new();
while let Some(value) = seq.next_element()? {
if values.push_back(value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self));
}
}
Ok(values)
}
}
deserializer.deserialize_seq(ValueVisitor(PhantomData))
}
}
// Dictionaries
impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap<K, V, BuildHasherDefault<S>, N>
where
K: Eq + Hash + Deserialize<'de>,
V: Deserialize<'de>,
S: Default + Hasher,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>);
impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N>
where
K: Eq + Hash + Deserialize<'de>,
V: Deserialize<'de>,
S: Default + Hasher,
{
type Value = IndexMap<K, V, BuildHasherDefault<S>, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = IndexMap::new();
while let Some((key, value)) = map.next_entry()? {
if values.insert(key, value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self));
}
}
Ok(values)
}
}
deserializer.deserialize_map(ValueVisitor(PhantomData))
}
}
impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap<K, V, N>
where
K: Eq + Deserialize<'de>,
V: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>);
impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N>
where
K: Eq + Deserialize<'de>,
V: Deserialize<'de>,
{
type Value = LinearMap<K, V, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = LinearMap::new();
while let Some((key, value)) = map.next_entry()? {
if values.insert(key, value).is_err() {
return Err(A::Error::invalid_length(values.capacity() + 1, &self));
}
}
Ok(values)
}
}
deserializer.deserialize_map(ValueVisitor(PhantomData))
}
}
// String containers
impl<'de, const N: usize> Deserialize<'de> for String<N> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, const N: usize>(PhantomData<&'de ()>);
impl<'de, const N: usize> de::Visitor<'de> for ValueVisitor<'de, N> {
type Value = String<N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "a string no more than {} bytes long", N as u64)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
let mut s = String::new();
s.push_str(v)
.map_err(|_| E::invalid_length(v.len(), &self))?;
Ok(s)
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
let mut s = String::new();
s.push_str(
core::str::from_utf8(v)
.map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?,
)
.map_err(|_| E::invalid_length(v.len(), &self))?;
Ok(s)
}
}
deserializer.deserialize_str(ValueVisitor::<'de, N>(PhantomData))
}
}
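Each impl above maps the container's capacity error to serde's `invalid_length`, so an over-long input fails cleanly instead of allocating. A sketch of that behavior, assuming `serde_json` as a host-side deserializer (it is not a dependency of this file):

use heapless::Vec;

// Three elements fit a capacity-4 vector.
let v: Vec<u8, 4> = serde_json::from_str("[1, 2, 3]").unwrap();
assert_eq!(v.as_slice(), &[1, 2, 3]);

// A capacity-2 vector rejects the third element with an `invalid_length` error.
let overflow: Result<Vec<u8, 2>, _> = serde_json::from_str("[1, 2, 3]");
assert!(overflow.is_err());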

23
vendor/heapless/src/defmt.rs vendored Normal file

@@ -0,0 +1,23 @@
//! Defmt implementations for heapless types
//!
use crate::Vec;
use defmt::Formatter;
impl<T, const N: usize> defmt::Format for Vec<T, N>
where
T: defmt::Format,
{
fn format(&self, fmt: Formatter<'_>) {
defmt::write!(fmt, "{=[?]}", self.as_slice())
}
}
impl<const N: usize> defmt::Format for crate::String<N>
where
u8: defmt::Format,
{
fn format(&self, fmt: Formatter<'_>) {
defmt::write!(fmt, "{=str}", self.as_str());
}
}
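With these impls, heapless containers can appear directly in defmt log statements. A sketch, assuming an embedded target with a defmt global logger (e.g. `defmt-rtt`) already wired up:

use heapless::{String, Vec};

fn report(label: &String<16>, samples: &Vec<u16, 8>) {
    // `{}` dispatches to the `Format` impls defined above.
    defmt::info!("label={} samples={}", label, samples);
}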

831
vendor/heapless/src/deque.rs vendored Normal file

@@ -0,0 +1,831 @@
use core::fmt;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::{ptr, slice};
/// A fixed capacity double-ended queue.
///
/// # Examples
///
/// ```
/// use heapless::Deque;
///
/// // A deque with a fixed capacity of 8 elements allocated on the stack
/// let mut deque = Deque::<_, 8>::new();
///
/// // You can use it as a good old FIFO queue.
/// deque.push_back(1);
/// deque.push_back(2);
/// assert_eq!(deque.len(), 2);
///
/// assert_eq!(deque.pop_front(), Some(1));
/// assert_eq!(deque.pop_front(), Some(2));
/// assert_eq!(deque.len(), 0);
///
/// // Deque is double-ended, you can push and pop from the front and back.
/// deque.push_back(1);
/// deque.push_front(2);
/// deque.push_back(3);
/// deque.push_front(4);
/// assert_eq!(deque.pop_front(), Some(4));
/// assert_eq!(deque.pop_front(), Some(2));
/// assert_eq!(deque.pop_front(), Some(1));
/// assert_eq!(deque.pop_front(), Some(3));
///
/// // You can iterate it, yielding all the elements front-to-back.
/// for x in &deque {
/// println!("{}", x);
/// }
/// ```
pub struct Deque<T, const N: usize> {
buffer: [MaybeUninit<T>; N],
/// Front index. Always 0..=(N-1).
front: usize,
/// Back index. Always 0..=(N-1).
back: usize,
/// Used to distinguish "empty" and "full" cases when `front == back`.
/// May only be `true` if `front == back`, always `false` otherwise.
full: bool,
}
impl<T, const N: usize> Deque<T, N> {
const INIT: MaybeUninit<T> = MaybeUninit::uninit();
/// Constructs a new, empty deque with a fixed capacity of `N`
///
/// # Examples
///
/// ```
/// use heapless::Deque;
///
/// // allocate the deque on the stack
/// let mut x: Deque<u8, 16> = Deque::new();
///
/// // allocate the deque in a static variable
/// static mut X: Deque<u8, 16> = Deque::new();
/// ```
pub const fn new() -> Self {
// Const assert N > 0
crate::sealed::greater_than_0::<N>();
Self {
buffer: [Self::INIT; N],
front: 0,
back: 0,
full: false,
}
}
fn increment(i: usize) -> usize {
if i + 1 == N {
0
} else {
i + 1
}
}
fn decrement(i: usize) -> usize {
if i == 0 {
N - 1
} else {
i - 1
}
}
/// Returns the maximum number of elements the deque can hold.
pub const fn capacity(&self) -> usize {
N
}
/// Returns the number of elements currently in the deque.
pub const fn len(&self) -> usize {
if self.full {
N
} else if self.back < self.front {
self.back + N - self.front
} else {
self.back - self.front
}
}
/// Clears the deque, removing all values.
pub fn clear(&mut self) {
// safety: we're immediately setting a consistent empty state.
unsafe { self.drop_contents() }
self.front = 0;
self.back = 0;
self.full = false;
}
/// Drop all items in the `Deque`, leaving the state `back/front/full` unmodified.
///
/// safety: leaves the `Deque` in an inconsistent state, so can cause duplicate drops.
unsafe fn drop_contents(&mut self) {
// We drop each element used in the deque by turning into a &mut[T]
let (a, b) = self.as_mut_slices();
ptr::drop_in_place(a);
ptr::drop_in_place(b);
}
/// Returns whether the deque is empty.
pub fn is_empty(&self) -> bool {
self.front == self.back && !self.full
}
/// Returns whether the deque is full (i.e. if `len() == capacity()`).
pub fn is_full(&self) -> bool {
self.full
}
/// Returns a pair of slices which contain, in order, the contents of the `Deque`.
pub fn as_slices(&self) -> (&[T], &[T]) {
// NOTE(unsafe) avoid bound checks in the slicing operation
unsafe {
if self.is_empty() {
(&[], &[])
} else if self.back <= self.front {
(
slice::from_raw_parts(
self.buffer.as_ptr().add(self.front) as *const T,
N - self.front,
),
slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.back),
)
} else {
(
slice::from_raw_parts(
self.buffer.as_ptr().add(self.front) as *const T,
self.back - self.front,
),
&[],
)
}
}
}
/// Returns a pair of mutable slices which contain, in order, the contents of the `Deque`.
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
let ptr = self.buffer.as_mut_ptr();
// NOTE(unsafe) avoid bound checks in the slicing operation
unsafe {
if self.is_empty() {
(&mut [], &mut [])
} else if self.back <= self.front {
(
slice::from_raw_parts_mut(ptr.add(self.front) as *mut T, N - self.front),
slice::from_raw_parts_mut(ptr as *mut T, self.back),
)
} else {
(
slice::from_raw_parts_mut(
ptr.add(self.front) as *mut T,
self.back - self.front,
),
&mut [],
)
}
}
}
/// Provides a reference to the front element, or None if the `Deque` is empty.
pub fn front(&self) -> Option<&T> {
if self.is_empty() {
None
} else {
Some(unsafe { &*self.buffer.get_unchecked(self.front).as_ptr() })
}
}
/// Provides a mutable reference to the front element, or None if the `Deque` is empty.
pub fn front_mut(&mut self) -> Option<&mut T> {
if self.is_empty() {
None
} else {
Some(unsafe { &mut *self.buffer.get_unchecked_mut(self.front).as_mut_ptr() })
}
}
/// Provides a reference to the back element, or None if the `Deque` is empty.
pub fn back(&self) -> Option<&T> {
if self.is_empty() {
None
} else {
let index = Self::decrement(self.back);
Some(unsafe { &*self.buffer.get_unchecked(index).as_ptr() })
}
}
/// Provides a mutable reference to the back element, or None if the `Deque` is empty.
pub fn back_mut(&mut self) -> Option<&mut T> {
if self.is_empty() {
None
} else {
let index = Self::decrement(self.back);
Some(unsafe { &mut *self.buffer.get_unchecked_mut(index).as_mut_ptr() })
}
}
/// Removes the item from the front of the deque and returns it, or `None` if it's empty
pub fn pop_front(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
Some(unsafe { self.pop_front_unchecked() })
}
}
/// Removes the item from the back of the deque and returns it, or `None` if it's empty
pub fn pop_back(&mut self) -> Option<T> {
if self.is_empty() {
None
} else {
Some(unsafe { self.pop_back_unchecked() })
}
}
/// Prepends an `item` to the front of the deque
///
/// Returns the `item` back if the deque is full
pub fn push_front(&mut self, item: T) -> Result<(), T> {
if self.is_full() {
Err(item)
} else {
unsafe { self.push_front_unchecked(item) }
Ok(())
}
}
/// Appends an `item` to the back of the deque
///
/// Returns the `item` back if the deque is full
pub fn push_back(&mut self, item: T) -> Result<(), T> {
if self.is_full() {
Err(item)
} else {
unsafe { self.push_back_unchecked(item) }
Ok(())
}
}
/// Removes an item from the front of the deque and returns it, without checking that the deque
/// is not empty
///
/// # Safety
///
/// It's undefined behavior to call this on an empty deque
pub unsafe fn pop_front_unchecked(&mut self) -> T {
debug_assert!(!self.is_empty());
let index = self.front;
self.full = false;
self.front = Self::increment(self.front);
(self.buffer.get_unchecked_mut(index).as_ptr() as *const T).read()
}
/// Removes an item from the back of the deque and returns it, without checking that the deque
/// is not empty
///
/// # Safety
///
/// It's undefined behavior to call this on an empty deque
pub unsafe fn pop_back_unchecked(&mut self) -> T {
debug_assert!(!self.is_empty());
self.full = false;
self.back = Self::decrement(self.back);
(self.buffer.get_unchecked_mut(self.back).as_ptr() as *const T).read()
}
/// Prepends an `item` to the front of the deque
///
/// # Safety
///
/// This assumes the deque is not full.
pub unsafe fn push_front_unchecked(&mut self, item: T) {
debug_assert!(!self.is_full());
let index = Self::decrement(self.front);
// NOTE: the memory slot that we are about to write to is uninitialized. We assign
// a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory
*self.buffer.get_unchecked_mut(index) = MaybeUninit::new(item);
self.front = index;
if self.front == self.back {
self.full = true;
}
}
/// Appends an `item` to the back of the deque
///
/// # Safety
///
/// This assumes the deque is not full.
pub unsafe fn push_back_unchecked(&mut self, item: T) {
debug_assert!(!self.is_full());
// NOTE: the memory slot that we are about to write to is uninitialized. We assign
// a `MaybeUninit` to avoid running `T`'s destructor on the uninitialized memory
*self.buffer.get_unchecked_mut(self.back) = MaybeUninit::new(item);
self.back = Self::increment(self.back);
if self.front == self.back {
self.full = true;
}
}
/// Returns an iterator over the deque.
pub fn iter(&self) -> Iter<'_, T, N> {
let done = self.is_empty();
Iter {
_phantom: PhantomData,
buffer: &self.buffer as *const MaybeUninit<T>,
front: self.front,
back: self.back,
done,
}
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&mut self) -> IterMut<'_, T, N> {
let done = self.is_empty();
IterMut {
_phantom: PhantomData,
buffer: &mut self.buffer as *mut _ as *mut MaybeUninit<T>,
front: self.front,
back: self.back,
done,
}
}
}
// Trait implementations
impl<T, const N: usize> Default for Deque<T, N> {
fn default() -> Self {
Self::new()
}
}
impl<T, const N: usize> Drop for Deque<T, N> {
fn drop(&mut self) {
// safety: `self` is left in an inconsistent state but it doesn't matter since
// it's getting dropped. Nothing should be able to observe `self` after drop.
unsafe { self.drop_contents() }
}
}
impl<T: fmt::Debug, const N: usize> fmt::Debug for Deque<T, N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self).finish()
}
}
/// An iterator that moves out of a [`Deque`].
///
/// This struct is created by calling the `into_iter` method.
///
#[derive(Clone)]
pub struct IntoIter<T, const N: usize> {
deque: Deque<T, N>,
}
impl<T, const N: usize> Iterator for IntoIter<T, N> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
self.deque.pop_front()
}
}
impl<T, const N: usize> IntoIterator for Deque<T, N> {
type Item = T;
type IntoIter = IntoIter<T, N>;
fn into_iter(self) -> Self::IntoIter {
IntoIter { deque: self }
}
}
/// An iterator over the elements of a [`Deque`].
///
/// This struct is created by calling the `iter` method.
#[derive(Clone)]
pub struct Iter<'a, T, const N: usize> {
buffer: *const MaybeUninit<T>,
_phantom: PhantomData<&'a T>,
front: usize,
back: usize,
done: bool,
}
impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
let index = self.front;
self.front = Deque::<T, N>::increment(self.front);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &*(self.buffer.add(index) as *const T) })
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = if self.done {
0
} else if self.back <= self.front {
self.back + N - self.front
} else {
self.back - self.front
};
(len, Some(len))
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
self.back = Deque::<T, N>::decrement(self.back);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &*(self.buffer.add(self.back) as *const T) })
}
}
}
impl<'a, T, const N: usize> ExactSizeIterator for Iter<'a, T, N> {}
impl<'a, T, const N: usize> FusedIterator for Iter<'a, T, N> {}
/// An iterator over the elements of a [`Deque`].
///
/// This struct is created by calling the `iter_mut` method.
pub struct IterMut<'a, T, const N: usize> {
buffer: *mut MaybeUninit<T>,
_phantom: PhantomData<&'a mut T>,
front: usize,
back: usize,
done: bool,
}
impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> {
type Item = &'a mut T;
fn next(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
let index = self.front;
self.front = Deque::<T, N>::increment(self.front);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &mut *(self.buffer.add(index) as *mut T) })
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = if self.done {
0
} else if self.back <= self.front {
self.back + N - self.front
} else {
self.back - self.front
};
(len, Some(len))
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.done {
None
} else {
self.back = Deque::<T, N>::decrement(self.back);
if self.front == self.back {
self.done = true;
}
Some(unsafe { &mut *(self.buffer.add(self.back) as *mut T) })
}
}
}
impl<'a, T, const N: usize> ExactSizeIterator for IterMut<'a, T, N> {}
impl<'a, T, const N: usize> FusedIterator for IterMut<'a, T, N> {}
impl<'a, T, const N: usize> IntoIterator for &'a Deque<T, N> {
type Item = &'a T;
type IntoIter = Iter<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, const N: usize> IntoIterator for &'a mut Deque<T, N> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
impl<T, const N: usize> Clone for Deque<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut res = Deque::new();
for i in self {
// safety: the original and new deques have the same capacity, so it can
// not become full.
unsafe { res.push_back_unchecked(i.clone()) }
}
res
}
}
#[cfg(test)]
mod tests {
use crate::Deque;
#[test]
fn static_new() {
static mut _V: Deque<i32, 4> = Deque::new();
}
#[test]
fn stack_new() {
let mut _v: Deque<i32, 4> = Deque::new();
}
#[test]
fn drop() {
droppable!();
{
let mut v: Deque<Droppable, 2> = Deque::new();
v.push_back(Droppable::new()).ok().unwrap();
v.push_back(Droppable::new()).ok().unwrap();
v.pop_front().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: Deque<Droppable, 2> = Deque::new();
v.push_back(Droppable::new()).ok().unwrap();
v.push_back(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: Deque<Droppable, 2> = Deque::new();
v.push_front(Droppable::new()).ok().unwrap();
v.push_front(Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
}
#[test]
fn full() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_front(1).unwrap();
v.push_back(2).unwrap();
v.push_back(3).unwrap();
assert!(v.push_front(4).is_err());
assert!(v.push_back(4).is_err());
assert!(v.is_full());
}
#[test]
fn empty() {
let mut v: Deque<i32, 4> = Deque::new();
assert!(v.is_empty());
v.push_back(0).unwrap();
assert!(!v.is_empty());
v.push_front(1).unwrap();
assert!(!v.is_empty());
v.pop_front().unwrap();
v.pop_front().unwrap();
assert!(v.pop_front().is_none());
assert!(v.pop_back().is_none());
assert!(v.is_empty());
}
#[test]
fn front_back() {
let mut v: Deque<i32, 4> = Deque::new();
assert_eq!(v.front(), None);
assert_eq!(v.front_mut(), None);
assert_eq!(v.back(), None);
assert_eq!(v.back_mut(), None);
v.push_back(4).unwrap();
assert_eq!(v.front(), Some(&4));
assert_eq!(v.front_mut(), Some(&mut 4));
assert_eq!(v.back(), Some(&4));
assert_eq!(v.back_mut(), Some(&mut 4));
v.push_front(3).unwrap();
assert_eq!(v.front(), Some(&3));
assert_eq!(v.front_mut(), Some(&mut 3));
assert_eq!(v.back(), Some(&4));
assert_eq!(v.back_mut(), Some(&mut 4));
v.pop_back().unwrap();
assert_eq!(v.front(), Some(&3));
assert_eq!(v.front_mut(), Some(&mut 3));
assert_eq!(v.back(), Some(&3));
assert_eq!(v.back_mut(), Some(&mut 3));
v.pop_front().unwrap();
assert_eq!(v.front(), None);
assert_eq!(v.front_mut(), None);
assert_eq!(v.back(), None);
assert_eq!(v.back_mut(), None);
}
#[test]
fn iter() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_back(1).unwrap();
v.push_front(2).unwrap();
v.push_front(3).unwrap();
v.pop_back().unwrap();
v.push_front(4).unwrap();
let mut items = v.iter();
assert_eq!(items.next(), Some(&4));
assert_eq!(items.next(), Some(&3));
assert_eq!(items.next(), Some(&2));
assert_eq!(items.next(), Some(&0));
assert_eq!(items.next(), None);
}
#[test]
fn iter_mut() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_back(1).unwrap();
v.push_front(2).unwrap();
v.push_front(3).unwrap();
v.pop_back().unwrap();
v.push_front(4).unwrap();
let mut items = v.iter_mut();
assert_eq!(items.next(), Some(&mut 4));
assert_eq!(items.next(), Some(&mut 3));
assert_eq!(items.next(), Some(&mut 2));
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next(), None);
}
#[test]
fn iter_move() {
let mut v: Deque<i32, 4> = Deque::new();
v.push_back(0).unwrap();
v.push_back(1).unwrap();
v.push_back(2).unwrap();
v.push_back(3).unwrap();
let mut items = v.into_iter();
assert_eq!(items.next(), Some(0));
assert_eq!(items.next(), Some(1));
assert_eq!(items.next(), Some(2));
assert_eq!(items.next(), Some(3));
assert_eq!(items.next(), None);
}
#[test]
fn iter_move_drop() {
droppable!();
{
let mut deque: Deque<Droppable, 2> = Deque::new();
deque.push_back(Droppable::new()).ok().unwrap();
deque.push_back(Droppable::new()).ok().unwrap();
let mut items = deque.into_iter();
// Move all
let _ = items.next();
let _ = items.next();
}
assert_eq!(Droppable::count(), 0);
{
let mut deque: Deque<Droppable, 2> = Deque::new();
deque.push_back(Droppable::new()).ok().unwrap();
deque.push_back(Droppable::new()).ok().unwrap();
let _items = deque.into_iter();
// Move none
}
assert_eq!(Droppable::count(), 0);
{
let mut deque: Deque<Droppable, 2> = Deque::new();
deque.push_back(Droppable::new()).ok().unwrap();
deque.push_back(Droppable::new()).ok().unwrap();
let mut items = deque.into_iter();
let _ = items.next(); // Move partly
}
assert_eq!(Droppable::count(), 0);
}
#[test]
fn push_and_pop() {
let mut q: Deque<i32, 4> = Deque::new();
assert_eq!(q.len(), 0);
assert_eq!(q.pop_front(), None);
assert_eq!(q.pop_back(), None);
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
assert_eq!(q.len(), 1);
assert_eq!(q.pop_back(), Some(0));
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
q.push_back(1).unwrap();
q.push_front(2).unwrap();
q.push_front(3).unwrap();
assert_eq!(q.len(), 4);
// deque contains: 3 2 0 1
assert_eq!(q.pop_front(), Some(3));
assert_eq!(q.len(), 3);
assert_eq!(q.pop_front(), Some(2));
assert_eq!(q.len(), 2);
assert_eq!(q.pop_back(), Some(1));
assert_eq!(q.len(), 1);
assert_eq!(q.pop_front(), Some(0));
assert_eq!(q.len(), 0);
// deque is now empty
assert_eq!(q.pop_front(), None);
assert_eq!(q.pop_back(), None);
assert_eq!(q.len(), 0);
}
#[test]
fn as_slices() {
let mut q: Deque<i32, 4> = Deque::new();
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
q.push_back(1).unwrap();
q.push_back(2).unwrap();
q.push_back(3).unwrap();
assert_eq!(q.as_slices(), (&[0, 1, 2, 3][..], &[][..]));
q.pop_front().unwrap();
assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[][..]));
q.push_back(4).unwrap();
assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[4][..]));
}
#[test]
fn clear() {
let mut q: Deque<i32, 4> = Deque::new();
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
q.push_back(1).unwrap();
q.push_back(2).unwrap();
q.push_back(3).unwrap();
assert_eq!(q.len(), 4);
q.clear();
assert_eq!(q.len(), 0);
q.push_back(0).unwrap();
assert_eq!(q.len(), 1);
}
}
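The `increment`/`decrement` wrap-around arithmetic above is what lets a full buffer reuse freed slots without moving elements. A short sketch of the resulting behavior, with illustrative values:

use heapless::Deque;

let mut q: Deque<u8, 4> = Deque::new();
for byte in 0..4 {
    q.push_back(byte).unwrap();
}
assert!(q.is_full());
assert_eq!(q.push_back(9), Err(9)); // full: the rejected item comes back
assert_eq!(q.pop_front(), Some(0)); // FIFO order at the front
q.push_back(9).unwrap();            // the freed slot is reused, wrapping around
assert_eq!(q.as_slices(), (&[1, 2, 3][..], &[9][..]));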

578
vendor/heapless/src/histbuf.rs vendored Normal file

@@ -0,0 +1,578 @@
use core::fmt;
use core::mem::MaybeUninit;
use core::ops::Deref;
use core::ptr;
use core::slice;
/// A "history buffer", similar to a write-only ring buffer of fixed length.
///
/// This buffer keeps a fixed number of elements. On write, the oldest element
/// is overwritten. Thus, the buffer is useful to keep a history of values with
/// some desired depth, and for example calculate a rolling average.
///
/// # Examples
/// ```
/// use heapless::HistoryBuffer;
///
/// // Initialize a new buffer with 8 elements.
/// let mut buf = HistoryBuffer::<_, 8>::new();
///
/// // Starts with no data
/// assert_eq!(buf.recent(), None);
///
/// buf.write(3);
/// buf.write(5);
/// buf.extend(&[4, 4]);
///
/// // The most recent written element is a four.
/// assert_eq!(buf.recent(), Some(&4));
///
/// // To access all elements in an unspecified order, use `as_slice()`.
/// for el in buf.as_slice() { println!("{:?}", el); }
///
/// // Now we can prepare an average of all values, which comes out to 4.
/// let avg = buf.as_slice().iter().sum::<usize>() / buf.len();
/// assert_eq!(avg, 4);
/// ```
pub struct HistoryBuffer<T, const N: usize> {
data: [MaybeUninit<T>; N],
write_at: usize,
filled: bool,
}
impl<T, const N: usize> HistoryBuffer<T, N> {
const INIT: MaybeUninit<T> = MaybeUninit::uninit();
/// Constructs a new history buffer.
///
/// The construction of a `HistoryBuffer` works in `const` contexts.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// // Allocate a 16-element buffer on the stack
/// let x: HistoryBuffer<u8, 16> = HistoryBuffer::new();
/// assert_eq!(x.len(), 0);
/// ```
#[inline]
pub const fn new() -> Self {
// Const assert
crate::sealed::greater_than_0::<N>();
Self {
data: [Self::INIT; N],
write_at: 0,
filled: false,
}
}
/// Clears the buffer, removing all values.
pub fn clear(&mut self) {
*self = Self::new();
}
}
impl<T, const N: usize> HistoryBuffer<T, N>
where
T: Copy + Clone,
{
/// Constructs a new history buffer, where every element is the given value.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// // Allocate a 16-element buffer on the stack
/// let mut x: HistoryBuffer<u8, 16> = HistoryBuffer::new_with(4);
/// // All elements are four
/// assert_eq!(x.as_slice(), [4; 16]);
/// ```
#[inline]
pub fn new_with(t: T) -> Self {
Self {
data: [MaybeUninit::new(t); N],
write_at: 0,
filled: true,
}
}
/// Clears the buffer, replacing every element with the given value.
pub fn clear_with(&mut self, t: T) {
*self = Self::new_with(t);
}
}
impl<T, const N: usize> HistoryBuffer<T, N> {
/// Returns the current fill level of the buffer.
#[inline]
pub fn len(&self) -> usize {
if self.filled {
N
} else {
self.write_at
}
}
/// Returns the capacity of the buffer, which is the length of the
/// underlying backing array.
#[inline]
pub fn capacity(&self) -> usize {
N
}
/// Writes an element to the buffer, overwriting the oldest value.
pub fn write(&mut self, t: T) {
if self.filled {
// Drop the old before we overwrite it.
unsafe { ptr::drop_in_place(self.data[self.write_at].as_mut_ptr()) }
}
self.data[self.write_at] = MaybeUninit::new(t);
self.write_at += 1;
if self.write_at == self.capacity() {
self.write_at = 0;
self.filled = true;
}
}
/// Clones and writes all elements in a slice to the buffer.
///
/// If the slice is longer than the buffer, only the last `N`
/// elements will actually be stored.
pub fn extend_from_slice(&mut self, other: &[T])
where
T: Clone,
{
for item in other {
self.write(item.clone());
}
}
/// Returns a reference to the most recently written value.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut x: HistoryBuffer<u8, 16> = HistoryBuffer::new();
/// x.write(4);
/// x.write(10);
/// assert_eq!(x.recent(), Some(&10));
/// ```
pub fn recent(&self) -> Option<&T> {
if self.write_at == 0 {
if self.filled {
Some(unsafe { &*self.data[self.capacity() - 1].as_ptr() })
} else {
None
}
} else {
Some(unsafe { &*self.data[self.write_at - 1].as_ptr() })
}
}
/// Returns the array slice backing the buffer, without keeping track
/// of the write position. Therefore, the element order is unspecified.
pub fn as_slice(&self) -> &[T] {
unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.len()) }
}
/// Returns a pair of slices which contain, in order, the contents of the buffer.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
/// buffer.extend([0, 0, 0]);
/// buffer.extend([1, 2, 3, 4, 5, 6]);
/// assert_eq!(buffer.as_slices(), (&[1, 2, 3][..], &[4, 5, 6][..]));
/// ```
pub fn as_slices(&self) -> (&[T], &[T]) {
let buffer = self.as_slice();
if !self.filled {
(buffer, &[])
} else {
(&buffer[self.write_at..], &buffer[..self.write_at])
}
}
/// Returns an iterator for iterating over the buffer from oldest to newest.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
///
/// let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
/// buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]);
/// let expected = [1, 2, 3, 4, 5, 6];
/// for (x, y) in buffer.oldest_ordered().zip(expected.iter()) {
/// assert_eq!(x, y)
/// }
///
/// ```
pub fn oldest_ordered<'a>(&'a self) -> OldestOrdered<'a, T, N> {
if self.filled {
OldestOrdered {
buf: self,
cur: self.write_at,
wrapped: false,
}
} else {
// special case: act like we wrapped already to handle empty buffer.
OldestOrdered {
buf: self,
cur: 0,
wrapped: true,
}
}
}
}
impl<T, const N: usize> Extend<T> for HistoryBuffer<T, N> {
fn extend<I>(&mut self, iter: I)
where
I: IntoIterator<Item = T>,
{
for item in iter.into_iter() {
self.write(item);
}
}
}
impl<'a, T, const N: usize> Extend<&'a T> for HistoryBuffer<T, N>
where
T: 'a + Clone,
{
fn extend<I>(&mut self, iter: I)
where
I: IntoIterator<Item = &'a T>,
{
self.extend(iter.into_iter().cloned())
}
}
impl<T, const N: usize> Clone for HistoryBuffer<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut ret = Self::new();
for (new, old) in ret.data.iter_mut().zip(self.as_slice()) {
new.write(old.clone());
}
ret.filled = self.filled;
ret.write_at = self.write_at;
ret
}
}
impl<T, const N: usize> Drop for HistoryBuffer<T, N> {
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(ptr::slice_from_raw_parts_mut(
self.data.as_mut_ptr() as *mut T,
self.len(),
))
}
}
}
impl<T, const N: usize> Deref for HistoryBuffer<T, N> {
type Target = [T];
fn deref(&self) -> &[T] {
self.as_slice()
}
}
impl<T, const N: usize> AsRef<[T]> for HistoryBuffer<T, N> {
#[inline]
fn as_ref(&self) -> &[T] {
self
}
}
impl<T, const N: usize> fmt::Debug for HistoryBuffer<T, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<[T] as fmt::Debug>::fmt(self, f)
}
}
impl<T, const N: usize> Default for HistoryBuffer<T, N> {
fn default() -> Self {
Self::new()
}
}
impl<T, const N: usize> PartialEq for HistoryBuffer<T, N>
where
T: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.oldest_ordered().eq(other.oldest_ordered())
}
}
/// An iterator on the underlying buffer ordered from oldest data to newest
#[derive(Clone)]
pub struct OldestOrdered<'a, T, const N: usize> {
buf: &'a HistoryBuffer<T, N>,
cur: usize,
wrapped: bool,
}
impl<'a, T, const N: usize> Iterator for OldestOrdered<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<&'a T> {
if self.cur == self.buf.len() && self.buf.filled {
// roll-over
self.cur = 0;
self.wrapped = true;
}
if self.cur == self.buf.write_at && self.wrapped {
return None;
}
let item = &self.buf[self.cur];
self.cur += 1;
Some(item)
}
}
#[cfg(test)]
mod tests {
use crate::HistoryBuffer;
use core::fmt::Debug;
use core::sync::atomic::{AtomicUsize, Ordering};
#[test]
fn new() {
let x: HistoryBuffer<u8, 4> = HistoryBuffer::new_with(1);
assert_eq!(x.len(), 4);
assert_eq!(x.as_slice(), [1; 4]);
assert_eq!(*x, [1; 4]);
let x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), []);
}
#[test]
fn write() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
x.write(1);
x.write(4);
assert_eq!(x.as_slice(), [1, 4]);
x.write(5);
x.write(6);
x.write(10);
assert_eq!(x.as_slice(), [10, 4, 5, 6]);
x.extend([11, 12].iter());
assert_eq!(x.as_slice(), [10, 11, 12, 6]);
}
#[test]
fn clear() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new_with(1);
x.clear();
assert_eq!(x.as_slice(), []);
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
x.clear_with(1);
assert_eq!(x.as_slice(), [1; 4]);
}
#[test]
fn clone() {
let mut x: HistoryBuffer<u8, 3> = HistoryBuffer::new();
for i in 0..10 {
assert_eq!(x.as_slice(), x.clone().as_slice());
x.write(i);
}
// Records number of clones locally and globally.
static GLOBAL: AtomicUsize = AtomicUsize::new(0);
#[derive(Default, PartialEq, Debug)]
struct InstrumentedClone(usize);
impl Clone for InstrumentedClone {
fn clone(&self) -> Self {
GLOBAL.fetch_add(1, Ordering::Relaxed);
Self(self.0 + 1)
}
}
let mut y: HistoryBuffer<InstrumentedClone, 2> = HistoryBuffer::new();
let _ = y.clone();
assert_eq!(GLOBAL.load(Ordering::Relaxed), 0);
y.write(InstrumentedClone(0));
assert_eq!(GLOBAL.load(Ordering::Relaxed), 0);
assert_eq!(y.clone().as_slice(), [InstrumentedClone(1)]);
assert_eq!(GLOBAL.load(Ordering::Relaxed), 1);
y.write(InstrumentedClone(0));
assert_eq!(GLOBAL.load(Ordering::Relaxed), 1);
assert_eq!(
y.clone().as_slice(),
[InstrumentedClone(1), InstrumentedClone(1)]
);
assert_eq!(GLOBAL.load(Ordering::Relaxed), 3);
assert_eq!(
y.clone().clone().clone().as_slice(),
[InstrumentedClone(3), InstrumentedClone(3)]
);
assert_eq!(GLOBAL.load(Ordering::Relaxed), 9);
}
#[test]
fn recent() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.recent(), None);
x.write(1);
x.write(4);
assert_eq!(x.recent(), Some(&4));
x.write(5);
x.write(6);
x.write(10);
assert_eq!(x.recent(), Some(&10));
}
#[test]
fn as_slice() {
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), []);
x.extend([1, 2, 3, 4, 5].iter());
assert_eq!(x.as_slice(), [5, 2, 3, 4]);
}
/// Test whether .as_slices() behaves as expected.
#[test]
fn as_slices() {
let mut buffer: HistoryBuffer<u8, 4> = HistoryBuffer::new();
let mut extend_then_assert = |extend: &[u8], assert: (&[u8], &[u8])| {
buffer.extend(extend);
assert_eq!(buffer.as_slices(), assert);
};
extend_then_assert(b"a", (b"a", b""));
extend_then_assert(b"bcd", (b"abcd", b""));
extend_then_assert(b"efg", (b"d", b"efg"));
extend_then_assert(b"h", (b"efgh", b""));
extend_then_assert(b"123456", (b"34", b"56"));
}
/// Test whether .as_slices() and .oldest_ordered() produce elements in the same order.
#[test]
fn as_slices_equals_ordered() {
let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
for n in 0..20 {
buffer.write(n);
let (head, tail) = buffer.as_slices();
assert_eq_iter(
[head, tail].iter().copied().flatten(),
buffer.oldest_ordered(),
)
}
}
#[test]
fn ordered() {
// test on an empty buffer
let buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
let mut iter = buffer.oldest_ordered();
assert_eq!(iter.next(), None);
assert_eq!(iter.next(), None);
// test on an unfilled buffer
let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
buffer.extend([1, 2, 3]);
assert_eq!(buffer.len(), 3);
assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3]);
// test on a filled buffer
let mut buffer: HistoryBuffer<u8, 6> = HistoryBuffer::new();
buffer.extend([0, 0, 0, 1, 2, 3, 4, 5, 6]);
assert_eq!(buffer.len(), 6);
assert_eq_iter(buffer.oldest_ordered(), &[1, 2, 3, 4, 5, 6]);
// comprehensively test all cases
for n in 0..50 {
const N: usize = 7;
let mut buffer: HistoryBuffer<u8, N> = HistoryBuffer::new();
buffer.extend(0..n);
assert_eq_iter(
buffer.oldest_ordered().copied(),
n.saturating_sub(N as u8)..n,
);
}
}
/// Compares two iterators item by item, making sure they stop at the same time.
fn assert_eq_iter<I: Eq + Debug>(
a: impl IntoIterator<Item = I>,
b: impl IntoIterator<Item = I>,
) {
let mut a = a.into_iter();
let mut b = b.into_iter();
let mut i = 0;
loop {
let a_item = a.next();
let b_item = b.next();
assert_eq!(a_item, b_item, "{}", i);
i += 1;
if b_item.is_none() {
break;
}
}
}
#[test]
fn partial_eq() {
let mut x: HistoryBuffer<u8, 3> = HistoryBuffer::new();
let mut y: HistoryBuffer<u8, 3> = HistoryBuffer::new();
assert_eq!(x, y);
x.write(1);
assert_ne!(x, y);
y.write(1);
assert_eq!(x, y);
for _ in 0..4 {
x.write(2);
assert_ne!(x, y);
for i in 0..5 {
x.write(i);
y.write(i);
}
assert_eq!(
x,
y,
"{:?} {:?}",
x.iter().collect::<Vec<_>>(),
y.iter().collect::<Vec<_>>()
);
}
}
}
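Putting the module doc's rolling-average use case together in one place, as a sketch with illustrative names and sizes. Once the buffer has filled, every `write` evicts the oldest sample, so the mean is always over the most recent `N` readings:

use heapless::HistoryBuffer;

fn rolling_mean(samples: &HistoryBuffer<u32, 8>) -> Option<u32> {
    if samples.len() == 0 {
        return None;
    }
    // Element order in `as_slice` is unspecified, which is fine for a sum.
    Some(samples.as_slice().iter().sum::<u32>() / samples.len() as u32)
}

let mut buf: HistoryBuffer<u32, 8> = HistoryBuffer::new();
buf.extend(1..=8);
assert_eq!(rolling_mean(&buf), Some(4)); // (1 + ... + 8) / 8 = 4 with integer division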

1555
vendor/heapless/src/indexmap.rs vendored Normal file

File diff suppressed because it is too large

658
vendor/heapless/src/indexset.rs vendored Normal file

@@ -0,0 +1,658 @@
use crate::indexmap::{self, IndexMap};
use core::{
borrow::Borrow,
fmt,
hash::{BuildHasher, Hash},
iter::FromIterator,
};
use hash32::{BuildHasherDefault, FnvHasher};
/// An [`IndexSet`] using the default FNV hasher.
///
/// A list of all methods and traits available for `FnvIndexSet` can be found in
/// the [`IndexSet`] documentation.
///
/// # Examples
/// ```
/// use heapless::FnvIndexSet;
///
/// // A hash set with a capacity of 16 elements allocated on the stack
/// let mut books = FnvIndexSet::<_, 16>::new();
///
/// // Add some books.
/// books.insert("A Dance With Dragons").unwrap();
/// books.insert("To Kill a Mockingbird").unwrap();
/// books.insert("The Odyssey").unwrap();
/// books.insert("The Great Gatsby").unwrap();
///
/// // Check for a specific one.
/// if !books.contains("The Winds of Winter") {
/// println!("We have {} books, but The Winds of Winter ain't one.",
/// books.len());
/// }
///
/// // Remove a book.
/// books.remove("The Odyssey");
///
/// // Iterate over everything.
/// for book in &books {
/// println!("{}", book);
/// }
/// ```
pub type FnvIndexSet<T, const N: usize> = IndexSet<T, BuildHasherDefault<FnvHasher>, N>;
/// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/2/indexmap/set/struct.IndexSet.html).
///
/// Note that you cannot use `IndexSet` directly, since it is generic over the hashing algorithm
/// in use. Pick a concrete instantiation like [`FnvIndexSet`] instead
/// or create your own.
///
/// Note that the capacity of the `IndexSet` must be a power of 2.
///
/// # Examples
/// Since `IndexSet` cannot be used directly, we're using its `FnvIndexSet` instantiation
/// for this example.
///
/// ```
/// use heapless::FnvIndexSet;
///
/// // A hash set with a capacity of 16 elements allocated on the stack
/// let mut books = FnvIndexSet::<_, 16>::new();
///
/// // Add some books.
/// books.insert("A Dance With Dragons").unwrap();
/// books.insert("To Kill a Mockingbird").unwrap();
/// books.insert("The Odyssey").unwrap();
/// books.insert("The Great Gatsby").unwrap();
///
/// // Check for a specific one.
/// if !books.contains("The Winds of Winter") {
/// println!("We have {} books, but The Winds of Winter ain't one.",
/// books.len());
/// }
///
/// // Remove a book.
/// books.remove("The Odyssey");
///
/// // Iterate over everything.
/// for book in &books {
/// println!("{}", book);
/// }
/// ```
pub struct IndexSet<T, S, const N: usize> {
map: IndexMap<T, (), S, N>,
}
impl<T, S, const N: usize> IndexSet<T, BuildHasherDefault<S>, N> {
/// Creates an empty `IndexSet`
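///
/// # Examples
///
/// A minimal usage sketch (illustrative):
///
/// ```
/// use heapless::FnvIndexSet;
///
/// // an `IndexSet` with a capacity of 16 elements, using the FNV hasher
/// let mut set: FnvIndexSet<i32, 16> = FnvIndexSet::new();
/// set.insert(1).unwrap();
/// assert!(set.contains(&1));
/// ```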
pub const fn new() -> Self {
IndexSet {
map: IndexMap::new(),
}
}
}
impl<T, S, const N: usize> IndexSet<T, S, N> {
/// Returns the number of elements the set can hold
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set = FnvIndexSet::<i32, 16>::new();
/// assert_eq!(set.capacity(), 16);
/// ```
pub fn capacity(&self) -> usize {
self.map.capacity()
}
/// Return an iterator over the values of the set, in insertion order
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set = FnvIndexSet::<_, 16>::new();
/// set.insert("a").unwrap();
/// set.insert("b").unwrap();
///
/// // Will print in insertion order: a, b
/// for x in set.iter() {
/// println!("{}", x);
/// }
/// ```
pub fn iter(&self) -> Iter<'_, T> {
Iter {
iter: self.map.iter(),
}
}
/// Get the first value
///
/// Computes in **O(1)** time
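///
/// # Examples
///
/// An illustrative sketch:
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// // values are kept in insertion order, so `1` comes first
/// assert_eq!(set.first(), Some(&1));
/// ```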
pub fn first(&self) -> Option<&T> {
self.map.first().map(|(k, _v)| k)
}
/// Get the last value
///
/// Computes in **O(1)** time
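///
/// # Examples
///
/// An illustrative sketch:
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// // `3` was inserted most recently, so it is last
/// assert_eq!(set.last(), Some(&3));
/// ```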
pub fn last(&self) -> Option<&T> {
self.map.last().map(|(k, _v)| k)
}
/// Returns the number of elements in the set.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// assert_eq!(v.len(), 0);
/// v.insert(1).unwrap();
/// assert_eq!(v.len(), 1);
/// ```
pub fn len(&self) -> usize {
self.map.len()
}
/// Returns `true` if the set contains no elements.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// assert!(v.is_empty());
/// v.insert(1).unwrap();
/// assert!(!v.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Clears the set, removing all values.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// v.insert(1).unwrap();
/// v.clear();
/// assert!(v.is_empty());
/// ```
pub fn clear(&mut self) {
self.map.clear()
}
}
impl<T, S, const N: usize> IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
{
/// Visits the values representing the difference, i.e. the values that are in `self` but not in
/// `other`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Can be seen as `a - b`.
/// for x in a.difference(&b) {
/// println!("{}", x); // Print 1
/// }
///
/// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect();
/// assert_eq!(diff, [1].iter().collect::<FnvIndexSet<_, 16>>());
///
/// // Note that difference is not symmetric,
/// // and `b - a` means something else:
/// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect();
/// assert_eq!(diff, [4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn difference<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> Difference<'a, T, S2, N2>
where
S2: BuildHasher,
{
Difference {
iter: self.iter(),
other,
}
}
/// Visits the values representing the symmetric difference, i.e. the values that are in `self`
/// or in `other` but not in both.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 1, 4 in that order.
/// for x in a.symmetric_difference(&b) {
/// println!("{}", x);
/// }
///
/// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect();
/// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect();
///
/// assert_eq!(diff1, diff2);
/// assert_eq!(diff1, [1, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn symmetric_difference<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> impl Iterator<Item = &'a T>
where
S2: BuildHasher,
{
self.difference(other).chain(other.difference(self))
}
/// Visits the values representing the intersection, i.e. the values that are both in `self` and
/// `other`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 2, 3 in that order.
/// for x in a.intersection(&b) {
/// println!("{}", x);
/// }
///
/// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect();
/// assert_eq!(intersection, [2, 3].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn intersection<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> Intersection<'a, T, S2, N2>
where
S2: BuildHasher,
{
Intersection {
iter: self.iter(),
other,
}
}
/// Visits the values representing the union, i.e. all the values in `self` or `other`, without
/// duplicates.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 1, 2, 3, 4 in that order.
/// for x in a.union(&b) {
/// println!("{}", x);
/// }
///
/// let union: FnvIndexSet<_, 16> = a.union(&b).collect();
/// assert_eq!(union, [1, 2, 3, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn union<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, S2, N2>,
) -> impl Iterator<Item = &'a T>
where
S2: BuildHasher,
{
self.iter().chain(other.difference(self))
}
/// Returns `true` if the set contains a value.
///
/// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the
/// borrowed form must match those for the value type.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// assert_eq!(set.contains(&1), true);
/// assert_eq!(set.contains(&4), false);
/// ```
pub fn contains<Q>(&self, value: &Q) -> bool
where
T: Borrow<Q>,
Q: ?Sized + Eq + Hash,
{
self.map.contains_key(value)
}
/// Returns `true` if `self` has no elements in common with `other`. This is equivalent to
/// checking for an empty intersection.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(a.is_disjoint(&b), true);
/// b.insert(4).unwrap();
/// assert_eq!(a.is_disjoint(&b), true);
/// b.insert(1).unwrap();
/// assert_eq!(a.is_disjoint(&b), false);
/// ```
pub fn is_disjoint<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
S2: BuildHasher,
{
self.iter().all(|v| !other.contains(v))
}
/// Returns `true` if the set is a subset of another, i.e. `other` contains at least all the
/// values in `self`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.is_subset(&sup), true);
/// set.insert(2).unwrap();
/// assert_eq!(set.is_subset(&sup), true);
/// set.insert(4).unwrap();
/// assert_eq!(set.is_subset(&sup), false);
/// ```
pub fn is_subset<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
S2: BuildHasher,
{
self.iter().all(|v| other.contains(v))
}
/// Returns `true` if the set is a superset of another, i.e. `self` contains at least all the
/// values in `other`.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.is_superset(&sub), false);
///
/// set.insert(0).unwrap();
/// set.insert(1).unwrap();
/// assert_eq!(set.is_superset(&sub), false);
///
/// set.insert(2).unwrap();
/// assert_eq!(set.is_superset(&sub), true);
/// ```
pub fn is_superset<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
S2: BuildHasher,
{
other.is_subset(self)
}
/// Adds a value to the set.
///
/// If the set did not have this value present, `true` is returned.
///
/// If the set did have this value present, `false` is returned.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.insert(2).unwrap(), true);
/// assert_eq!(set.insert(2).unwrap(), false);
/// assert_eq!(set.len(), 1);
/// ```
pub fn insert(&mut self, value: T) -> Result<bool, T> {
self.map
.insert(value, ())
.map(|old| old.is_none())
.map_err(|(k, _)| k)
}
/// Removes a value from the set. Returns `true` if the value was present in the set.
///
/// The value may be any borrowed form of the set's value type, but `Hash` and `Eq` on the
/// borrowed form must match those for the value type.
///
/// # Examples
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// set.insert(2).unwrap();
/// assert_eq!(set.remove(&2), true);
/// assert_eq!(set.remove(&2), false);
/// ```
pub fn remove<Q>(&mut self, value: &Q) -> bool
where
T: Borrow<Q>,
Q: ?Sized + Eq + Hash,
{
self.map.remove(value).is_some()
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` for which `f(&e)` returns `false`.
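///
/// # Examples
///
/// A minimal sketch of the expected behavior (illustrative):
///
/// ```
/// use heapless::FnvIndexSet;
///
/// let mut set: FnvIndexSet<_, 16> = [1, 2, 3, 4].iter().cloned().collect();
/// // keep only the even values
/// set.retain(|&e| e % 2 == 0);
/// assert_eq!(set, [2, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```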
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
self.map.retain(move |k, _| f(k));
}
}
impl<T, S, const N: usize> Clone for IndexSet<T, S, N>
where
T: Clone,
S: Clone,
{
fn clone(&self) -> Self {
Self {
map: self.map.clone(),
}
}
}
impl<T, S, const N: usize> fmt::Debug for IndexSet<T, S, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_set().entries(self.iter()).finish()
}
}
impl<T, S, const N: usize> Default for IndexSet<T, S, N>
where
S: Default,
{
fn default() -> Self {
IndexSet {
map: <_>::default(),
}
}
}
impl<T, S1, S2, const N1: usize, const N2: usize> PartialEq<IndexSet<T, S2, N2>>
for IndexSet<T, S1, N1>
where
T: Eq + Hash,
S1: BuildHasher,
S2: BuildHasher,
{
fn eq(&self, other: &IndexSet<T, S2, N2>) -> bool {
self.len() == other.len() && self.is_subset(other)
}
}
impl<T, S, const N: usize> Extend<T> for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
{
fn extend<I>(&mut self, iterable: I)
where
I: IntoIterator<Item = T>,
{
self.map.extend(iterable.into_iter().map(|k| (k, ())))
}
}
impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet<T, S, N>
where
T: 'a + Eq + Hash + Copy,
S: BuildHasher,
{
fn extend<I>(&mut self, iterable: I)
where
I: IntoIterator<Item = &'a T>,
{
self.extend(iterable.into_iter().cloned())
}
}
impl<T, S, const N: usize> FromIterator<T> for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher + Default,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = T>,
{
let mut set = IndexSet::default();
set.extend(iter);
set
}
}
impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
{
type Item = &'a T;
type IntoIter = Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
/// An iterator over the items of a [`IndexSet`].
///
/// This `struct` is created by the [`iter`](IndexSet::iter) method on [`IndexSet`]. See its
/// documentation for more.
pub struct Iter<'a, T> {
iter: indexmap::Iter<'a, T, ()>,
}
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|(k, _)| k)
}
}
impl<'a, T> Clone for Iter<'a, T> {
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
}
}
}
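/// An iterator over the values representing the difference of two [`IndexSet`]s.
///
/// This `struct` is created by the [`difference`](IndexSet::difference) method on [`IndexSet`].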
pub struct Difference<'a, T, S, const N: usize>
where
S: BuildHasher,
T: Eq + Hash,
{
iter: Iter<'a, T>,
other: &'a IndexSet<T, S, N>,
}
impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N>
where
S: BuildHasher,
T: Eq + Hash,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
loop {
let elt = self.iter.next()?;
if !self.other.contains(elt) {
return Some(elt);
}
}
}
}
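/// An iterator over the values representing the intersection of two [`IndexSet`]s.
///
/// This `struct` is created by the [`intersection`](IndexSet::intersection) method on [`IndexSet`].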
pub struct Intersection<'a, T, S, const N: usize>
where
S: BuildHasher,
T: Eq + Hash,
{
iter: Iter<'a, T>,
other: &'a IndexSet<T, S, N>,
}
impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N>
where
S: BuildHasher,
T: Eq + Hash,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
loop {
let elt = self.iter.next()?;
if self.other.contains(elt) {
return Some(elt);
}
}
}
}

145
vendor/heapless/src/lib.rs vendored Normal file

@@ -0,0 +1,145 @@
//! `static` friendly data structures that don't require dynamic memory allocation
//!
//! The core principle behind `heapless` is that its data structures are backed by a *static* memory
//! allocation. For example, you can think of `heapless::Vec` as a fixed-capacity version of
//! `std::Vec` that can't be reallocated on the fly (e.g. by `push`).
//!
//! All `heapless` data structures store their memory allocation *inline* and specify their capacity
//! via their type parameter `N`. This means that you can instantiate a `heapless` data structure on
//! the stack, in a `static` variable, or even in the heap.
//!
//! ```
//! use heapless::Vec; // fixed capacity `std::Vec`
//!
//! // on the stack
//! let mut xs: Vec<u8, 8> = Vec::new(); // can hold up to 8 elements
//! xs.push(42).unwrap();
//! assert_eq!(xs.pop(), Some(42));
//!
//! // in a `static` variable
//! static mut XS: Vec<u8, 8> = Vec::new();
//!
//! let xs = unsafe { &mut XS };
//!
//! xs.push(42);
//! assert_eq!(xs.pop(), Some(42));
//!
//! // in the heap (though kind of pointless because no reallocation)
//! let mut ys: Box<Vec<u8, 8>> = Box::new(Vec::new());
//! ys.push(42).unwrap();
//! assert_eq!(ys.pop(), Some(42));
//! ```
//!
//! Because they have fixed capacity, `heapless` data structures don't implicitly reallocate. This
//! means that operations like `heapless::Vec.push` are *truly* constant time rather than amortized
//! constant time with a potentially unbounded (allocator-dependent) worst-case execution time
//! (which is unacceptable for hard real-time applications).
//!
//! `heapless` data structures don't use a memory allocator which means no risk of an uncatchable
//! Out Of Memory (OOM) condition while performing operations on them. It's certainly possible to
//! run out of capacity while growing `heapless` data structures, but the API lets you handle this
//! possibility by returning a `Result` on operations that may exhaust the capacity of the data
//! structure.
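//!
//! For example, a push into a full `Vec` hands the rejected value back (illustrative sketch):
//!
//! ```
//! use heapless::Vec;
//!
//! let mut xs: Vec<u8, 2> = Vec::new(); // can hold up to 2 elements
//! xs.push(1).unwrap();
//! xs.push(2).unwrap();
//! // the vector is full; the rejected value comes back in the `Err` variant
//! assert_eq!(xs.push(3), Err(3));
//! ```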
//!
//! List of currently implemented data structures:
//!
#![cfg_attr(
any(arm_llsc, target_arch = "x86"),
doc = "- [`Arc`](pool::arc::Arc) -- like `std::sync::Arc` but backed by a lock-free memory pool rather than `#[global_allocator]`"
)]
#![cfg_attr(
any(arm_llsc, target_arch = "x86"),
doc = "- [`Box`](pool::boxed::Box) -- like `std::boxed::Box` but backed by a lock-free memory pool rather than `#[global_allocator]`"
)]
//! - [`BinaryHeap`] -- priority queue
//! - [`IndexMap`] -- hash table
//! - [`IndexSet`] -- hash set
//! - [`LinearMap`]
#![cfg_attr(
any(arm_llsc, target_arch = "x86"),
doc = "- [`Object`](pool::object::Object) -- objects managed by an object pool"
)]
//! - [`String`]
//! - [`Vec`]
//! - [`mpmc::Q*`](mpmc) -- multiple producer multiple consumer lock-free queue
//! - [`spsc::Queue`] -- single producer single consumer lock-free queue
//!
//! # Optional Features
//!
//! The `heapless` crate provides the following optional Cargo features:
//!
//! - `ufmt`: Implement [`ufmt_write::uWrite`] for `String<N>` and `Vec<u8, N>`
//!
//! [`ufmt_write::uWrite`]: https://docs.rs/ufmt-write/
//!
//! # Minimum Supported Rust Version (MSRV)
//!
//! This crate does *not* have a Minimum Supported Rust Version (MSRV) and may make use of language
//! features and APIs in the standard library available in the latest stable Rust version.
//!
//! In other words, changes in the Rust version requirement of this crate are not considered a
//! semver breaking change and may occur in patch version releases.
#![cfg_attr(docsrs, feature(doc_cfg), feature(doc_auto_cfg))]
#![cfg_attr(not(test), no_std)]
#![deny(missing_docs)]
#![deny(warnings)]
pub use binary_heap::BinaryHeap;
pub use deque::Deque;
pub use histbuf::{HistoryBuffer, OldestOrdered};
pub use indexmap::{
Bucket, Entry, FnvIndexMap, IndexMap, Iter as IndexMapIter, IterMut as IndexMapIterMut,
Keys as IndexMapKeys, OccupiedEntry, Pos, VacantEntry, Values as IndexMapValues,
ValuesMut as IndexMapValuesMut,
};
pub use indexset::{FnvIndexSet, IndexSet, Iter as IndexSetIter};
pub use linear_map::LinearMap;
pub use string::String;
pub use vec::Vec;
#[macro_use]
#[cfg(test)]
mod test_helpers;
mod deque;
mod histbuf;
mod indexmap;
mod indexset;
mod linear_map;
mod string;
mod vec;
#[cfg(feature = "serde")]
mod de;
#[cfg(feature = "serde")]
mod ser;
pub mod binary_heap;
#[cfg(feature = "defmt-03")]
mod defmt;
#[cfg(any(
// assume we have all atomics available if we're using portable-atomic
feature = "portable-atomic",
// target has native atomic CAS (mpmc_large requires usize, otherwise just u8)
all(feature = "mpmc_large", target_has_atomic = "ptr"),
all(not(feature = "mpmc_large"), target_has_atomic = "8")
))]
pub mod mpmc;
#[cfg(any(arm_llsc, target_arch = "x86"))]
pub mod pool;
pub mod sorted_linked_list;
#[cfg(any(
// assume we have all atomics available if we're using portable-atomic
feature = "portable-atomic",
// target has native atomic CAS. Note this is too restrictive, spsc requires load/store only, not CAS.
// This should be `cfg(target_has_atomic_load_store)`, but that's not stable yet.
target_has_atomic = "ptr",
// or the current target is in a list in build.rs of targets known to have load/store but no CAS.
has_atomic_load_store
))]
pub mod spsc;
#[cfg(feature = "ufmt")]
mod ufmt;
mod sealed;

555
vendor/heapless/src/linear_map.rs vendored Normal file

@@ -0,0 +1,555 @@
use crate::Vec;
use core::{borrow::Borrow, fmt, iter::FromIterator, mem, ops, slice};
/// A fixed capacity map / dictionary that performs lookups via linear search
///
/// Note that since this map doesn't use hashing, most operations are **O(N)** instead of O(1)
pub struct LinearMap<K, V, const N: usize> {
pub(crate) buffer: Vec<(K, V), N>,
}
impl<K, V, const N: usize> LinearMap<K, V, N> {
/// Creates an empty `LinearMap`
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// // allocate the map on the stack
/// let mut map: LinearMap<&str, isize, 8> = LinearMap::new();
///
/// // allocate the map in a static variable
/// static mut MAP: LinearMap<&str, isize, 8> = LinearMap::new();
/// ```
pub const fn new() -> Self {
Self { buffer: Vec::new() }
}
}
impl<K, V, const N: usize> LinearMap<K, V, N>
where
K: Eq,
{
/// Returns the number of elements that the map can hold
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let map: LinearMap<&str, isize, 8> = LinearMap::new();
/// assert_eq!(map.capacity(), 8);
/// ```
pub fn capacity(&self) -> usize {
N
}
/// Clears the map, removing all key-value pairs
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// map.clear();
/// assert!(map.is_empty());
/// ```
pub fn clear(&mut self) {
self.buffer.clear()
}
/// Returns true if the map contains a value for the specified key.
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
/// ```
pub fn contains_key(&self, key: &K) -> bool {
self.get(key).is_some()
}
/// Returns a reference to the value corresponding to the key
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
/// ```
pub fn get<Q>(&self, key: &Q) -> Option<&V>
where
K: Borrow<Q>,
Q: Eq + ?Sized,
{
self.iter()
.find(|&(k, _)| k.borrow() == key)
.map(|(_, v)| v)
}
/// Returns a mutable reference to the value corresponding to the key
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
/// }
/// assert_eq!(map[&1], "b");
/// ```
pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
where
K: Borrow<Q>,
Q: Eq + ?Sized,
{
self.iter_mut()
.find(|&(k, _)| k.borrow() == key)
.map(|(_, v)| v)
}
/// Returns the number of elements in this map
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut a: LinearMap<_, _, 8> = LinearMap::new();
/// assert_eq!(a.len(), 0);
/// a.insert(1, "a").unwrap();
/// assert_eq!(a.len(), 1);
/// ```
pub fn len(&self) -> usize {
self.buffer.len()
}
/// Inserts a key-value pair into the map.
///
/// If the map did not have this key present, `None` is returned.
///
/// If the map did have this key present, the value is updated, and the old value is returned.
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// assert_eq!(map.insert(37, "a").unwrap(), None);
/// assert_eq!(map.is_empty(), false);
///
/// map.insert(37, "b").unwrap();
/// assert_eq!(map.insert(37, "c").unwrap(), Some("b"));
/// assert_eq!(map[&37], "c");
/// ```
pub fn insert(&mut self, key: K, mut value: V) -> Result<Option<V>, (K, V)> {
if let Some((_, v)) = self.iter_mut().find(|&(k, _)| *k == key) {
mem::swap(v, &mut value);
return Ok(Some(value));
}
self.buffer.push((key, value))?;
Ok(None)
}
/// Returns true if the map contains no elements
///
/// Computes in **O(1)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut a: LinearMap<_, _, 8> = LinearMap::new();
/// assert!(a.is_empty());
/// a.insert(1, "a").unwrap();
/// assert!(!a.is_empty());
/// ```
pub fn is_empty(&self) -> bool {
self.len() == 0
}
/// An iterator visiting all key-value pairs in arbitrary order.
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for (key, val) in map.iter() {
/// println!("key: {} val: {}", key, val);
/// }
/// ```
pub fn iter(&self) -> Iter<'_, K, V> {
Iter {
iter: self.buffer.as_slice().iter(),
}
}
/// An iterator visiting all key-value pairs in arbitrary order, with mutable references to the
/// values
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// // Update all values
/// for (_, val) in map.iter_mut() {
/// *val = 2;
/// }
///
/// for (key, val) in &map {
/// println!("key: {} val: {}", key, val);
/// }
/// ```
pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
IterMut {
iter: self.buffer.as_mut_slice().iter_mut(),
}
}
/// An iterator visiting all keys in arbitrary order
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for key in map.keys() {
/// println!("{}", key);
/// }
/// ```
pub fn keys(&self) -> impl Iterator<Item = &K> {
self.iter().map(|(k, _)| k)
}
/// Removes a key from the map, returning the value at the key if the key was previously in the
/// map
///
/// Computes in **O(N)** time
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
/// ```
pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
where
K: Borrow<Q>,
Q: Eq + ?Sized,
{
let idx = self
.keys()
.enumerate()
.find(|&(_, k)| k.borrow() == key)
.map(|(idx, _)| idx);
idx.map(|idx| self.buffer.swap_remove(idx).1)
}
/// An iterator visiting all values in arbitrary order
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for val in map.values() {
/// println!("{}", val);
/// }
/// ```
pub fn values(&self) -> impl Iterator<Item = &V> {
self.iter().map(|(_, v)| v)
}
/// An iterator visiting all values mutably in arbitrary order
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
///
/// for val in map.values_mut() {
/// *val += 10;
/// }
///
/// for val in map.values() {
/// println!("{}", val);
/// }
/// ```
pub fn values_mut(&mut self) -> impl Iterator<Item = &mut V> {
self.iter_mut().map(|(_, v)| v)
}
}
impl<'a, K, V, Q, const N: usize> ops::Index<&'a Q> for LinearMap<K, V, N>
where
K: Borrow<Q> + Eq,
Q: Eq + ?Sized,
{
type Output = V;
fn index(&self, key: &Q) -> &V {
self.get(key).expect("no entry found for key")
}
}
impl<'a, K, V, Q, const N: usize> ops::IndexMut<&'a Q> for LinearMap<K, V, N>
where
K: Borrow<Q> + Eq,
Q: Eq + ?Sized,
{
fn index_mut(&mut self, key: &Q) -> &mut V {
self.get_mut(key).expect("no entry found for key")
}
}
impl<K, V, const N: usize> Default for LinearMap<K, V, N>
where
K: Eq,
{
fn default() -> Self {
Self::new()
}
}
impl<K, V, const N: usize> Clone for LinearMap<K, V, N>
where
K: Eq + Clone,
V: Clone,
{
fn clone(&self) -> Self {
Self {
buffer: self.buffer.clone(),
}
}
}
impl<K, V, const N: usize> fmt::Debug for LinearMap<K, V, N>
where
K: Eq + fmt::Debug,
V: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<K, V, const N: usize> FromIterator<(K, V)> for LinearMap<K, V, N>
where
K: Eq,
{
fn from_iter<I>(iter: I) -> Self
where
I: IntoIterator<Item = (K, V)>,
{
let mut out = Self::new();
out.buffer.extend(iter);
out
}
}
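/// An owning iterator over the key-value pairs of a [`LinearMap`].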
pub struct IntoIter<K, V, const N: usize>
where
K: Eq,
{
inner: <Vec<(K, V), N> as IntoIterator>::IntoIter,
}
impl<K, V, const N: usize> Iterator for IntoIter<K, V, N>
where
K: Eq,
{
type Item = (K, V);
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
}
impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap<K, V, N>
where
K: Eq,
{
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
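/// An iterator over the key-value pairs of a [`LinearMap`].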
pub struct Iter<'a, K, V> {
iter: slice::Iter<'a, (K, V)>,
}
impl<'a, K, V> Iterator for Iter<'a, K, V> {
type Item = (&'a K, &'a V);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|&(ref k, ref v)| (k, v))
}
}
impl<'a, K, V> Clone for Iter<'a, K, V> {
fn clone(&self) -> Self {
Self {
iter: self.iter.clone(),
}
}
}
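/// An iterator over the key-value pairs of a [`LinearMap`], with mutable references to the values.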
pub struct IterMut<'a, K, V> {
iter: slice::IterMut<'a, (K, V)>,
}
impl<'a, K, V> Iterator for IterMut<'a, K, V> {
type Item = (&'a K, &'a mut V);
fn next(&mut self) -> Option<Self::Item> {
self.iter.next().map(|&mut (ref k, ref mut v)| (k, v))
}
}
impl<K, V, const N: usize, const N2: usize> PartialEq<LinearMap<K, V, N2>> for LinearMap<K, V, N>
where
K: Eq,
V: PartialEq,
{
fn eq(&self, other: &LinearMap<K, V, N2>) -> bool {
self.len() == other.len()
&& self
.iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
impl<K, V, const N: usize> Eq for LinearMap<K, V, N>
where
K: Eq,
V: PartialEq,
{
}
#[cfg(test)]
mod test {
use crate::LinearMap;
#[test]
fn static_new() {
static mut _L: LinearMap<i32, i32, 8> = LinearMap::new();
}
#[test]
fn partial_eq() {
{
let mut a = LinearMap::<_, _, 1>::new();
a.insert("k1", "v1").unwrap();
let mut b = LinearMap::<_, _, 2>::new();
b.insert("k1", "v1").unwrap();
assert!(a == b);
b.insert("k2", "v2").unwrap();
assert!(a != b);
}
{
let mut a = LinearMap::<_, _, 2>::new();
a.insert("k1", "v1").unwrap();
a.insert("k2", "v2").unwrap();
let mut b = LinearMap::<_, _, 2>::new();
b.insert("k2", "v2").unwrap();
b.insert("k1", "v1").unwrap();
assert!(a == b);
}
}
#[test]
fn drop() {
droppable!();
{
let mut v: LinearMap<i32, Droppable, 2> = LinearMap::new();
v.insert(0, Droppable::new()).ok().unwrap();
v.insert(1, Droppable::new()).ok().unwrap();
v.remove(&1).unwrap();
}
assert_eq!(Droppable::count(), 0);
{
let mut v: LinearMap<i32, Droppable, 2> = LinearMap::new();
v.insert(0, Droppable::new()).ok().unwrap();
v.insert(1, Droppable::new()).ok().unwrap();
}
assert_eq!(Droppable::count(), 0);
}
}

325
vendor/heapless/src/mpmc.rs vendored Normal file

@@ -0,0 +1,325 @@
//! A fixed capacity Multiple-Producer Multiple-Consumer (MPMC) lock-free queue
//!
//! NOTE: This module requires atomic CAS operations. On targets where they're not natively available,
//! they are emulated by the [`portable-atomic`](https://crates.io/crates/portable-atomic) crate.
//!
//! # Example
//!
//! This queue can be constructed in "const context". Placing it in a `static` variable lets *all*
//! contexts (interrupts / threads / `main`) safely enqueue and dequeue items from it.
//!
//! ``` ignore
//! #![no_main]
//! #![no_std]
//!
//! use panic_semihosting as _;
//!
//! use cortex_m::{asm, peripheral::syst::SystClkSource};
//! use cortex_m_rt::{entry, exception};
//! use cortex_m_semihosting::hprintln;
//! use heapless::mpmc::Q2;
//!
//! static Q: Q2<u8> = Q2::new();
//!
//! #[entry]
//! fn main() -> ! {
//! if let Some(p) = cortex_m::Peripherals::take() {
//! let mut syst = p.SYST;
//!
//! // configures the system timer to trigger a SysTick exception every second
//! syst.set_clock_source(SystClkSource::Core);
//! syst.set_reload(12_000_000);
//! syst.enable_counter();
//! syst.enable_interrupt();
//! }
//!
//! loop {
//! if let Some(x) = Q.dequeue() {
//! hprintln!("{}", x).ok();
//! } else {
//! asm::wfi();
//! }
//! }
//! }
//!
//! #[exception]
//! fn SysTick() {
//! static mut COUNT: u8 = 0;
//!
//! Q.enqueue(*COUNT).ok();
//! *COUNT += 1;
//! }
//! ```
//!
//! # Benchmark
//!
//! Measured on an ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles
//!
//! N| `Q8::<u8>::enqueue().ok()` (`z`) | `Q8::<u8>::dequeue()` (`z`) |
//! -|----------------------------------|-----------------------------|
//! 0|34 |35 |
//! 1|52 |53 |
//! 2|69 |71 |
//!
//! - `N` denotes the number of *interruptions*. On Cortex-M, an interruption consists of an
//! interrupt handler preempting the would-be atomic section of the `enqueue` / `dequeue`
//! operation. Note that it does *not* matter if the higher priority handler uses the queue or
//! not.
//! - All execution times are in clock cycles. 1 clock cycle = 125 ns.
//! - Execution time is *dependent* on `mem::size_of::<T>()`. Both operations include one
//! `memcpy(T)` in their successful path.
//! - The optimization level is indicated in parentheses.
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
//!
//! # Portability
//!
//! This module requires CAS atomic instructions, which are not available on all architectures
//! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can,
//! however, be emulated with [`portable-atomic`](https://crates.io/crates/portable-atomic), which is
//! enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32`
//! targets.
//!
//! # References
//!
//! This is an implementation of Dmitry Vyukov's ["Bounded MPMC queue"][0] minus the cache padding.
//!
//! [0]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
use core::{cell::UnsafeCell, mem::MaybeUninit};
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;
use atomic::Ordering;
#[cfg(feature = "mpmc_large")]
type AtomicTargetSize = atomic::AtomicUsize;
#[cfg(not(feature = "mpmc_large"))]
type AtomicTargetSize = atomic::AtomicU8;
#[cfg(feature = "mpmc_large")]
type IntSize = usize;
#[cfg(not(feature = "mpmc_large"))]
type IntSize = u8;
/// MPMC queue with a capacity for 2 elements.
pub type Q2<T> = MpMcQueue<T, 2>;
/// MPMC queue with a capacity for 4 elements.
pub type Q4<T> = MpMcQueue<T, 4>;
/// MPMC queue with a capacity for 8 elements.
pub type Q8<T> = MpMcQueue<T, 8>;
/// MPMC queue with a capacity for 16 elements.
pub type Q16<T> = MpMcQueue<T, 16>;
/// MPMC queue with a capacity for 32 elements.
pub type Q32<T> = MpMcQueue<T, 32>;
/// MPMC queue with a capacity for 64 elements.
pub type Q64<T> = MpMcQueue<T, 64>;
/// MPMC queue with a capacity for N elements.
///
/// N must be a power of 2. The max value of N is `u8::MAX - 1` if the `mpmc_large` feature is not
/// enabled.
pub struct MpMcQueue<T, const N: usize> {
buffer: UnsafeCell<[Cell<T>; N]>,
dequeue_pos: AtomicTargetSize,
enqueue_pos: AtomicTargetSize,
}
impl<T, const N: usize> MpMcQueue<T, N> {
const MASK: IntSize = (N - 1) as IntSize;
const EMPTY_CELL: Cell<T> = Cell::new(0);
const ASSERT: [(); 1] = [()];
/// Creates an empty queue
pub const fn new() -> Self {
// Const assert
crate::sealed::greater_than_1::<N>();
crate::sealed::power_of_two::<N>();
// Const assert on size.
Self::ASSERT[!(N < (IntSize::MAX as usize)) as usize];
let mut cell_count = 0;
let mut result_cells: [Cell<T>; N] = [Self::EMPTY_CELL; N];
while cell_count != N {
result_cells[cell_count] = Cell::new(cell_count);
cell_count += 1;
}
Self {
buffer: UnsafeCell::new(result_cells),
dequeue_pos: AtomicTargetSize::new(0),
enqueue_pos: AtomicTargetSize::new(0),
}
}
/// Returns the item in the front of the queue, or `None` if the queue is empty
pub fn dequeue(&self) -> Option<T> {
unsafe { dequeue(self.buffer.get() as *mut _, &self.dequeue_pos, Self::MASK) }
}
/// Adds an `item` to the end of the queue
///
/// Returns back the `item` if the queue is full
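///
/// # Examples
///
/// An illustrative sketch:
///
/// ```
/// use heapless::mpmc::Q2;
///
/// let q = Q2::new();
/// assert!(q.enqueue(1).is_ok());
/// assert!(q.enqueue(2).is_ok());
/// // the queue is full; the rejected item is handed back
/// assert_eq!(q.enqueue(3), Err(3));
/// ```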
pub fn enqueue(&self, item: T) -> Result<(), T> {
unsafe {
enqueue(
self.buffer.get() as *mut _,
&self.enqueue_pos,
Self::MASK,
item,
)
}
}
}
impl<T, const N: usize> Default for MpMcQueue<T, N> {
fn default() -> Self {
Self::new()
}
}
unsafe impl<T, const N: usize> Sync for MpMcQueue<T, N> where T: Send {}
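/// A single queue slot: the element storage plus the sequence number that producers and
/// consumers use to synchronize (see the Vyukov queue referenced in the module docs).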
struct Cell<T> {
data: MaybeUninit<T>,
sequence: AtomicTargetSize,
}
impl<T> Cell<T> {
const fn new(seq: usize) -> Self {
Self {
data: MaybeUninit::uninit(),
sequence: AtomicTargetSize::new(seq as IntSize),
}
}
}
unsafe fn dequeue<T>(
buffer: *mut Cell<T>,
dequeue_pos: &AtomicTargetSize,
mask: IntSize,
) -> Option<T> {
let mut pos = dequeue_pos.load(Ordering::Relaxed);
let mut cell;
loop {
cell = buffer.add(usize::from(pos & mask));
let seq = (*cell).sequence.load(Ordering::Acquire);
let dif = (seq as i8).wrapping_sub((pos.wrapping_add(1)) as i8);
if dif == 0 {
if dequeue_pos
.compare_exchange_weak(
pos,
pos.wrapping_add(1),
Ordering::Relaxed,
Ordering::Relaxed,
)
.is_ok()
{
break;
}
} else if dif < 0 {
return None;
} else {
pos = dequeue_pos.load(Ordering::Relaxed);
}
}
let data = (*cell).data.as_ptr().read();
(*cell)
.sequence
.store(pos.wrapping_add(mask).wrapping_add(1), Ordering::Release);
Some(data)
}
unsafe fn enqueue<T>(
buffer: *mut Cell<T>,
enqueue_pos: &AtomicTargetSize,
mask: IntSize,
item: T,
) -> Result<(), T> {
let mut pos = enqueue_pos.load(Ordering::Relaxed);
let mut cell;
loop {
cell = buffer.add(usize::from(pos & mask));
let seq = (*cell).sequence.load(Ordering::Acquire);
let dif = (seq as i8).wrapping_sub(pos as i8);
if dif == 0 {
if enqueue_pos
.compare_exchange_weak(
pos,
pos.wrapping_add(1),
Ordering::Relaxed,
Ordering::Relaxed,
)
.is_ok()
{
break;
}
} else if dif < 0 {
return Err(item);
} else {
pos = enqueue_pos.load(Ordering::Relaxed);
}
}
(*cell).data.as_mut_ptr().write(item);
(*cell)
.sequence
.store(pos.wrapping_add(1), Ordering::Release);
Ok(())
}
#[cfg(test)]
mod tests {
use super::Q2;
#[test]
fn sanity() {
let q = Q2::new();
q.enqueue(0).unwrap();
q.enqueue(1).unwrap();
assert!(q.enqueue(2).is_err());
assert_eq!(q.dequeue(), Some(0));
assert_eq!(q.dequeue(), Some(1));
assert_eq!(q.dequeue(), None);
}
#[test]
fn drain_at_pos255() {
let q = Q2::new();
for _ in 0..255 {
assert!(q.enqueue(0).is_ok());
assert_eq!(q.dequeue(), Some(0));
}
// this should not block forever
assert_eq!(q.dequeue(), None);
}
#[test]
fn full_at_wrapped_pos0() {
let q = Q2::new();
for _ in 0..254 {
assert!(q.enqueue(0).is_ok());
assert_eq!(q.dequeue(), Some(0));
}
assert!(q.enqueue(0).is_ok());
assert!(q.enqueue(0).is_ok());
// this should not block forever
assert!(q.enqueue(0).is_err());
}
}

59
vendor/heapless/src/pool.rs vendored Normal file

@@ -0,0 +1,59 @@
//! Memory and object pools
//!
//! # Target support
//!
//! This module / API is only available on these compilation targets:
//!
//! - ARM architectures whose instruction set includes the LDREX, CLREX and STREX instructions, e.g.
//! `thumbv7m-none-eabi` but not `thumbv6m-none-eabi`
//! - 32-bit x86, e.g. `i686-unknown-linux-gnu`
//!
//! # Benchmarks
//!
//! - compilation settings
//! - `codegen-units = 1`
//! - `lto = 'fat'`
//! - `opt-level = 'z'`
//! - compilation target: `thumbv7em-none-eabihf`
//! - CPU: ARM Cortex-M4F
//!
//! - test program:
//!
//! ``` no_run
//! use heapless::box_pool;
//!
//! box_pool!(P: ()); // or `arc_pool!` or `object_pool!`
//!
//! bkpt();
//! let res = P.alloc(());
//! bkpt();
//!
//! if let Ok(boxed) = res {
//! bkpt();
//! drop(boxed);
//! bkpt();
//! }
//! # fn bkpt() {}
//! ```
//!
//! - measurement method: the cycle counter (CYCCNT) register was sampled each time a breakpoint
//! (`bkpt`) was hit. The difference between the "after" and the "before" value of CYCCNT yields the
//! execution time in clock cycles.
//!
//! | API | clock cycles |
//! |------------------------------|--------------|
//! | `BoxPool::alloc` | 23 |
//! | `pool::boxed::Box::drop` | 23 |
//! | `ArcPool::alloc` | 28 |
//! | `pool::arc::Arc::drop` | 59 |
//! | `ObjectPool::request` | 23 |
//! | `pool::object::Object::drop` | 23 |
//!
//! Note that the execution time won't include `T`'s initialization nor `T`'s destructor which will
//! be present in the general case for `Box` and `Arc`.
mod treiber;
pub mod arc;
pub mod boxed;
pub mod object;

526
vendor/heapless/src/pool/arc.rs vendored Normal file

@@ -0,0 +1,526 @@
//! `std::sync::Arc`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
//!
//! arc_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ArcBlock<u128> = unsafe {
//! static mut B: ArcBlock<u128> = ArcBlock::new();
//! &mut B
//! };
//!
//! P.manage(block);
//!
//! let arc = P.alloc(1).unwrap();
//!
//! // number of smart pointers is limited to the number of blocks managed by the pool
//! let res = P.alloc(2);
//! assert!(res.is_err());
//!
//! // but cloning does not consume an `ArcBlock`
//! let arc2 = arc.clone();
//!
//! assert_eq!(1, *arc2);
//!
//! // `arc`'s destructor returns the memory block to the pool
//! drop(arc2); // decrease reference counter
//! drop(arc); // release memory
//!
//! // it's now possible to allocate a new `Arc` smart pointer
//! let res = P.alloc(3);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ArcPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{arc_pool, pool::arc::ArcBlock};
//!
//! arc_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ArcBlock<u128>] = {
//! const BLOCK: ArcBlock<u128> = ArcBlock::new(); // <=
//! static mut BLOCKS: [ArcBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
// reference counting logic is based on version 1.63.0 of the Rust standard library (`alloc` crate)
// which is licensed under 'MIT or APACHE-2.0'
// https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited
// 2022-09-05)
use core::{
fmt,
hash::{Hash, Hasher},
mem::{ManuallyDrop, MaybeUninit},
ops, ptr,
sync::atomic::{self, AtomicUsize, Ordering},
};
use super::treiber::{NonNullPtr, Stack, UnionNode};
/// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::arc)
#[macro_export]
macro_rules! arc_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::arc::ArcPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
$crate::pool::arc::ArcPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `ArcPool::alloc`
#[allow(dead_code)]
pub fn alloc(
&self,
value: $data_type,
) -> Result<$crate::pool::arc::Arc<$name>, $data_type> {
<$name as $crate::pool::arc::ArcPool>::alloc(value)
}
/// Inherent method version of `ArcPool::manage`
#[allow(dead_code)]
pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) {
<$name as $crate::pool::arc::ArcPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::arc::Arc` smart pointers
pub trait ArcPool: Sized {
/// The data type managed by the memory pool
type Data: 'static;
/// `arc_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static ArcPoolImpl<Self::Data>;
/// Allocate a new `Arc` smart pointer initialized to the given `value`
///
/// `manage` should be called at least once before calling `alloc`
///
/// # Errors
///
/// The `Err`or variant is returned when the memory pool has run out of memory blocks
fn alloc(value: Self::Data) -> Result<Arc<Self>, Self::Data> {
Ok(Arc {
node_ptr: Self::singleton().alloc(value)?,
})
}
/// Add a statically allocated memory block to the memory pool
fn manage(block: &'static mut ArcBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// `arc_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct ArcPoolImpl<T> {
stack: Stack<UnionNode<MaybeUninit<ArcInner<T>>>>,
}
impl<T> ArcPoolImpl<T> {
/// `arc_pool!` implementation detail
#[doc(hidden)]
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<ArcInner<T>>>>, T> {
if let Some(node_ptr) = self.stack.try_pop() {
let inner = ArcInner {
data: value,
strong: AtomicUsize::new(1),
};
unsafe { node_ptr.as_ptr().cast::<ArcInner<T>>().write(inner) }
Ok(node_ptr)
} else {
Err(value)
}
}
fn manage(&self, block: &'static mut ArcBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
unsafe impl<T> Sync for ArcPoolImpl<T> {}
/// Like `std::sync::Arc` but managed by memory pool `P`
pub struct Arc<P>
where
P: ArcPool,
{
node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>,
}
impl<P> Arc<P>
where
P: ArcPool,
{
fn inner(&self) -> &ArcInner<P::Data> {
unsafe { &*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>() }
}
fn from_inner(node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>) -> Self {
Self { node_ptr }
}
unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
&mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data)
}
#[inline(never)]
unsafe fn drop_slow(&mut self) {
// run `P::Data`'s destructor
ptr::drop_in_place(Self::get_mut_unchecked(self));
// return memory to pool
P::singleton().stack.push(self.node_ptr);
}
}
impl<P> AsRef<P::Data> for Arc<P>
where
P: ArcPool,
{
fn as_ref(&self) -> &P::Data {
&**self
}
}
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
impl<P> Clone for Arc<P>
where
P: ArcPool,
{
fn clone(&self) -> Self {
let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);
if old_size > MAX_REFCOUNT {
// XXX original code calls `intrinsics::abort` which is unstable API
panic!();
}
Self::from_inner(self.node_ptr)
}
}
impl<A> fmt::Debug for Arc<A>
where
A: ArcPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> ops::Deref for Arc<P>
where
P: ArcPool,
{
type Target = P::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data) }
}
}
impl<A> fmt::Display for Arc<A>
where
A: ArcPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<A> Drop for Arc<A>
where
A: ArcPool,
{
fn drop(&mut self) {
if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
return;
}
atomic::fence(Ordering::Acquire);
unsafe { self.drop_slow() }
}
}
impl<A> Eq for Arc<A>
where
A: ArcPool,
A::Data: Eq,
{
}
impl<A> Hash for Arc<A>
where
A: ArcPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Arc<A>
where
A: ArcPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Arc<B>> for Arc<A>
where
A: ArcPool,
B: ArcPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Arc<B>) -> bool {
A::Data::eq(self, &**other)
}
}
impl<A, B> PartialOrd<Arc<B>> for Arc<A>
where
A: ArcPool,
B: ArcPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Arc<B>) -> Option<core::cmp::Ordering> {
A::Data::partial_cmp(self, &**other)
}
}
unsafe impl<A> Send for Arc<A>
where
A: ArcPool,
A::Data: Sync + Send,
{
}
unsafe impl<A> Sync for Arc<A>
where
A: ArcPool,
A::Data: Sync + Send,
{
}
impl<A> Unpin for Arc<A> where A: ArcPool {}
struct ArcInner<T> {
data: T,
strong: AtomicUsize,
}
/// A chunk of memory that an `ArcPool` can manage
pub struct ArcBlock<T> {
node: UnionNode<MaybeUninit<ArcInner<T>>>,
}
impl<T> ArcBlock<T> {
/// Creates a new memory block
pub const fn new() -> Self {
Self {
node: UnionNode {
data: ManuallyDrop::new(MaybeUninit::uninit()),
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn cannot_alloc_if_empty() {
arc_pool!(P: i32);
assert_eq!(Err(42), P.alloc(42),);
}
#[test]
fn can_alloc_if_manages_one_block() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
assert_eq!(42, *P.alloc(42).unwrap());
}
#[test]
fn alloc_drop_alloc() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).unwrap();
drop(arc);
assert_eq!(2, *P.alloc(2).unwrap());
}
#[test]
fn strong_count_starts_at_one() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn clone_increases_strong_count() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
let before = arc.inner().strong.load(Ordering::Relaxed);
let arc2 = arc.clone();
let expected = before + 1;
assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed));
assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn drop_decreases_strong_count() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
let arc2 = arc.clone();
let before = arc.inner().strong.load(Ordering::Relaxed);
drop(arc);
let expected = before - 1;
assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed);
}
}
arc_pool!(P: S);
let block = unsafe {
static mut B: ArcBlock<S> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(S).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed));
drop(arc);
assert_eq!(1, COUNT.load(Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
arc_pool!(P: Zst4096);
let block = unsafe {
static mut B: ArcBlock<Zst4096> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(Zst4096).ok().unwrap();
let raw = &*arc as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
}

560
vendor/heapless/src/pool/boxed.rs vendored Normal file

@@ -0,0 +1,560 @@
//! `std::boxed::Box`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//!
//! box_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new();
//! &mut B
//! };
//!
//! // give block of memory to the pool
//! P.manage(block);
//!
//! // it's now possible to allocate
//! let mut boxed = P.alloc(1).unwrap();
//!
//! // mutation is possible
//! *boxed += 1;
//! assert_eq!(2, *boxed);
//!
//! // number of boxes is limited to the number of blocks managed by the pool
//! let res = P.alloc(3);
//! assert!(res.is_err());
//!
//! // give another memory block to the pool
//! P.manage(unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new();
//! &mut B
//! });
//!
//! // cloning also consumes a memory block from the pool
//! let mut separate_box = boxed.clone();
//! *separate_box += 1;
//! assert_eq!(3, *separate_box);
//!
//! // after the clone it's not possible to allocate again
//! let res = P.alloc(4);
//! assert!(res.is_err());
//!
//! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed);
//!
//! // it's possible to allocate again
//! let res = P.alloc(5);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `BoxPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [BoxBlock<u128>] = {
//! const BLOCK: BoxBlock<u128> = BoxBlock::new(); // <=
//! static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
use core::{
fmt,
hash::{Hash, Hasher},
mem::{ManuallyDrop, MaybeUninit},
ops, ptr,
};
use stable_deref_trait::StableDeref;
use super::treiber::{NonNullPtr, Stack, UnionNode};
/// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::boxed)
#[macro_export]
macro_rules! box_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::boxed::BoxPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
$crate::pool::boxed::BoxPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `BoxPool::alloc`
#[allow(dead_code)]
pub fn alloc(
&self,
value: $data_type,
) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
<$name as $crate::pool::boxed::BoxPool>::alloc(value)
}
/// Inherent method version of `BoxPool::manage`
#[allow(dead_code)]
pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
<$name as $crate::pool::boxed::BoxPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::boxed::Box`-es
///
/// # Usage
///
/// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that
/// implements this trait.
///
/// # Semver guarantees
///
/// *Implementing* this trait is exempt from semver guarantees.
/// i.e. a new patch release is allowed to break downstream `BoxPool` implementations.
///
/// *Using* the trait, e.g. in generic code, does fall under semver guarantees.
pub trait BoxPool: Sized {
/// The data type managed by the memory pool
type Data: 'static;
/// `box_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static BoxPoolImpl<Self::Data>;
/// Allocate a new `Box` initialized to the given `value`
///
/// `manage` should be called at least once before calling `alloc`
///
/// # Errors
///
/// The `Err`or variant is returned when the memory pool has run out of memory blocks
fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
Ok(Box {
node_ptr: Self::singleton().alloc(value)?,
})
}
/// Add a statically allocated memory block to the memory pool
fn manage(block: &'static mut BoxBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]`
pub struct Box<P>
where
P: BoxPool,
{
node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}
impl<A> Clone for Box<A>
where
A: BoxPool,
A::Data: Clone,
{
fn clone(&self) -> Self {
A::alloc((**self).clone()).ok().expect("OOM")
}
}
impl<A> fmt::Debug for Box<A>
where
A: BoxPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> ops::Deref for Box<P>
where
P: BoxPool,
{
type Target = P::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
}
}
impl<P> ops::DerefMut for Box<P>
where
P: BoxPool,
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
}
}
unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}
impl<A> fmt::Display for Box<A>
where
A: BoxPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> Drop for Box<P>
where
P: BoxPool,
{
fn drop(&mut self) {
let node = self.node_ptr;
unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }
unsafe { P::singleton().stack.push(node) }
}
}
impl<A> Eq for Box<A>
where
A: BoxPool,
A::Data: Eq,
{
}
impl<A> Hash for Box<A>
where
A: BoxPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Box<A>
where
A: BoxPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Box<B>> for Box<A>
where
A: BoxPool,
B: BoxPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Box<B>) -> bool {
A::Data::eq(self, other)
}
}
impl<A, B> PartialOrd<Box<B>> for Box<A>
where
A: BoxPool,
B: BoxPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
A::Data::partial_cmp(self, other)
}
}
unsafe impl<P> Send for Box<P>
where
P: BoxPool,
P::Data: Send,
{
}
unsafe impl<P> Sync for Box<P>
where
P: BoxPool,
P::Data: Sync,
{
}
/// `box_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
stack: Stack<UnionNode<MaybeUninit<T>>>,
}
impl<T> BoxPoolImpl<T> {
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
if let Some(node_ptr) = self.stack.try_pop() {
unsafe { node_ptr.as_ptr().cast::<T>().write(value) }
Ok(node_ptr)
} else {
Err(value)
}
}
fn manage(&self, block: &'static mut BoxBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
unsafe impl<T> Sync for BoxPoolImpl<T> {}
/// A chunk of memory that a `BoxPool` singleton can manage
pub struct BoxBlock<T> {
node: UnionNode<MaybeUninit<T>>,
}
impl<T> BoxBlock<T> {
/// Creates a new memory block
pub const fn new() -> Self {
Self {
node: UnionNode {
data: ManuallyDrop::new(MaybeUninit::uninit()),
},
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;
use super::*;
#[test]
fn cannot_alloc_if_empty() {
box_pool!(P: i32);
assert_eq!(Err(42), P.alloc(42));
}
#[test]
fn can_alloc_if_pool_manages_one_block() {
box_pool!(P: i32);
let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new();
&mut B
};
P.manage(block);
assert_eq!(42, *P.alloc(42).unwrap());
}
#[test]
fn alloc_drop_alloc() {
box_pool!(P: i32);
let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(1).unwrap();
drop(boxed);
assert_eq!(2, *P.alloc(2).unwrap());
}
#[test]
fn runs_destructor_exactly_once_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed);
}
}
box_pool!(P: S);
let block = unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(S).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed));
drop(boxed);
assert_eq!(1, COUNT.load(Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
box_pool!(P: Zst4096);
let block = unsafe {
static mut B: BoxBlock<Zst4096> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(Zst4096).ok().unwrap();
let raw = &*boxed as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
#[allow(clippy::redundant_clone)]
#[test]
fn can_clone_if_pool_is_not_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let first = P.alloc(S).ok().unwrap();
let _second = first.clone();
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let is_oom = P.alloc(S).is_err();
assert!(is_oom);
}
#[allow(clippy::redundant_clone)]
#[test]
fn clone_panics_if_pool_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let first = P.alloc(S).ok().unwrap();
let thread = thread::spawn(move || {
let _second = first.clone();
});
let thread_panicked = thread.join().is_err();
assert!(thread_panicked);
// we diverge from `alloc::Box<T>` in that we call `T::clone` first and then request
// memory from the allocator whereas `alloc::Box<T>` does it the other way around
// assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
}
#[allow(clippy::redundant_clone)]
#[test]
fn panicking_clone_does_not_leak_memory() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
panic!()
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let boxed = P.alloc(S).ok().unwrap();
let thread = thread::spawn(move || {
let _boxed = boxed.clone();
});
let thread_panicked = thread.join().is_err();
assert!(thread_panicked);
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let once = P.alloc(S);
let twice = P.alloc(S);
assert!(once.is_ok());
assert!(twice.is_ok());
}
}

vendor/heapless/src/pool/object.rs vendored Normal file

@@ -0,0 +1,420 @@
//! Object pool API
//!
//! # Example usage
//!
//! ```
//! use heapless::{object_pool, pool::object::{Object, ObjectBlock}};
//!
//! object_pool!(P: [u8; 128]);
//!
//! // cannot request objects without first giving object blocks to the pool
//! assert!(P.request().is_none());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe {
//! // unlike the memory pool APIs, an initial value must be specified here
//! static mut B: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]);
//! &mut B
//! };
//!
//! // give object block to the pool
//! P.manage(block);
//!
//! // it's now possible to request objects
//! // unlike the memory pool APIs, no initial value is required here
//! let mut object = P.request().unwrap();
//!
//! // mutation is possible
//! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1));
//!
//! // the number of live objects is limited to the number of blocks managed by the pool
//! let res = P.request();
//! assert!(res.is_none());
//!
//! // `object`'s destructor returns the object to the pool
//! drop(object);
//!
//! // it's possible to request an `Object` again
//! let res = P.request();
//!
//! assert!(res.is_some());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ObjectPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{object_pool, pool::object::ObjectBlock};
//!
//! object_pool!(P: [u8; 128]);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ObjectBlock<[u8; 128]>] = {
//! const BLOCK: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]); // <=
//! static mut BLOCKS: [ObjectBlock<[u8; 128]>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
use core::{
cmp::Ordering,
fmt,
hash::{Hash, Hasher},
mem::ManuallyDrop,
ops, ptr,
};
use stable_deref_trait::StableDeref;
use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode};
/// Creates a new `ObjectPool` singleton with the given `$name` that manages the specified
/// `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::object)
#[macro_export]
macro_rules! object_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::object::ObjectPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> {
static $name: $crate::pool::object::ObjectPoolImpl<$data_type> =
$crate::pool::object::ObjectPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `ObjectPool::request`
#[allow(dead_code)]
pub fn request(&self) -> Option<$crate::pool::object::Object<$name>> {
<$name as $crate::pool::object::ObjectPool>::request()
}
/// Inherent method version of `ObjectPool::manage`
#[allow(dead_code)]
pub fn manage(
&self,
block: &'static mut $crate::pool::object::ObjectBlock<$data_type>,
) {
<$name as $crate::pool::object::ObjectPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::object::Object`s
pub trait ObjectPool: Sized {
/// The data type of the objects managed by the object pool
type Data: 'static;
/// `object_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static ObjectPoolImpl<Self::Data>;
/// Request a new object from the pool
fn request() -> Option<Object<Self>> {
Self::singleton()
.request()
.map(|node_ptr| Object { node_ptr })
}
/// Adds a statically allocated object block to the pool
fn manage(block: &'static mut ObjectBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// `object_pool!` implementation detail
#[doc(hidden)]
pub struct ObjectPoolImpl<T> {
stack: Stack<StructNode<T>>,
}
impl<T> ObjectPoolImpl<T> {
/// `object_pool!` implementation detail
#[doc(hidden)]
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn request(&self) -> Option<NonNullPtr<StructNode<T>>> {
self.stack.try_pop()
}
fn manage(&self, block: &'static mut ObjectBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
// `T` needs to be `Send` because returning an object from one thread and then
// requesting it from another is effectively a cross-thread 'send' operation
unsafe impl<T> Sync for ObjectPoolImpl<T> where T: Send {}
/// An object managed by object pool `P`
pub struct Object<P>
where
P: ObjectPool,
{
node_ptr: NonNullPtr<StructNode<P::Data>>,
}
impl<A, T, const N: usize> AsMut<[T]> for Object<A>
where
A: ObjectPool<Data = [T; N]>,
{
fn as_mut(&mut self) -> &mut [T] {
&mut **self
}
}
impl<A, T, const N: usize> AsRef<[T]> for Object<A>
where
A: ObjectPool<Data = [T; N]>,
{
fn as_ref(&self) -> &[T] {
&**self
}
}
impl<A> fmt::Debug for Object<A>
where
A: ObjectPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<A> ops::Deref for Object<A>
where
A: ObjectPool,
{
type Target = A::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr()).data) }
}
}
impl<A> ops::DerefMut for Object<A>
where
A: ObjectPool,
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *ptr::addr_of_mut!((*self.node_ptr.as_ptr()).data) }
}
}
unsafe impl<A> StableDeref for Object<A> where A: ObjectPool {}
impl<A> fmt::Display for Object<A>
where
A: ObjectPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
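// NOTE: unlike `pool::boxed::Box`, dropping an `Object` does *not* run the
// destructor of `P::Data`; the still-initialized object is simply returned to
// the pool, to be handed out again by a later `request` (see the
// `destructor_does_not_run_on_drop` test below).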
impl<P> Drop for Object<P>
where
P: ObjectPool,
{
fn drop(&mut self) {
unsafe { P::singleton().stack.push(self.node_ptr) }
}
}
impl<A> Eq for Object<A>
where
A: ObjectPool,
A::Data: Eq,
{
}
impl<A> Hash for Object<A>
where
A: ObjectPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Object<A>
where
A: ObjectPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Object<B>> for Object<A>
where
A: ObjectPool,
B: ObjectPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Object<B>) -> bool {
A::Data::eq(self, other)
}
}
impl<A, B> PartialOrd<Object<B>> for Object<A>
where
A: ObjectPool,
B: ObjectPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Object<B>) -> Option<Ordering> {
A::Data::partial_cmp(self, other)
}
}
unsafe impl<P> Send for Object<P>
where
P: ObjectPool,
P::Data: Send,
{
}
unsafe impl<P> Sync for Object<P>
where
P: ObjectPool,
P::Data: Sync,
{
}
/// An object "block" of data type `T` that has not yet been associated to an `ObjectPool`
pub struct ObjectBlock<T> {
node: StructNode<T>,
}
impl<T> ObjectBlock<T> {
/// Creates a new object block with the given `initial_value`
pub const fn new(initial_value: T) -> Self {
Self {
node: StructNode {
next: ManuallyDrop::new(AtomicPtr::null()),
data: ManuallyDrop::new(initial_value),
},
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::{self, AtomicUsize};
use super::*;
#[test]
fn cannot_request_if_empty() {
object_pool!(P: i32);
assert_eq!(None, P.request());
}
#[test]
fn can_request_if_manages_one_block() {
object_pool!(P: i32);
let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B
};
P.manage(block);
assert_eq!(1, *P.request().unwrap());
}
#[test]
fn request_drop_request() {
object_pool!(P: i32);
let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B
};
P.manage(block);
let mut object = P.request().unwrap();
*object = 2;
drop(object);
assert_eq!(2, *P.request().unwrap());
}
#[test]
fn destructor_does_not_run_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, atomic::Ordering::Relaxed);
}
}
object_pool!(P: S);
let block = unsafe {
static mut B: ObjectBlock<S> = ObjectBlock::new(S);
&mut B
};
P.manage(block);
let object = P.request().unwrap();
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
drop(object);
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
object_pool!(P: Zst4096);
let block = unsafe {
static mut B: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096);
&mut B
};
P.manage(block);
let object = P.request().unwrap();
let raw = &*object as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
}

vendor/heapless/src/pool/treiber.rs vendored Normal file

@@ -0,0 +1,91 @@
use core::mem::ManuallyDrop;
#[cfg_attr(target_arch = "x86", path = "treiber/cas.rs")]
#[cfg_attr(arm_llsc, path = "treiber/llsc.rs")]
mod impl_;
pub use impl_::{AtomicPtr, NonNullPtr};
pub struct Stack<N>
where
N: Node,
{
top: AtomicPtr<N>,
}
impl<N> Stack<N>
where
N: Node,
{
pub const fn new() -> Self {
Self {
top: AtomicPtr::null(),
}
}
/// # Safety
/// - `node` must be a valid pointer
/// - aliasing rules must be enforced by the caller, e.g. the same `node` may not be pushed more than once
pub unsafe fn push(&self, node: NonNullPtr<N>) {
impl_::push(self, node)
}
pub fn try_pop(&self) -> Option<NonNullPtr<N>> {
impl_::try_pop(self)
}
}
pub trait Node: Sized {
type Data;
fn next(&self) -> &AtomicPtr<Self>;
fn next_mut(&mut self) -> &mut AtomicPtr<Self>;
}
pub union UnionNode<T> {
next: ManuallyDrop<AtomicPtr<UnionNode<T>>>,
pub data: ManuallyDrop<T>,
}
impl<T> Node for UnionNode<T> {
type Data = T;
fn next(&self) -> &AtomicPtr<Self> {
unsafe { &self.next }
}
fn next_mut(&mut self) -> &mut AtomicPtr<Self> {
unsafe { &mut self.next }
}
}
pub struct StructNode<T> {
pub next: ManuallyDrop<AtomicPtr<StructNode<T>>>,
pub data: ManuallyDrop<T>,
}
impl<T> Node for StructNode<T> {
type Data = T;
fn next(&self) -> &AtomicPtr<Self> {
&self.next
}
fn next_mut(&mut self) -> &mut AtomicPtr<Self> {
&mut self.next
}
}
#[cfg(test)]
mod tests {
use core::mem;
use super::*;
#[test]
fn node_is_never_zero_sized() {
struct Zst;
assert_ne!(mem::size_of::<UnionNode<Zst>>(), 0);
}
}
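// Editor's sketch (not upstream code): a full round trip through the Treiber
// stack using `StructNode`. `Box::leak` stands in for the `static mut` blocks
// that `no_std` callers use to obtain `&'static mut` nodes.
#[cfg(test)]
mod usage_sketch {
use core::mem::ManuallyDrop;
use super::*;
#[test]
fn push_then_try_pop() {
let stack = Stack::<StructNode<u8>>::new();
assert!(stack.try_pop().is_none());
let node: &'static mut StructNode<u8> = Box::leak(Box::new(StructNode {
next: ManuallyDrop::new(AtomicPtr::null()),
data: ManuallyDrop::new(42),
}));
// Safety: the node was freshly leaked, so it is pushed exactly once.
unsafe { stack.push(NonNullPtr::from_static_mut_ref(node)) };
let popped = stack.try_pop().expect("a node was pushed");
assert_eq!(42, unsafe { *(*popped.as_ptr()).data });
}
}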

vendor/heapless/src/pool/treiber/cas.rs vendored Normal file

@@ -0,0 +1,196 @@
use core::{
marker::PhantomData,
num::{NonZeroU32, NonZeroU64},
ptr::NonNull,
sync::atomic::{AtomicU64, Ordering},
};
use super::{Node, Stack};
pub struct AtomicPtr<N>
where
N: Node,
{
inner: AtomicU64,
_marker: PhantomData<*mut N>,
}
impl<N> AtomicPtr<N>
where
N: Node,
{
pub const fn null() -> Self {
Self {
inner: AtomicU64::new(0),
_marker: PhantomData,
}
}
fn compare_and_exchange_weak(
&self,
current: Option<NonNullPtr<N>>,
new: Option<NonNullPtr<N>>,
success: Ordering,
failure: Ordering,
) -> Result<(), Option<NonNullPtr<N>>> {
self.inner
.compare_exchange_weak(
current
.map(|pointer| pointer.into_u64())
.unwrap_or_default(),
new.map(|pointer| pointer.into_u64()).unwrap_or_default(),
success,
failure,
)
.map(drop)
.map_err(NonNullPtr::from_u64)
}
fn load(&self, order: Ordering) -> Option<NonNullPtr<N>> {
NonZeroU64::new(self.inner.load(order)).map(|inner| NonNullPtr {
inner,
_marker: PhantomData,
})
}
fn store(&self, value: Option<NonNullPtr<N>>, order: Ordering) {
self.inner.store(
value.map(|pointer| pointer.into_u64()).unwrap_or_default(),
order,
)
}
}
pub struct NonNullPtr<N>
where
N: Node,
{
inner: NonZeroU64,
_marker: PhantomData<*mut N>,
}
impl<N> Clone for NonNullPtr<N>
where
N: Node,
{
fn clone(&self) -> Self {
*self
}
}
impl<N> Copy for NonNullPtr<N> where N: Node {}
impl<N> NonNullPtr<N>
where
N: Node,
{
pub fn as_ptr(&self) -> *mut N {
self.inner.get() as *mut N
}
pub fn from_static_mut_ref(ref_: &'static mut N) -> NonNullPtr<N> {
let non_null = NonNull::from(ref_);
Self::from_non_null(non_null)
}
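// The `u64` layout below packs a 32-bit address in the low half and a
// 32-bit generation tag in the high half; the tag is bumped on every
// successful pop (`increase_tag`) so a recycled node never compares equal
// to a stale pointer in `compare_and_exchange_weak`. This is the classic
// ABA guard for Treiber stacks on 32-bit targets.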
fn from_non_null(ptr: NonNull<N>) -> Self {
let address = ptr.as_ptr() as u32;
let tag = initial_tag().get();
let value = (u64::from(tag) << 32) | u64::from(address);
Self {
inner: unsafe { NonZeroU64::new_unchecked(value) },
_marker: PhantomData,
}
}
fn from_u64(value: u64) -> Option<Self> {
NonZeroU64::new(value).map(|inner| Self {
inner,
_marker: PhantomData,
})
}
fn non_null(&self) -> NonNull<N> {
unsafe { NonNull::new_unchecked(self.inner.get() as *mut N) }
}
fn tag(&self) -> NonZeroU32 {
unsafe { NonZeroU32::new_unchecked((self.inner.get() >> 32) as u32) }
}
fn into_u64(self) -> u64 {
self.inner.get()
}
fn increase_tag(&mut self) {
let address = self.as_ptr() as u32;
let new_tag = self
.tag()
.get()
.checked_add(1)
.map(|val| unsafe { NonZeroU32::new_unchecked(val) })
.unwrap_or_else(initial_tag)
.get();
let value = (u64::from(new_tag) << 32) | u64::from(address);
self.inner = unsafe { NonZeroU64::new_unchecked(value) };
}
}
fn initial_tag() -> NonZeroU32 {
unsafe { NonZeroU32::new_unchecked(1) }
}
pub unsafe fn push<N>(stack: &Stack<N>, new_top: NonNullPtr<N>)
where
N: Node,
{
let mut top = stack.top.load(Ordering::Relaxed);
loop {
new_top
.non_null()
.as_ref()
.next()
.store(top, Ordering::Relaxed);
if let Err(p) = stack.top.compare_and_exchange_weak(
top,
Some(new_top),
Ordering::Release,
Ordering::Relaxed,
) {
top = p;
} else {
return;
}
}
}
pub fn try_pop<N>(stack: &Stack<N>) -> Option<NonNullPtr<N>>
where
N: Node,
{
loop {
if let Some(mut top) = stack.top.load(Ordering::Acquire) {
let next = unsafe { top.non_null().as_ref().next().load(Ordering::Relaxed) };
if stack
.top
.compare_and_exchange_weak(Some(top), next, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
top.increase_tag();
return Some(top);
}
} else {
// stack observed as empty
return None;
}
}
}

vendor/heapless/src/pool/treiber/llsc.rs vendored Normal file

@@ -0,0 +1,145 @@
use core::{
cell::UnsafeCell,
ptr::{self, NonNull},
};
use super::{Node, Stack};
pub struct AtomicPtr<N>
where
N: Node,
{
inner: UnsafeCell<Option<NonNull<N>>>,
}
impl<N> AtomicPtr<N>
where
N: Node,
{
pub const fn null() -> Self {
Self {
inner: UnsafeCell::new(None),
}
}
}
pub struct NonNullPtr<N>
where
N: Node,
{
inner: NonNull<N>,
}
impl<N> NonNullPtr<N>
where
N: Node,
{
pub fn as_ptr(&self) -> *mut N {
self.inner.as_ptr().cast()
}
pub fn from_static_mut_ref(ref_: &'static mut N) -> Self {
Self {
inner: NonNull::from(ref_),
}
}
}
impl<N> Clone for NonNullPtr<N>
where
N: Node,
{
fn clone(&self) -> Self {
Self { inner: self.inner }
}
}
impl<N> Copy for NonNullPtr<N> where N: Node {}
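// On cores with load-link/store-conditional exclusives (`ldrex`/`strex` on
// ARM) no generation tag is needed: the store-conditional fails whenever the
// stack's `top` was written between the load-link and the store, so the ABA
// problem that the CAS implementation guards against cannot occur.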
pub unsafe fn push<N>(stack: &Stack<N>, mut node: NonNullPtr<N>)
where
N: Node,
{
let top_addr = ptr::addr_of!(stack.top) as *mut usize;
loop {
let top = arch::load_link(top_addr);
node.inner
.as_mut()
.next_mut()
.inner
.get()
.write(NonNull::new(top as *mut _));
if arch::store_conditional(node.inner.as_ptr() as usize, top_addr).is_ok() {
break;
}
}
}
pub fn try_pop<N>(stack: &Stack<N>) -> Option<NonNullPtr<N>>
where
N: Node,
{
unsafe {
let top_addr = ptr::addr_of!(stack.top) as *mut usize;
loop {
let top = arch::load_link(top_addr);
if let Some(top) = NonNull::new(top as *mut N) {
let next = &top.as_ref().next();
if arch::store_conditional(
next.inner
.get()
.read()
.map(|non_null| non_null.as_ptr() as usize)
.unwrap_or_default(),
top_addr,
)
.is_ok()
{
break Some(NonNullPtr { inner: top });
}
} else {
arch::clear_load_link();
break None;
}
}
}
}
#[cfg(arm_llsc)]
mod arch {
use core::arch::asm;
#[inline(always)]
pub fn clear_load_link() {
unsafe { asm!("clrex", options(nomem, nostack)) }
}
/// # Safety
/// - `addr` must be a valid pointer
#[inline(always)]
pub unsafe fn load_link(addr: *const usize) -> usize {
let value;
asm!("ldrex {}, [{}]", out(reg) value, in(reg) addr, options(nostack));
value
}
/// # Safety
/// - `addr` must be a valid pointer
#[inline(always)]
pub unsafe fn store_conditional(value: usize, addr: *mut usize) -> Result<(), ()> {
let outcome: usize;
asm!("strex {}, {}, [{}]", out(reg) outcome, in(reg) value, in(reg) addr, options(nostack));
if outcome == 0 {
Ok(())
} else {
Err(())
}
}
}

vendor/heapless/src/sealed.rs vendored Normal file

@@ -0,0 +1,58 @@
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn smaller_than<const N: usize, const MAX: usize>() {
Assert::<N, MAX>::LESS;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn greater_than_eq_0<const N: usize>() {
Assert::<N, 0>::GREATER_EQ;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn greater_than_0<const N: usize>() {
Assert::<N, 0>::GREATER;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn greater_than_1<const N: usize>() {
Assert::<N, 1>::GREATER;
}
#[allow(dead_code)]
#[allow(path_statements)]
pub(crate) const fn power_of_two<const N: usize>() {
Assert::<N, 0>::GREATER;
Assert::<N, 0>::POWER_OF_TWO;
}
#[allow(dead_code)]
/// Const assert hack
pub struct Assert<const L: usize, const R: usize>;
#[allow(dead_code)]
impl<const L: usize, const R: usize> Assert<L, R> {
/// Const assert hack
pub const GREATER_EQ: usize = L - R;
/// Const assert hack
pub const LESS_EQ: usize = R - L;
/// Const assert hack
pub const NOT_EQ: isize = 0 / (R as isize - L as isize);
/// Const assert hack
pub const EQ: usize = (R - L) + (L - R);
/// Const assert hack
pub const GREATER: usize = L - R - 1;
/// Const assert hack
pub const LESS: usize = R - L - 1;
/// Const assert hack
pub const POWER_OF_TWO: usize = 0 - (L & (L - 1));
}
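// Editor's note: these constants "assert" by forcing a const expression that
// only evaluates when the condition holds. E.g. `Assert::<3, 8>::LESS` is
// `8 - 3 - 1 = 4` and compiles, while a hypothetical `Assert::<8, 3>::LESS`
// underflows `usize` and aborts compilation as soon as a function such as
// `smaller_than::<N, MAX>()` references it.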

vendor/heapless/src/ser.rs vendored Normal file

@@ -0,0 +1,123 @@
use core::hash::{BuildHasher, Hash};
use crate::{
binary_heap::Kind as BinaryHeapKind, BinaryHeap, Deque, IndexMap, IndexSet, LinearMap, String,
Vec,
};
use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer};
// Sequential containers
impl<T, KIND, const N: usize> Serialize for BinaryHeap<T, KIND, N>
where
T: Ord + Serialize,
KIND: BinaryHeapKind,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
impl<T, S, const N: usize> Serialize for IndexSet<T, S, N>
where
T: Eq + Hash + Serialize,
S: BuildHasher,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
SER: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
impl<T, const N: usize> Serialize for Vec<T, N>
where
T: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
impl<T, const N: usize> Serialize for Deque<T, N>
where
T: Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(Some(self.len()))?;
for element in self {
seq.serialize_element(element)?;
}
seq.end()
}
}
// Dictionaries
impl<K, V, S, const N: usize> Serialize for IndexMap<K, V, S, N>
where
K: Eq + Hash + Serialize,
S: BuildHasher,
V: Serialize,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
SER: Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
for (k, v) in self {
map.serialize_entry(k, v)?;
}
map.end()
}
}
impl<K, V, const N: usize> Serialize for LinearMap<K, V, N>
where
K: Eq + Serialize,
V: Serialize,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
SER: Serializer,
{
let mut map = serializer.serialize_map(Some(self.len()))?;
for (k, v) in self {
map.serialize_entry(k, v)?;
}
map.end()
}
}
// String containers
impl<const N: usize> Serialize for String<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&*self)
}
}
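// Editor's sketch (assumes a JSON serializer such as `serde_json` is on hand;
// heapless itself only supplies the impls above): every sequential container
// serializes like a plain sequence and every map like a plain map.
//
//     let mut v: Vec<u8, 4> = Vec::new();
//     v.push(1).unwrap();
//     v.push(2).unwrap();
//     assert_eq!(serde_json::to_string(&v).unwrap(), "[1,2]");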

vendor/heapless/src/sorted_linked_list.rs vendored Normal file

@@ -0,0 +1,866 @@
//! A fixed sorted priority linked list, similar to [`BinaryHeap`] but with different properties
//! on `push`, `pop`, etc.
//! For example, the sorting of the list will never `memcpy` the underlying value, so having large
//! objects in the list will not cause a performance hit.
//!
//! # Examples
//!
//! ```
//! use heapless::sorted_linked_list::{SortedLinkedList, Max};
//! let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
//!
//! // The largest value will always be first
//! ll.push(1).unwrap();
//! assert_eq!(ll.peek(), Some(&1));
//!
//! ll.push(2).unwrap();
//! assert_eq!(ll.peek(), Some(&2));
//!
//! ll.push(3).unwrap();
//! assert_eq!(ll.peek(), Some(&3));
//!
//! // This will not fit in the queue.
//! assert_eq!(ll.push(4), Err(4));
//! ```
//!
//! [`BinaryHeap`]: `crate::binary_heap::BinaryHeap`
use core::cmp::Ordering;
use core::fmt;
use core::marker::PhantomData;
use core::mem::MaybeUninit;
use core::ops::{Deref, DerefMut};
use core::ptr;
/// Trait for defining an index for the linked list, never implemented by users.
pub trait SortedLinkedListIndex: Copy {
#[doc(hidden)]
unsafe fn new_unchecked(val: usize) -> Self;
#[doc(hidden)]
unsafe fn get_unchecked(self) -> usize;
#[doc(hidden)]
fn option(self) -> Option<usize>;
#[doc(hidden)]
fn none() -> Self;
}
/// Marker for Min sorted [`SortedLinkedList`].
pub struct Min;
/// Marker for Max sorted [`SortedLinkedList`].
pub struct Max;
/// The linked list kind: min-list or max-list
pub trait Kind: private::Sealed {
#[doc(hidden)]
fn ordering() -> Ordering;
}
impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
}
}
impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
/// Sealed traits
mod private {
pub trait Sealed {}
}
impl private::Sealed for Max {}
impl private::Sealed for Min {}
/// A node in the [`SortedLinkedList`].
pub struct Node<T, Idx> {
val: MaybeUninit<T>,
next: Idx,
}
/// The linked list.
pub struct SortedLinkedList<T, Idx, K, const N: usize>
where
Idx: SortedLinkedListIndex,
{
list: [Node<T, Idx>; N],
head: Idx,
free: Idx,
_kind: PhantomData<K>,
}
// Internal macro for generating indexes for the linkedlist and const new for the linked list
macro_rules! impl_index_and_const_new {
($name:ident, $ty:ty, $new_name:ident, $max_val:expr) => {
/// Index for the [`SortedLinkedList`] with specific backing storage.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct $name($ty);
impl SortedLinkedListIndex for $name {
#[inline(always)]
unsafe fn new_unchecked(val: usize) -> Self {
Self::new_unchecked(val as $ty)
}
/// This is only valid if `self.option()` is not `None`.
#[inline(always)]
unsafe fn get_unchecked(self) -> usize {
self.0 as usize
}
#[inline(always)]
fn option(self) -> Option<usize> {
if self.0 == <$ty>::MAX {
None
} else {
Some(self.0 as usize)
}
}
#[inline(always)]
fn none() -> Self {
Self::none()
}
}
impl $name {
/// Needed for a `const fn new()`.
#[inline]
const unsafe fn new_unchecked(value: $ty) -> Self {
$name(value)
}
/// Needed for a `const fn new()`.
#[inline]
const fn none() -> Self {
$name(<$ty>::MAX)
}
}
impl<T, K, const N: usize> SortedLinkedList<T, $name, K, N> {
const UNINIT: Node<T, $name> = Node {
val: MaybeUninit::uninit(),
next: $name::none(),
};
/// Create a new linked list.
pub const fn $new_name() -> Self {
// Const assert N < MAX
crate::sealed::smaller_than::<N, $max_val>();
let mut list = SortedLinkedList {
list: [Self::UNINIT; N],
head: $name::none(),
free: unsafe { $name::new_unchecked(0) },
_kind: PhantomData,
};
if N == 0 {
list.free = $name::none();
return list;
}
let mut free = 0;
// Initialize indexes
while free < N - 1 {
list.list[free].next = unsafe { $name::new_unchecked(free as $ty + 1) };
free += 1;
}
list
}
}
};
}
impl_index_and_const_new!(LinkedIndexU8, u8, new_u8, { u8::MAX as usize - 1 });
impl_index_and_const_new!(LinkedIndexU16, u16, new_u16, { u16::MAX as usize - 1 });
impl_index_and_const_new!(LinkedIndexUsize, usize, new_usize, { usize::MAX - 1 });
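// Editor's note: each index type reserves its all-ones value (`u8::MAX`,
// `u16::MAX`, `usize::MAX`) as the `none()` sentinel, which is why the const
// assert in the generated `new_*` constructors caps the capacity `N` just
// below that sentinel.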
impl<T, Idx, K, const N: usize> SortedLinkedList<T, Idx, K, N>
where
Idx: SortedLinkedListIndex,
{
/// Internal access helper
#[inline(always)]
fn node_at(&self, index: usize) -> &Node<T, Idx> {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { self.list.get_unchecked(index) }
}
/// Internal access helper
#[inline(always)]
fn node_at_mut(&mut self, index: usize) -> &mut Node<T, Idx> {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { self.list.get_unchecked_mut(index) }
}
/// Internal access helper
#[inline(always)]
fn write_data_in_node_at(&mut self, index: usize, data: T) {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe {
self.node_at_mut(index).val.as_mut_ptr().write(data);
}
}
/// Internal access helper
#[inline(always)]
fn read_data_in_node_at(&self, index: usize) -> &T {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { &*self.node_at(index).val.as_ptr() }
}
/// Internal access helper
#[inline(always)]
fn read_mut_data_in_node_at(&mut self, index: usize) -> &mut T {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { &mut *self.node_at_mut(index).val.as_mut_ptr() }
}
/// Internal access helper
#[inline(always)]
fn extract_data_in_node_at(&mut self, index: usize) -> T {
// Safety: The entire `self.list` is initialized in `new`, which makes this safe.
unsafe { self.node_at(index).val.as_ptr().read() }
}
}
impl<T, Idx, K, const N: usize> SortedLinkedList<T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
/// Pushes a value onto the list without checking if the list is full.
///
/// Complexity is worst-case `O(N)`.
///
/// # Safety
///
/// Assumes that the list is not full.
pub unsafe fn push_unchecked(&mut self, value: T) {
let new = self.free.get_unchecked();
// Store the data and update the next free spot
self.write_data_in_node_at(new, value);
self.free = self.node_at(new).next;
if let Some(head) = self.head.option() {
// Check if we need to replace head
if self
.read_data_in_node_at(head)
.cmp(self.read_data_in_node_at(new))
!= K::ordering()
{
self.node_at_mut(new).next = self.head;
self.head = Idx::new_unchecked(new);
} else {
// It's not head, search the list for the correct placement
let mut current = head;
while let Some(next) = self.node_at(current).next.option() {
if self
.read_data_in_node_at(next)
.cmp(self.read_data_in_node_at(new))
!= K::ordering()
{
break;
}
current = next;
}
self.node_at_mut(new).next = self.node_at(current).next;
self.node_at_mut(current).next = Idx::new_unchecked(new);
}
} else {
self.node_at_mut(new).next = self.head;
self.head = Idx::new_unchecked(new);
}
}
/// Pushes an element to the linked list and sorts it into place.
///
/// Complexity is worst-case `O(N)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// // The largest value will always be first
/// ll.push(1).unwrap();
/// assert_eq!(ll.peek(), Some(&1));
///
/// ll.push(2).unwrap();
/// assert_eq!(ll.peek(), Some(&2));
///
/// ll.push(3).unwrap();
/// assert_eq!(ll.peek(), Some(&3));
///
/// // This will not fit in the queue.
/// assert_eq!(ll.push(4), Err(4));
/// ```
pub fn push(&mut self, value: T) -> Result<(), T> {
if !self.is_full() {
Ok(unsafe { self.push_unchecked(value) })
} else {
Err(value)
}
}
/// Get an iterator over the sorted list.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
///
/// let mut iter = ll.iter();
///
/// assert_eq!(iter.next(), Some(&2));
/// assert_eq!(iter.next(), Some(&1));
/// assert_eq!(iter.next(), None);
/// ```
pub fn iter(&self) -> Iter<'_, T, Idx, K, N> {
Iter {
list: self,
index: self.head,
}
}
/// Find an element in the list that can be changed and resorted.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
/// ll.push(3).unwrap();
///
/// // Find a value and update it
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// *find += 1000;
/// find.finish();
///
/// assert_eq!(ll.pop(), Ok(1002));
/// assert_eq!(ll.pop(), Ok(3));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
pub fn find_mut<F>(&mut self, mut f: F) -> Option<FindMut<'_, T, Idx, K, N>>
where
F: FnMut(&T) -> bool,
{
let head = self.head.option()?;
// Special-case, first element
if f(self.read_data_in_node_at(head)) {
return Some(FindMut {
is_head: true,
prev_index: Idx::none(),
index: self.head,
list: self,
maybe_changed: false,
});
}
let mut current = head;
while let Some(next) = self.node_at(current).next.option() {
if f(self.read_data_in_node_at(next)) {
return Some(FindMut {
is_head: false,
prev_index: unsafe { Idx::new_unchecked(current) },
index: unsafe { Idx::new_unchecked(next) },
list: self,
maybe_changed: false,
});
}
current = next;
}
None
}
/// Peek at the first element.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max, Min};
/// let mut ll_max: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// // The largest value will always be first
/// ll_max.push(1).unwrap();
/// assert_eq!(ll_max.peek(), Some(&1));
/// ll_max.push(2).unwrap();
/// assert_eq!(ll_max.peek(), Some(&2));
/// ll_max.push(3).unwrap();
/// assert_eq!(ll_max.peek(), Some(&3));
///
/// let mut ll_min: SortedLinkedList<_, _, Min, 3> = SortedLinkedList::new_usize();
///
/// // The smallest value will always be first
/// ll_min.push(3).unwrap();
/// assert_eq!(ll_min.peek(), Some(&3));
/// ll_min.push(2).unwrap();
/// assert_eq!(ll_min.peek(), Some(&2));
/// ll_min.push(1).unwrap();
/// assert_eq!(ll_min.peek(), Some(&1));
/// ```
pub fn peek(&self) -> Option<&T> {
self.head
.option()
.map(|head| self.read_data_in_node_at(head))
}
/// Pop an element from the list without checking that the list is not empty.
///
/// # Safety
///
/// Assumes that the list is not empty.
pub unsafe fn pop_unchecked(&mut self) -> T {
let head = self.head.get_unchecked();
let current = head;
self.head = self.node_at(head).next;
self.node_at_mut(current).next = self.free;
self.free = Idx::new_unchecked(current);
self.extract_data_in_node_at(current)
}
/// Pops the first element in the list.
///
/// Complexity is worst-case `O(1)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
///
/// assert_eq!(ll.pop(), Ok(2));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
pub fn pop(&mut self) -> Result<T, ()> {
if !self.is_empty() {
Ok(unsafe { self.pop_unchecked() })
} else {
Err(())
}
}
/// Checks if the linked list is full.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// assert_eq!(ll.is_full(), false);
///
/// ll.push(1).unwrap();
/// assert_eq!(ll.is_full(), false);
/// ll.push(2).unwrap();
/// assert_eq!(ll.is_full(), false);
/// ll.push(3).unwrap();
/// assert_eq!(ll.is_full(), true);
/// ```
#[inline]
pub fn is_full(&self) -> bool {
self.free.option().is_none()
}
/// Checks if the linked list is empty.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// assert_eq!(ll.is_empty(), true);
///
/// ll.push(1).unwrap();
/// assert_eq!(ll.is_empty(), false);
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
self.head.option().is_none()
}
}
/// Iterator for the linked list.
pub struct Iter<'a, T, Idx, K, const N: usize>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
list: &'a SortedLinkedList<T, Idx, K, N>,
index: Idx,
}
impl<'a, T, Idx, K, const N: usize> Iterator for Iter<'a, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
let index = self.index.option()?;
let node = self.list.node_at(index);
self.index = node.next;
Some(self.list.read_data_in_node_at(index))
}
}
/// Comes from [`SortedLinkedList::find_mut`].
pub struct FindMut<'a, T, Idx, K, const N: usize>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
list: &'a mut SortedLinkedList<T, Idx, K, N>,
is_head: bool,
prev_index: Idx,
index: Idx,
maybe_changed: bool,
}
impl<'a, T, Idx, K, const N: usize> FindMut<'a, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn pop_internal(&mut self) -> T {
if self.is_head {
// If it is the head element, we can do a normal pop
unsafe { self.list.pop_unchecked() }
} else {
// Somewhere in the list
let prev = unsafe { self.prev_index.get_unchecked() };
let curr = unsafe { self.index.get_unchecked() };
// Re-point the previous index
self.list.node_at_mut(prev).next = self.list.node_at_mut(curr).next;
// Release the index into the free queue
self.list.node_at_mut(curr).next = self.list.free;
self.list.free = self.index;
self.list.extract_data_in_node_at(curr)
}
}
/// This will pop the element from the list.
///
/// Complexity is worst-case `O(1)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
/// ll.push(3).unwrap();
///
/// // Find a value and update it
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// find.pop();
///
/// assert_eq!(ll.pop(), Ok(3));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
#[inline]
pub fn pop(mut self) -> T {
self.pop_internal()
}
/// This will resort the element into the correct position in the list if needed. The resorting
/// will only happen if the element has been accessed mutably.
///
/// Same as calling `drop`.
///
/// Complexity is worst-case `O(N)`.
///
/// # Example
///
/// ```
/// use heapless::sorted_linked_list::{SortedLinkedList, Max};
/// let mut ll: SortedLinkedList<_, _, Max, 3> = SortedLinkedList::new_usize();
///
/// ll.push(1).unwrap();
/// ll.push(2).unwrap();
/// ll.push(3).unwrap();
///
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// find.finish(); // No resort, we did not access the value.
///
/// let mut find = ll.find_mut(|v| *v == 2).unwrap();
/// *find += 1000;
/// find.finish(); // Will resort, we accessed (and updated) the value.
///
/// assert_eq!(ll.pop(), Ok(1002));
/// assert_eq!(ll.pop(), Ok(3));
/// assert_eq!(ll.pop(), Ok(1));
/// assert_eq!(ll.pop(), Err(()));
/// ```
#[inline]
pub fn finish(self) {
drop(self)
}
}
impl<T, Idx, K, const N: usize> Drop for FindMut<'_, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn drop(&mut self) {
// Only resort the list if the element has changed
if self.maybe_changed {
let val = self.pop_internal();
unsafe { self.list.push_unchecked(val) };
}
}
}
impl<T, Idx, K, const N: usize> Deref for FindMut<'_, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
type Target = T;
fn deref(&self) -> &Self::Target {
self.list
.read_data_in_node_at(unsafe { self.index.get_unchecked() })
}
}
impl<T, Idx, K, const N: usize> DerefMut for FindMut<'_, T, Idx, K, N>
where
T: Ord,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn deref_mut(&mut self) -> &mut Self::Target {
self.maybe_changed = true;
self.list
.read_mut_data_in_node_at(unsafe { self.index.get_unchecked() })
}
}
// /// Useful for debug during development.
// impl<T, Idx, K, const N: usize> fmt::Debug for FindMut<'_, T, Idx, K, N>
// where
// T: Ord + core::fmt::Debug,
// Idx: SortedLinkedListIndex,
// K: Kind,
// {
// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// f.debug_struct("FindMut")
// .field("prev_index", &self.prev_index.option())
// .field("index", &self.index.option())
// .field(
// "prev_value",
// &self
// .list
// .read_data_in_node_at(self.prev_index.option().unwrap()),
// )
// .field(
// "value",
// &self.list.read_data_in_node_at(self.index.option().unwrap()),
// )
// .finish()
// }
// }
impl<T, Idx, K, const N: usize> fmt::Debug for SortedLinkedList<T, Idx, K, N>
where
T: Ord + core::fmt::Debug,
Idx: SortedLinkedListIndex,
K: Kind,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, Idx, K, const N: usize> Drop for SortedLinkedList<T, Idx, K, N>
where
Idx: SortedLinkedListIndex,
{
fn drop(&mut self) {
let mut index = self.head;
while let Some(i) = index.option() {
let node = self.node_at_mut(i);
index = node.next;
unsafe {
ptr::drop_in_place(node.val.as_mut_ptr());
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn const_new() {
static mut _V1: SortedLinkedList<u32, LinkedIndexU8, Max, 100> = SortedLinkedList::new_u8();
static mut _V2: SortedLinkedList<u32, LinkedIndexU16, Max, 10_000> =
SortedLinkedList::new_u16();
static mut _V3: SortedLinkedList<u32, LinkedIndexUsize, Max, 100_000> =
SortedLinkedList::new_usize();
}
#[test]
fn test_peek() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
assert_eq!(ll.peek().unwrap(), &1);
ll.push(2).unwrap();
assert_eq!(ll.peek().unwrap(), &2);
ll.push(3).unwrap();
assert_eq!(ll.peek().unwrap(), &3);
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Min, 3> = SortedLinkedList::new_usize();
ll.push(2).unwrap();
assert_eq!(ll.peek().unwrap(), &2);
ll.push(1).unwrap();
assert_eq!(ll.peek().unwrap(), &1);
ll.push(3).unwrap();
assert_eq!(ll.peek().unwrap(), &1);
}
#[test]
fn test_full() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
ll.push(2).unwrap();
ll.push(3).unwrap();
assert!(ll.is_full())
}
#[test]
fn test_empty() {
let ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
assert!(ll.is_empty())
}
#[test]
fn test_zero_size() {
let ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 0> = SortedLinkedList::new_usize();
assert!(ll.is_empty());
assert!(ll.is_full());
}
#[test]
fn test_rejected_push() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
ll.push(2).unwrap();
ll.push(3).unwrap();
// This won't fit
let r = ll.push(4);
assert_eq!(r, Err(4));
}
#[test]
fn test_updating() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
ll.push(2).unwrap();
ll.push(3).unwrap();
let mut find = ll.find_mut(|v| *v == 2).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1002);
let mut find = ll.find_mut(|v| *v == 3).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1003);
// Remove largest element
ll.find_mut(|v| *v == 1003).unwrap().pop();
assert_eq!(ll.peek().unwrap(), &1002);
}
#[test]
fn test_updating_1() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
let v = ll.pop().unwrap();
assert_eq!(v, 1);
}
#[test]
fn test_updating_2() {
let mut ll: SortedLinkedList<u32, LinkedIndexUsize, Max, 3> = SortedLinkedList::new_usize();
ll.push(1).unwrap();
let mut find = ll.find_mut(|v| *v == 1).unwrap();
*find += 1000;
find.finish();
assert_eq!(ll.peek().unwrap(), &1001);
}
}

vendor/heapless/src/spsc.rs vendored Normal file

@@ -0,0 +1,908 @@
//! Fixed capacity Single Producer Single Consumer (SPSC) queue
//!
//! Implementation based on <https://www.codeproject.com/Articles/43510/Lock-Free-Single-Producer-Single-Consumer-Circular>
//!
//! # Portability
//!
//! This module requires CAS atomic instructions which are not available on all architectures
//! (e.g. ARMv6-M (`thumbv6m-none-eabi`) and MSP430 (`msp430-none-elf`)). These atomics can be
//! emulated however with [`portable-atomic`](https://crates.io/crates/portable-atomic), which is
//! enabled with the `cas` feature and is enabled by default for `thumbv6m-none-eabi` and `riscv32`
//! targets.
//!
//! # Examples
//!
//! - `Queue` can be used as a plain queue
//!
//! ```
//! use heapless::spsc::Queue;
//!
//! let mut rb: Queue<u8, 4> = Queue::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
//! assert!(rb.enqueue(2).is_ok());
//! assert!(rb.enqueue(3).is_err()); // full
//!
//! assert_eq!(rb.dequeue(), Some(0));
//! ```
//!
//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode.
//!
//! "no alloc" applications can create a `&'static mut` reference to a `Queue` -- using a static
//! variable -- and then `split` it: this consumes the static reference. The resulting `Consumer`
//! and `Producer` can then be moved into different execution contexts (threads, interrupt handlers,
//! etc.)
//!
//! ```
//! use heapless::spsc::{Producer, Queue};
//!
//! enum Event { A, B }
//!
//! fn main() {
//! let queue: &'static mut Queue<Event, 4> = {
//! static mut Q: Queue<Event, 4> = Queue::new();
//! unsafe { &mut Q }
//! };
//!
//! let (producer, mut consumer) = queue.split();
//!
//! // `producer` can be moved into `interrupt_handler` using a static mutex or the mechanism
//! // provided by the concurrency framework you are using (e.g. a resource in RTIC)
//!
//! loop {
//! match consumer.dequeue() {
//! Some(Event::A) => { /* .. */ },
//! Some(Event::B) => { /* .. */ },
//! None => { /* sleep */ },
//! }
//! # break
//! }
//! }
//!
//! // this is a different execution context that can preempt `main`
//! fn interrupt_handler(producer: &mut Producer<'static, Event, 4>) {
//! # let condition = true;
//!
//! // ..
//!
//! if condition {
//! producer.enqueue(Event::A).ok().unwrap();
//! } else {
//! producer.enqueue(Event::B).ok().unwrap();
//! }
//!
//! // ..
//! }
//! ```
//!
//! # Benchmarks
//!
//! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles
//!
//! `-C opt-level` |`3`|
//! -----------------------|---|
//! `Consumer<u8>::dequeue`| 15|
//! `Queue<u8>::dequeue` | 12|
//! `Producer<u8>::enqueue`| 16|
//! `Queue<u8>::enqueue` | 14|
//!
//! - All execution times are in clock cycles. 1 clock cycle = 125 ns.
//! - Execution time is *dependent* on `mem::size_of::<T>()`. Both operations include one
//! `memcpy(T)` in their successful path.
//! - The optimization level is indicated in the first row.
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr};
#[cfg(not(feature = "portable-atomic"))]
use core::sync::atomic;
#[cfg(feature = "portable-atomic")]
use portable_atomic as atomic;
use atomic::{AtomicUsize, Ordering};
/// A statically allocated single producer single consumer queue with a capacity of `N - 1` elements
///
/// *IMPORTANT*: To get better performance use a value for `N` that is a power of 2 (e.g. `16`, `32`,
/// etc.).
pub struct Queue<T, const N: usize> {
// this is from where we dequeue items
pub(crate) head: AtomicUsize,
// this is where we enqueue new items
pub(crate) tail: AtomicUsize,
pub(crate) buffer: [UnsafeCell<MaybeUninit<T>>; N],
}
impl<T, const N: usize> Queue<T, N> {
const INIT: UnsafeCell<MaybeUninit<T>> = UnsafeCell::new(MaybeUninit::uninit());
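// `head == tail` means "empty" and `increment(tail) == head` means "full":
// one slot is deliberately left unused so the two states stay
// distinguishable, which is why the usable capacity is `N - 1`.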
#[inline]
fn increment(val: usize) -> usize {
(val + 1) % N
}
/// Creates an empty queue with a fixed capacity of `N - 1`
pub const fn new() -> Self {
// Const assert N > 1
crate::sealed::greater_than_1::<N>();
Queue {
head: AtomicUsize::new(0),
tail: AtomicUsize::new(0),
buffer: [Self::INIT; N],
}
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub const fn capacity(&self) -> usize {
N - 1
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
let current_head = self.head.load(Ordering::Relaxed);
let current_tail = self.tail.load(Ordering::Relaxed);
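// `tail` may have wrapped around past `head`; adding `N` before the
// modulo keeps the difference non-negative without branching.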
current_tail.wrapping_sub(current_head).wrapping_add(N) % N
}
/// Returns `true` if the queue is empty
#[inline]
pub fn is_empty(&self) -> bool {
self.head.load(Ordering::Relaxed) == self.tail.load(Ordering::Relaxed)
}
/// Returns `true` if the queue is full
#[inline]
pub fn is_full(&self) -> bool {
Self::increment(self.tail.load(Ordering::Relaxed)) == self.head.load(Ordering::Relaxed)
}
/// Iterates from the front of the queue to the back
pub fn iter(&self) -> Iter<'_, T, N> {
Iter {
rb: self,
index: 0,
len: self.len(),
}
}
/// Returns an iterator that allows modifying each value
pub fn iter_mut(&mut self) -> IterMut<'_, T, N> {
let len = self.len();
IterMut {
rb: self,
index: 0,
len,
}
}
/// Adds an `item` to the end of the queue
///
/// Returns back the `item` if the queue is full
#[inline]
pub fn enqueue(&mut self, val: T) -> Result<(), T> {
unsafe { self.inner_enqueue(val) }
}
/// Returns the item in the front of the queue, or `None` if the queue is empty
#[inline]
pub fn dequeue(&mut self) -> Option<T> {
unsafe { self.inner_dequeue() }
}
/// Returns a reference to the item in the front of the queue without dequeuing, or
/// `None` if the queue is empty.
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
/// assert_eq!(Some(&1), consumer.peek());
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.peek());
/// ```
pub fn peek(&self) -> Option<&T> {
if !self.is_empty() {
let head = self.head.load(Ordering::Relaxed);
Some(unsafe { &*(self.buffer.get_unchecked(head).get() as *const T) })
} else {
None
}
}
// The memory for enqueueing is "owned" by the tail pointer.
// NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_enqueue(&self, val: T) -> Result<(), T> {
let current_tail = self.tail.load(Ordering::Relaxed);
let next_tail = Self::increment(current_tail);
if next_tail != self.head.load(Ordering::Acquire) {
(self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val));
self.tail.store(next_tail, Ordering::Release);
Ok(())
} else {
Err(val)
}
}
// The memory for enqueueing is "owned" by the tail pointer.
// NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_enqueue_unchecked(&self, val: T) {
let current_tail = self.tail.load(Ordering::Relaxed);
(self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val));
self.tail
.store(Self::increment(current_tail), Ordering::Release);
}
/// Adds an `item` to the end of the queue, without checking if it's full
///
/// # Unsafety
///
/// If the queue is full this operation will leak a value (T's destructor won't run on
/// the value that got overwritten by `item`), *and* will allow the `dequeue` operation
/// to create a copy of `item`, which could result in `T`'s destructor running on `item`
/// twice.
pub unsafe fn enqueue_unchecked(&mut self, val: T) {
self.inner_enqueue_unchecked(val)
}
// The memory for dequeuing is "owned" by the head pointer.
// NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_dequeue(&self) -> Option<T> {
let current_head = self.head.load(Ordering::Relaxed);
if current_head == self.tail.load(Ordering::Acquire) {
None
} else {
let v = (self.buffer.get_unchecked(current_head).get() as *const T).read();
self.head
.store(Self::increment(current_head), Ordering::Release);
Some(v)
}
}
// The memory for dequeuing is "owned" by the head pointer.
// NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_dequeue_unchecked(&self) -> T {
let current_head = self.head.load(Ordering::Relaxed);
let v = (self.buffer.get_unchecked(current_head).get() as *const T).read();
self.head
.store(Self::increment(current_head), Ordering::Release);
v
}
/// Returns the item in the front of the queue, without checking if there is something in the
/// queue
///
/// # Unsafety
///
/// If the queue is empty this operation will return uninitialized memory.
pub unsafe fn dequeue_unchecked(&mut self) -> T {
self.inner_dequeue_unchecked()
}
/// Splits a queue into producer and consumer endpoints
pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) {
(Producer { rb: self }, Consumer { rb: self })
}
}
impl<T, const N: usize> Default for Queue<T, N> {
fn default() -> Self {
Self::new()
}
}
impl<T, const N: usize> Clone for Queue<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut new: Queue<T, N> = Queue::new();
for s in self.iter() {
unsafe {
// NOTE(unsafe) new.capacity() == self.capacity() >= self.len()
// no overflow possible
new.enqueue_unchecked(s.clone());
}
}
new
}
}
impl<T, const N: usize, const N2: usize> PartialEq<Queue<T, N2>> for Queue<T, N>
where
T: PartialEq,
{
fn eq(&self, other: &Queue<T, N2>) -> bool {
self.len() == other.len() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2)
}
}
impl<T, const N: usize> Eq for Queue<T, N> where T: Eq {}
/// An iterator over the items of a queue
pub struct Iter<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
index: usize,
len: usize,
}
impl<'a, T, const N: usize> Clone for Iter<'a, T, N> {
fn clone(&self) -> Self {
Self {
rb: self.rb,
index: self.index,
len: self.len,
}
}
}
/// A mutable iterator over the items of a queue
pub struct IterMut<'a, T, const N: usize> {
rb: &'a mut Queue<T, N>,
index: usize,
len: usize,
}
impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
let i = (head + self.index) % N;
self.index += 1;
Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> {
type Item = &'a mut T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
let i = (head + self.index) % N;
self.index += 1;
Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
// self.len > 0 here, because self.index < self.len and self.index >= 0
let i = (head + self.len - 1) % N;
self.len -= 1;
Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
// self.len > 0 here, because self.index < self.len and self.index >= 0
let i = (head + self.len - 1) % N;
self.len -= 1;
Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) })
} else {
None
}
}
}
impl<T, const N: usize> Drop for Queue<T, N> {
fn drop(&mut self) {
for item in self {
unsafe {
ptr::drop_in_place(item);
}
}
}
}
impl<T, const N: usize> fmt::Debug for Queue<T, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, const N: usize> hash::Hash for Queue<T, N>
where
T: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
// iterate over self in order
for t in self.iter() {
hash::Hash::hash(t, state);
}
}
}
impl<'a, T, const N: usize> IntoIterator for &'a Queue<T, N> {
type Item = &'a T;
type IntoIter = Iter<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, const N: usize> IntoIterator for &'a mut Queue<T, N> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
/// A queue "consumer"; it can dequeue items from the queue
/// NOTE the consumer semantically owns the `head` pointer of the queue
pub struct Consumer<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
}
unsafe impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where T: Send {}
/// A queue "producer"; it can enqueue items into the queue
/// NOTE the producer semantically owns the `tail` pointer of the queue
pub struct Producer<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
}
unsafe impl<'a, T, const N: usize> Send for Producer<'a, T, N> where T: Send {}
impl<'a, T, const N: usize> Consumer<'a, T, N> {
/// Returns the item in the front of the queue, or `None` if the queue is empty
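///
/// # Examples
///
/// A minimal single-threaded sketch; in practice the `Producer` and
/// `Consumer` ends usually live in different execution contexts.
///
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 4> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// producer.enqueue(1).unwrap();
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.dequeue());
/// ```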
#[inline]
pub fn dequeue(&mut self) -> Option<T> {
unsafe { self.rb.inner_dequeue() }
}
/// Returns the item in the front of the queue, without checking if there are elements in the
/// queue
///
/// See [`Queue::dequeue_unchecked`] for safety
#[inline]
pub unsafe fn dequeue_unchecked(&mut self) -> T {
self.rb.inner_dequeue_unchecked()
}
/// Returns `true` if there are items to dequeue. When this returns `true`, at least the
/// next call to `dequeue` is guaranteed to succeed
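///
/// # Examples
///
/// A minimal sketch:
///
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 4> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert!(!consumer.ready());
/// producer.enqueue(42).unwrap();
/// assert!(consumer.ready());
/// ```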
#[inline]
pub fn ready(&self) -> bool {
!self.rb.is_empty()
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
self.rb.len()
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub fn capacity(&self) -> usize {
self.rb.capacity()
}
/// Returns the item in the front of the queue without dequeuing, or `None` if the queue is
/// empty
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
/// assert_eq!(Some(&1), consumer.peek());
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.peek());
/// ```
#[inline]
pub fn peek(&self) -> Option<&T> {
self.rb.peek()
}
}
impl<'a, T, const N: usize> Producer<'a, T, N> {
/// Adds an `item` to the end of the queue; returns the `item` back if the queue is full
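///
/// # Examples
///
/// A minimal sketch; note that a queue of size `N` holds at most `N - 1` items,
/// so the third `enqueue` below is rejected and hands the item back.
///
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 3> = Queue::new();
/// let (mut producer, _consumer) = queue.split();
/// assert_eq!(Ok(()), producer.enqueue(1));
/// assert_eq!(Ok(()), producer.enqueue(2));
/// assert_eq!(Err(3), producer.enqueue(3));
/// ```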
#[inline]
pub fn enqueue(&mut self, val: T) -> Result<(), T> {
unsafe { self.rb.inner_enqueue(val) }
}
/// Adds an `item` to the end of the queue, without checking if the queue is full
///
/// See [`Queue::enqueue_unchecked`] for safety
#[inline]
pub unsafe fn enqueue_unchecked(&mut self, val: T) {
self.rb.inner_enqueue_unchecked(val)
}
/// Returns `true` if there is space to enqueue a new item. When this returns `true`, at
/// least the next call to `enqueue` is guaranteed to succeed.
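///
/// # Examples
///
/// A minimal sketch:
///
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 3> = Queue::new();
/// let (mut producer, _consumer) = queue.split();
/// assert!(producer.ready());
/// producer.enqueue(1).unwrap();
/// producer.enqueue(2).unwrap();
/// assert!(!producer.ready()); // the queue is now full
/// ```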
#[inline]
pub fn ready(&self) -> bool {
!self.rb.is_full()
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
self.rb.len()
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub fn capacity(&self) -> usize {
self.rb.capacity()
}
}
#[cfg(test)]
mod tests {
use std::hash::{Hash, Hasher};
use crate::spsc::Queue;
#[test]
fn full() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.is_full(), false);
rb.enqueue(1).unwrap();
assert_eq!(rb.is_full(), false);
rb.enqueue(2).unwrap();
assert_eq!(rb.is_full(), true);
}
#[test]
fn empty() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.is_empty(), true);
rb.enqueue(1).unwrap();
assert_eq!(rb.is_empty(), false);
rb.enqueue(2).unwrap();
assert_eq!(rb.is_empty(), false);
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn len() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.len(), 0);
rb.enqueue(1).unwrap();
assert_eq!(rb.len(), 1);
rb.enqueue(2).unwrap();
assert_eq!(rb.len(), 2);
for _ in 0..1_000_000 {
let v = rb.dequeue().unwrap();
println!("{}", v);
rb.enqueue(v).unwrap();
assert_eq!(rb.len(), 2);
}
}
#[test]
#[cfg_attr(miri, ignore)] // too slow
fn try_overflow() {
const N: usize = 23;
let mut rb: Queue<i32, N> = Queue::new();
for i in 0..N as i32 - 1 {
rb.enqueue(i).unwrap();
}
for _ in 0..1_000_000 {
for i in 0..N as i32 - 1 {
let d = rb.dequeue().unwrap();
assert_eq!(d, i);
rb.enqueue(i).unwrap();
}
}
}
#[test]
fn sanity() {
let mut rb: Queue<i32, 10> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(p.ready(), true);
assert_eq!(c.ready(), false);
assert_eq!(c.dequeue(), None);
p.enqueue(0).unwrap();
assert_eq!(c.dequeue(), Some(0));
}
#[test]
fn static_new() {
static mut _Q: Queue<i32, 4> = Queue::new();
}
#[test]
fn drop() {
struct Droppable;
impl Droppable {
fn new() -> Self {
unsafe {
COUNT += 1;
}
Droppable
}
}
impl Drop for Droppable {
fn drop(&mut self) {
unsafe {
COUNT -= 1;
}
}
}
static mut COUNT: i32 = 0;
{
let mut v: Queue<Droppable, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
v.dequeue().unwrap();
}
assert_eq!(unsafe { COUNT }, 0);
{
let mut v: Queue<Droppable, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
}
assert_eq!(unsafe { COUNT }, 0);
}
#[test]
fn iter() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.dequeue().unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
rb.enqueue(3).unwrap();
let mut items = rb.iter();
// `0` was already dequeued above, so iteration starts at `1`
assert_eq!(items.next(), Some(&1));
assert_eq!(items.next(), Some(&2));
assert_eq!(items.next(), Some(&3));
assert_eq!(items.next(), None);
}
#[test]
fn iter_double_ended() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter();
assert_eq!(items.next(), Some(&0));
assert_eq!(items.next_back(), Some(&2));
assert_eq!(items.next(), Some(&1));
assert_eq!(items.next(), None);
assert_eq!(items.next_back(), None);
}
#[test]
fn iter_mut() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter_mut();
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next(), Some(&mut 1));
assert_eq!(items.next(), Some(&mut 2));
assert_eq!(items.next(), None);
}
#[test]
fn iter_mut_double_ended() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter_mut();
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next_back(), Some(&mut 2));
assert_eq!(items.next(), Some(&mut 1));
assert_eq!(items.next(), None);
assert_eq!(items.next_back(), None);
}
#[test]
fn wrap_around() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
rb.dequeue().unwrap();
rb.dequeue().unwrap();
rb.dequeue().unwrap();
rb.enqueue(3).unwrap();
rb.enqueue(4).unwrap();
assert_eq!(rb.len(), 2);
}
#[test]
fn ready_flag() {
let mut rb: Queue<i32, 3> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
p.enqueue(0).unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), true);
p.enqueue(1).unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), false);
c.dequeue().unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), true);
c.dequeue().unwrap();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
}
#[test]
fn clone() {
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let rb2 = rb1.clone();
assert_eq!(rb1.capacity(), rb2.capacity());
assert_eq!(rb1.len(), rb2.len());
assert!(rb1.iter().zip(rb2.iter()).all(|(v1, v2)| v1 == v2));
}
#[test]
fn eq() {
// generate two queues with the same content
// but different head/tail positions in the backing buffer
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let mut rb2: Queue<i32, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
assert!(rb1 == rb2);
// test for symmetry
assert!(rb2 == rb1);
// test for changes in content
rb1.enqueue(0).unwrap();
assert!(rb1 != rb2);
rb2.enqueue(1).unwrap();
assert!(rb1 != rb2);
// test for reflexive relation
assert!(rb1 == rb1);
assert!(rb2 == rb2);
}
#[test]
fn hash_equality() {
// generate two queues with the same content
// but different head/tail positions in the backing buffer
let rb1 = {
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
rb1
};
let rb2 = {
let mut rb2: Queue<i32, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
rb2
};
let hash1 = {
let mut hasher = hash32::FnvHasher::default();
rb1.hash(&mut hasher);
hasher.finish()
};
let hash2 = {
let mut hasher = hash32::FnvHasher::default();
rb2.hash(&mut hasher);
hasher.finish()
};
assert_eq!(hash1, hash2);
}
}

856
vendor/heapless/src/string.rs vendored Normal file

@@ -0,0 +1,856 @@
use core::{
cmp::Ordering,
fmt,
fmt::Write,
hash, iter, ops,
str::{self, Utf8Error},
};
use crate::Vec;
/// A fixed capacity [`String`](https://doc.rust-lang.org/std/string/struct.String.html)
pub struct String<const N: usize> {
vec: Vec<u8, N>,
}
impl<const N: usize> String<N> {
/// Constructs a new, empty `String` with a fixed capacity of `N` bytes
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// // allocate the string on the stack
/// let mut s: String<4> = String::new();
///
/// // allocate the string in a static variable
/// static mut S: String<4> = String::new();
/// ```
#[inline]
pub const fn new() -> Self {
Self { vec: Vec::new() }
}
/// Convert UTF-8 bytes into a `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::{String, Vec};
///
/// let mut sparkle_heart = Vec::<u8, 4>::new();
/// sparkle_heart.extend_from_slice(&[240, 159, 146, 150]);
///
/// let sparkle_heart: String<4> = String::from_utf8(sparkle_heart)?;
/// assert_eq!("💖", sparkle_heart);
/// # Ok::<(), core::str::Utf8Error>(())
/// ```
///
/// Invalid UTF-8:
///
/// ```
/// use core::str::Utf8Error;
/// use heapless::{String, Vec};
///
/// let mut vec = Vec::<u8, 4>::new();
/// vec.extend_from_slice(&[0, 159, 146, 150]);
///
/// let e: Utf8Error = String::from_utf8(vec).unwrap_err();
/// assert_eq!(e.valid_up_to(), 1);
/// # Ok::<(), core::str::Utf8Error>(())
/// ```
#[inline]
pub fn from_utf8(vec: Vec<u8, N>) -> Result<Self, Utf8Error> {
core::str::from_utf8(&vec)?;
Ok(Self { vec })
}
/// Convert UTF-8 bytes into a `String`, without checking that the string
/// contains valid UTF-8.
///
/// # Safety
///
/// The bytes passed in must be valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::{String, Vec};
///
/// let mut sparkle_heart = Vec::<u8, 4>::new();
/// sparkle_heart.extend_from_slice(&[240, 159, 146, 150]);
///
/// // Safety: `sparkle_heart` Vec is known to contain valid UTF-8
/// let sparkle_heart: String<4> = unsafe { String::from_utf8_unchecked(sparkle_heart) };
/// assert_eq!("💖", sparkle_heart);
/// ```
#[inline]
pub unsafe fn from_utf8_unchecked(vec: Vec<u8, N>) -> Self {
Self { vec }
}
/// Converts a `String` into a byte vector.
///
/// This consumes the `String`, so we do not need to copy its contents.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let s: String<4> = String::try_from("ab")?;
/// let b = s.into_bytes();
/// assert!(b.len() == 2);
///
/// assert_eq!(&[b'a', b'b'], &b[..]);
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn into_bytes(self) -> Vec<u8, N> {
self.vec
}
/// Extracts a string slice containing the entire string.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<4> = String::try_from("ab")?;
/// assert!(s.as_str() == "ab");
///
/// let _s = s.as_str();
/// // s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn as_str(&self) -> &str {
unsafe { str::from_utf8_unchecked(self.vec.as_slice()) }
}
/// Converts a `String` into a mutable string slice.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<4> = String::try_from("ab")?;
/// let s = s.as_mut_str();
/// s.make_ascii_uppercase();
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn as_mut_str(&mut self) -> &mut str {
unsafe { str::from_utf8_unchecked_mut(self.vec.as_mut_slice()) }
}
/// Returns a mutable reference to the contents of this `String`.
///
/// # Safety
///
/// This function is unsafe because it does not check that the bytes passed
/// to it are valid UTF-8. If this constraint is violated, it may cause
/// memory unsafety issues with future users of the `String`, as the rest of
/// the library assumes that `String`s are valid UTF-8.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("hello")?;
///
/// unsafe {
/// let vec = s.as_mut_vec();
/// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
///
/// vec.reverse();
/// }
/// assert_eq!(s, "olleh");
/// # Ok::<(), ()>(())
/// ```
pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8, N> {
&mut self.vec
}
/// Appends a given string slice onto the end of this `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo")?;
///
/// assert!(s.push_str("bar").is_ok());
///
/// assert_eq!("foobar", s);
///
/// assert!(s.push_str("tender").is_err());
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn push_str(&mut self, string: &str) -> Result<(), ()> {
self.vec.extend_from_slice(string.as_bytes())
}
/// Returns the maximum number of bytes the `String` can hold
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<4> = String::new();
/// assert!(s.capacity() == 4);
/// ```
#[inline]
pub fn capacity(&self) -> usize {
self.vec.capacity()
}
/// Appends the given [`char`] to the end of this `String`.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("abc")?;
///
/// s.push('1').unwrap();
/// s.push('2').unwrap();
/// s.push('3').unwrap();
///
/// assert!("abc123" == s.as_str());
///
/// assert_eq!("abc123", s);
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn push(&mut self, c: char) -> Result<(), ()> {
match c.len_utf8() {
1 => self.vec.push(c as u8).map_err(|_| {}),
_ => self
.vec
.extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()),
}
}
/// Shortens this `String` to the specified length.
///
/// If `new_len` is greater than the string's current length, this has no
/// effect.
///
/// Note that this method has no effect on the allocated capacity
/// of the string.
///
/// # Panics
///
/// Panics if `new_len` does not lie on a [`char`] boundary.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("hello")?;
///
/// s.truncate(2);
///
/// assert_eq!("he", s);
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn truncate(&mut self, new_len: usize) {
if new_len <= self.len() {
assert!(self.is_char_boundary(new_len));
self.vec.truncate(new_len)
}
}
/// Removes the last character from the string buffer and returns it.
///
/// Returns [`None`] if this `String` is empty.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo")?;
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('f'));
///
/// assert_eq!(s.pop(), None);
/// # Ok::<(), ()>(())
/// ```
pub fn pop(&mut self) -> Option<char> {
let ch = self.chars().rev().next()?;
// pop bytes that correspond to `ch`
for _ in 0..ch.len_utf8() {
unsafe {
self.vec.pop_unchecked();
}
}
Some(ch)
}
/// Removes a [`char`] from this `String` at a byte position and returns it.
///
/// Note: Because this shifts over the remaining elements, it has a
/// worst-case performance of *O*(*n*).
///
/// # Panics
///
/// Panics if `idx` is larger than or equal to the `String`'s length,
/// or if it does not lie on a [`char`] boundary.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo").unwrap();
///
/// assert_eq!(s.remove(0), 'f');
/// assert_eq!(s.remove(1), 'o');
/// assert_eq!(s.remove(0), 'o');
/// ```
#[inline]
pub fn remove(&mut self, index: usize) -> char {
let ch = match self[index..].chars().next() {
Some(ch) => ch,
None => panic!("cannot remove a char from the end of a string"),
};
let next = index + ch.len_utf8();
let len = self.len();
let ptr = self.vec.as_mut_ptr();
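// shift everything after the removed char left by its UTF-8 width,
// then shrink the length accordingly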
unsafe {
core::ptr::copy(ptr.add(next), ptr.add(index), len - next);
self.vec.set_len(len - (next - index));
}
ch
}
/// Truncates this `String`, removing all contents.
///
/// While this means the `String` will have a length of zero, it does not
/// touch its capacity.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::String;
///
/// let mut s: String<8> = String::try_from("foo")?;
///
/// s.clear();
///
/// assert!(s.is_empty());
/// assert_eq!(0, s.len());
/// assert_eq!(8, s.capacity());
/// # Ok::<(), ()>(())
/// ```
#[inline]
pub fn clear(&mut self) {
self.vec.clear()
}
}
impl<const N: usize> Default for String<N> {
fn default() -> Self {
Self::new()
}
}
impl<'a, const N: usize> TryFrom<&'a str> for String<N> {
type Error = ();
fn try_from(s: &'a str) -> Result<Self, Self::Error> {
let mut new = String::new();
new.push_str(s)?;
Ok(new)
}
}
impl<const N: usize> str::FromStr for String<N> {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut new = String::new();
new.push_str(s)?;
Ok(new)
}
}
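// NOTE: the `FromIterator` impls below call `push`/`push_str` with `unwrap`,
// so collecting more data than the capacity `N` can hold will panic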
impl<const N: usize> iter::FromIterator<char> for String<N> {
fn from_iter<T: IntoIterator<Item = char>>(iter: T) -> Self {
let mut new = String::new();
for c in iter {
new.push(c).unwrap();
}
new
}
}
impl<'a, const N: usize> iter::FromIterator<&'a char> for String<N> {
fn from_iter<T: IntoIterator<Item = &'a char>>(iter: T) -> Self {
let mut new = String::new();
for c in iter {
new.push(*c).unwrap();
}
new
}
}
impl<'a, const N: usize> iter::FromIterator<&'a str> for String<N> {
fn from_iter<T: IntoIterator<Item = &'a str>>(iter: T) -> Self {
let mut new = String::new();
for c in iter {
new.push_str(c).unwrap();
}
new
}
}
impl<const N: usize> Clone for String<N> {
fn clone(&self) -> Self {
Self {
vec: self.vec.clone(),
}
}
}
impl<const N: usize> fmt::Debug for String<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Debug>::fmt(self, f)
}
}
impl<const N: usize> fmt::Display for String<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Display>::fmt(self, f)
}
}
impl<const N: usize> hash::Hash for String<N> {
#[inline]
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
<str as hash::Hash>::hash(self, hasher)
}
}
impl<const N: usize> fmt::Write for String<N> {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s).map_err(|_| fmt::Error)
}
fn write_char(&mut self, c: char) -> Result<(), fmt::Error> {
self.push(c).map_err(|_| fmt::Error)
}
}
impl<const N: usize> ops::Deref for String<N> {
type Target = str;
fn deref(&self) -> &str {
self.as_str()
}
}
impl<const N: usize> ops::DerefMut for String<N> {
fn deref_mut(&mut self) -> &mut str {
self.as_mut_str()
}
}
impl<const N: usize> AsRef<str> for String<N> {
#[inline]
fn as_ref(&self) -> &str {
self
}
}
impl<const N: usize> AsRef<[u8]> for String<N> {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<const N1: usize, const N2: usize> PartialEq<String<N2>> for String<N1> {
fn eq(&self, rhs: &String<N2>) -> bool {
str::eq(&**self, &**rhs)
}
fn ne(&self, rhs: &String<N2>) -> bool {
str::ne(&**self, &**rhs)
}
}
// String<N> == str
impl<const N: usize> PartialEq<str> for String<N> {
#[inline]
fn eq(&self, other: &str) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &str) -> bool {
str::ne(&self[..], &other[..])
}
}
// String<N> == &str
impl<const N: usize> PartialEq<&str> for String<N> {
#[inline]
fn eq(&self, other: &&str) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &&str) -> bool {
str::ne(&self[..], &other[..])
}
}
// str == String<N>
impl<const N: usize> PartialEq<String<N>> for str {
#[inline]
fn eq(&self, other: &String<N>) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String<N>) -> bool {
str::ne(&self[..], &other[..])
}
}
// &str == String<N>
impl<const N: usize> PartialEq<String<N>> for &str {
#[inline]
fn eq(&self, other: &String<N>) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String<N>) -> bool {
str::ne(&self[..], &other[..])
}
}
impl<const N: usize> Eq for String<N> {}
impl<const N1: usize, const N2: usize> PartialOrd<String<N2>> for String<N1> {
#[inline]
fn partial_cmp(&self, other: &String<N2>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
}
impl<const N: usize> Ord for String<N> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
macro_rules! impl_try_from_num {
($num:ty, $size:expr) => {
impl<const N: usize> core::convert::TryFrom<$num> for String<N> {
type Error = ();
fn try_from(s: $num) -> Result<Self, Self::Error> {
let mut new = String::new();
write!(&mut new, "{}", s).map_err(|_| ())?;
Ok(new)
}
}
};
}
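// NOTE: the second macro argument appears to record the maximum formatted
// width of the type (e.g. "-128" for `i8` is 4 characters); it is currently
// unused, as the capacity check happens via `write!` failing when the
// formatted number does not fit in `N`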
impl_try_from_num!(i8, 4);
impl_try_from_num!(i16, 6);
impl_try_from_num!(i32, 11);
impl_try_from_num!(i64, 20);
impl_try_from_num!(u8, 3);
impl_try_from_num!(u16, 5);
impl_try_from_num!(u32, 10);
impl_try_from_num!(u64, 20);
#[cfg(test)]
mod tests {
use crate::{String, Vec};
use core::convert::TryFrom;
#[test]
fn static_new() {
static mut _S: String<8> = String::new();
}
#[test]
fn clone() {
let s1: String<20> = String::try_from("abcd").unwrap();
let mut s2 = s1.clone();
s2.push_str(" efgh").unwrap();
assert_eq!(s1, "abcd");
assert_eq!(s2, "abcd efgh");
}
#[test]
fn cmp() {
let s1: String<4> = String::try_from("abcd").unwrap();
let s2: String<4> = String::try_from("zzzz").unwrap();
assert!(s1 < s2);
}
#[test]
fn cmp_heterogenous_size() {
let s1: String<4> = String::try_from("abcd").unwrap();
let s2: String<8> = String::try_from("zzzz").unwrap();
assert!(s1 < s2);
}
#[test]
fn debug() {
use core::fmt::Write;
let s: String<8> = String::try_from("abcd").unwrap();
let mut std_s = std::string::String::new();
write!(std_s, "{:?}", s).unwrap();
assert_eq!("\"abcd\"", std_s);
}
#[test]
fn display() {
use core::fmt::Write;
let s: String<8> = String::try_from("abcd").unwrap();
let mut std_s = std::string::String::new();
write!(std_s, "{}", s).unwrap();
assert_eq!("abcd", std_s);
}
#[test]
fn empty() {
let s: String<4> = String::new();
assert!(s.capacity() == 4);
assert_eq!(s, "");
assert_eq!(s.len(), 0);
assert_ne!(s.len(), 4);
}
#[test]
fn try_from() {
let s: String<4> = String::try_from("123").unwrap();
assert!(s.len() == 3);
assert_eq!(s, "123");
let e: () = String::<2>::try_from("123").unwrap_err();
assert_eq!(e, ());
}
#[test]
fn from_str() {
use core::str::FromStr;
let s: String<4> = String::<4>::from_str("123").unwrap();
assert!(s.len() == 3);
assert_eq!(s, "123");
let e: () = String::<2>::from_str("123").unwrap_err();
assert_eq!(e, ());
}
#[test]
fn from_iter() {
let mut v: Vec<char, 5> = Vec::new();
v.push('h').unwrap();
v.push('e').unwrap();
v.push('l').unwrap();
v.push('l').unwrap();
v.push('o').unwrap();
let string1: String<5> = v.iter().collect(); // from &char
let string2: String<5> = "hello".chars().collect(); // from char
assert_eq!(string1, "hello");
assert_eq!(string2, "hello");
}
#[test]
#[should_panic]
fn from_panic() {
let _: String<4> = String::try_from("12345").unwrap();
}
#[test]
fn try_from_num() {
let v: String<20> = String::try_from(18446744073709551615u64).unwrap();
assert_eq!(v, "18446744073709551615");
let e: () = String::<2>::try_from(18446744073709551615u64).unwrap_err();
assert_eq!(e, ());
}
#[test]
fn into_bytes() {
let s: String<4> = String::try_from("ab").unwrap();
let b: Vec<u8, 4> = s.into_bytes();
assert_eq!(b.len(), 2);
assert_eq!(&[b'a', b'b'], &b[..]);
}
#[test]
fn as_str() {
let s: String<4> = String::try_from("ab").unwrap();
assert_eq!(s.as_str(), "ab");
// should be moved to fail test
// let _s = s.as_str();
// s.push('c'); // <- cannot borrow `s` as mutable because it is also borrowed as immutable
}
#[test]
fn as_mut_str() {
let mut s: String<4> = String::try_from("ab").unwrap();
let s = s.as_mut_str();
s.make_ascii_uppercase();
assert_eq!(s, "AB");
}
#[test]
fn push_str() {
let mut s: String<8> = String::try_from("foo").unwrap();
assert!(s.push_str("bar").is_ok());
assert_eq!("foobar", s);
assert_eq!(s, "foobar");
assert!(s.push_str("tender").is_err());
assert_eq!("foobar", s);
assert_eq!(s, "foobar");
}
#[test]
fn push() {
let mut s: String<6> = String::try_from("abc").unwrap();
assert!(s.push('1').is_ok());
assert!(s.push('2').is_ok());
assert!(s.push('3').is_ok());
assert!(s.push('4').is_err());
assert!("abc123" == s.as_str());
}
#[test]
fn as_bytes() {
let s: String<8> = String::try_from("hello").unwrap();
assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
}
#[test]
fn truncate() {
let mut s: String<8> = String::try_from("hello").unwrap();
s.truncate(6);
assert_eq!(s.len(), 5);
s.truncate(2);
assert_eq!(s.len(), 2);
assert_eq!("he", s);
assert_eq!(s, "he");
}
#[test]
fn pop() {
let mut s: String<8> = String::try_from("foo").unwrap();
assert_eq!(s.pop(), Some('o'));
assert_eq!(s.pop(), Some('o'));
assert_eq!(s.pop(), Some('f'));
assert_eq!(s.pop(), None);
}
#[test]
fn pop_uenc() {
// "e" followed by U+0301 (combining acute accent): 1 + 2 = 3 bytes
let mut s: String<8> = String::try_from("e\u{0301}").unwrap();
assert_eq!(s.len(), 3);
match s.pop() {
Some(c) => {
assert_eq!(s.len(), 1);
assert_eq!(c, '\u{0301}'); // combining acute accent of 'e'
}
None => panic!("expected to pop a char"),
};
}
#[test]
fn is_empty() {
let mut v: String<8> = String::new();
assert!(v.is_empty());
let _ = v.push('a');
assert!(!v.is_empty());
}
#[test]
fn clear() {
let mut s: String<8> = String::try_from("foo").unwrap();
s.clear();
assert!(s.is_empty());
assert_eq!(0, s.len());
assert_eq!(8, s.capacity());
}
#[test]
fn remove() {
let mut s: String<8> = String::try_from("foo").unwrap();
assert_eq!(s.remove(0), 'f');
assert_eq!(s.as_str(), "oo");
}
#[test]
fn remove_uenc() {
let mut s: String<8> = String::try_from("ĝėēƶ").unwrap();
assert_eq!(s.remove(2), 'ė');
assert_eq!(s.remove(2), 'ē');
assert_eq!(s.remove(2), 'ƶ');
assert_eq!(s.as_str(), "ĝ");
}
#[test]
fn remove_uenc_combo_characters() {
let mut s: String<8> = String::try_from("héy").unwrap();
assert_eq!(s.remove(2), '\u{0301}');
assert_eq!(s.as_str(), "hey");
}
}

23
vendor/heapless/src/test_helpers.rs vendored Normal file

@@ -0,0 +1,23 @@
macro_rules! droppable {
() => {
static COUNT: core::sync::atomic::AtomicI32 = core::sync::atomic::AtomicI32::new(0);
#[derive(Eq, Ord, PartialEq, PartialOrd)]
struct Droppable(i32);
impl Droppable {
fn new() -> Self {
COUNT.fetch_add(1, core::sync::atomic::Ordering::Relaxed);
Droppable(Self::count())
}
fn count() -> i32 {
COUNT.load(core::sync::atomic::Ordering::Relaxed)
}
}
impl Drop for Droppable {
fn drop(&mut self) {
COUNT.fetch_sub(1, core::sync::atomic::Ordering::Relaxed);
}
}
};
}
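// A hypothetical usage sketch (this macro is meant to be invoked inside a
// test module, bringing `Droppable` and its drop counter into scope):
//
// droppable!();
// let a = Droppable::new();
// assert_eq!(Droppable::count(), 1);
// drop(a);
// assert_eq!(Droppable::count(), 0);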

58
vendor/heapless/src/ufmt.rs vendored Normal file

@@ -0,0 +1,58 @@
use crate::{string::String, vec::Vec};
use ufmt_write::uWrite;
impl<const N: usize> uWrite for String<N> {
type Error = ();
fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
self.push_str(s)
}
}
impl<const N: usize> uWrite for Vec<u8, N> {
type Error = ();
fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
self.extend_from_slice(s.as_bytes())
}
}
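// With the impls above in place, `ufmt::uwrite!` can format directly into a
// heapless `String` or byte `Vec`, as the tests below demonstrate.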
#[cfg(test)]
mod tests {
use super::*;
use ufmt::{derive::uDebug, uwrite};
#[derive(uDebug)]
struct Pair {
x: u32,
y: u32,
}
#[test]
fn test_string() {
let a = 123;
let b = Pair { x: 0, y: 1234 };
let mut s = String::<32>::new();
uwrite!(s, "{} -> {:?}", a, b).unwrap();
assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }");
}
#[test]
fn test_string_err() {
let p = Pair { x: 0, y: 1234 };
let mut s = String::<4>::new();
assert!(uwrite!(s, "{:?}", p).is_err());
}
#[test]
fn test_vec() {
let a = 123;
let b = Pair { x: 0, y: 1234 };
let mut v = Vec::<u8, 32>::new();
uwrite!(v, "{} -> {:?}", a, b).unwrap();
assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }");
}
}

1581
vendor/heapless/src/vec.rs vendored Normal file

File diff suppressed because it is too large