Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/heapless/src/pool/arc.rs vendored Normal file

@@ -0,0 +1,526 @@
//! `std::sync::Arc`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
//!
//! arc_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ArcBlock<u128> = unsafe {
//! static mut B: ArcBlock<u128> = ArcBlock::new();
//! &mut B
//! };
//!
//! P.manage(block);
//!
//! let arc = P.alloc(1).unwrap();
//!
//! // number of smart pointers is limited to the number of blocks managed by the pool
//! let res = P.alloc(2);
//! assert!(res.is_err());
//!
//! // but cloning does not consume an `ArcBlock`
//! let arc2 = arc.clone();
//!
//! assert_eq!(1, *arc2);
//!
//! // `arc`'s destructor returns the memory block to the pool
//! drop(arc2); // decrease reference counter
//! drop(arc); // release memory
//!
//! // it's now possible to allocate a new `Arc` smart pointer
//! let res = P.alloc(3);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ArcPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{arc_pool, pool::arc::ArcBlock};
//!
//! arc_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ArcBlock<u128>] = {
//! const BLOCK: ArcBlock<u128> = ArcBlock::new(); // <=
//! static mut BLOCKS: [ArcBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
// reference counting logic is based on version 1.63.0 of the Rust standard library (`alloc` crate)
// which is licensed under 'MIT or APACHE-2.0'
// https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited
// 2022-09-05)
use core::{
fmt,
hash::{Hash, Hasher},
mem::{ManuallyDrop, MaybeUninit},
ops, ptr,
sync::atomic::{self, AtomicUsize, Ordering},
};
use super::treiber::{NonNullPtr, Stack, UnionNode};
/// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::arc)
#[macro_export]
macro_rules! arc_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::arc::ArcPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
$crate::pool::arc::ArcPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `ArcPool::alloc`
#[allow(dead_code)]
pub fn alloc(
&self,
value: $data_type,
) -> Result<$crate::pool::arc::Arc<$name>, $data_type> {
<$name as $crate::pool::arc::ArcPool>::alloc(value)
}
/// Inherent method version of `ArcPool::manage`
#[allow(dead_code)]
pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) {
<$name as $crate::pool::arc::ArcPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::arc::Arc` smart pointers
pub trait ArcPool: Sized {
/// The data type managed by the memory pool
type Data: 'static;
/// `arc_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static ArcPoolImpl<Self::Data>;
/// Allocate a new `Arc` smart pointer initialized to the given `value`
///
/// `manage` should be called at least once before calling `alloc`
///
/// # Errors
///
/// The `Err` variant is returned when the memory pool has run out of memory blocks
fn alloc(value: Self::Data) -> Result<Arc<Self>, Self::Data> {
Ok(Arc {
node_ptr: Self::singleton().alloc(value)?,
})
}
/// Add a statically allocated memory block to the memory pool
fn manage(block: &'static mut ArcBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// `arc_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct ArcPoolImpl<T> {
stack: Stack<UnionNode<MaybeUninit<ArcInner<T>>>>,
}
impl<T> ArcPoolImpl<T> {
/// `arc_pool!` implementation detail
#[doc(hidden)]
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<ArcInner<T>>>>, T> {
if let Some(node_ptr) = self.stack.try_pop() {
let inner = ArcInner {
data: value,
strong: AtomicUsize::new(1),
};
unsafe { node_ptr.as_ptr().cast::<ArcInner<T>>().write(inner) }
Ok(node_ptr)
} else {
Err(value)
}
}
fn manage(&self, block: &'static mut ArcBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
unsafe impl<T> Sync for ArcPoolImpl<T> {}
/// Like `std::sync::Arc` but managed by memory pool `P`
pub struct Arc<P>
where
P: ArcPool,
{
node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>,
}
impl<P> Arc<P>
where
P: ArcPool,
{
fn inner(&self) -> &ArcInner<P::Data> {
unsafe { &*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>() }
}
fn from_inner(node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>) -> Self {
Self { node_ptr }
}
unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
&mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data)
}
#[inline(never)]
unsafe fn drop_slow(&mut self) {
// run `P::Data`'s destructor
ptr::drop_in_place(Self::get_mut_unchecked(self));
// return memory to pool
P::singleton().stack.push(self.node_ptr);
}
}
impl<P> AsRef<P::Data> for Arc<P>
where
P: ArcPool,
{
fn as_ref(&self) -> &P::Data {
&**self
}
}
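// Upper bound on the strong count, matching `std::sync::Arc`; `Clone` below panics once this
// many references exist instead of letting the counter overflow.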
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
impl<P> Clone for Arc<P>
where
P: ArcPool,
{
fn clone(&self) -> Self {
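// As in `std::sync::Arc`, `Relaxed` ordering suffices for the increment: the new clone is
// derived from an existing reference, which already keeps the allocation alive.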
let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);
if old_size > MAX_REFCOUNT {
// XXX original code calls `intrinsics::abort` which is unstable API
panic!();
}
Self::from_inner(self.node_ptr)
}
}
impl<A> fmt::Debug for Arc<A>
where
A: ArcPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> ops::Deref for Arc<P>
where
P: ArcPool,
{
type Target = P::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data) }
}
}
impl<A> fmt::Display for Arc<A>
where
A: ArcPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<A> Drop for Arc<A>
where
A: ArcPool,
{
fn drop(&mut self) {
if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
return;
}
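// This `Acquire` fence pairs with the `Release` decrements above, so every other clone's
// last access to the data happens-before the destructor runs in `drop_slow`.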
atomic::fence(Ordering::Acquire);
unsafe { self.drop_slow() }
}
}
impl<A> Eq for Arc<A>
where
A: ArcPool,
A::Data: Eq,
{
}
impl<A> Hash for Arc<A>
where
A: ArcPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Arc<A>
where
A: ArcPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Arc<B>> for Arc<A>
where
A: ArcPool,
B: ArcPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Arc<B>) -> bool {
A::Data::eq(self, &**other)
}
}
impl<A, B> PartialOrd<Arc<B>> for Arc<A>
where
A: ArcPool,
B: ArcPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Arc<B>) -> Option<core::cmp::Ordering> {
A::Data::partial_cmp(self, &**other)
}
}
unsafe impl<A> Send for Arc<A>
where
A: ArcPool,
A::Data: Sync + Send,
{
}
unsafe impl<A> Sync for Arc<A>
where
A: ArcPool,
A::Data: Sync + Send,
{
}
impl<A> Unpin for Arc<A> where A: ArcPool {}
struct ArcInner<T> {
data: T,
strong: AtomicUsize,
}
/// A chunk of memory that an `ArcPool` can manage
pub struct ArcBlock<T> {
node: UnionNode<MaybeUninit<ArcInner<T>>>,
}
impl<T> ArcBlock<T> {
/// Creates a new memory block
pub const fn new() -> Self {
Self {
node: UnionNode {
data: ManuallyDrop::new(MaybeUninit::uninit()),
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn cannot_alloc_if_empty() {
arc_pool!(P: i32);
assert_eq!(Err(42), P.alloc(42));
}
#[test]
fn can_alloc_if_manages_one_block() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
assert_eq!(42, *P.alloc(42).unwrap());
}
#[test]
fn alloc_drop_alloc() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).unwrap();
drop(arc);
assert_eq!(2, *P.alloc(2).unwrap());
}
#[test]
fn strong_count_starts_at_one() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn clone_increases_strong_count() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
let before = arc.inner().strong.load(Ordering::Relaxed);
let arc2 = arc.clone();
let expected = before + 1;
assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed));
assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn drop_decreases_strong_count() {
arc_pool!(P: i32);
let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(1).ok().unwrap();
let arc2 = arc.clone();
let before = arc.inner().strong.load(Ordering::Relaxed);
drop(arc);
let expected = before - 1;
assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
}
#[test]
fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed);
}
}
arc_pool!(P: S);
let block = unsafe {
static mut B: ArcBlock<S> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(S).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed));
drop(arc);
assert_eq!(1, COUNT.load(Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
arc_pool!(P: Zst4096);
let block = unsafe {
static mut B: ArcBlock<Zst4096> = ArcBlock::new();
&mut B
};
P.manage(block);
let arc = P.alloc(Zst4096).ok().unwrap();
let raw = &*arc as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
}

vendor/heapless/src/pool/boxed.rs vendored Normal file

@@ -0,0 +1,560 @@
//! `std::boxed::Box`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//!
//! box_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new();
//! &mut B
//! };
//!
//! // give block of memory to the pool
//! P.manage(block);
//!
//! // it's now possible to allocate
//! let mut boxed = P.alloc(1).unwrap();
//!
//! // mutation is possible
//! *boxed += 1;
//! assert_eq!(2, *boxed);
//!
//! // number of boxes is limited to the number of blocks managed by the pool
//! let res = P.alloc(3);
//! assert!(res.is_err());
//!
//! // give another memory block to the pool
//! P.manage(unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new();
//! &mut B
//! });
//!
//! // cloning also consumes a memory block from the pool
//! let mut separate_box = boxed.clone();
//! *separate_box += 1;
//! assert_eq!(3, *separate_box);
//!
//! // after the clone it's not possible to allocate again
//! let res = P.alloc(4);
//! assert!(res.is_err());
//!
//! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed);
//!
//! // it's possible to allocate again
//! let res = P.alloc(5);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `BoxPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [BoxBlock<u128>] = {
//! const BLOCK: BoxBlock<u128> = BoxBlock::new(); // <=
//! static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
use core::{
fmt,
hash::{Hash, Hasher},
mem::{ManuallyDrop, MaybeUninit},
ops, ptr,
};
use stable_deref_trait::StableDeref;
use super::treiber::{NonNullPtr, Stack, UnionNode};
/// Creates a new `BoxPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::boxed)
#[macro_export]
macro_rules! box_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::boxed::BoxPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
$crate::pool::boxed::BoxPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `BoxPool::alloc`
#[allow(dead_code)]
pub fn alloc(
&self,
value: $data_type,
) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
<$name as $crate::pool::boxed::BoxPool>::alloc(value)
}
/// Inherent method version of `BoxPool::manage`
#[allow(dead_code)]
pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
<$name as $crate::pool::boxed::BoxPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::boxed::Box`-es
///
/// # Usage
///
/// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that
/// implements this trait.
///
/// # Semver guarantees
///
/// *Implementing* this trait is exempt from semver guarantees, i.e. a new patch release is
/// allowed to break downstream `BoxPool` implementations.
///
/// *Using* the trait, e.g. in generic code, does fall under semver guarantees.
pub trait BoxPool: Sized {
/// The data type managed by the memory pool
type Data: 'static;
/// `box_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static BoxPoolImpl<Self::Data>;
/// Allocate a new `Box` initialized to the given `value`
///
/// `manage` should be called at least once before calling `alloc`
///
/// # Errors
///
/// The `Err` variant is returned when the memory pool has run out of memory blocks
fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
Ok(Box {
node_ptr: Self::singleton().alloc(value)?,
})
}
/// Add a statically allocated memory block to the memory pool
fn manage(block: &'static mut BoxBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]`
pub struct Box<P>
where
P: BoxPool,
{
node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}
impl<A> Clone for Box<A>
where
A: BoxPool,
A::Data: Clone,
{
fn clone(&self) -> Self {
A::alloc((**self).clone()).ok().expect("OOM")
}
}
impl<A> fmt::Debug for Box<A>
where
A: BoxPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> ops::Deref for Box<P>
where
P: BoxPool,
{
type Target = P::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
}
}
impl<P> ops::DerefMut for Box<P>
where
P: BoxPool,
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
}
}
unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}
impl<A> fmt::Display for Box<A>
where
A: BoxPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> Drop for Box<P>
where
P: BoxPool,
{
fn drop(&mut self) {
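// run `P::Data`'s destructor first, then return the memory block to the pool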
let node = self.node_ptr;
unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }
unsafe { P::singleton().stack.push(node) }
}
}
impl<A> Eq for Box<A>
where
A: BoxPool,
A::Data: Eq,
{
}
impl<A> Hash for Box<A>
where
A: BoxPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Box<A>
where
A: BoxPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Box<B>> for Box<A>
where
A: BoxPool,
B: BoxPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Box<B>) -> bool {
A::Data::eq(self, other)
}
}
impl<A, B> PartialOrd<Box<B>> for Box<A>
where
A: BoxPool,
B: BoxPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
A::Data::partial_cmp(self, other)
}
}
unsafe impl<P> Send for Box<P>
where
P: BoxPool,
P::Data: Send,
{
}
unsafe impl<P> Sync for Box<P>
where
P: BoxPool,
P::Data: Sync,
{
}
/// `box_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
stack: Stack<UnionNode<MaybeUninit<T>>>,
}
impl<T> BoxPoolImpl<T> {
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
if let Some(node_ptr) = self.stack.try_pop() {
unsafe { node_ptr.as_ptr().cast::<T>().write(value) }
Ok(node_ptr)
} else {
Err(value)
}
}
fn manage(&self, block: &'static mut BoxBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
unsafe impl<T> Sync for BoxPoolImpl<T> {}
/// A chunk of memory that a `BoxPool` singleton can manage
pub struct BoxBlock<T> {
node: UnionNode<MaybeUninit<T>>,
}
impl<T> BoxBlock<T> {
/// Creates a new memory block
pub const fn new() -> Self {
Self {
node: UnionNode {
data: ManuallyDrop::new(MaybeUninit::uninit()),
},
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::thread;
use super::*;
#[test]
fn cannot_alloc_if_empty() {
box_pool!(P: i32);
assert_eq!(Err(42), P.alloc(42));
}
#[test]
fn can_alloc_if_pool_manages_one_block() {
box_pool!(P: i32);
let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new();
&mut B
};
P.manage(block);
assert_eq!(42, *P.alloc(42).unwrap());
}
#[test]
fn alloc_drop_alloc() {
box_pool!(P: i32);
let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(1).unwrap();
drop(boxed);
assert_eq!(2, *P.alloc(2).unwrap());
}
#[test]
fn runs_destructor_exactly_once_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed);
}
}
box_pool!(P: S);
let block = unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(S).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed));
drop(boxed);
assert_eq!(1, COUNT.load(Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
box_pool!(P: Zst4096);
let block = unsafe {
static mut B: BoxBlock<Zst4096> = BoxBlock::new();
&mut B
};
P.manage(block);
let boxed = P.alloc(Zst4096).ok().unwrap();
let raw = &*boxed as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
#[allow(clippy::redundant_clone)]
#[test]
fn can_clone_if_pool_is_not_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let first = P.alloc(S).ok().unwrap();
let _second = first.clone();
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let is_oom = P.alloc(S).is_err();
assert!(is_oom);
}
#[allow(clippy::redundant_clone)]
#[test]
fn clone_panics_if_pool_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let first = P.alloc(S).ok().unwrap();
let thread = thread::spawn(move || {
let _second = first.clone();
});
let thread_panicked = thread.join().is_err();
assert!(thread_panicked);
// we diverge from `alloc::Box<T>` in that we call `T::clone` first and then request
// memory from the allocator whereas `alloc::Box<T>` does it the other way around
// assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
}
#[allow(clippy::redundant_clone)]
#[test]
fn panicking_clone_does_not_leak_memory() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S;
impl Clone for S {
fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
panic!()
}
}
box_pool!(P: S);
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
P.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new();
&mut B
});
let boxed = P.alloc(S).ok().unwrap();
let thread = thread::spawn(move || {
let _boxed = boxed.clone();
});
let thread_panicked = thread.join().is_err();
assert!(thread_panicked);
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let once = P.alloc(S);
let twice = P.alloc(S);
assert!(once.is_ok());
assert!(twice.is_ok());
}
}

vendor/heapless/src/pool/object.rs vendored Normal file

@@ -0,0 +1,420 @@
//! Object pool API
//!
//! # Example usage
//!
//! ```
//! use heapless::{object_pool, pool::object::{Object, ObjectBlock}};
//!
//! object_pool!(P: [u8; 128]);
//!
//! // cannot request objects without first giving object blocks to the pool
//! assert!(P.request().is_none());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe {
//! // unlike the memory pool APIs, an initial value must be specified here
//! static mut B: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]);
//! &mut B
//! };
//!
//! // give object block to the pool
//! P.manage(block);
//!
//! // it's now possible to request objects
//! // unlike the memory pool APIs, no initial value is required here
//! let mut object = P.request().unwrap();
//!
//! // mutation is possible
//! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1));
//!
//! // the number of live objects is limited to the number of blocks managed by the pool
//! let res = P.request();
//! assert!(res.is_none());
//!
//! // `object`'s destructor returns the object to the pool
//! drop(object);
//!
//! // it's possible to request an `Object` again
//! let res = P.request();
//!
//! assert!(res.is_some());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of object blocks and give all the blocks
//! to the `ObjectPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{object_pool, pool::object::ObjectBlock};
//!
//! object_pool!(P: [u8; 128]);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ObjectBlock<[u8; 128]>] = {
//! const BLOCK: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]); // <=
//! static mut BLOCKS: [ObjectBlock<[u8; 128]>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//! unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//! P.manage(block);
//! }
//! ```
use core::{
cmp::Ordering,
fmt,
hash::{Hash, Hasher},
mem::ManuallyDrop,
ops, ptr,
};
use stable_deref_trait::StableDeref;
use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode};
/// Creates a new `ObjectPool` singleton with the given `$name` that manages the specified
/// `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::object)
#[macro_export]
macro_rules! object_pool {
($name:ident: $data_type:ty) => {
pub struct $name;
impl $crate::pool::object::ObjectPool for $name {
type Data = $data_type;
fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> {
static $name: $crate::pool::object::ObjectPoolImpl<$data_type> =
$crate::pool::object::ObjectPoolImpl::new();
&$name
}
}
impl $name {
/// Inherent method version of `ObjectPool::request`
#[allow(dead_code)]
pub fn request(&self) -> Option<$crate::pool::object::Object<$name>> {
<$name as $crate::pool::object::ObjectPool>::request()
}
/// Inherent method version of `ObjectPool::manage`
#[allow(dead_code)]
pub fn manage(
&self,
block: &'static mut $crate::pool::object::ObjectBlock<$data_type>,
) {
<$name as $crate::pool::object::ObjectPool>::manage(block)
}
}
};
}
/// A singleton that manages `pool::object::Object`s
pub trait ObjectPool: Sized {
/// The data type of the objects managed by the object pool
type Data: 'static;
/// `object_pool!` implementation detail
#[doc(hidden)]
fn singleton() -> &'static ObjectPoolImpl<Self::Data>;
/// Request a new object from the pool
fn request() -> Option<Object<Self>> {
Self::singleton()
.request()
.map(|node_ptr| Object { node_ptr })
}
/// Add a statically allocated object to the pool
fn manage(block: &'static mut ObjectBlock<Self::Data>) {
Self::singleton().manage(block)
}
}
/// `object_pool!` implementation detail
#[doc(hidden)]
pub struct ObjectPoolImpl<T> {
stack: Stack<StructNode<T>>,
}
impl<T> ObjectPoolImpl<T> {
/// `object_pool!` implementation detail
#[doc(hidden)]
pub const fn new() -> Self {
Self {
stack: Stack::new(),
}
}
fn request(&self) -> Option<NonNullPtr<StructNode<T>>> {
self.stack.try_pop()
}
fn manage(&self, block: &'static mut ObjectBlock<T>) {
let node: &'static mut _ = &mut block.node;
unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
}
}
// `T` needs to be `Send` because returning an object from one thread and then
// requesting it from another is effectively a cross-thread 'send' operation
unsafe impl<T> Sync for ObjectPoolImpl<T> where T: Send {}
/// An object managed by object pool `P`
pub struct Object<P>
where
P: ObjectPool,
{
node_ptr: NonNullPtr<StructNode<P::Data>>,
}
impl<A, T, const N: usize> AsMut<[T]> for Object<A>
where
A: ObjectPool<Data = [T; N]>,
{
fn as_mut(&mut self) -> &mut [T] {
&mut **self
}
}
impl<A, T, const N: usize> AsRef<[T]> for Object<A>
where
A: ObjectPool<Data = [T; N]>,
{
fn as_ref(&self) -> &[T] {
&**self
}
}
impl<A> fmt::Debug for Object<A>
where
A: ObjectPool,
A::Data: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<A> ops::Deref for Object<A>
where
A: ObjectPool,
{
type Target = A::Data;
fn deref(&self) -> &Self::Target {
unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr()).data) }
}
}
impl<A> ops::DerefMut for Object<A>
where
A: ObjectPool,
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *ptr::addr_of_mut!((*self.node_ptr.as_ptr()).data) }
}
}
unsafe impl<A> StableDeref for Object<A> where A: ObjectPool {}
impl<A> fmt::Display for Object<A>
where
A: ObjectPool,
A::Data: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
A::Data::fmt(self, f)
}
}
impl<P> Drop for Object<P>
where
P: ObjectPool,
{
fn drop(&mut self) {
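// Intentionally does not drop the inner value: the object is returned to the pool as-is
// and will be handed out again by a later `request` (see `destructor_does_not_run_on_drop`).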
unsafe { P::singleton().stack.push(self.node_ptr) }
}
}
impl<A> Eq for Object<A>
where
A: ObjectPool,
A::Data: Eq,
{
}
impl<A> Hash for Object<A>
where
A: ObjectPool,
A::Data: Hash,
{
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
(**self).hash(state)
}
}
impl<A> Ord for Object<A>
where
A: ObjectPool,
A::Data: Ord,
{
fn cmp(&self, other: &Self) -> Ordering {
A::Data::cmp(self, other)
}
}
impl<A, B> PartialEq<Object<B>> for Object<A>
where
A: ObjectPool,
B: ObjectPool,
A::Data: PartialEq<B::Data>,
{
fn eq(&self, other: &Object<B>) -> bool {
A::Data::eq(self, other)
}
}
impl<A, B> PartialOrd<Object<B>> for Object<A>
where
A: ObjectPool,
B: ObjectPool,
A::Data: PartialOrd<B::Data>,
{
fn partial_cmp(&self, other: &Object<B>) -> Option<Ordering> {
A::Data::partial_cmp(self, other)
}
}
unsafe impl<P> Send for Object<P>
where
P: ObjectPool,
P::Data: Send,
{
}
unsafe impl<P> Sync for Object<P>
where
P: ObjectPool,
P::Data: Sync,
{
}
/// An object "block" of data type `T` that has not yet been associated to an `ObjectPool`
pub struct ObjectBlock<T> {
node: StructNode<T>,
}
impl<T> ObjectBlock<T> {
/// Creates a new object block with the given `initial_value`
pub const fn new(initial_value: T) -> Self {
Self {
node: StructNode {
next: ManuallyDrop::new(AtomicPtr::null()),
data: ManuallyDrop::new(initial_value),
},
}
}
}
#[cfg(test)]
mod tests {
use core::sync::atomic::{self, AtomicUsize};
use super::*;
#[test]
fn cannot_request_if_empty() {
object_pool!(P: i32);
assert_eq!(None, P.request());
}
#[test]
fn can_request_if_manages_one_block() {
object_pool!(P: i32);
let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B
};
P.manage(block);
assert_eq!(1, *P.request().unwrap());
}
#[test]
fn request_drop_request() {
object_pool!(P: i32);
let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B
};
P.manage(block);
let mut object = P.request().unwrap();
*object = 2;
drop(object);
assert_eq!(2, *P.request().unwrap());
}
#[test]
fn destructor_does_not_run_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S;
impl Drop for S {
fn drop(&mut self) {
COUNT.fetch_add(1, atomic::Ordering::Relaxed);
}
}
object_pool!(P: S);
let block = unsafe {
static mut B: ObjectBlock<S> = ObjectBlock::new(S);
&mut B
};
P.manage(block);
let object = P.request().unwrap();
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
drop(object);
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
}
#[test]
fn zst_is_well_aligned() {
#[repr(align(4096))]
pub struct Zst4096;
object_pool!(P: Zst4096);
let block = unsafe {
static mut B: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096);
&mut B
};
P.manage(block);
let object = P.request().unwrap();
let raw = &*object as *const Zst4096;
assert_eq!(0, raw as usize % 4096);
}
}

vendor/heapless/src/pool/treiber.rs vendored Normal file

@@ -0,0 +1,91 @@
use core::mem::ManuallyDrop;
#[cfg_attr(target_arch = "x86", path = "treiber/cas.rs")]
#[cfg_attr(arm_llsc, path = "treiber/llsc.rs")]
mod impl_;
pub use impl_::{AtomicPtr, NonNullPtr};
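/// Intrusive Treiber stack: a lock-free LIFO in which every node stores its own `next`
/// pointer, so pushing and popping never allocate.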
pub struct Stack<N>
where
N: Node,
{
top: AtomicPtr<N>,
}
impl<N> Stack<N>
where
N: Node,
{
pub const fn new() -> Self {
Self {
top: AtomicPtr::null(),
}
}
/// # Safety
/// - `node` must be a valid pointer
/// - aliasing rules must be enforced by the caller, e.g. the same `node` may not be pushed more than once
pub unsafe fn push(&self, node: NonNullPtr<N>) {
impl_::push(self, node)
}
pub fn try_pop(&self) -> Option<NonNullPtr<N>> {
impl_::try_pop(self)
}
}
pub trait Node: Sized {
type Data;
fn next(&self) -> &AtomicPtr<Self>;
fn next_mut(&mut self) -> &mut AtomicPtr<Self>;
}
pub union UnionNode<T> {
next: ManuallyDrop<AtomicPtr<UnionNode<T>>>,
pub data: ManuallyDrop<T>,
}
impl<T> Node for UnionNode<T> {
type Data = T;
fn next(&self) -> &AtomicPtr<Self> {
unsafe { &self.next }
}
fn next_mut(&mut self) -> &mut AtomicPtr<Self> {
unsafe { &mut self.next }
}
}
pub struct StructNode<T> {
pub next: ManuallyDrop<AtomicPtr<StructNode<T>>>,
pub data: ManuallyDrop<T>,
}
impl<T> Node for StructNode<T> {
type Data = T;
fn next(&self) -> &AtomicPtr<Self> {
&self.next
}
fn next_mut(&mut self) -> &mut AtomicPtr<Self> {
&mut self.next
}
}
#[cfg(test)]
mod tests {
use core::mem;
use super::*;
#[test]
fn node_is_never_zero_sized() {
struct Zst;
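// the union always contains the `next: AtomicPtr<_>` variant, so even a ZST payload
// cannot make the node zero-sized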
assert_ne!(mem::size_of::<UnionNode<Zst>>(), 0);
}
}

vendor/heapless/src/pool/treiber/cas.rs vendored Normal file

@@ -0,0 +1,196 @@
use core::{
marker::PhantomData,
num::{NonZeroU32, NonZeroU64},
ptr::NonNull,
sync::atomic::{AtomicU64, Ordering},
};
use super::{Node, Stack};
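// Tagged-pointer scheme for 32-bit targets (here `x86`): the node address occupies the low
// 32 bits of the `AtomicU64` and a version tag the high 32 bits. `try_pop` bumps the tag of
// every pointer it returns (see `increase_tag`), so a node that is freed and re-pushed at the
// same address cannot satisfy a stale compare-exchange, which prevents the ABA problem.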
pub struct AtomicPtr<N>
where
N: Node,
{
inner: AtomicU64,
_marker: PhantomData<*mut N>,
}
impl<N> AtomicPtr<N>
where
N: Node,
{
pub const fn null() -> Self {
Self {
inner: AtomicU64::new(0),
_marker: PhantomData,
}
}
fn compare_and_exchange_weak(
&self,
current: Option<NonNullPtr<N>>,
new: Option<NonNullPtr<N>>,
success: Ordering,
failure: Ordering,
) -> Result<(), Option<NonNullPtr<N>>> {
self.inner
.compare_exchange_weak(
current
.map(|pointer| pointer.into_u64())
.unwrap_or_default(),
new.map(|pointer| pointer.into_u64()).unwrap_or_default(),
success,
failure,
)
.map(drop)
.map_err(NonNullPtr::from_u64)
}
fn load(&self, order: Ordering) -> Option<NonNullPtr<N>> {
NonZeroU64::new(self.inner.load(order)).map(|inner| NonNullPtr {
inner,
_marker: PhantomData,
})
}
fn store(&self, value: Option<NonNullPtr<N>>, order: Ordering) {
self.inner.store(
value.map(|pointer| pointer.into_u64()).unwrap_or_default(),
order,
)
}
}
pub struct NonNullPtr<N>
where
N: Node,
{
inner: NonZeroU64,
_marker: PhantomData<*mut N>,
}
impl<N> Clone for NonNullPtr<N>
where
N: Node,
{
fn clone(&self) -> Self {
*self
}
}
impl<N> Copy for NonNullPtr<N> where N: Node {}
impl<N> NonNullPtr<N>
where
N: Node,
{
pub fn as_ptr(&self) -> *mut N {
self.inner.get() as *mut N
}
pub fn from_static_mut_ref(ref_: &'static mut N) -> NonNullPtr<N> {
let non_null = NonNull::from(ref_);
Self::from_non_null(non_null)
}
fn from_non_null(ptr: NonNull<N>) -> Self {
let address = ptr.as_ptr() as u32;
let tag = initial_tag().get();
let value = (u64::from(tag) << 32) | u64::from(address);
Self {
inner: unsafe { NonZeroU64::new_unchecked(value) },
_marker: PhantomData,
}
}
fn from_u64(value: u64) -> Option<Self> {
NonZeroU64::new(value).map(|inner| Self {
inner,
_marker: PhantomData,
})
}
fn non_null(&self) -> NonNull<N> {
unsafe { NonNull::new_unchecked(self.inner.get() as *mut N) }
}
fn tag(&self) -> NonZeroU32 {
unsafe { NonZeroU32::new_unchecked((self.inner.get() >> 32) as u32) }
}
fn into_u64(self) -> u64 {
self.inner.get()
}
fn increase_tag(&mut self) {
let address = self.as_ptr() as u32;
let new_tag = self
.tag()
.get()
.checked_add(1)
.map(|val| unsafe { NonZeroU32::new_unchecked(val) })
.unwrap_or_else(initial_tag)
.get();
let value = (u64::from(new_tag) << 32) | u64::from(address);
self.inner = unsafe { NonZeroU64::new_unchecked(value) };
}
}
fn initial_tag() -> NonZeroU32 {
unsafe { NonZeroU32::new_unchecked(1) }
}
pub unsafe fn push<N>(stack: &Stack<N>, new_top: NonNullPtr<N>)
where
N: Node,
{
let mut top = stack.top.load(Ordering::Relaxed);
loop {
new_top
.non_null()
.as_ref()
.next()
.store(top, Ordering::Relaxed);
if let Err(p) = stack.top.compare_and_exchange_weak(
top,
Some(new_top),
Ordering::Release,
Ordering::Relaxed,
) {
top = p;
} else {
return;
}
}
}
pub fn try_pop<N>(stack: &Stack<N>) -> Option<NonNullPtr<N>>
where
N: Node,
{
loop {
if let Some(mut top) = stack.top.load(Ordering::Acquire) {
let next = unsafe { top.non_null().as_ref().next().load(Ordering::Relaxed) };
if stack
.top
.compare_and_exchange_weak(Some(top), next, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
top.increase_tag();
return Some(top);
}
} else {
// stack observed as empty
return None;
}
}
}

vendor/heapless/src/pool/treiber/llsc.rs vendored Normal file

@@ -0,0 +1,145 @@
use core::{
cell::UnsafeCell,
ptr::{self, NonNull},
};
use super::{Node, Stack};
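// Lock-free stack built on ARM load-linked/store-conditional (LDREX/STREX, see the `arch`
// module below): a store-conditional fails if the location may have been written since the
// matching load-link, so no ABA tag is needed and plain `NonNull` pointers suffice.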
pub struct AtomicPtr<N>
where
N: Node,
{
inner: UnsafeCell<Option<NonNull<N>>>,
}
impl<N> AtomicPtr<N>
where
N: Node,
{
pub const fn null() -> Self {
Self {
inner: UnsafeCell::new(None),
}
}
}
pub struct NonNullPtr<N>
where
N: Node,
{
inner: NonNull<N>,
}
impl<N> NonNullPtr<N>
where
N: Node,
{
pub fn as_ptr(&self) -> *mut N {
self.inner.as_ptr().cast()
}
pub fn from_static_mut_ref(ref_: &'static mut N) -> Self {
Self {
inner: NonNull::from(ref_),
}
}
}
impl<N> Clone for NonNullPtr<N>
where
N: Node,
{
fn clone(&self) -> Self {
Self { inner: self.inner }
}
}
impl<N> Copy for NonNullPtr<N> where N: Node {}
pub unsafe fn push<N>(stack: &Stack<N>, mut node: NonNullPtr<N>)
where
N: Node,
{
let top_addr = ptr::addr_of!(stack.top) as *mut usize;
loop {
let top = arch::load_link(top_addr);
node.inner
.as_mut()
.next_mut()
.inner
.get()
.write(NonNull::new(top as *mut _));
if arch::store_conditional(node.inner.as_ptr() as usize, top_addr).is_ok() {
break;
}
}
}
pub fn try_pop<N>(stack: &Stack<N>) -> Option<NonNullPtr<N>>
where
N: Node,
{
unsafe {
let top_addr = ptr::addr_of!(stack.top) as *mut usize;
loop {
let top = arch::load_link(top_addr);
if let Some(top) = NonNull::new(top as *mut N) {
let next = &top.as_ref().next();
if arch::store_conditional(
next.inner
.get()
.read()
.map(|non_null| non_null.as_ptr() as usize)
.unwrap_or_default(),
top_addr,
)
.is_ok()
{
break Some(NonNullPtr { inner: top });
}
} else {
arch::clear_load_link();
break None;
}
}
}
}
#[cfg(arm_llsc)]
mod arch {
use core::arch::asm;
#[inline(always)]
pub fn clear_load_link() {
unsafe { asm!("clrex", options(nomem, nostack)) }
}
/// # Safety
/// - `addr` must be a valid pointer
#[inline(always)]
pub unsafe fn load_link(addr: *const usize) -> usize {
let value;
asm!("ldrex {}, [{}]", out(reg) value, in(reg) addr, options(nostack));
value
}
/// # Safety
/// - `addr` must be a valid pointer
#[inline(always)]
pub unsafe fn store_conditional(value: usize, addr: *mut usize) -> Result<(), ()> {
let outcome: usize;
asm!("strex {}, {}, [{}]", out(reg) outcome, in(reg) value, in(reg) addr, options(nostack));
if outcome == 0 {
Ok(())
} else {
Err(())
}
}
}