//! `std::sync::Arc`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
//!
//! arc_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ArcBlock<u128> = unsafe {
//!     static mut B: ArcBlock<u128> = ArcBlock::new();
//!     &mut B
//! };
//!
//! P.manage(block);
//!
//! let arc = P.alloc(1).unwrap();
//!
//! // number of smart pointers is limited to the number of blocks managed by the pool
//! let res = P.alloc(2);
//! assert!(res.is_err());
//!
//! // but cloning does not consume an `ArcBlock`
//! let arc2 = arc.clone();
//!
//! assert_eq!(1, *arc2);
//!
//! // `arc`'s destructor returns the memory block to the pool
//! drop(arc2); // decrease reference counter
//! drop(arc); // release memory
//!
//! // it's now possible to allocate a new `Arc` smart pointer
//! let res = P.alloc(3);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the blocks
//! to the `ArcPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{arc_pool, pool::arc::ArcBlock};
//!
//! arc_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [ArcBlock<u128>] = {
//!     const BLOCK: ArcBlock<u128> = ArcBlock::new(); // <=
//!     static mut BLOCKS: [ArcBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//!     unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//!     P.manage(block);
//! }
//! ```

// reference counting logic is based on version 1.63.0 of the Rust standard library (`alloc` crate)
// which is licensed under 'MIT or APACHE-2.0'
// https://github.com/rust-lang/rust/blob/1.63.0/library/alloc/src/sync.rs#L235 (last visited
// 2022-09-05)

use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
    sync::atomic::{self, AtomicUsize, Ordering},
};

use super::treiber::{NonNullPtr, Stack, UnionNode};

/// Creates a new `ArcPool` singleton with the given `$name` that manages the specified `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::arc)
#[macro_export]
macro_rules! arc_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::arc::ArcPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
                static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
                    $crate::pool::arc::ArcPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `ArcPool::alloc`
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::arc::Arc<$name>, $data_type> {
                <$name as $crate::pool::arc::ArcPool>::alloc(value)
            }

            /// Inherent method version of `ArcPool::manage`
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::arc::ArcBlock<$data_type>) {
                <$name as $crate::pool::arc::ArcPool>::manage(block)
            }
        }
    };
}
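
// For orientation, a sketch of what `arc_pool!(P: u8)` expands to (simplified; see the
// macro body above for the exact output):
//
//     pub struct P;
//
//     impl ArcPool for P {
//         type Data = u8;
//
//         fn singleton() -> &'static ArcPoolImpl<u8> {
//             static P: ArcPoolImpl<u8> = ArcPoolImpl::new();
//             &P
//         }
//     }
//
//     // ...plus inherent `alloc` and `manage` methods on `P` that forward to the
//     // `ArcPool` trait methods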

/// A singleton that manages `pool::arc::Arc` smart pointers
pub trait ArcPool: Sized {
    /// The data type managed by the memory pool
    type Data: 'static;

    /// `arc_pool!` implementation detail
    #[doc(hidden)]
    fn singleton() -> &'static ArcPoolImpl<Self::Data>;

    /// Allocate a new `Arc` smart pointer initialized to the given `value`
    ///
    /// `manage` should be called at least once before calling `alloc`
    ///
    /// # Errors
    ///
    /// The `Err`or variant is returned when the memory pool has run out of memory blocks
    fn alloc(value: Self::Data) -> Result<Arc<Self>, Self::Data> {
        Ok(Arc {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Add a statically allocated memory block to the memory pool
    fn manage(block: &'static mut ArcBlock<Self::Data>) {
        Self::singleton().manage(block)
    }
}
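
// Illustrative sketch (not part of the upstream API): because each pool created with
// `arc_pool!` is its own type implementing `ArcPool`, code can be written generically over
// "any pool". The helper below demonstrates the pattern; the name `try_alloc` is made up
// for this illustration.
#[allow(dead_code)]
fn try_alloc<P>(value: P::Data) -> Option<Arc<P>>
where
    P: ArcPool,
{
    // `ArcPool::alloc` hands the value back on failure; this sketch simply discards it
    P::alloc(value).ok()
}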

/// `arc_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct ArcPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<ArcInner<T>>>>,
}

impl<T> ArcPoolImpl<T> {
    /// `arc_pool!` implementation detail
    #[doc(hidden)]
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<ArcInner<T>>>>, T> {
        if let Some(node_ptr) = self.stack.try_pop() {
            let inner = ArcInner {
                data: value,
                strong: AtomicUsize::new(1),
            };
            unsafe { node_ptr.as_ptr().cast::<ArcInner<T>>().write(inner) }

            Ok(node_ptr)
        } else {
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut ArcBlock<T>) {
        let node: &'static mut _ = &mut block.node;

        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

unsafe impl<T> Sync for ArcPoolImpl<T> {}
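
// Note on the implementation above: the pool is a lock-free Treiber stack of free memory
// blocks. `alloc` pops a free node and writes an `ArcInner` (the value plus a strong
// counter starting at 1) into it; `manage` pushes a caller-provided block onto the free
// list. When the last `Arc` is dropped, the node is pushed back (see `drop_slow` below).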

/// Like `std::sync::Arc` but managed by memory pool `P`
pub struct Arc<P>
where
    P: ArcPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>,
}

impl<P> Arc<P>
where
    P: ArcPool,
{
    fn inner(&self) -> &ArcInner<P::Data> {
        unsafe { &*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>() }
    }

    fn from_inner(node_ptr: NonNullPtr<UnionNode<MaybeUninit<ArcInner<P::Data>>>>) -> Self {
        Self { node_ptr }
    }

    unsafe fn get_mut_unchecked(this: &mut Self) -> &mut P::Data {
        &mut *ptr::addr_of_mut!((*this.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data)
    }

    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        // run `P::Data`'s destructor
        ptr::drop_in_place(Self::get_mut_unchecked(self));

        // return memory to pool
        P::singleton().stack.push(self.node_ptr);
    }
}

impl<P> AsRef<P::Data> for Arc<P>
where
    P: ArcPool,
{
    fn as_ref(&self) -> &P::Data {
        &**self
    }
}

const MAX_REFCOUNT: usize = (isize::MAX) as usize;

impl<P> Clone for Arc<P>
where
    P: ArcPool,
{
    fn clone(&self) -> Self {
        let old_size = self.inner().strong.fetch_add(1, Ordering::Relaxed);

        if old_size > MAX_REFCOUNT {
            // XXX original code calls `intrinsics::abort` which is unstable API
            panic!();
        }

        Self::from_inner(self.node_ptr)
    }
}
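
// Note on the `Clone` implementation above: incrementing the counter with `Relaxed`
// ordering is sufficient because the caller already holds a reference, so the allocation
// is known to be alive and no synchronization is needed at this point. The `MAX_REFCOUNT`
// guard prevents the counter from overflowing (e.g. via repeated `mem::forget`), which
// would otherwise allow a use-after-free. Both points mirror the standard library `Arc`
// referenced at the top of this file.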

impl<A> fmt::Debug for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Arc<P>
where
    P: ArcPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*ptr::addr_of!((*self.node_ptr.as_ptr().cast::<ArcInner<P::Data>>()).data) }
    }
}

impl<A> fmt::Display for Arc<A>
where
    A: ArcPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<A> Drop for Arc<A>
where
    A: ArcPool,
{
    fn drop(&mut self) {
        if self.inner().strong.fetch_sub(1, Ordering::Release) != 1 {
            return;
        }

        atomic::fence(Ordering::Acquire);

        unsafe { self.drop_slow() }
    }
}
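
// Note on the `Drop` implementation above: the `Release` decrement ensures that all uses
// of the data in this thread happen-before the counter reaches zero, and the `Acquire`
// fence taken by the thread that sees the count hit zero synchronizes with those
// decrements before the destructor runs and the block is returned to the pool. This also
// mirrors the standard library `Arc` referenced at the top of this file.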

impl<A> Eq for Arc<A>
where
    A: ArcPool,
    A::Data: Eq,
{
}

impl<A> Hash for Arc<A>
where
    A: ArcPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state)
    }
}

impl<A> Ord for Arc<A>
where
    A: ArcPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Arc<B>> for Arc<A>
where
    A: ArcPool,
    B: ArcPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Arc<B>) -> bool {
        A::Data::eq(self, &**other)
    }
}

impl<A, B> PartialOrd<Arc<B>> for Arc<A>
where
    A: ArcPool,
    B: ArcPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Arc<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, &**other)
    }
}

unsafe impl<A> Send for Arc<A>
where
    A: ArcPool,
    A::Data: Sync + Send,
{
}

unsafe impl<A> Sync for Arc<A>
where
    A: ArcPool,
    A::Data: Sync + Send,
{
}

impl<A> Unpin for Arc<A> where A: ArcPool {}

struct ArcInner<T> {
    data: T,
    strong: AtomicUsize,
}

/// A chunk of memory that an `ArcPool` can manage
pub struct ArcBlock<T> {
    node: UnionNode<MaybeUninit<ArcInner<T>>>,
}

impl<T> ArcBlock<T> {
    /// Creates a new memory block
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn cannot_alloc_if_empty() {
        arc_pool!(P: i32);

        assert_eq!(Err(42), P.alloc(42),);
    }

    #[test]
    fn can_alloc_if_manages_one_block() {
        arc_pool!(P: i32);

        let block = unsafe {
            static mut B: ArcBlock<i32> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        assert_eq!(42, *P.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        arc_pool!(P: i32);

        let block = unsafe {
            static mut B: ArcBlock<i32> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        let arc = P.alloc(1).unwrap();

        drop(arc);

        assert_eq!(2, *P.alloc(2).unwrap());
    }

    #[test]
    fn strong_count_starts_at_one() {
        arc_pool!(P: i32);

        let block = unsafe {
            static mut B: ArcBlock<i32> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        let arc = P.alloc(1).ok().unwrap();

        assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn clone_increases_strong_count() {
        arc_pool!(P: i32);

        let block = unsafe {
            static mut B: ArcBlock<i32> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        let arc = P.alloc(1).ok().unwrap();

        let before = arc.inner().strong.load(Ordering::Relaxed);

        let arc2 = arc.clone();

        let expected = before + 1;
        assert_eq!(expected, arc.inner().strong.load(Ordering::Relaxed));
        assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn drop_decreases_strong_count() {
        arc_pool!(P: i32);

        let block = unsafe {
            static mut B: ArcBlock<i32> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        let arc = P.alloc(1).ok().unwrap();
        let arc2 = arc.clone();

        let before = arc.inner().strong.load(Ordering::Relaxed);

        drop(arc);

        let expected = before - 1;
        assert_eq!(expected, arc2.inner().strong.load(Ordering::Relaxed));
    }

    #[test]
    fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct S;

        impl Drop for S {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        arc_pool!(P: S);

        let block = unsafe {
            static mut B: ArcBlock<S> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        let arc = P.alloc(S).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(arc);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }
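
    // Added illustration (not in the original test suite): exercises the array block
    // initialization pattern shown in the module level documentation.
    #[test]
    fn can_manage_an_array_of_blocks() {
        arc_pool!(P: i32);

        const POOL_CAPACITY: usize = 2;

        let blocks: &'static mut [ArcBlock<i32>] = {
            const BLOCK: ArcBlock<i32> = ArcBlock::new();
            static mut BLOCKS: [ArcBlock<i32>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
            unsafe { &mut BLOCKS }
        };

        for block in blocks {
            P.manage(block);
        }

        let arc1 = P.alloc(1).unwrap();
        let arc2 = P.alloc(2).unwrap();

        assert_eq!(1, *arc1);
        assert_eq!(2, *arc2);

        // both blocks are in use, so a third allocation must fail
        assert!(P.alloc(3).is_err());
    }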

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        arc_pool!(P: Zst4096);

        let block = unsafe {
            static mut B: ArcBlock<Zst4096> = ArcBlock::new();
            &mut B
        };
        P.manage(block);

        let arc = P.alloc(Zst4096).ok().unwrap();

        let raw = &*arc as *const Zst4096;
        assert_eq!(0, raw as usize % 4096);
    }
}