//! `std::boxed::Box`-like API on top of a lock-free memory pool
//!
//! # Example usage
//!
//! ```
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//!
//! box_pool!(P: u128);
//!
//! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err());
//!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe {
//!     static mut B: BoxBlock<u128> = BoxBlock::new();
//!     &mut B
//! };
//!
//! // give block of memory to the pool
//! P.manage(block);
//!
//! // it's now possible to allocate
//! let mut boxed = P.alloc(1).unwrap();
//!
//! // mutation is possible
//! *boxed += 1;
//! assert_eq!(2, *boxed);
//!
//! // number of boxes is limited to the number of blocks managed by the pool
//! let res = P.alloc(3);
//! assert!(res.is_err());
//!
//! // give another memory block to the pool
//! P.manage(unsafe {
//!     static mut B: BoxBlock<u128> = BoxBlock::new();
//!     &mut B
//! });
//!
//! // cloning also consumes a memory block from the pool
//! let mut separate_box = boxed.clone();
//! *separate_box += 1;
//! assert_eq!(3, *separate_box);
//!
//! // after the clone it's not possible to allocate again
//! let res = P.alloc(4);
//! assert!(res.is_err());
//!
//! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed);
//!
//! // it's possible to allocate again
//! let res = P.alloc(5);
//!
//! assert!(res.is_ok());
//! ```
//!
//! # Array block initialization
//!
//! You can create a static variable that contains an array of memory blocks and give all the
//! blocks to the `BoxPool`. This requires an intermediate `const` value as shown below:
//!
//! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(P: u128);
//!
//! const POOL_CAPACITY: usize = 8;
//!
//! let blocks: &'static mut [BoxBlock<u128>] = {
//!     const BLOCK: BoxBlock<u128> = BoxBlock::new(); // <=
//!     static mut BLOCKS: [BoxBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
//!     unsafe { &mut BLOCKS }
//! };
//!
//! for block in blocks {
//!     P.manage(block);
//! }
//! ```
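//!
//! # Sending a box to another thread
//!
//! `Box<P>` is `Send` whenever `P::Data` is `Send` (see the `unsafe impl Send` below), so a
//! box, together with the memory block it owns, can be moved to another execution context.
//! A minimal sketch, using `std` threads for illustration (on embedded targets the receiving
//! context could instead be an interrupt handler):
//!
//! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock};
//!
//! box_pool!(P: u32);
//!
//! P.manage(unsafe {
//!     static mut B: BoxBlock<u32> = BoxBlock::new();
//!     &mut B
//! });
//!
//! let boxed = P.alloc(1).unwrap();
//!
//! // the box moves into the new thread; its destructor runs there and
//! // returns the memory block to the pool
//! std::thread::spawn(move || {
//!     assert_eq!(1, *boxed);
//! })
//! .join()
//! .unwrap();
//!
//! // the block released by the other thread can be allocated again
//! assert!(P.alloc(2).is_ok());
//! ```
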
use core::{
    fmt,
    hash::{Hash, Hasher},
    mem::{ManuallyDrop, MaybeUninit},
    ops, ptr,
};

use stable_deref_trait::StableDeref;

use super::treiber::{NonNullPtr, Stack, UnionNode};

/// Creates a new `BoxPool` singleton with the given `$name` that manages the specified
/// `$data_type`
///
/// For more extensive documentation see the [module level documentation](crate::pool::boxed)
#[macro_export]
macro_rules! box_pool {
    ($name:ident: $data_type:ty) => {
        pub struct $name;

        impl $crate::pool::boxed::BoxPool for $name {
            type Data = $data_type;

            fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
                static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
                    $crate::pool::boxed::BoxPoolImpl::new();

                &$name
            }
        }

        impl $name {
            /// Inherent method version of `BoxPool::alloc`
            #[allow(dead_code)]
            pub fn alloc(
                &self,
                value: $data_type,
            ) -> Result<$crate::pool::boxed::Box<$name>, $data_type> {
                <$name as $crate::pool::boxed::BoxPool>::alloc(value)
            }

            /// Inherent method version of `BoxPool::manage`
            #[allow(dead_code)]
            pub fn manage(&self, block: &'static mut $crate::pool::boxed::BoxBlock<$data_type>) {
                <$name as $crate::pool::boxed::BoxPool>::manage(block)
            }
        }
    };
}
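
// For orientation, `box_pool!(P: u128)` expands to roughly the following
// (a simplified sketch; the macro body above is the exact expansion):
//
//     pub struct P;
//
//     impl BoxPool for P {
//         type Data = u128;
//
//         fn singleton() -> &'static BoxPoolImpl<u128> {
//             // one `static` pool per `box_pool!` invocation
//             static P: BoxPoolImpl<u128> = BoxPoolImpl::new();
//             &P
//         }
//     }
//
//     // plus inherent `alloc` / `manage` methods that forward to the trait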

/// A singleton that manages `pool::boxed::Box`-es
///
/// # Usage
///
/// Do not implement this trait yourself; instead use the `box_pool!` macro to create a type that
/// implements this trait.
///
/// # Semver guarantees
///
/// *Implementing* this trait is exempt from semver guarantees.
/// i.e. a new patch release is allowed to break downstream `BoxPool` implementations.
///
/// *Using* the trait, e.g. in generic code, does fall under semver guarantees.
pub trait BoxPool: Sized {
    /// The data type managed by the memory pool
    type Data: 'static;

    /// `box_pool!` implementation detail
    #[doc(hidden)]
    fn singleton() -> &'static BoxPoolImpl<Self::Data>;

    /// Allocate a new `Box` initialized to the given `value`
    ///
    /// `manage` should be called at least once before calling `alloc`
    ///
    /// # Errors
    ///
    /// The `Err`or variant is returned when the memory pool has run out of memory blocks
    fn alloc(value: Self::Data) -> Result<Box<Self>, Self::Data> {
        Ok(Box {
            node_ptr: Self::singleton().alloc(value)?,
        })
    }

    /// Add a statically allocated memory block to the memory pool
    fn manage(block: &'static mut BoxBlock<Self::Data>) {
        Self::singleton().manage(block)
    }
}
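
// An illustration of generic code written against the trait (`alloc_pair` is a
// hypothetical helper, not part of the crate's API): it allocates two boxes
// holding clones of the same value, handing the value back on out-of-memory.
#[allow(dead_code)]
fn alloc_pair<P>(value: P::Data) -> Result<(Box<P>, Box<P>), P::Data>
where
    P: BoxPool,
    P::Data: Clone,
{
    let first = P::alloc(value.clone())?;
    match P::alloc(value) {
        Ok(second) => Ok((first, second)),
        // dropping `first` here returns its memory block to the pool
        Err(value) => Err(value),
    }
}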

/// Like `std::boxed::Box` but managed by memory pool `P` rather than `#[global_allocator]`
pub struct Box<P>
where
    P: BoxPool,
{
    node_ptr: NonNullPtr<UnionNode<MaybeUninit<P::Data>>>,
}

impl<A> Clone for Box<A>
where
    A: BoxPool,
    A::Data: Clone,
{
    fn clone(&self) -> Self {
        A::alloc((**self).clone()).ok().expect("OOM")
    }
}
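
// NOTE: `clone` allocates from the same pool `A` and *panics* (rather than
// returning an error) if the pool has no free blocks; see
// `clone_panics_if_pool_exhausted` in the tests below.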

impl<A> fmt::Debug for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> ops::Deref for Box<P>
where
    P: BoxPool,
{
    type Target = P::Data;

    fn deref(&self) -> &Self::Target {
        unsafe { &*self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}

impl<P> ops::DerefMut for Box<P>
where
    P: BoxPool,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *self.node_ptr.as_ptr().cast::<P::Data>() }
    }
}
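
// Through `Deref`/`DerefMut`, methods of `P::Data` can be called directly on
// the box. A sketch, for a pool declared as `box_pool!(P: u128)`:
//
//     let boxed = P.alloc(42).unwrap();
//     assert_eq!(122, boxed.leading_zeros()); // auto-deref to `u128`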

// SAFETY: `deref` returns the address of the pool block, which does not change
// when the `Box` value itself is moved
unsafe impl<P> StableDeref for Box<P> where P: BoxPool {}

impl<A> fmt::Display for Box<A>
where
    A: BoxPool,
    A::Data: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        A::Data::fmt(self, f)
    }
}

impl<P> Drop for Box<P>
where
    P: BoxPool,
{
    fn drop(&mut self) {
        let node = self.node_ptr;

        // run the destructor of the boxed value...
        unsafe { ptr::drop_in_place(node.as_ptr().cast::<P::Data>()) }

        // ...then return the memory block to the pool
        unsafe { P::singleton().stack.push(node) }
    }
}

impl<A> Eq for Box<A>
where
    A: BoxPool,
    A::Data: Eq,
{
}

impl<A> Hash for Box<A>
where
    A: BoxPool,
    A::Data: Hash,
{
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        (**self).hash(state)
    }
}

impl<A> Ord for Box<A>
where
    A: BoxPool,
    A::Data: Ord,
{
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        A::Data::cmp(self, other)
    }
}

impl<A, B> PartialEq<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialEq<B::Data>,
{
    fn eq(&self, other: &Box<B>) -> bool {
        A::Data::eq(self, other)
    }
}

impl<A, B> PartialOrd<Box<B>> for Box<A>
where
    A: BoxPool,
    B: BoxPool,
    A::Data: PartialOrd<B::Data>,
{
    fn partial_cmp(&self, other: &Box<B>) -> Option<core::cmp::Ordering> {
        A::Data::partial_cmp(self, other)
    }
}
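
// Note that equality and ordering are defined *across* pools: a `Box<A>` and a
// `Box<B>` can be compared whenever their data types can be compared (see the
// sketch test `can_compare_boxes_from_different_pools` below).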

unsafe impl<P> Send for Box<P>
where
    P: BoxPool,
    P::Data: Send,
{
}

unsafe impl<P> Sync for Box<P>
where
    P: BoxPool,
    P::Data: Sync,
{
}

/// `box_pool!` implementation detail
// newtype to avoid having to make field types public
#[doc(hidden)]
pub struct BoxPoolImpl<T> {
    stack: Stack<UnionNode<MaybeUninit<T>>>,
}

impl<T> BoxPoolImpl<T> {
    pub const fn new() -> Self {
        Self {
            stack: Stack::new(),
        }
    }

    fn alloc(&self, value: T) -> Result<NonNullPtr<UnionNode<MaybeUninit<T>>>, T> {
        if let Some(node_ptr) = self.stack.try_pop() {
            // initialize the freshly popped block in place
            unsafe { node_ptr.as_ptr().cast::<T>().write(value) }

            Ok(node_ptr)
        } else {
            // pool exhausted; hand the value back to the caller
            Err(value)
        }
    }

    fn manage(&self, block: &'static mut BoxBlock<T>) {
        let node: &'static mut _ = &mut block.node;

        unsafe { self.stack.push(NonNullPtr::from_static_mut_ref(node)) }
    }
}

unsafe impl<T> Sync for BoxPoolImpl<T> {}

/// A chunk of memory that a `BoxPool` singleton can manage
pub struct BoxBlock<T> {
    node: UnionNode<MaybeUninit<T>>,
}

impl<T> BoxBlock<T> {
    /// Creates a new memory block
    pub const fn new() -> Self {
        Self {
            node: UnionNode {
                data: ManuallyDrop::new(MaybeUninit::uninit()),
            },
        }
    }
}

#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
    use std::thread;

    use super::*;

    #[test]
    fn cannot_alloc_if_empty() {
        box_pool!(P: i32);

        assert_eq!(Err(42), P.alloc(42));
    }

    #[test]
    fn can_alloc_if_pool_manages_one_block() {
        box_pool!(P: i32);

        let block = unsafe {
            static mut B: BoxBlock<i32> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        assert_eq!(42, *P.alloc(42).unwrap());
    }

    #[test]
    fn alloc_drop_alloc() {
        box_pool!(P: i32);

        let block = unsafe {
            static mut B: BoxBlock<i32> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(1).unwrap();

        drop(boxed);

        assert_eq!(2, *P.alloc(2).unwrap());
    }

    #[test]
    fn runs_destructor_exactly_once_on_drop() {
        static COUNT: AtomicUsize = AtomicUsize::new(0);

        pub struct S;

        impl Drop for S {
            fn drop(&mut self) {
                COUNT.fetch_add(1, Ordering::Relaxed);
            }
        }

        box_pool!(P: S);

        let block = unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(S).ok().unwrap();

        assert_eq!(0, COUNT.load(Ordering::Relaxed));

        drop(boxed);

        assert_eq!(1, COUNT.load(Ordering::Relaxed));
    }

    #[test]
    fn zst_is_well_aligned() {
        #[repr(align(4096))]
        pub struct Zst4096;

        box_pool!(P: Zst4096);

        let block = unsafe {
            static mut B: BoxBlock<Zst4096> = BoxBlock::new();
            &mut B
        };
        P.manage(block);

        let boxed = P.alloc(Zst4096).ok().unwrap();

        let raw = &*boxed as *const Zst4096;

        assert_eq!(0, raw as usize % 4096);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn can_clone_if_pool_is_not_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });
        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let first = P.alloc(S).ok().unwrap();
        let _second = first.clone();

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let is_oom = P.alloc(S).is_err();
        assert!(is_oom);
    }

    #[allow(clippy::redundant_clone)]
    #[test]
    fn clone_panics_if_pool_exhausted() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                Self
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let first = P.alloc(S).ok().unwrap();

        let thread = thread::spawn(move || {
            let _second = first.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        // we diverge from `alloc::Box` in that we call `T::clone` first and then request
        // memory from the allocator whereas `alloc::Box` does it the other way around
        // assert!(!STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
    }
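
    // A sketch demonstrating the cross-pool `PartialEq`/`PartialOrd` impls
    // (`can_compare_boxes_from_different_pools` is an illustrative test, not
    // derived from the impls' original test suite)
    #[test]
    fn can_compare_boxes_from_different_pools() {
        box_pool!(A: i32);
        box_pool!(B: i32);

        A.manage(unsafe {
            static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
            &mut BLOCK
        });
        B.manage(unsafe {
            static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
            &mut BLOCK
        });

        let a = A.alloc(1).unwrap();
        let b = B.alloc(2).unwrap();

        assert!(a != b);
        assert!(a < b);
    }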

    #[allow(clippy::redundant_clone)]
    #[test]
    fn panicking_clone_does_not_leak_memory() {
        static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

        pub struct S;

        impl Clone for S {
            fn clone(&self) -> Self {
                STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                panic!()
            }
        }

        box_pool!(P: S);

        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });
        P.manage(unsafe {
            static mut B: BoxBlock<S> = BoxBlock::new();
            &mut B
        });

        let boxed = P.alloc(S).ok().unwrap();

        let thread = thread::spawn(move || {
            let _boxed = boxed.clone();
        });

        let thread_panicked = thread.join().is_err();
        assert!(thread_panicked);

        assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

        let once = P.alloc(S);
        let twice = P.alloc(S);

        assert!(once.is_ok());
        assert!(twice.is_ok());
    }
}