Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/sharded-slab/src/cfg.rs vendored Normal file

@@ -0,0 +1,215 @@
use crate::page::{
slot::{Generation, RefCount},
Addr,
};
use crate::Pack;
use std::{fmt, marker::PhantomData};
/// Configuration parameters which can be overridden to tune the behavior of a slab.
pub trait Config: Sized {
/// The maximum number of threads which can access the slab.
///
/// This value (rounded to a power of two) determines the number of shards
/// in the slab. If a thread is created, accesses the slab, and then terminates,
/// its shard may be reused and thus does not count against the maximum
/// number of threads once the thread has terminated.
const MAX_THREADS: usize = DefaultConfig::MAX_THREADS;
/// The maximum number of pages in each shard in the slab.
///
/// This value, in combination with `INITIAL_PAGE_SIZE`, determines how many
/// bits of each index are used to represent page addresses.
const MAX_PAGES: usize = DefaultConfig::MAX_PAGES;
/// The size of the first page in each shard.
///
/// When a page in a shard has been filled with values, a new page
/// will be allocated that is twice as large as the previous page. Thus, the
/// second page will be twice this size, and the third will be four times
/// this size, and so on.
///
/// Note that page sizes must be powers of two. If this value is not a power
/// of two, it will be rounded to the next power of two.
const INITIAL_PAGE_SIZE: usize = DefaultConfig::INITIAL_PAGE_SIZE;
/// Sets a number of high-order bits in each index which are reserved from
/// user code.
///
/// Note that these bits are taken from the generation counter; if the page
/// address and thread IDs are configured to use a large number of bits,
/// reserving additional bits will decrease the period of the generation
/// counter. These should thus be used relatively sparingly, to ensure that
/// generation counters are able to effectively prevent the ABA problem.
const RESERVED_BITS: usize = 0;
}
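For orientation, a minimal usage sketch (the `TinySlabConfig` name and values below are illustrative, not part of the crate): a caller overrides only the constants it cares about and hands the marker type to `Slab::new_with_config`, just as the tests at the bottom of this file do.

```rust
use sharded_slab::{Config, Slab};

// Hypothetical configuration: a small slab shared by at most 16 threads.
// Constants that are not overridden fall back to `DefaultConfig`.
struct TinySlabConfig;

impl Config for TinySlabConfig {
    const MAX_THREADS: usize = 16;
    const INITIAL_PAGE_SIZE: usize = 8;
}

fn main() {
    let slab = Slab::<&'static str>::new_with_config::<TinySlabConfig>();
    let key = slab.insert("hello").expect("slab should have capacity");
    assert_eq!(slab.get(key).expect("key should be present"), "hello");
}
```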
pub(crate) trait CfgPrivate: Config {
const USED_BITS: usize = Generation::<Self>::LEN + Generation::<Self>::SHIFT;
const INITIAL_SZ: usize = next_pow2(Self::INITIAL_PAGE_SIZE);
const MAX_SHARDS: usize = next_pow2(Self::MAX_THREADS - 1);
const ADDR_INDEX_SHIFT: usize = Self::INITIAL_SZ.trailing_zeros() as usize + 1;
fn page_size(n: usize) -> usize {
Self::INITIAL_SZ * 2usize.pow(n as _)
}
fn debug() -> DebugConfig<Self> {
DebugConfig { _cfg: PhantomData }
}
fn validate() {
assert!(
Self::INITIAL_SZ.is_power_of_two(),
"invalid Config: {:#?}",
Self::debug(),
);
assert!(
Self::INITIAL_SZ <= Addr::<Self>::BITS,
"invalid Config: {:#?}",
Self::debug()
);
assert!(
Generation::<Self>::BITS >= 3,
"invalid Config: {:#?}\ngeneration counter should be at least 3 bits!",
Self::debug()
);
assert!(
Self::USED_BITS <= WIDTH,
"invalid Config: {:#?}\ntotal number of bits per index is too large to fit in a word!",
Self::debug()
);
assert!(
WIDTH - Self::USED_BITS >= Self::RESERVED_BITS,
"invalid Config: {:#?}\nindices are too large to fit reserved bits!",
Self::debug()
);
assert!(
RefCount::<Self>::MAX > 1,
"invalid config: {:#?}\n maximum concurrent references would be {}",
Self::debug(),
RefCount::<Self>::MAX,
);
}
#[inline(always)]
fn unpack<A: Pack<Self>>(packed: usize) -> A {
A::from_packed(packed)
}
#[inline(always)]
fn unpack_addr(packed: usize) -> Addr<Self> {
Self::unpack(packed)
}
#[inline(always)]
fn unpack_tid(packed: usize) -> crate::Tid<Self> {
Self::unpack(packed)
}
#[inline(always)]
fn unpack_gen(packed: usize) -> Generation<Self> {
Self::unpack(packed)
}
}
impl<C: Config> CfgPrivate for C {}
/// Default slab configuration values.
#[derive(Copy, Clone)]
pub struct DefaultConfig {
_p: (),
}
pub(crate) struct DebugConfig<C: Config> {
_cfg: PhantomData<fn(C)>,
}
pub(crate) const WIDTH: usize = std::mem::size_of::<usize>() * 8;
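/// Rounds `n` up to the next power of two, returning `n` unchanged if it is
/// already a power of two (e.g. `next_pow2(5) == 8`, `next_pow2(8) == 8`).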
pub(crate) const fn next_pow2(n: usize) -> usize {
let pow2 = n.count_ones() == 1;
let zeros = n.leading_zeros();
1 << (WIDTH - zeros as usize - pow2 as usize)
}
// === impl DefaultConfig ===
impl Config for DefaultConfig {
const INITIAL_PAGE_SIZE: usize = 32;
#[cfg(target_pointer_width = "64")]
const MAX_THREADS: usize = 4096;
#[cfg(target_pointer_width = "32")]
// TODO(eliza): can we find enough bits to give 32-bit platforms more threads?
const MAX_THREADS: usize = 128;
const MAX_PAGES: usize = WIDTH / 2;
}
impl fmt::Debug for DefaultConfig {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Self::debug().fmt(f)
}
}
impl<C: Config> fmt::Debug for DebugConfig<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct(std::any::type_name::<C>())
.field("initial_page_size", &C::INITIAL_SZ)
.field("max_shards", &C::MAX_SHARDS)
.field("max_pages", &C::MAX_PAGES)
.field("used_bits", &C::USED_BITS)
.field("reserved_bits", &C::RESERVED_BITS)
.field("pointer_width", &WIDTH)
.field("max_concurrent_references", &RefCount::<C>::MAX)
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_util;
use crate::Slab;
#[test]
#[cfg_attr(loom, ignore)]
#[should_panic]
fn validates_max_refs() {
struct GiantGenConfig;
// Configure the slab with a very large number of bits for the generation
// counter. This will only leave 1 bit to use for the slot reference
// counter, which will fail to validate.
impl Config for GiantGenConfig {
const INITIAL_PAGE_SIZE: usize = 1;
const MAX_THREADS: usize = 1;
const MAX_PAGES: usize = 1;
}
let _slab = Slab::<usize>::new_with_config::<GiantGenConfig>();
}
#[test]
#[cfg_attr(loom, ignore)]
fn big() {
let slab = Slab::new();
for i in 0..10000 {
println!("{:?}", i);
let k = slab.insert(i).expect("insert");
assert_eq!(slab.get(k).expect("get"), i);
}
}
#[test]
#[cfg_attr(loom, ignore)]
fn custom_page_sz() {
let slab = Slab::new_with_config::<test_util::TinyConfig>();
for i in 0..4096 {
println!("{}", i);
let k = slab.insert(i).expect("insert");
assert_eq!(slab.get(k).expect("get"), i);
}
}
}

vendor/sharded-slab/src/clear.rs vendored Normal file

@@ -0,0 +1,100 @@
use std::{collections, hash, ops::DerefMut, sync};
/// Trait implemented by types which can be cleared in place, retaining any
/// allocated memory.
///
/// This is essentially a generalization of methods on standard library
/// collection types, such as [`Vec::clear`], [`String::clear`], and
/// [`HashMap::clear`]. These methods drop all data stored in the collection,
/// but retain the collection's heap allocation for future use. Types such as
/// `BTreeMap`, whose `clear` method drops its allocations, should not
/// implement this trait.
///
/// When implemented for types which do not own a heap allocation, `Clear`
/// should reset the type in place if possible. If the type has an empty state
/// or stores `Option`s, those values should be reset to the empty state. For
/// "plain old data" types, which hold no pointers to other data and do not have
/// an empty or initial state, it's okay for a `Clear` implementation to be a
/// no-op. In that case, it essentially serves as a marker indicating that the
/// type may be reused to store new data.
///
/// [`Vec::clear`]: https://doc.rust-lang.org/stable/std/vec/struct.Vec.html#method.clear
/// [`String::clear`]: https://doc.rust-lang.org/stable/std/string/struct.String.html#method.clear
/// [`HashMap::clear`]: https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html#method.clear
pub trait Clear {
/// Clear all data in `self`, retaining the allocated capacity.
fn clear(&mut self);
}
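As an illustration of the contract described above (the `ParseBuf` type is hypothetical, and the trait is assumed to be used through the crate's public `sharded_slab::Clear` re-export), a pooled buffer type clears each field in place so its allocations can be reused:

```rust
use sharded_slab::Clear;

// Hypothetical pooled type: both fields keep their heap capacity across uses.
#[derive(Default)]
struct ParseBuf {
    lines: Vec<String>,
    scratch: String,
}

impl Clear for ParseBuf {
    fn clear(&mut self) {
        // Drop the contents but retain each field's allocated capacity.
        self.lines.clear();
        self.scratch.clear();
    }
}
```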
impl<T> Clear for Option<T> {
fn clear(&mut self) {
let _ = self.take();
}
}
impl<T> Clear for Box<T>
where
T: Clear,
{
#[inline]
fn clear(&mut self) {
self.deref_mut().clear()
}
}
impl<T> Clear for Vec<T> {
#[inline]
fn clear(&mut self) {
Vec::clear(self)
}
}
impl<K, V, S> Clear for collections::HashMap<K, V, S>
where
K: hash::Hash + Eq,
S: hash::BuildHasher,
{
#[inline]
fn clear(&mut self) {
collections::HashMap::clear(self)
}
}
impl<T, S> Clear for collections::HashSet<T, S>
where
T: hash::Hash + Eq,
S: hash::BuildHasher,
{
#[inline]
fn clear(&mut self) {
collections::HashSet::clear(self)
}
}
impl Clear for String {
#[inline]
fn clear(&mut self) {
String::clear(self)
}
}
impl<T: Clear> Clear for sync::Mutex<T> {
#[inline]
fn clear(&mut self) {
self.get_mut().unwrap().clear();
}
}
impl<T: Clear> Clear for sync::RwLock<T> {
#[inline]
fn clear(&mut self) {
self.write().unwrap().clear();
}
}
#[cfg(all(loom, test))]
impl<T: Clear> Clear for crate::sync::alloc::Track<T> {
fn clear(&mut self) {
self.get_mut().clear()
}
}


@@ -0,0 +1,138 @@
// This module exists only to provide a separate page for the implementation
// documentation.
//! Notes on `sharded-slab`'s implementation and design.
//!
//! # Design
//!
//! The sharded slab's design is strongly inspired by the ideas presented by
//! Leijen, Zorn, and de Moura in [Mimalloc: Free List Sharding in
//! Action][mimalloc]. In this report, the authors present a novel design for a
//! memory allocator based on a concept of _free list sharding_.
//!
//! Memory allocators must keep track of what memory regions are not currently
//! allocated ("free") in order to provide them to future allocation requests.
//! The term [_free list_][freelist] refers to a technique for performing this
//! bookkeeping, where each free block stores a pointer to the next free block,
//! forming a linked list. The memory allocator keeps a pointer to the most
//! recently freed block, the _head_ of the free list. To allocate more memory,
//! the allocator pops from the free list by setting the head pointer to the
//! next free block of the current head block, and returning the previous head.
//! To deallocate a block, the block is pushed to the free list by setting its
//! first word to the current head pointer, and the head pointer is set to point
//! to the deallocated block. Most implementations of slab allocators backed by
//! arrays or vectors use a similar technique, where pointers are replaced by
//! indices into the backing array.
//!
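//! As a sketch (the names below are illustrative, not the crate's actual
//! types), an index-based free list over a vector of slots might look like:
//!
//! ```rust,ignore
//! struct VecSlab<T> {
//!     slots: Vec<Slot<T>>,
//!     free_head: Option<usize>, // index of the most recently freed slot
//! }
//!
//! struct Slot<T> {
//!     value: Option<T>,
//!     next_free: Option<usize>, // link to the next free slot, if any
//! }
//!
//! impl<T> VecSlab<T> {
//!     fn allocate(&mut self) -> Option<usize> {
//!         // Pop the head of the free list; its successor becomes the new head.
//!         let idx = self.free_head?;
//!         self.free_head = self.slots[idx].next_free.take();
//!         Some(idx)
//!     }
//!
//!     fn free(&mut self, idx: usize) {
//!         // Push the freed slot: it points at the old head and becomes the new head.
//!         self.slots[idx].value = None;
//!         self.slots[idx].next_free = self.free_head;
//!         self.free_head = Some(idx);
//!     }
//! }
//! ```
//!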
//! When allocations and deallocations can occur concurrently across threads,
//! they must synchronize accesses to the free list; either by putting the
//! entire allocator state inside of a lock, or by using atomic operations to
//! treat the free list as a lock-free structure (such as a [Treiber stack]). In
//! both cases, there is a significant performance cost — even when the free
//! list is lock-free, it is likely that a noticeable amount of time will be
//! spent in compare-and-swap loops. Ideally, the global synchronization point
//! created by the single global free list could be avoided as much as possible.
//!
//! The approach presented by Leijen, Zorn, and de Moura is to introduce
//! sharding and thus increase the granularity of synchronization significantly.
//! In mimalloc, the heap is _sharded_ so that each thread has its own
//! thread-local heap. Objects are always allocated from the local heap of the
//! thread where the allocation is performed. Because allocations are always
//! done from a thread's local heap, they need not be synchronized.
//!
//! However, since objects can move between threads before being deallocated,
//! _deallocations_ may still occur concurrently. Therefore, Leijen et al.
//! introduce a concept of _local_ and _global_ free lists. When an object is
//! deallocated on the same thread it was originally allocated on, it is placed
//! on the local free list; if it is deallocated on another thread, it goes on
//! the global free list for the heap of the thread from which it originated. To
//! allocate, the local free list is used first; if it is empty, the entire
//! global free list is popped onto the local free list. Since the local free
//! list is only ever accessed by the thread it belongs to, it does not require
//! synchronization at all, and because the global free list is popped from
//! infrequently, the cost of synchronization has a reduced impact. A majority
//! of allocations can occur without any synchronization at all; and
//! deallocations only require synchronization when an object has left its
//! parent thread (a relatively uncommon case).
//!
//! [mimalloc]: https://www.microsoft.com/en-us/research/uploads/prod/2019/06/mimalloc-tr-v1.pdf
//! [freelist]: https://en.wikipedia.org/wiki/Free_list
//! [Treiber stack]: https://en.wikipedia.org/wiki/Treiber_stack
//!
//! # Implementation
//!
//! A slab is represented as an array of [`MAX_THREADS`] _shards_. A shard
//! consists of a vector of one or more _pages_ plus associated metadata.
//! Finally, a page consists of an array of _slots_, along with the head indices
//! for its local and remote free lists.
//!
//! ```text
//! ┌─────────────┐
//! │ shard 1 │
//! │ │ ┌─────────────┐ ┌────────┐
//! │ pages───────┼───▶│ page 1 │ │ │
//! ├─────────────┤ ├─────────────┤ ┌────▶│ next──┼─┐
//! │ shard 2 │ │ page 2 │ │ ├────────┤ │
//! ├─────────────┤ │ │ │ │XXXXXXXX│ │
//! │ shard 3 │ │ local_head──┼──┘ ├────────┤ │
//! └─────────────┘ │ remote_head─┼──┐ │ │◀┘
//! ... ├─────────────┤ │ │ next──┼─┐
//! ┌─────────────┐ │ page 3 │ │ ├────────┤ │
//! │ shard n │ └─────────────┘ │ │XXXXXXXX│ │
//! └─────────────┘ ... │ ├────────┤ │
//! ┌─────────────┐ │ │XXXXXXXX│ │
//! │ page n │ │ ├────────┤ │
//! └─────────────┘ │ │ │◀┘
//! └────▶│ next──┼───▶ ...
//! ├────────┤
//! │XXXXXXXX│
//! └────────┘
//! ```
//!
//!
//! The size of the first page in a shard is always a power of two, and every
//! subsequent page added after the first is twice as large as the page that
//! precedes it.
//!
//! ```text
//!
//! pg.
//! ┌───┐ ┌─┬─┐
//! │ 0 │───▶ │ │
//! ├───┤ ├─┼─┼─┬─┐
//! │ 1 │───▶ │ │ │ │
//! ├───┤ ├─┼─┼─┼─┼─┬─┬─┬─┐
//! │ 2 │───▶ │ │ │ │ │ │ │ │
//! ├───┤ ├─┼─┼─┼─┼─┼─┼─┼─┼─┬─┬─┬─┬─┬─┬─┬─┐
//! │ 3 │───▶ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │ │
//! └───┘ └─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┴─┘
//! ```
//!
//! When searching for a free slot, the smallest page is searched first, and if
//! it is full, the search proceeds to the next page until either a free slot is
//! found or all available pages have been searched. If all available pages have
//! been searched and the maximum number of pages has not yet been reached, a
//! new page is then allocated.
//!
//! Since every page is twice as large as the previous page, and all page sizes
//! are powers of two, we can determine the page index that contains a given
//! address by shifting the address down by the smallest page size and
//! looking at how many twos places are necessary to represent that number,
//! which tells us what power-of-two page size it fits inside of. We can
//! determine the number of twos places by counting the number of leading
//! zeros (unused twos places) in the number's binary representation, and
//! subtracting that count from the total number of bits in a word.
//!
//! The formula for determining the page number that contains an offset is thus:
//!
//! ```rust,ignore
//! WIDTH - ((offset + INITIAL_PAGE_SIZE) >> INDEX_SHIFT).leading_zeros()
//! ```
//!
//! where `WIDTH` is the number of bits in a `usize`, and `INDEX_SHIFT` is
//!
//! ```rust,ignore
//! INITIAL_PAGE_SIZE.trailing_zeros() + 1;
//! ```
//!
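//! As a worked example (on a 64-bit target with the default
//! `INITIAL_PAGE_SIZE = 32`, so `INDEX_SHIFT = 6`): offset 0 gives
//! `64 - (32 >> 6).leading_zeros() = 64 - 64 = 0`, i.e. page 0; offset 40
//! gives `64 - (72 >> 6).leading_zeros() = 64 - 63 = 1`, i.e. page 1; and
//! offset 100 gives `64 - (132 >> 6).leading_zeros() = 64 - 62 = 2`, i.e.
//! page 2, matching page sizes of 32, 64, and 128 slots.
//!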
//! [`MAX_THREADS`]: https://docs.rs/sharded-slab/latest/sharded_slab/trait.Config.html#associatedconstant.MAX_THREADS

vendor/sharded-slab/src/iter.rs vendored Normal file

@@ -0,0 +1,45 @@
use std::{iter::FusedIterator, slice};
use crate::{cfg, page, shard};
/// An exclusive fused iterator over the items in a [`Slab`](crate::Slab).
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[derive(Debug)]
pub struct UniqueIter<'a, T, C: cfg::Config> {
pub(super) shards: shard::IterMut<'a, Option<T>, C>,
pub(super) pages: slice::Iter<'a, page::Shared<Option<T>, C>>,
pub(super) slots: Option<page::Iter<'a, T, C>>,
}
impl<'a, T, C: cfg::Config> Iterator for UniqueIter<'a, T, C> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
test_println!("UniqueIter::next");
loop {
test_println!("-> try next slot");
if let Some(item) = self.slots.as_mut().and_then(|slots| slots.next()) {
test_println!("-> found an item!");
return Some(item);
}
test_println!("-> try next page");
if let Some(page) = self.pages.next() {
test_println!("-> found another page");
self.slots = page.iter();
continue;
}
test_println!("-> try next shard");
if let Some(shard) = self.shards.next() {
test_println!("-> found another shard");
self.pages = shard.iter();
} else {
test_println!("-> all done!");
return None;
}
}
}
}
impl<T, C: cfg::Config> FusedIterator for UniqueIter<'_, T, C> {}
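As a usage sketch (assuming the crate's `Slab::unique_iter`, which hands out this iterator and takes `&mut self`, so no per-slot synchronization is needed; the `total` helper is illustrative):

```rust
use sharded_slab::Slab;

// Illustrative helper: sum every value currently stored in the slab.
fn total(slab: &mut Slab<u64>) -> u64 {
    slab.unique_iter().copied().sum()
}

fn main() {
    let mut slab = Slab::new();
    for n in [1u64, 2, 3] {
        slab.insert(n).expect("insert");
    }
    assert_eq!(total(&mut slab), 6);
}
```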

vendor/sharded-slab/src/lib.rs vendored Normal file

File diff suppressed because it is too large.

vendor/sharded-slab/src/macros.rs vendored Normal file

@@ -0,0 +1,67 @@
macro_rules! test_println {
($($arg:tt)*) => {
if cfg!(test) && cfg!(slab_print) {
if std::thread::panicking() {
// getting the thread ID while panicking doesn't seem to play super nicely with loom's
// mock lazy_static...
println!("[PANIC {:>17}:{:<3}] {}", file!(), line!(), format_args!($($arg)*))
} else {
println!("[{:?} {:>17}:{:<3}] {}", crate::Tid::<crate::DefaultConfig>::current(), file!(), line!(), format_args!($($arg)*))
}
}
}
}
#[cfg(all(test, loom))]
macro_rules! test_dbg {
($e:expr) => {
match $e {
e => {
test_println!("{} = {:?}", stringify!($e), &e);
e
}
}
};
}
macro_rules! panic_in_drop {
($($arg:tt)*) => {
if !std::thread::panicking() {
panic!($($arg)*)
} else {
let thread = std::thread::current();
eprintln!(
"thread '{thread}' attempted to panic at '{msg}', {file}:{line}:{col}\n\
note: we were already unwinding due to a previous panic.",
thread = thread.name().unwrap_or("<unnamed>"),
msg = format_args!($($arg)*),
file = file!(),
line = line!(),
col = column!(),
);
}
}
}
macro_rules! debug_assert_eq_in_drop {
($this:expr, $that:expr) => {
debug_assert_eq_in_drop!(@inner $this, $that, "")
};
($this:expr, $that:expr, $($arg:tt)+) => {
debug_assert_eq_in_drop!(@inner $this, $that, format_args!(": {}", format_args!($($arg)+)))
};
(@inner $this:expr, $that:expr, $msg:expr) => {
if cfg!(debug_assertions) {
if $this != $that {
panic_in_drop!(
"assertion failed ({} == {})\n left: `{:?}`,\n right: `{:?}`{}",
stringify!($this),
stringify!($that),
$this,
$that,
$msg,
)
}
}
}
}
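These macros exist because panicking while another panic is already unwinding aborts the process. A standalone sketch of the hazard they guard against (independent of this crate):

```rust
// Sketch: a consistency check that must not panic during unwinding.
struct Checked {
    ok: bool,
}

impl Drop for Checked {
    fn drop(&mut self) {
        if !self.ok {
            if std::thread::panicking() {
                // A nested panic here would abort the process; just report it.
                eprintln!("consistency check failed while already panicking");
            } else {
                panic!("consistency check failed");
            }
        }
    }
}
```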

vendor/sharded-slab/src/page/mod.rs vendored Normal file

@@ -0,0 +1,449 @@
use crate::cfg::{self, CfgPrivate};
use crate::clear::Clear;
use crate::sync::UnsafeCell;
use crate::Pack;
pub(crate) mod slot;
mod stack;
pub(crate) use self::slot::Slot;
use std::{fmt, marker::PhantomData};
/// A page address encodes the location of a slot within a shard (the page
/// number and offset within that page) as a single linear value.
#[repr(transparent)]
pub(crate) struct Addr<C: cfg::Config = cfg::DefaultConfig> {
addr: usize,
_cfg: PhantomData<fn(C)>,
}
impl<C: cfg::Config> Addr<C> {
const NULL: usize = Self::BITS + 1;
pub(crate) fn index(self) -> usize {
// Since every page is twice as large as the previous page, and all page sizes
// are powers of two, we can determine the page index that contains a given
// address by counting leading zeros, which tells us what power of two
// the offset fits into.
//
// First, we must shift down to the smallest page size, so that the last
// offset on the first page becomes 0.
let shifted = (self.addr + C::INITIAL_SZ) >> C::ADDR_INDEX_SHIFT;
// Now, we can determine the number of twos places by counting the
// number of leading zeros (unused twos places) in the number's binary
// representation, and subtracting that count from the total number of bits in a word.
cfg::WIDTH - shifted.leading_zeros() as usize
}
pub(crate) fn offset(self) -> usize {
self.addr
}
}
pub(crate) trait FreeList<C> {
fn push<T>(&self, new_head: usize, slot: &Slot<T, C>)
where
C: cfg::Config;
}
impl<C: cfg::Config> Pack<C> for Addr<C> {
const LEN: usize = C::MAX_PAGES + C::ADDR_INDEX_SHIFT;
type Prev = ();
fn as_usize(&self) -> usize {
self.addr
}
fn from_usize(addr: usize) -> Self {
debug_assert!(addr <= Self::BITS);
Self {
addr,
_cfg: PhantomData,
}
}
}
pub(crate) type Iter<'a, T, C> = std::iter::FilterMap<
std::slice::Iter<'a, Slot<Option<T>, C>>,
fn(&'a Slot<Option<T>, C>) -> Option<&'a T>,
>;
pub(crate) struct Local {
/// Index of the first slot on the local free list
head: UnsafeCell<usize>,
}
pub(crate) struct Shared<T, C> {
/// The remote free list
///
/// Slots freed from a remote thread are pushed onto this list.
remote: stack::TransferStack<C>,
// Total size of the page.
//
// If the head index of the local or remote free list is greater than the size of the
// page, then that free list is empty. If the heads of both free lists are greater than `size`,
// then there are no free slots left in that page.
size: usize,
prev_sz: usize,
slab: UnsafeCell<Option<Slots<T, C>>>,
}
type Slots<T, C> = Box<[Slot<T, C>]>;
impl Local {
pub(crate) fn new() -> Self {
Self {
head: UnsafeCell::new(0),
}
}
#[inline(always)]
fn head(&self) -> usize {
self.head.with(|head| unsafe { *head })
}
#[inline(always)]
fn set_head(&self, new_head: usize) {
self.head.with_mut(|head| unsafe {
*head = new_head;
})
}
}
impl<C: cfg::Config> FreeList<C> for Local {
fn push<T>(&self, new_head: usize, slot: &Slot<T, C>) {
slot.set_next(self.head());
self.set_head(new_head);
}
}
impl<T, C> Shared<T, C>
where
C: cfg::Config,
{
const NULL: usize = Addr::<C>::NULL;
pub(crate) fn new(size: usize, prev_sz: usize) -> Self {
Self {
prev_sz,
size,
remote: stack::TransferStack::new(),
slab: UnsafeCell::new(None),
}
}
/// Return the head of the freelist
///
/// If there is space on the local list, it returns the head of the local list. Otherwise, it
/// pops all the slots from the global list and returns the head of that list
///
/// *Note*: The local list's head is reset when setting the new state in the slot pointed to by the
/// `head` returned from this function
#[inline]
fn pop(&self, local: &Local) -> Option<usize> {
let head = local.head();
test_println!("-> local head {:?}", head);
// are there any items on the local free list? (fast path)
let head = if head < self.size {
head
} else {
// slow path: if the local free list is empty, pop all the items on
// the remote free list.
let head = self.remote.pop_all();
test_println!("-> remote head {:?}", head);
head?
};
// if the head is still null, both the local and remote free lists are
// empty --- we can't fit any more items on this page.
if head == Self::NULL {
test_println!("-> NULL! {:?}", head);
None
} else {
Some(head)
}
}
/// Returns `true` if storage is currently allocated for this page, `false`
/// otherwise.
#[inline]
fn is_unallocated(&self) -> bool {
self.slab.with(|s| unsafe { (*s).is_none() })
}
#[inline]
pub(crate) fn with_slot<'a, U>(
&'a self,
addr: Addr<C>,
f: impl FnOnce(&'a Slot<T, C>) -> Option<U>,
) -> Option<U> {
let poff = addr.offset() - self.prev_sz;
test_println!("-> offset {:?}", poff);
self.slab.with(|slab| {
let slot = unsafe { &*slab }.as_ref()?.get(poff)?;
f(slot)
})
}
#[inline(always)]
pub(crate) fn free_list(&self) -> &impl FreeList<C> {
&self.remote
}
}
impl<'a, T, C> Shared<Option<T>, C>
where
C: cfg::Config + 'a,
{
pub(crate) fn take<F>(
&self,
addr: Addr<C>,
gen: slot::Generation<C>,
free_list: &F,
) -> Option<T>
where
F: FreeList<C>,
{
let offset = addr.offset() - self.prev_sz;
test_println!("-> take: offset {:?}", offset);
self.slab.with(|slab| {
let slab = unsafe { &*slab }.as_ref()?;
let slot = slab.get(offset)?;
slot.remove_value(gen, offset, free_list)
})
}
pub(crate) fn remove<F: FreeList<C>>(
&self,
addr: Addr<C>,
gen: slot::Generation<C>,
free_list: &F,
) -> bool {
let offset = addr.offset() - self.prev_sz;
test_println!("-> offset {:?}", offset);
self.slab.with(|slab| {
let slab = unsafe { &*slab }.as_ref();
if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
slot.try_remove_value(gen, offset, free_list)
} else {
false
}
})
}
// Need this function separately, as we need to pass a function pointer to `filter_map` and
// `Slot::value` just returns a `&T`, specifically a `&Option<T>` for this impl.
fn make_ref(slot: &'a Slot<Option<T>, C>) -> Option<&'a T> {
slot.value().as_ref()
}
pub(crate) fn iter(&self) -> Option<Iter<'a, T, C>> {
let slab = self.slab.with(|slab| unsafe { (*slab).as_ref() });
slab.map(|slab| {
slab.iter()
.filter_map(Shared::make_ref as fn(&'a Slot<Option<T>, C>) -> Option<&'a T>)
})
}
}
impl<T, C> Shared<T, C>
where
T: Clear + Default,
C: cfg::Config,
{
pub(crate) fn init_with<U>(
&self,
local: &Local,
init: impl FnOnce(usize, &Slot<T, C>) -> Option<U>,
) -> Option<U> {
let head = self.pop(local)?;
// do we need to allocate storage for this page?
if self.is_unallocated() {
self.allocate();
}
let index = head + self.prev_sz;
let result = self.slab.with(|slab| {
let slab = unsafe { &*(slab) }
.as_ref()
.expect("page must have been allocated to insert!");
let slot = &slab[head];
let result = init(index, slot)?;
local.set_head(slot.next());
Some(result)
})?;
test_println!("-> init_with: insert at offset: {}", index);
Some(result)
}
/// Allocates storage for the page's slots.
#[cold]
fn allocate(&self) {
test_println!("-> alloc new page ({})", self.size);
debug_assert!(self.is_unallocated());
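// Build the initial local free list: each slot's `next` index points to the
// slot after it, and the final slot is terminated with `NULL`.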
let mut slab = Vec::with_capacity(self.size);
slab.extend((1..self.size).map(Slot::new));
slab.push(Slot::new(Self::NULL));
self.slab.with_mut(|s| {
// safety: this mut access is safe, as it only occurs while initially allocating the page,
// which only happens on this thread; if the page has not yet been allocated, other
// threads will not try to access it yet.
unsafe {
*s = Some(slab.into_boxed_slice());
}
});
}
pub(crate) fn mark_clear<F: FreeList<C>>(
&self,
addr: Addr<C>,
gen: slot::Generation<C>,
free_list: &F,
) -> bool {
let offset = addr.offset() - self.prev_sz;
test_println!("-> offset {:?}", offset);
self.slab.with(|slab| {
let slab = unsafe { &*slab }.as_ref();
if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
slot.try_clear_storage(gen, offset, free_list)
} else {
false
}
})
}
pub(crate) fn clear<F: FreeList<C>>(
&self,
addr: Addr<C>,
gen: slot::Generation<C>,
free_list: &F,
) -> bool {
let offset = addr.offset() - self.prev_sz;
test_println!("-> offset {:?}", offset);
self.slab.with(|slab| {
let slab = unsafe { &*slab }.as_ref();
if let Some(slot) = slab.and_then(|slab| slab.get(offset)) {
slot.clear_storage(gen, offset, free_list)
} else {
false
}
})
}
}
impl fmt::Debug for Local {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.head.with(|head| {
let head = unsafe { *head };
f.debug_struct("Local")
.field("head", &format_args!("{:#0x}", head))
.finish()
})
}
}
impl<C, T> fmt::Debug for Shared<C, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Shared")
.field("remote", &self.remote)
.field("prev_sz", &self.prev_sz)
.field("size", &self.size)
// .field("slab", &self.slab)
.finish()
}
}
impl<C: cfg::Config> fmt::Debug for Addr<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Addr")
.field("addr", &format_args!("{:#0x}", &self.addr))
.field("index", &self.index())
.field("offset", &self.offset())
.finish()
}
}
impl<C: cfg::Config> PartialEq for Addr<C> {
fn eq(&self, other: &Self) -> bool {
self.addr == other.addr
}
}
impl<C: cfg::Config> Eq for Addr<C> {}
impl<C: cfg::Config> PartialOrd for Addr<C> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.addr.partial_cmp(&other.addr)
}
}
impl<C: cfg::Config> Ord for Addr<C> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.addr.cmp(&other.addr)
}
}
impl<C: cfg::Config> Clone for Addr<C> {
fn clone(&self) -> Self {
*self
}
}
impl<C: cfg::Config> Copy for Addr<C> {}
#[inline(always)]
pub(crate) fn indices<C: cfg::Config>(idx: usize) -> (Addr<C>, usize) {
let addr = C::unpack_addr(idx);
(addr, addr.index())
}
#[cfg(test)]
mod test {
use super::*;
use crate::Pack;
use proptest::prelude::*;
proptest! {
#[test]
fn addr_roundtrips(pidx in 0usize..Addr::<cfg::DefaultConfig>::BITS) {
let addr = Addr::<cfg::DefaultConfig>::from_usize(pidx);
let packed = addr.pack(0);
assert_eq!(addr, Addr::from_packed(packed));
}
#[test]
fn gen_roundtrips(gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS) {
let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
let packed = gen.pack(0);
assert_eq!(gen, slot::Generation::from_packed(packed));
}
#[test]
fn page_roundtrips(
gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS,
addr in 0usize..Addr::<cfg::DefaultConfig>::BITS,
) {
let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
let addr = Addr::<cfg::DefaultConfig>::from_usize(addr);
let packed = gen.pack(addr.pack(0));
assert_eq!(addr, Addr::from_packed(packed));
assert_eq!(gen, slot::Generation::from_packed(packed));
}
}
}

vendor/sharded-slab/src/page/slot.rs vendored Normal file

@@ -0,0 +1,922 @@
use super::FreeList;
use crate::sync::{
atomic::{AtomicUsize, Ordering},
hint, UnsafeCell,
};
use crate::{cfg, clear::Clear, Pack, Tid};
use std::{fmt, marker::PhantomData, mem, ptr, thread};
pub(crate) struct Slot<T, C> {
lifecycle: AtomicUsize,
/// The offset of the next item on the free list.
next: UnsafeCell<usize>,
/// The data stored in the slot.
item: UnsafeCell<T>,
_cfg: PhantomData<fn(C)>,
}
#[derive(Debug)]
pub(crate) struct Guard<T, C: cfg::Config = cfg::DefaultConfig> {
slot: ptr::NonNull<Slot<T, C>>,
}
#[derive(Debug)]
pub(crate) struct InitGuard<T, C: cfg::Config = cfg::DefaultConfig> {
slot: ptr::NonNull<Slot<T, C>>,
curr_lifecycle: usize,
released: bool,
}
#[repr(transparent)]
pub(crate) struct Generation<C = cfg::DefaultConfig> {
value: usize,
_cfg: PhantomData<fn(C)>,
}
#[repr(transparent)]
pub(crate) struct RefCount<C = cfg::DefaultConfig> {
value: usize,
_cfg: PhantomData<fn(C)>,
}
pub(crate) struct Lifecycle<C> {
state: State,
_cfg: PhantomData<fn(C)>,
}
struct LifecycleGen<C>(Generation<C>);
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[repr(usize)]
enum State {
Present = 0b00,
Marked = 0b01,
Removing = 0b11,
}
impl<C: cfg::Config> Pack<C> for Generation<C> {
/// Use all the remaining bits in the word for the generation counter, minus
/// any bits reserved by the user.
const LEN: usize = (cfg::WIDTH - C::RESERVED_BITS) - Self::SHIFT;
type Prev = Tid<C>;
#[inline(always)]
fn from_usize(u: usize) -> Self {
debug_assert!(u <= Self::BITS);
Self::new(u)
}
#[inline(always)]
fn as_usize(&self) -> usize {
self.value
}
}
impl<C: cfg::Config> Generation<C> {
fn new(value: usize) -> Self {
Self {
value,
_cfg: PhantomData,
}
}
}
// Slot methods which should work across all trait bounds
impl<T, C> Slot<T, C>
where
C: cfg::Config,
{
#[inline(always)]
pub(super) fn next(&self) -> usize {
self.next.with(|next| unsafe { *next })
}
#[inline(always)]
pub(crate) fn value(&self) -> &T {
self.item.with(|item| unsafe { &*item })
}
#[inline(always)]
pub(super) fn set_next(&self, next: usize) {
self.next.with_mut(|n| unsafe {
(*n) = next;
})
}
#[inline(always)]
pub(crate) fn get(&self, gen: Generation<C>) -> Option<Guard<T, C>> {
let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
loop {
// Unpack the current state.
let state = Lifecycle::<C>::from_packed(lifecycle);
let current_gen = LifecycleGen::<C>::from_packed(lifecycle).0;
let refs = RefCount::<C>::from_packed(lifecycle);
test_println!(
"-> get {:?}; current_gen={:?}; lifecycle={:#x}; state={:?}; refs={:?};",
gen,
current_gen,
lifecycle,
state,
refs,
);
// Is it okay to access this slot? The accessed generation must be
// current, and the slot must not be in the process of being
// removed. If we can no longer access the slot at the given
// generation, return `None`.
if gen != current_gen || state != Lifecycle::PRESENT {
test_println!("-> get: no longer exists!");
return None;
}
// Try to increment the slot's ref count by one.
let new_refs = refs.incr()?;
match self.lifecycle.compare_exchange(
lifecycle,
new_refs.pack(lifecycle),
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
test_println!("-> {:?}", new_refs);
return Some(Guard {
slot: ptr::NonNull::from(self),
});
}
Err(actual) => {
// Another thread modified the slot's state before us! We
// need to retry with the new state.
//
// Since the new state may mean that the accessed generation
// is no longer valid, we'll check again on the next
// iteration of the loop.
test_println!("-> get: retrying; lifecycle={:#x};", actual);
lifecycle = actual;
}
};
}
}
/// Marks this slot to be released, returning `true` if the slot can be
/// mutated *now* and `false` otherwise.
///
/// This method checks if there are any references to this slot. If there _are_ valid
/// references, it just marks the slot for release and returns, and the next thread calling
/// either `clear_storage` or `remove_value` will try to modify the storage.
fn mark_release(&self, gen: Generation<C>) -> Option<bool> {
let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
let mut curr_gen;
// Try to advance the slot's state to "MARKED", which indicates that it
// should be removed when it is no longer concurrently accessed.
loop {
curr_gen = LifecycleGen::from_packed(lifecycle).0;
test_println!(
"-> mark_release; gen={:?}; current_gen={:?};",
gen,
curr_gen
);
// Is the slot still at the generation we are trying to remove?
if gen != curr_gen {
return None;
}
let state = Lifecycle::<C>::from_packed(lifecycle).state;
test_println!("-> mark_release; state={:?};", state);
match state {
State::Removing => {
test_println!("--> mark_release; cannot release (already removed!)");
return None;
}
State::Marked => {
test_println!("--> mark_release; already marked;");
break;
}
State::Present => {}
};
// Set the new state to `MARKED`.
let new_lifecycle = Lifecycle::<C>::MARKED.pack(lifecycle);
test_println!(
"-> mark_release; old_lifecycle={:#x}; new_lifecycle={:#x};",
lifecycle,
new_lifecycle
);
match self.lifecycle.compare_exchange(
lifecycle,
new_lifecycle,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => break,
Err(actual) => {
test_println!("-> mark_release; retrying");
lifecycle = actual;
}
}
}
// Unpack the current reference count to see if we can remove the slot now.
let refs = RefCount::<C>::from_packed(lifecycle);
test_println!("-> mark_release: marked; refs={:?};", refs);
// Are there currently outstanding references to the slot? If so, it
// will have to be removed when those references are dropped.
Some(refs.value == 0)
}
/// Mutates this slot.
///
/// This method spins until no references to this slot are left, and calls the mutator
fn release_with<F, M, R>(&self, gen: Generation<C>, offset: usize, free: &F, mutator: M) -> R
where
F: FreeList<C>,
M: FnOnce(Option<&mut T>) -> R,
{
let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
let mut advanced = false;
// Exponential spin backoff while waiting for the slot to be released.
let mut spin_exp = 0;
let next_gen = gen.advance();
loop {
let current_gen = LifecycleGen::from_packed(lifecycle).0;
test_println!("-> release_with; lifecycle={:#x}; expected_gen={:?}; current_gen={:?}; next_gen={:?};",
lifecycle,
gen,
current_gen,
next_gen
);
// First, make sure we are actually able to remove the value.
// If we're going to remove the value, the generation has to match
// the value that `remove_value` was called with...unless we've
// already stored the new generation.
if (!advanced) && gen != current_gen {
test_println!("-> already removed!");
return mutator(None);
}
match self.lifecycle.compare_exchange(
lifecycle,
LifecycleGen(next_gen).pack(lifecycle),
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(actual) => {
// If we're in this state, we have successfully advanced to
// the next generation.
advanced = true;
// Make sure that there are no outstanding references.
let refs = RefCount::<C>::from_packed(actual);
test_println!("-> advanced gen; lifecycle={:#x}; refs={:?};", actual, refs);
if refs.value == 0 {
test_println!("-> ok to remove!");
// safety: we've modified the generation of this slot and any other thread
// calling this method will exit out at the generation check above in the
// next iteration of the loop.
let value = self
.item
.with_mut(|item| mutator(Some(unsafe { &mut *item })));
free.push(offset, self);
return value;
}
// Otherwise, a reference must be dropped before we can
// remove the value. Spin here until there are no refs remaining...
test_println!("-> refs={:?}; spin...", refs);
// Back off, spinning and possibly yielding.
exponential_backoff(&mut spin_exp);
}
Err(actual) => {
test_println!("-> retrying; lifecycle={:#x};", actual);
lifecycle = actual;
// The state changed; reset the spin backoff.
spin_exp = 0;
}
}
}
}
/// Initialize a slot
///
/// This method initializes and sets up the state for a slot. When being used in `Pool`, we
/// only need to ensure that the `Slot` is in the right `state`, while when being used in a
/// `Slab` we want to insert a value into it, as the memory is not initialized.
pub(crate) fn init(&self) -> Option<InitGuard<T, C>> {
// Load the current lifecycle state.
let lifecycle = self.lifecycle.load(Ordering::Acquire);
let gen = LifecycleGen::<C>::from_packed(lifecycle).0;
let refs = RefCount::<C>::from_packed(lifecycle);
test_println!(
"-> initialize_state; state={:?}; gen={:?}; refs={:?};",
Lifecycle::<C>::from_packed(lifecycle),
gen,
refs,
);
if refs.value != 0 {
test_println!("-> initialize while referenced! cancelling");
return None;
}
Some(InitGuard {
slot: ptr::NonNull::from(self),
curr_lifecycle: lifecycle,
released: false,
})
}
}
// Slot impl which _needs_ an `Option` for self.item; this is for `Slab` to use.
impl<T, C> Slot<Option<T>, C>
where
C: cfg::Config,
{
fn is_empty(&self) -> bool {
self.item.with(|item| unsafe { (*item).is_none() })
}
/// Insert a value into a slot
///
/// We first initialize the state and then insert the passed-in value into the slot.
#[inline]
pub(crate) fn insert(&self, value: &mut Option<T>) -> Option<Generation<C>> {
debug_assert!(self.is_empty(), "inserted into full slot");
debug_assert!(value.is_some(), "inserted twice");
let mut guard = self.init()?;
let gen = guard.generation();
unsafe {
// Safety: Accessing the value of an `InitGuard` is unsafe because
// it has a pointer to a slot which may dangle. Here, we know the
// pointed slot is alive because we have a reference to it in scope,
// and the `InitGuard` will be dropped when this function returns.
mem::swap(guard.value_mut(), value);
guard.release();
};
test_println!("-> inserted at {:?}", gen);
Some(gen)
}
/// Tries to remove the value in the slot, returning `true` if the value was
/// removed.
///
/// This method tries to remove the value in the slot. If there are existing references, then
/// the slot is marked for removal and the next thread calling either this method or
/// `remove_value` will do the work instead.
#[inline]
pub(super) fn try_remove_value<F: FreeList<C>>(
&self,
gen: Generation<C>,
offset: usize,
free: &F,
) -> bool {
let should_remove = match self.mark_release(gen) {
// If `mark_release` returns `Some`, a value exists at this
// generation. The bool inside this option indicates whether or not
// _we're_ allowed to remove the value.
Some(should_remove) => should_remove,
// Otherwise, the generation we tried to remove has already expired,
// and we did not mark anything for removal.
None => {
test_println!(
"-> try_remove_value; nothing exists at generation={:?}",
gen
);
return false;
}
};
test_println!("-> try_remove_value; marked!");
if should_remove {
// We're allowed to remove the slot now!
test_println!("-> try_remove_value; can remove now");
self.remove_value(gen, offset, free);
}
true
}
#[inline]
pub(super) fn remove_value<F: FreeList<C>>(
&self,
gen: Generation<C>,
offset: usize,
free: &F,
) -> Option<T> {
self.release_with(gen, offset, free, |item| item.and_then(Option::take))
}
}
// These impls are specific to `Pool`
impl<T, C> Slot<T, C>
where
T: Default + Clear,
C: cfg::Config,
{
pub(in crate::page) fn new(next: usize) -> Self {
Self {
lifecycle: AtomicUsize::new(Lifecycle::<C>::REMOVING.as_usize()),
item: UnsafeCell::new(T::default()),
next: UnsafeCell::new(next),
_cfg: PhantomData,
}
}
/// Try to clear this slot's storage
///
/// If there are references to this slot, then we mark this slot for clearing and let the last
/// thread do the work for us.
#[inline]
pub(super) fn try_clear_storage<F: FreeList<C>>(
&self,
gen: Generation<C>,
offset: usize,
free: &F,
) -> bool {
let should_clear = match self.mark_release(gen) {
// If `mark_release` returns `Some`, a value exists at this
// generation. The bool inside this option indicates whether or not
// _we're_ allowed to clear the value.
Some(should_clear) => should_clear,
// Otherwise, the generation we tried to remove has already expired,
// and we did not mark anything for removal.
None => {
test_println!(
"-> try_clear_storage; nothing exists at generation={:?}",
gen
);
return false;
}
};
test_println!("-> try_clear_storage; marked!");
if should_clear {
// We're allowed to remove the slot now!
test_println!("-> try_remove_value; can clear now");
return self.clear_storage(gen, offset, free);
}
true
}
/// Clear this slot's storage
///
/// This method blocks until all references have been dropped and clears the storage.
pub(super) fn clear_storage<F: FreeList<C>>(
&self,
gen: Generation<C>,
offset: usize,
free: &F,
) -> bool {
// release_with will _always_ wait until it can release the slot, or just return if the slot
// has already been released.
self.release_with(gen, offset, free, |item| {
let cleared = item.map(|inner| Clear::clear(inner)).is_some();
test_println!("-> cleared: {}", cleared);
cleared
})
}
}
impl<T, C: cfg::Config> Slot<T, C> {
fn release(&self) -> bool {
let mut lifecycle = self.lifecycle.load(Ordering::Acquire);
loop {
let refs = RefCount::<C>::from_packed(lifecycle);
let state = Lifecycle::<C>::from_packed(lifecycle).state;
let gen = LifecycleGen::<C>::from_packed(lifecycle).0;
// Are we the last guard, and is the slot marked for removal?
let dropping = refs.value == 1 && state == State::Marked;
let new_lifecycle = if dropping {
// If so, we want to advance the state to "removing".
// Also, reset the ref count to 0.
LifecycleGen(gen).pack(State::Removing as usize)
} else {
// Otherwise, just subtract 1 from the ref count.
refs.decr().pack(lifecycle)
};
test_println!(
"-> drop guard: state={:?}; gen={:?}; refs={:?}; lifecycle={:#x}; new_lifecycle={:#x}; dropping={:?}",
state,
gen,
refs,
lifecycle,
new_lifecycle,
dropping
);
match self.lifecycle.compare_exchange(
lifecycle,
new_lifecycle,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
test_println!("-> drop guard: done; dropping={:?}", dropping);
return dropping;
}
Err(actual) => {
test_println!("-> drop guard; retry, actual={:#x}", actual);
lifecycle = actual;
}
}
}
}
}
impl<T, C: cfg::Config> fmt::Debug for Slot<T, C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let lifecycle = self.lifecycle.load(Ordering::Relaxed);
f.debug_struct("Slot")
.field("lifecycle", &format_args!("{:#x}", lifecycle))
.field("state", &Lifecycle::<C>::from_packed(lifecycle).state)
.field("gen", &LifecycleGen::<C>::from_packed(lifecycle).0)
.field("refs", &RefCount::<C>::from_packed(lifecycle))
.field("next", &self.next())
.finish()
}
}
// === impl Generation ===
impl<C> fmt::Debug for Generation<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Generation").field(&self.value).finish()
}
}
impl<C: cfg::Config> Generation<C> {
fn advance(self) -> Self {
Self::from_usize((self.value + 1) % Self::BITS)
}
}
impl<C: cfg::Config> PartialEq for Generation<C> {
fn eq(&self, other: &Self) -> bool {
self.value == other.value
}
}
impl<C: cfg::Config> Eq for Generation<C> {}
impl<C: cfg::Config> PartialOrd for Generation<C> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.value.partial_cmp(&other.value)
}
}
impl<C: cfg::Config> Ord for Generation<C> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.value.cmp(&other.value)
}
}
impl<C: cfg::Config> Clone for Generation<C> {
fn clone(&self) -> Self {
*self
}
}
impl<C: cfg::Config> Copy for Generation<C> {}
// === impl Guard ===
impl<T, C: cfg::Config> Guard<T, C> {
/// Releases the guard, returning `true` if the slot should be cleared.
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `Guard` does not outlive the slab that contains
/// the pointed slot. Failure to do so means this pointer may dangle.
#[inline]
pub(crate) unsafe fn release(&self) -> bool {
self.slot().release()
}
/// Returns a borrowed reference to the slot.
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `Guard` does not outlive the slab that contains
/// the pointed slot. Failure to do so means this pointer may dangle.
#[inline]
pub(crate) unsafe fn slot(&self) -> &Slot<T, C> {
self.slot.as_ref()
}
/// Returns a borrowed reference to the slot's value.
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `Guard` does not outlive the slab that contains
/// the pointed slot. Failure to do so means this pointer may dangle.
#[inline(always)]
pub(crate) unsafe fn value(&self) -> &T {
self.slot().item.with(|item| &*item)
}
}
// === impl Lifecycle ===
impl<C: cfg::Config> Lifecycle<C> {
const MARKED: Self = Self {
state: State::Marked,
_cfg: PhantomData,
};
const REMOVING: Self = Self {
state: State::Removing,
_cfg: PhantomData,
};
const PRESENT: Self = Self {
state: State::Present,
_cfg: PhantomData,
};
}
impl<C: cfg::Config> Pack<C> for Lifecycle<C> {
const LEN: usize = 2;
type Prev = ();
fn from_usize(u: usize) -> Self {
Self {
state: match u & Self::MASK {
0b00 => State::Present,
0b01 => State::Marked,
0b11 => State::Removing,
bad => unreachable!("weird lifecycle {:#b}", bad),
},
_cfg: PhantomData,
}
}
fn as_usize(&self) -> usize {
self.state as usize
}
}
impl<C> PartialEq for Lifecycle<C> {
fn eq(&self, other: &Self) -> bool {
self.state == other.state
}
}
impl<C> Eq for Lifecycle<C> {}
impl<C> fmt::Debug for Lifecycle<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Lifecycle").field(&self.state).finish()
}
}
// === impl RefCount ===
impl<C: cfg::Config> Pack<C> for RefCount<C> {
const LEN: usize = cfg::WIDTH - (Lifecycle::<C>::LEN + Generation::<C>::LEN);
type Prev = Lifecycle<C>;
fn from_usize(value: usize) -> Self {
debug_assert!(value <= Self::BITS);
Self {
value,
_cfg: PhantomData,
}
}
fn as_usize(&self) -> usize {
self.value
}
}
impl<C: cfg::Config> RefCount<C> {
pub(crate) const MAX: usize = Self::BITS - 1;
#[inline]
fn incr(self) -> Option<Self> {
if self.value >= Self::MAX {
test_println!("-> get: {}; MAX={}", self.value, RefCount::<C>::MAX);
return None;
}
Some(Self::from_usize(self.value + 1))
}
#[inline]
fn decr(self) -> Self {
Self::from_usize(self.value - 1)
}
}
impl<C> fmt::Debug for RefCount<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("RefCount").field(&self.value).finish()
}
}
impl<C: cfg::Config> PartialEq for RefCount<C> {
fn eq(&self, other: &Self) -> bool {
self.value == other.value
}
}
impl<C: cfg::Config> Eq for RefCount<C> {}
impl<C: cfg::Config> PartialOrd for RefCount<C> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.value.partial_cmp(&other.value)
}
}
impl<C: cfg::Config> Ord for RefCount<C> {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.value.cmp(&other.value)
}
}
impl<C: cfg::Config> Clone for RefCount<C> {
fn clone(&self) -> Self {
*self
}
}
impl<C: cfg::Config> Copy for RefCount<C> {}
// === impl LifecycleGen ===
impl<C: cfg::Config> Pack<C> for LifecycleGen<C> {
const LEN: usize = Generation::<C>::LEN;
type Prev = RefCount<C>;
fn from_usize(value: usize) -> Self {
Self(Generation::from_usize(value))
}
fn as_usize(&self) -> usize {
self.0.as_usize()
}
}
impl<T, C: cfg::Config> InitGuard<T, C> {
pub(crate) fn generation(&self) -> Generation<C> {
LifecycleGen::<C>::from_packed(self.curr_lifecycle).0
}
/// Returns a borrowed reference to the slot's value.
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `InitGuard` does not outlive the slab that
/// contains the pointed slot. Failure to do so means this pointer may
/// dangle.
pub(crate) unsafe fn value(&self) -> &T {
self.slot.as_ref().item.with(|val| &*val)
}
/// Returns a mutably borrowed reference to the slot's value.
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `InitGuard` does not outlive the slab that
/// contains the pointed slot. Failure to do so means this pointer may
/// dangle.
///
/// It's safe to reference the slot mutably, though, because creating an
/// `InitGuard` ensures there are no outstanding immutable references.
pub(crate) unsafe fn value_mut(&mut self) -> &mut T {
self.slot.as_ref().item.with_mut(|val| &mut *val)
}
/// Releases the guard, returning `true` if the slot should be cleared.
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `InitGuard` does not outlive the slab that
/// contains the pointed slot. Failure to do so means this pointer may
/// dangle.
pub(crate) unsafe fn release(&mut self) -> bool {
self.release2(0)
}
/// Downgrades the guard to an immutable guard
///
/// ## Safety
///
/// This dereferences a raw pointer to the slot. The caller is responsible
/// for ensuring that the `InitGuard` does not outlive the slab that
/// contains the pointed slot. Failure to do so means this pointer may
/// dangle.
pub(crate) unsafe fn downgrade(&mut self) -> Guard<T, C> {
let _ = self.release2(RefCount::<C>::from_usize(1).pack(0));
Guard { slot: self.slot }
}
unsafe fn release2(&mut self, new_refs: usize) -> bool {
test_println!(
"InitGuard::release; curr_lifecycle={:?}; downgrading={}",
Lifecycle::<C>::from_packed(self.curr_lifecycle),
new_refs != 0,
);
if self.released {
test_println!("-> already released!");
return false;
}
self.released = true;
let mut curr_lifecycle = self.curr_lifecycle;
let slot = self.slot.as_ref();
let new_lifecycle = LifecycleGen::<C>::from_packed(self.curr_lifecycle)
.pack(Lifecycle::<C>::PRESENT.pack(new_refs));
match slot.lifecycle.compare_exchange(
curr_lifecycle,
new_lifecycle,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
test_println!("--> advanced to PRESENT; done");
return false;
}
Err(actual) => {
test_println!(
"--> lifecycle changed; actual={:?}",
Lifecycle::<C>::from_packed(actual)
);
curr_lifecycle = actual;
}
}
// if the state was no longer the prior state, we are now responsible
// for releasing the slot.
loop {
let refs = RefCount::<C>::from_packed(curr_lifecycle);
let state = Lifecycle::<C>::from_packed(curr_lifecycle).state;
test_println!(
"-> InitGuard::release; lifecycle={:#x}; state={:?}; refs={:?};",
curr_lifecycle,
state,
refs,
);
debug_assert!(state == State::Marked || thread::panicking(), "state was not MARKED; someone else has removed the slot while we have exclusive access!\nactual={:?}", state);
debug_assert!(refs.value == 0 || thread::panicking(), "ref count was not 0; someone else has referenced the slot while we have exclusive access!\nactual={:?}", refs);
let new_lifecycle = LifecycleGen(self.generation()).pack(State::Removing as usize);
match slot.lifecycle.compare_exchange(
curr_lifecycle,
new_lifecycle,
Ordering::AcqRel,
Ordering::Acquire,
) {
Ok(_) => {
test_println!("-> InitGuard::RELEASE: done!");
return true;
}
Err(actual) => {
debug_assert!(thread::panicking(), "we should not have to retry this CAS!");
test_println!("-> InitGuard::release; retry, actual={:#x}", actual);
curr_lifecycle = actual;
}
}
}
}
}
// === helpers ===
#[inline(always)]
fn exponential_backoff(exp: &mut usize) {
/// Maximum exponent we can back off to.
const MAX_EXPONENT: usize = 8;
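// At the cap, each call issues 2^8 = 256 spin-loop hints before yielding.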
// Issue 2^exp pause instructions.
for _ in 0..(1 << *exp) {
hint::spin_loop();
}
if *exp >= MAX_EXPONENT {
// If we have reached the max backoff, also yield to the scheduler
// explicitly.
crate::sync::yield_now();
} else {
// Otherwise, increment the exponent.
*exp += 1;
}
}

vendor/sharded-slab/src/page/stack.rs vendored Normal file

@@ -0,0 +1,124 @@
use crate::cfg;
use crate::sync::atomic::{AtomicUsize, Ordering};
use std::{fmt, marker::PhantomData};
pub(super) struct TransferStack<C = cfg::DefaultConfig> {
head: AtomicUsize,
_cfg: PhantomData<fn(C)>,
}
impl<C: cfg::Config> TransferStack<C> {
pub(super) fn new() -> Self {
Self {
head: AtomicUsize::new(super::Addr::<C>::NULL),
_cfg: PhantomData,
}
}
pub(super) fn pop_all(&self) -> Option<usize> {
let val = self.head.swap(super::Addr::<C>::NULL, Ordering::Acquire);
test_println!("-> pop {:#x}", val);
if val == super::Addr::<C>::NULL {
None
} else {
Some(val)
}
}
fn push(&self, new_head: usize, before: impl Fn(usize)) {
// We loop to win the race to set the new head. The `next` variable
// is the next slot on the stack which needs to be pointed to by the
// new head.
let mut next = self.head.load(Ordering::Relaxed);
loop {
test_println!("-> next {:#x}", next);
before(next);
match self
.head
.compare_exchange(next, new_head, Ordering::Release, Ordering::Relaxed)
{
// lost the race!
Err(actual) => {
test_println!("-> retry!");
next = actual;
}
Ok(_) => {
test_println!("-> successful; next={:#x}", next);
return;
}
}
}
}
}
impl<C: cfg::Config> super::FreeList<C> for TransferStack<C> {
fn push<T>(&self, new_head: usize, slot: &super::Slot<T, C>) {
self.push(new_head, |next| slot.set_next(next))
}
}
impl<C> fmt::Debug for TransferStack<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TransferStack")
.field(
"head",
&format_args!("{:#0x}", &self.head.load(Ordering::Relaxed)),
)
.finish()
}
}
#[cfg(all(loom, test))]
mod test {
use super::*;
use crate::{sync::UnsafeCell, test_util};
use loom::thread;
use std::sync::Arc;
#[test]
fn transfer_stack() {
test_util::run_model("transfer_stack", || {
let causalities = [UnsafeCell::new(999), UnsafeCell::new(999)];
let shared = Arc::new((causalities, TransferStack::<cfg::DefaultConfig>::new()));
let shared1 = shared.clone();
let shared2 = shared.clone();
let t1 = thread::spawn(move || {
let (causalities, stack) = &*shared1;
stack.push(0, |prev| {
causalities[0].with_mut(|c| unsafe {
*c = 0;
});
test_println!("prev={:#x}", prev)
});
});
let t2 = thread::spawn(move || {
let (causalities, stack) = &*shared2;
stack.push(1, |prev| {
causalities[1].with_mut(|c| unsafe {
*c = 1;
});
test_println!("prev={:#x}", prev)
});
});
let (causalities, stack) = &*shared;
let mut idx = stack.pop_all();
while idx == None {
idx = stack.pop_all();
thread::yield_now();
}
let idx = idx.unwrap();
causalities[idx].with(|val| unsafe {
assert_eq!(
*val, idx,
"UnsafeCell write must happen-before index is pushed to the stack!"
);
});
t1.join().unwrap();
t2.join().unwrap();
});
}
}

vendor/sharded-slab/src/pool.rs vendored Normal file

File diff suppressed because it is too large.

vendor/sharded-slab/src/shard.rs vendored Normal file

@@ -0,0 +1,432 @@
use crate::{
cfg::{self, CfgPrivate},
clear::Clear,
page,
sync::{
alloc,
atomic::{
AtomicPtr, AtomicUsize,
Ordering::{self, *},
},
},
tid::Tid,
Pack,
};
use std::{fmt, ptr, slice};
// ┌─────────────┐ ┌────────┐
// │ page 1 │ │ │
// ├─────────────┤ ┌───▶│ next──┼─┐
// │ page 2 │ │ ├────────┤ │
// │ │ │ │XXXXXXXX│ │
// │ local_free──┼─┘ ├────────┤ │
// │ global_free─┼─┐ │ │◀┘
// ├─────────────┤ └───▶│ next──┼─┐
// │ page 3 │ ├────────┤ │
// └─────────────┘ │XXXXXXXX│ │
// ... ├────────┤ │
// ┌─────────────┐ │XXXXXXXX│ │
// │ page n │ ├────────┤ │
// └─────────────┘ │ │◀┘
// │ next──┼───▶
// ├────────┤
// │XXXXXXXX│
// └────────┘
// ...
pub(crate) struct Shard<T, C: cfg::Config> {
/// The shard's parent thread ID.
pub(crate) tid: usize,
/// The local free list for each page.
///
/// These are only ever accessed from this shard's thread, so they are
/// stored separately from the shared state for the page that can be
/// accessed concurrently, to minimize false sharing.
local: Box<[page::Local]>,
/// The shared state for each page in this shard.
///
/// This consists of the page's metadata (size, previous size), remote free
/// list, and a pointer to the actual array backing that page.
shared: Box<[page::Shared<T, C>]>,
}
pub(crate) struct Array<T, C: cfg::Config> {
shards: Box<[Ptr<T, C>]>,
max: AtomicUsize,
}
#[derive(Debug)]
struct Ptr<T, C: cfg::Config>(AtomicPtr<alloc::Track<Shard<T, C>>>);
#[derive(Debug)]
pub(crate) struct IterMut<'a, T: 'a, C: cfg::Config + 'a>(slice::IterMut<'a, Ptr<T, C>>);
// === impl Shard ===
impl<T, C> Shard<T, C>
where
C: cfg::Config,
{
#[inline(always)]
pub(crate) fn with_slot<'a, U>(
&'a self,
idx: usize,
f: impl FnOnce(&'a page::Slot<T, C>) -> Option<U>,
) -> Option<U> {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
test_println!("-> {:?}", addr);
if page_index >= self.shared.len() {
return None;
}
self.shared[page_index].with_slot(addr, f)
}
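/// Builds the shard's per-page state. Page `n` holds `C::page_size(n)` slots,
/// so each page is twice the size of the one before it, and `prev_sz` records
/// how many slots precede that page. With an initial page size of 4, for
/// example, the pages hold 4, 8, 16, ... slots and begin at offsets 0, 4, 12, ...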
pub(crate) fn new(tid: usize) -> Self {
let mut total_sz = 0;
let shared = (0..C::MAX_PAGES)
.map(|page_num| {
let sz = C::page_size(page_num);
let prev_sz = total_sz;
total_sz += sz;
page::Shared::new(sz, prev_sz)
})
.collect();
let local = (0..C::MAX_PAGES).map(|_| page::Local::new()).collect();
Self { tid, local, shared }
}
}
impl<T, C> Shard<Option<T>, C>
where
C: cfg::Config,
{
/// Remove an item on the shard's local thread.
pub(crate) fn take_local(&self, idx: usize) -> Option<T> {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
test_println!("-> remove_local {:?}", addr);
self.shared
.get(page_index)?
.take(addr, C::unpack_gen(idx), self.local(page_index))
}
/// Remove an item, while on a different thread from the shard's local thread.
pub(crate) fn take_remote(&self, idx: usize) -> Option<T> {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
debug_assert!(Tid::<C>::current().as_usize() != self.tid);
let (addr, page_index) = page::indices::<C>(idx);
test_println!("-> take_remote {:?}; page {:?}", addr, page_index);
let shared = self.shared.get(page_index)?;
shared.take(addr, C::unpack_gen(idx), shared.free_list())
}
pub(crate) fn remove_local(&self, idx: usize) -> bool {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
if page_index >= self.shared.len() {
return false;
}
self.shared[page_index].remove(addr, C::unpack_gen(idx), self.local(page_index))
}
pub(crate) fn remove_remote(&self, idx: usize) -> bool {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
if page_index >= self.shared.len() {
return false;
}
let shared = &self.shared[page_index];
shared.remove(addr, C::unpack_gen(idx), shared.free_list())
}
pub(crate) fn iter(&self) -> std::slice::Iter<'_, page::Shared<Option<T>, C>> {
self.shared.iter()
}
}
impl<T, C> Shard<T, C>
where
T: Clear + Default,
C: cfg::Config,
{
pub(crate) fn init_with<U>(
&self,
mut init: impl FnMut(usize, &page::Slot<T, C>) -> Option<U>,
) -> Option<U> {
// Can we fit the value into an existing page?
for (page_idx, page) in self.shared.iter().enumerate() {
let local = self.local(page_idx);
test_println!("-> page {}; {:?}; {:?}", page_idx, local, page);
if let Some(res) = page.init_with(local, &mut init) {
return Some(res);
}
}
None
}
pub(crate) fn mark_clear_local(&self, idx: usize) -> bool {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
if page_index >= self.shared.len() {
return false;
}
self.shared[page_index].mark_clear(addr, C::unpack_gen(idx), self.local(page_index))
}
pub(crate) fn mark_clear_remote(&self, idx: usize) -> bool {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
if page_index >= self.shared.len() {
return false;
}
let shared = &self.shared[page_index];
shared.mark_clear(addr, C::unpack_gen(idx), shared.free_list())
}
pub(crate) fn clear_after_release(&self, idx: usize) {
crate::sync::atomic::fence(crate::sync::atomic::Ordering::Acquire);
let tid = Tid::<C>::current().as_usize();
test_println!(
"-> clear_after_release; self.tid={:?}; current.tid={:?};",
tid,
self.tid
);
if tid == self.tid {
self.clear_local(idx);
} else {
self.clear_remote(idx);
}
}
fn clear_local(&self, idx: usize) -> bool {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
if page_index >= self.shared.len() {
return false;
}
self.shared[page_index].clear(addr, C::unpack_gen(idx), self.local(page_index))
}
fn clear_remote(&self, idx: usize) -> bool {
debug_assert_eq_in_drop!(Tid::<C>::from_packed(idx).as_usize(), self.tid);
let (addr, page_index) = page::indices::<C>(idx);
if page_index >= self.shared.len() {
return false;
}
let shared = &self.shared[page_index];
shared.clear(addr, C::unpack_gen(idx), shared.free_list())
}
#[inline(always)]
fn local(&self, i: usize) -> &page::Local {
#[cfg(debug_assertions)]
debug_assert_eq_in_drop!(
Tid::<C>::current().as_usize(),
self.tid,
"tried to access local data from another thread!"
);
&self.local[i]
}
}
impl<T: fmt::Debug, C: cfg::Config> fmt::Debug for Shard<T, C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut d = f.debug_struct("Shard");
#[cfg(debug_assertions)]
d.field("tid", &self.tid);
d.field("shared", &self.shared).finish()
}
}
// === impl Array ===
impl<T, C> Array<T, C>
where
C: cfg::Config,
{
pub(crate) fn new() -> Self {
let mut shards = Vec::with_capacity(C::MAX_SHARDS);
for _ in 0..C::MAX_SHARDS {
// XXX(eliza): T_T this could be avoided with maybeuninit or something...
shards.push(Ptr::null());
}
Self {
shards: shards.into(),
max: AtomicUsize::new(0),
}
}
#[inline]
pub(crate) fn get(&self, idx: usize) -> Option<&Shard<T, C>> {
test_println!("-> get shard={}", idx);
self.shards.get(idx)?.load(Acquire)
}
#[inline]
pub(crate) fn current(&self) -> (Tid<C>, &Shard<T, C>) {
let tid = Tid::<C>::current();
test_println!("current: {:?}", tid);
let idx = tid.as_usize();
assert!(
idx < self.shards.len(),
"Thread count overflowed the configured max count. \
Thread index = {}, max threads = {}.",
idx,
C::MAX_SHARDS,
);
// It's okay for this to be relaxed. The value is only ever stored by
// the thread that corresponds to the index, and we are that thread.
let shard = self.shards[idx].load(Relaxed).unwrap_or_else(|| {
let ptr = Box::into_raw(Box::new(alloc::Track::new(Shard::new(idx))));
test_println!("-> allocated new shard for index {} at {:p}", idx, ptr);
self.shards[idx].set(ptr);
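// Record the highest shard index ever allocated so that `iter_mut` and the
// `Drop` impl below only walk shards that might exist. A failed
// compare-exchange that reports a larger index means another thread already
// recorded one at least as high, and the loop exits.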
let mut max = self.max.load(Acquire);
while max < idx {
match self.max.compare_exchange(max, idx, AcqRel, Acquire) {
Ok(_) => break,
Err(actual) => max = actual,
}
}
test_println!("-> highest index={}, prev={}", std::cmp::max(max, idx), max);
unsafe {
// Safety: we just put it there!
&*ptr
}
.get_ref()
});
(tid, shard)
}
pub(crate) fn iter_mut(&mut self) -> IterMut<'_, T, C> {
test_println!("Array::iter_mut");
let max = self.max.load(Acquire);
test_println!("-> highest index={}", max);
IterMut(self.shards[0..=max].iter_mut())
}
}
impl<T, C: cfg::Config> Drop for Array<T, C> {
fn drop(&mut self) {
// XXX(eliza): this could be `with_mut` if we wanted to impl a wrapper for std atomics to change `get_mut` to `with_mut`...
let max = self.max.load(Acquire);
for shard in &self.shards[0..=max] {
// XXX(eliza): this could be `with_mut` if we wanted to impl a wrapper for std atomics to change `get_mut` to `with_mut`...
let ptr = shard.0.load(Acquire);
if ptr.is_null() {
continue;
}
let shard = unsafe {
// Safety: this is the only place where these boxes are
// deallocated, and we have exclusive access to the shard array,
// because...we are dropping it...
Box::from_raw(ptr)
};
drop(shard)
}
}
}
impl<T: fmt::Debug, C: cfg::Config> fmt::Debug for Array<T, C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let max = self.max.load(Acquire);
let mut set = f.debug_map();
for shard in &self.shards[0..=max] {
let ptr = shard.0.load(Acquire);
if let Some(shard) = ptr::NonNull::new(ptr) {
set.entry(&format_args!("{:p}", ptr), unsafe { shard.as_ref() });
} else {
set.entry(&format_args!("{:p}", ptr), &());
}
}
set.finish()
}
}
// === impl Ptr ===
impl<T, C: cfg::Config> Ptr<T, C> {
#[inline]
fn null() -> Self {
Self(AtomicPtr::new(ptr::null_mut()))
}
#[inline]
fn load(&self, order: Ordering) -> Option<&Shard<T, C>> {
let ptr = self.0.load(order);
test_println!("---> loaded={:p} (order={:?})", ptr, order);
if ptr.is_null() {
test_println!("---> null");
return None;
}
let track = unsafe {
// Safety: The returned reference will have the same lifetime as the
// reference to the shard pointer, which (morally, if not actually)
// owns the shard. The shard is only deallocated when the shard
// array is dropped, and it won't be dropped while this pointer is
// borrowed --- and the returned reference has the same lifetime.
//
// We know that the pointer is not null, because we just
// null-checked it immediately prior.
&*ptr
};
Some(track.get_ref())
}
#[inline]
fn set(&self, new: *mut alloc::Track<Shard<T, C>>) {
self.0
.compare_exchange(ptr::null_mut(), new, AcqRel, Acquire)
.expect("a shard can only be inserted by the thread that owns it, this is a bug!");
}
}
// === Iterators ===
impl<'a, T, C> Iterator for IterMut<'a, T, C>
where
T: 'a,
C: cfg::Config + 'a,
{
type Item = &'a Shard<T, C>;
fn next(&mut self) -> Option<Self::Item> {
test_println!("IterMut::next");
loop {
// Skip over empty indices if they are less than the highest
// allocated shard. Some threads may have accessed the slab
// (generating a thread ID) but never actually inserted data, so
// they may have never allocated a shard.
let next = self.0.next();
test_println!("-> next.is_some={}", next.is_some());
if let Some(shard) = next?.load(Acquire) {
test_println!("-> done");
return Some(shard);
}
}
}
}
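
`Array::current` above allocates a thread's shard on first use and publishes it with a single compare-exchange, so each slot in the array is written exactly once, by the thread that owns it. A condensed sketch of that publish-once idiom with `std` atomics (the `Slot` type and `get_or_init` helper are illustrative stand-ins, not part of the crate):

use std::sync::atomic::{AtomicPtr, Ordering};

struct Slot<T> {
    ptr: AtomicPtr<T>,
}

impl<T> Slot<T> {
    fn new() -> Self {
        Self { ptr: AtomicPtr::new(std::ptr::null_mut()) }
    }

    /// Returns the published value, allocating and publishing it on first use.
    /// Only the owning thread is expected to call this, so the compare-exchange
    /// should never observe a competing non-null value.
    fn get_or_init(&self, init: impl FnOnce() -> T) -> &T {
        let p = self.ptr.load(Ordering::Acquire);
        if !p.is_null() {
            return unsafe { &*p };
        }
        let new = Box::into_raw(Box::new(init()));
        self.ptr
            .compare_exchange(std::ptr::null_mut(), new, Ordering::AcqRel, Ordering::Acquire)
            .expect("slot initialized by a thread that does not own it");
        unsafe { &*new }
    }
}

impl<T> Drop for Slot<T> {
    fn drop(&mut self) {
        // We have exclusive access here, so a plain load-and-free is enough.
        let p = *self.ptr.get_mut();
        if !p.is_null() {
            drop(unsafe { Box::from_raw(p) });
        }
    }
}

fn main() {
    let slot = Slot::new();
    assert_eq!(*slot.get_or_init(|| 42u32), 42);
}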

140
vendor/sharded-slab/src/sync.rs vendored Normal file
View File

@@ -0,0 +1,140 @@
pub(crate) use self::inner::*;
#[cfg(all(loom, any(test, feature = "loom")))]
mod inner {
pub(crate) mod atomic {
pub use loom::sync::atomic::*;
pub use std::sync::atomic::Ordering;
}
pub(crate) use loom::{
cell::UnsafeCell, hint, lazy_static, sync::Mutex, thread::yield_now, thread_local,
};
pub(crate) mod alloc {
#![allow(dead_code)]
use loom::alloc;
use std::fmt;
/// Track allocations, detecting leaks
///
/// This is a version of `loom::alloc::Track` that adds a missing
/// `Default` impl.
pub struct Track<T>(alloc::Track<T>);
impl<T> Track<T> {
/// Track a value for leaks
#[inline(always)]
pub fn new(value: T) -> Track<T> {
Track(alloc::Track::new(value))
}
/// Get a reference to the value
#[inline(always)]
pub fn get_ref(&self) -> &T {
self.0.get_ref()
}
/// Get a mutable reference to the value
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
self.0.get_mut()
}
/// Stop tracking the value for leaks
#[inline(always)]
pub fn into_inner(self) -> T {
self.0.into_inner()
}
}
impl<T: fmt::Debug> fmt::Debug for Track<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}
impl<T: Default> Default for Track<T> {
fn default() -> Self {
Self::new(T::default())
}
}
}
}
#[cfg(not(all(loom, any(feature = "loom", test))))]
mod inner {
#![allow(dead_code)]
pub(crate) use lazy_static::lazy_static;
pub(crate) use std::{
sync::{atomic, Mutex},
thread::yield_now,
thread_local,
};
pub(crate) mod hint {
#[inline(always)]
pub(crate) fn spin_loop() {
// MSRV: std::hint::spin_loop() stabilized in 1.49.0
#[allow(deprecated)]
super::atomic::spin_loop_hint()
}
}
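/// Mirrors the closure-based `with`/`with_mut` API of `loom::cell::UnsafeCell`
/// on top of `std::cell::UnsafeCell`, so the rest of the crate can use one
/// access style whether or not it is compiled under `cfg(loom)`.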
#[derive(Debug)]
pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>);
impl<T> UnsafeCell<T> {
pub fn new(data: T) -> UnsafeCell<T> {
UnsafeCell(std::cell::UnsafeCell::new(data))
}
#[inline(always)]
pub fn with<F, R>(&self, f: F) -> R
where
F: FnOnce(*const T) -> R,
{
f(self.0.get())
}
#[inline(always)]
pub fn with_mut<F, R>(&self, f: F) -> R
where
F: FnOnce(*mut T) -> R,
{
f(self.0.get())
}
}
pub(crate) mod alloc {
/// Track allocations, detecting leaks
#[derive(Debug, Default)]
pub struct Track<T> {
value: T,
}
impl<T> Track<T> {
/// Track a value for leaks
#[inline(always)]
pub fn new(value: T) -> Track<T> {
Track { value }
}
/// Get a reference to the value
#[inline(always)]
pub fn get_ref(&self) -> &T {
&self.value
}
/// Get a mutable reference to the value
#[inline(always)]
pub fn get_mut(&mut self) -> &mut T {
&mut self.value
}
/// Stop tracking the value for leaks
#[inline(always)]
pub fn into_inner(self) -> T {
self.value
}
}
}
}
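
The two `inner` modules above pick between `loom`'s model-checked primitives and the real `std` ones at compile time, so the rest of the crate never names either directly. The same facade takes only a few lines to reproduce elsewhere; a sketch (assuming the crate depends on `loom` only when built with `--cfg loom`):

// The rest of the crate imports from this module and never mentions `loom`
// or `std::sync` directly.
#[cfg(loom)]
pub(crate) use loom::sync::atomic::AtomicUsize;
#[cfg(not(loom))]
pub(crate) use std::sync::atomic::AtomicUsize;
// Loom's atomics take the ordinary `std` `Ordering`, so it can be re-exported
// unconditionally, as the module above does.
pub(crate) use std::sync::atomic::Ordering;

pub(crate) fn bump(counter: &AtomicUsize) -> usize {
    // Identical call site under both configurations: loom explores the
    // possible interleavings, the std build just runs it.
    counter.fetch_add(1, Ordering::SeqCst)
}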

78
vendor/sharded-slab/src/tests/custom_config.rs vendored Normal file
View File

@@ -0,0 +1,78 @@
//! Ensures that a custom config behaves as the default config, until limits are reached.
//! Prevents regression after #80.
use crate::{cfg::CfgPrivate, Config, Slab};
struct CustomConfig;
#[cfg(target_pointer_width = "64")]
impl Config for CustomConfig {
const INITIAL_PAGE_SIZE: usize = 32;
const MAX_PAGES: usize = 15;
const MAX_THREADS: usize = 256;
const RESERVED_BITS: usize = 24;
}
#[cfg(not(target_pointer_width = "64"))]
impl Config for CustomConfig {
const INITIAL_PAGE_SIZE: usize = 16;
const MAX_PAGES: usize = 6;
const MAX_THREADS: usize = 128;
const RESERVED_BITS: usize = 12;
}
// We should repeat actions several times to detect invalid lifecycle changes.
const ITERS: u64 = 5;
#[track_caller]
fn slab_eq(mut lhs: Slab<u64, impl Config>, mut rhs: Slab<u64, impl Config>) {
let mut lhs_vec = lhs.unique_iter().collect::<Vec<_>>();
lhs_vec.sort_unstable();
let mut rhs_vec = rhs.unique_iter().collect::<Vec<_>>();
rhs_vec.sort_unstable();
assert_eq!(lhs_vec, rhs_vec);
}
/// Calls `insert(); remove()` multiple times to detect invalid releasing.
/// Initially, it revealed bugs in the `Slot::release_with()` implementation.
#[test]
fn insert_remove() {
eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
let default_slab = Slab::<u64, _>::new();
let custom_slab = Slab::<u64, _>::new_with_config::<CustomConfig>();
for i in 0..=ITERS {
let idx = default_slab.insert(i).unwrap();
assert!(default_slab.remove(idx));
let idx = custom_slab.insert(i).unwrap();
assert!(custom_slab.remove(idx));
}
slab_eq(custom_slab, default_slab);
}
/// Calls `get()` multiple times to detect invalid ref counting.
/// Initially, it revealed bugs in the `Slot::get()` implementation.
#[test]
fn double_get() {
eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
let default_slab = Slab::<u64, _>::new();
let custom_slab = Slab::<u64, _>::new_with_config::<CustomConfig>();
for i in 0..=ITERS {
let idx = default_slab.insert(i).unwrap();
assert!(default_slab.get(idx).is_some());
assert!(default_slab.get(idx).is_some());
assert!(default_slab.remove(idx));
let idx = custom_slab.insert(i).unwrap();
assert!(custom_slab.get(idx).is_some());
assert!(custom_slab.get(idx).is_some());
assert!(custom_slab.remove(idx));
}
slab_eq(custom_slab, default_slab);
}
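
On 64-bit targets the `CustomConfig` above reserves 24 high-order bits, and the keys the slab hands out never set them (the property tests later in this diff check exactly that via `used_bits`), so a caller is free to pack its own tag into the reserved bits as long as it masks them off again before indexing. A hedged sketch of that idea; `TAG_BITS` and the helper functions are illustrative, not crate API:

// Assumes RESERVED_BITS = 24, as in the 64-bit CustomConfig above.
const TAG_BITS: u32 = 24;
const KEY_MASK: usize = usize::MAX >> TAG_BITS;

fn with_tag(key: usize, tag: usize) -> usize {
    debug_assert_eq!(key & !KEY_MASK, 0, "key must fit in the non-reserved bits");
    debug_assert!(tag < (1 << TAG_BITS), "tag must fit in the reserved bits");
    key | (tag << (usize::BITS - TAG_BITS))
}

fn split_tag(tagged: usize) -> (usize, usize) {
    (tagged & KEY_MASK, tagged >> (usize::BITS - TAG_BITS))
}

fn main() {
    let tagged = with_tag(0x2a, 5);
    assert_eq!(split_tag(tagged), (0x2a, 5));
}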

641
vendor/sharded-slab/src/tests/loom_pool.rs vendored Normal file
View File

@@ -0,0 +1,641 @@
use super::util::*;
use crate::{clear::Clear, sync::alloc, Pack, Pool};
use loom::{
sync::{
atomic::{AtomicBool, Ordering},
Condvar, Mutex,
},
thread,
};
use std::sync::Arc;
#[derive(Default, Debug)]
struct State {
is_dropped: AtomicBool,
is_cleared: AtomicBool,
id: usize,
}
impl State {
fn assert_clear(&self) {
assert!(!self.is_dropped.load(Ordering::SeqCst));
assert!(self.is_cleared.load(Ordering::SeqCst));
}
fn assert_not_clear(&self) {
assert!(!self.is_dropped.load(Ordering::SeqCst));
assert!(!self.is_cleared.load(Ordering::SeqCst));
}
}
impl PartialEq for State {
fn eq(&self, other: &State) -> bool {
self.id.eq(&other.id)
}
}
#[derive(Default, Debug)]
struct DontDropMe(Arc<State>);
impl PartialEq for DontDropMe {
fn eq(&self, other: &DontDropMe) -> bool {
self.0.eq(&other.0)
}
}
impl DontDropMe {
fn new(id: usize) -> (Arc<State>, Self) {
let state = Arc::new(State {
is_dropped: AtomicBool::new(false),
is_cleared: AtomicBool::new(false),
id,
});
(state.clone(), Self(state))
}
}
impl Drop for DontDropMe {
fn drop(&mut self) {
test_println!("-> DontDropMe drop: dropping data {:?}", self.0.id);
self.0.is_dropped.store(true, Ordering::SeqCst)
}
}
impl Clear for DontDropMe {
fn clear(&mut self) {
test_println!("-> DontDropMe clear: clearing data {:?}", self.0.id);
self.0.is_cleared.store(true, Ordering::SeqCst);
}
}
#[test]
fn dont_drop() {
run_model("dont_drop", || {
let pool: Pool<DontDropMe> = Pool::new();
let (item1, value) = DontDropMe::new(1);
test_println!("-> dont_drop: Inserting into pool {}", item1.id);
let idx = pool
.create_with(move |item| *item = value)
.expect("create_with");
item1.assert_not_clear();
test_println!("-> dont_drop: clearing idx: {}", idx);
pool.clear(idx);
item1.assert_clear();
});
}
#[test]
fn concurrent_create_with_clear() {
run_model("concurrent_create_with_clear", || {
let pool: Arc<Pool<DontDropMe>> = Arc::new(Pool::new());
let pair = Arc::new((Mutex::new(None), Condvar::new()));
let (item1, value) = DontDropMe::new(1);
let idx1 = pool
.create_with(move |item| *item = value)
.expect("create_with");
let p = pool.clone();
let pair2 = pair.clone();
let test_value = item1.clone();
let t1 = thread::spawn(move || {
let (lock, cvar) = &*pair2;
test_println!("-> making get request");
assert_eq!(p.get(idx1).unwrap().0.id, test_value.id);
let mut next = lock.lock().unwrap();
*next = Some(());
cvar.notify_one();
});
test_println!("-> making get request");
let guard = pool.get(idx1);
let (lock, cvar) = &*pair;
let mut next = lock.lock().unwrap();
// wait until we have a guard on the other thread.
while next.is_none() {
next = cvar.wait(next).unwrap();
}
// the item should be marked (clear returns true)...
assert!(pool.clear(idx1));
// ...but the value shouldn't be removed yet.
item1.assert_not_clear();
t1.join().expect("thread 1 unable to join");
drop(guard);
item1.assert_clear();
})
}
#[test]
fn racy_clear() {
run_model("racy_clear", || {
let pool = Arc::new(Pool::new());
let (item, value) = DontDropMe::new(1);
let idx = pool
.create_with(move |item| *item = value)
.expect("create_with");
assert_eq!(pool.get(idx).unwrap().0.id, item.id);
let p = pool.clone();
let t2 = thread::spawn(move || p.clear(idx));
let r1 = pool.clear(idx);
let r2 = t2.join().expect("thread 2 should not panic");
test_println!("r1: {}, r2: {}", r1, r2);
assert!(
!(r1 && r2),
"Both threads should not have cleared the value"
);
assert!(r1 || r2, "One thread should have removed the value");
assert!(pool.get(idx).is_none());
item.assert_clear();
})
}
#[test]
fn clear_local_and_reuse() {
run_model("take_remote_and_reuse", || {
let pool = Arc::new(Pool::new_with_config::<TinyConfig>());
let idx1 = pool
.create_with(|item: &mut String| {
item.push_str("hello world");
})
.expect("create_with");
let idx2 = pool
.create_with(|item| item.push_str("foo"))
.expect("create_with");
let idx3 = pool
.create_with(|item| item.push_str("bar"))
.expect("create_with");
assert_eq!(pool.get(idx1).unwrap(), String::from("hello world"));
assert_eq!(pool.get(idx2).unwrap(), String::from("foo"));
assert_eq!(pool.get(idx3).unwrap(), String::from("bar"));
let first = idx1 & (!crate::page::slot::Generation::<TinyConfig>::MASK);
assert!(pool.clear(idx1));
let idx1 = pool
.create_with(move |item| item.push_str("h"))
.expect("create_with");
let second = idx1 & (!crate::page::slot::Generation::<TinyConfig>::MASK);
assert_eq!(first, second);
assert!(pool.get(idx1).unwrap().capacity() >= 11);
})
}
#[test]
fn create_mut_guard_prevents_access() {
run_model("create_mut_guard_prevents_access", || {
let pool = Arc::new(Pool::<String>::new());
let guard = pool.create().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
thread::spawn(move || {
assert!(pool2.get(key).is_none());
})
.join()
.unwrap();
});
}
#[test]
fn create_mut_guard() {
run_model("create_mut_guard", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.create().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
guard.push_str("Hello world");
drop(guard);
t1.join().unwrap();
});
}
#[test]
fn create_mut_guard_2() {
run_model("create_mut_guard_2", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.create().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
guard.push_str("Hello world");
let t2 = thread::spawn(move || {
test_dbg!(pool3.get(key));
});
drop(guard);
t1.join().unwrap();
t2.join().unwrap();
});
}
#[test]
fn create_mut_guard_downgrade() {
run_model("create_mut_guard_downgrade", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.create().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
guard.push_str("Hello world");
let guard = guard.downgrade();
let t2 = thread::spawn(move || {
test_dbg!(pool3.get(key));
});
t1.join().unwrap();
t2.join().unwrap();
assert_eq!(guard, "Hello world".to_owned());
});
}
#[test]
fn create_mut_guard_downgrade_clear() {
run_model("create_mut_guard_downgrade_clear", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.create().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
guard.push_str("Hello world");
let guard = guard.downgrade();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
let t2 = thread::spawn(move || {
test_dbg!(pool3.clear(key));
});
assert_eq!(guard, "Hello world".to_owned());
drop(guard);
t1.join().unwrap();
t2.join().unwrap();
assert!(pool.get(key).is_none());
});
}
#[test]
fn create_mut_downgrade_during_clear() {
run_model("create_mut_downgrade_during_clear", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.create().unwrap();
let key: usize = guard.key();
guard.push_str("Hello world");
let pool2 = pool.clone();
let guard = guard.downgrade();
let t1 = thread::spawn(move || {
test_dbg!(pool2.clear(key));
});
t1.join().unwrap();
assert_eq!(guard, "Hello world".to_owned());
drop(guard);
assert!(pool.get(key).is_none());
});
}
#[test]
fn ownedref_send_out_of_local() {
run_model("ownedref_send_out_of_local", || {
let pool = Arc::new(Pool::<alloc::Track<String>>::new());
let key1 = pool
.create_with(|item| item.get_mut().push_str("hello"))
.expect("create item 1");
let key2 = pool
.create_with(|item| item.get_mut().push_str("goodbye"))
.expect("create item 2");
let item1 = pool.clone().get_owned(key1).expect("get key1");
let item2 = pool.clone().get_owned(key2).expect("get key2");
let pool2 = pool.clone();
test_dbg!(pool.clear(key1));
let t1 = thread::spawn(move || {
assert_eq!(item1.get_ref(), &String::from("hello"));
drop(item1);
});
let t2 = thread::spawn(move || {
assert_eq!(item2.get_ref(), &String::from("goodbye"));
test_dbg!(pool2.clear(key2));
drop(item2);
});
t1.join().unwrap();
t2.join().unwrap();
assert!(pool.get(key1).is_none());
assert!(pool.get(key2).is_none());
});
}
#[test]
fn ownedrefs_outlive_pool() {
run_model("ownedrefs_outlive_pool", || {
let pool = Arc::new(Pool::<alloc::Track<String>>::new());
let key1 = pool
.create_with(|item| item.get_mut().push_str("hello"))
.expect("create item 1");
let key2 = pool
.create_with(|item| item.get_mut().push_str("goodbye"))
.expect("create item 2");
let item1_1 = pool.clone().get_owned(key1).expect("get key1");
let item1_2 = pool.clone().get_owned(key1).expect("get key1 again");
let item2 = pool.clone().get_owned(key2).expect("get key2");
drop(pool);
let t1 = thread::spawn(move || {
assert_eq!(item1_1.get_ref(), &String::from("hello"));
drop(item1_1);
});
let t2 = thread::spawn(move || {
assert_eq!(item2.get_ref(), &String::from("goodbye"));
drop(item2);
});
t1.join().unwrap();
t2.join().unwrap();
assert_eq!(item1_2.get_ref(), &String::from("hello"));
});
}
#[test]
fn ownedref_ping_pong() {
run_model("ownedref_ping_pong", || {
let pool = Arc::new(Pool::<alloc::Track<String>>::new());
let key1 = pool
.create_with(|item| item.get_mut().push_str("hello"))
.expect("create item 1");
let key2 = pool
.create_with(|item| item.get_mut().push_str("world"))
.expect("create item 2");
let item1 = pool.clone().get_owned(key1).expect("get key1");
let pool2 = pool.clone();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
assert_eq!(item1.get_ref(), &String::from("hello"));
pool2.clear(key1);
item1
});
let t2 = thread::spawn(move || {
let item2 = pool3.clone().get_owned(key2).unwrap();
assert_eq!(item2.get_ref(), &String::from("world"));
pool3.clear(key1);
item2
});
let item1 = t1.join().unwrap();
let item2 = t2.join().unwrap();
assert_eq!(item1.get_ref(), &String::from("hello"));
assert_eq!(item2.get_ref(), &String::from("world"));
});
}
#[test]
fn ownedref_drop_from_other_threads() {
run_model("ownedref_drop_from_other_threads", || {
let pool = Arc::new(Pool::<alloc::Track<String>>::new());
let key1 = pool
.create_with(|item| item.get_mut().push_str("hello"))
.expect("create item 1");
let item1 = pool.clone().get_owned(key1).expect("get key1");
let pool2 = pool.clone();
let t1 = thread::spawn(move || {
let pool = pool2.clone();
let key2 = pool
.create_with(|item| item.get_mut().push_str("goodbye"))
.expect("create item 1");
let item2 = pool.clone().get_owned(key2).expect("get key1");
let t2 = thread::spawn(move || {
assert_eq!(item2.get_ref(), &String::from("goodbye"));
test_dbg!(pool2.clear(key1));
drop(item2)
});
assert_eq!(item1.get_ref(), &String::from("hello"));
test_dbg!(pool.clear(key2));
drop(item1);
(t2, key2)
});
let (t2, key2) = t1.join().unwrap();
test_dbg!(pool.get(key1));
test_dbg!(pool.get(key2));
t2.join().unwrap();
assert!(pool.get(key1).is_none());
assert!(pool.get(key2).is_none());
});
}
#[test]
fn create_owned_mut_guard() {
run_model("create_owned_mut_guard", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
guard.push_str("Hello world");
drop(guard);
t1.join().unwrap();
});
}
#[test]
fn create_owned_mut_guard_send() {
run_model("create_owned_mut_guard", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
let t2 = thread::spawn(move || {
guard.push_str("Hello world");
drop(guard);
});
t1.join().unwrap();
t2.join().unwrap();
});
}
#[test]
fn create_owned_mut_guard_2() {
run_model("create_owned_mut_guard_2", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
guard.push_str("Hello world");
let t2 = thread::spawn(move || {
test_dbg!(pool3.get(key));
});
drop(guard);
t1.join().unwrap();
t2.join().unwrap();
});
}
#[test]
fn create_owned_mut_guard_downgrade() {
run_model("create_owned_mut_guard_downgrade", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
guard.push_str("Hello world");
let key: usize = guard.key();
let pool2 = pool.clone();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
let guard = guard.downgrade();
let t2 = thread::spawn(move || {
assert_eq!(pool3.get(key).unwrap(), "Hello world".to_owned());
});
t1.join().unwrap();
t2.join().unwrap();
assert_eq!(guard, "Hello world".to_owned());
});
}
#[test]
fn create_owned_mut_guard_downgrade_then_clear() {
run_model("create_owned_mut_guard_downgrade_then_clear", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
let key: usize = guard.key();
let pool2 = pool.clone();
guard.push_str("Hello world");
let guard = guard.downgrade();
let pool3 = pool.clone();
let t1 = thread::spawn(move || {
test_dbg!(pool2.get(key));
});
let t2 = thread::spawn(move || {
test_dbg!(pool3.clear(key));
});
assert_eq!(guard, "Hello world".to_owned());
drop(guard);
t1.join().unwrap();
t2.join().unwrap();
assert!(pool.get(key).is_none());
});
}
#[test]
fn create_owned_mut_downgrade_during_clear() {
run_model("create_owned_mut_downgrade_during_clear", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
let key: usize = guard.key();
guard.push_str("Hello world");
let pool2 = pool.clone();
let guard = guard.downgrade();
let t1 = thread::spawn(move || {
test_dbg!(pool2.clear(key));
});
t1.join().unwrap();
assert_eq!(guard, "Hello world".to_owned());
drop(guard);
assert!(pool.get(key).is_none());
});
}
#[test]
fn create_mut_downgrade_during_clear_by_other_thead() {
run_model("create_mut_downgrade_during_clear_by_other_thread", || {
let pool = Arc::new(Pool::<String>::new());
let mut guard = pool.clone().create_owned().unwrap();
let key: usize = guard.key();
guard.push_str("Hello world");
let pool2 = pool.clone();
let t1 = thread::spawn(move || {
let guard = guard.downgrade();
assert_eq!(guard, "Hello world".to_owned());
drop(guard);
});
let t2 = thread::spawn(move || {
test_dbg!(pool2.clear(key));
});
test_dbg!(pool.get(key));
t1.join().unwrap();
t2.join().unwrap();
});
}
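
The models above pin down `Pool`'s clearing semantics: `clear()` marks the slot right away, but the stored value is only cleared, and the slot only reusable, once every outstanding guard has gone away. A short non-loom sketch of that lifecycle against the public API, matching the behavior the tests above exercise:

use sharded_slab::Pool;

fn main() {
    let pool: Pool<String> = Pool::new();

    // Create an entry and keep a read guard alive.
    let key = pool
        .create_with(|s| s.push_str("hello"))
        .expect("pool should not be full");
    let guard = pool.get(key).expect("entry was just created");

    // `clear` succeeds immediately (the slot is marked)...
    assert!(pool.clear(key));
    // ...but the value survives until the last guard is dropped.
    assert_eq!(*guard, "hello");
    drop(guard);

    // Now the entry is gone and the stale key no longer resolves.
    assert!(pool.get(key).is_none());
}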

760
vendor/sharded-slab/src/tests/loom_slab.rs vendored Normal file
View File

@@ -0,0 +1,760 @@
use super::util::*;
use crate::sync::alloc;
use crate::Slab;
use loom::sync::{Condvar, Mutex};
use loom::thread;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
#[test]
fn take_local() {
run_model("take_local", || {
let slab = Arc::new(Slab::new());
let s = slab.clone();
let t1 = thread::spawn(move || {
let idx = s.insert(1).expect("insert");
assert_eq!(s.get(idx).unwrap(), 1);
assert_eq!(s.take(idx), Some(1));
assert!(s.get(idx).is_none());
let idx = s.insert(2).expect("insert");
assert_eq!(s.get(idx).unwrap(), 2);
assert_eq!(s.take(idx), Some(2));
assert!(s.get(idx).is_none());
});
let s = slab.clone();
let t2 = thread::spawn(move || {
let idx = s.insert(3).expect("insert");
assert_eq!(s.get(idx).unwrap(), 3);
assert_eq!(s.take(idx), Some(3));
assert!(s.get(idx).is_none());
let idx = s.insert(4).expect("insert");
assert_eq!(s.get(idx).unwrap(), 4);
assert_eq!(s.take(idx), Some(4));
assert!(s.get(idx).is_none());
});
let s = slab;
let idx1 = s.insert(5).expect("insert");
assert_eq!(s.get(idx1).unwrap(), 5);
let idx2 = s.insert(6).expect("insert");
assert_eq!(s.get(idx2).unwrap(), 6);
assert_eq!(s.take(idx1), Some(5));
assert!(s.get(idx1).is_none());
assert_eq!(s.get(idx2).unwrap(), 6);
assert_eq!(s.take(idx2), Some(6));
assert!(s.get(idx2).is_none());
t1.join().expect("thread 1 should not panic");
t2.join().expect("thread 2 should not panic");
});
}
#[test]
fn take_remote() {
run_model("take_remote", || {
let slab = Arc::new(Slab::new());
let idx1 = slab.insert(1).expect("insert");
assert_eq!(slab.get(idx1).unwrap(), 1);
let idx2 = slab.insert(2).expect("insert");
assert_eq!(slab.get(idx2).unwrap(), 2);
let idx3 = slab.insert(3).expect("insert");
assert_eq!(slab.get(idx3).unwrap(), 3);
let s = slab.clone();
let t1 = thread::spawn(move || {
assert_eq!(s.get(idx2).unwrap(), 2);
assert_eq!(s.take(idx2), Some(2));
});
let s = slab.clone();
let t2 = thread::spawn(move || {
assert_eq!(s.get(idx3).unwrap(), 3);
assert_eq!(s.take(idx3), Some(3));
});
t1.join().expect("thread 1 should not panic");
t2.join().expect("thread 2 should not panic");
assert_eq!(slab.get(idx1).unwrap(), 1);
assert!(slab.get(idx2).is_none());
assert!(slab.get(idx3).is_none());
});
}
#[test]
fn racy_take() {
run_model("racy_take", || {
let slab = Arc::new(Slab::new());
let idx = slab.insert(1).expect("insert");
assert_eq!(slab.get(idx).unwrap(), 1);
let s1 = slab.clone();
let s2 = slab.clone();
let t1 = thread::spawn(move || s1.take(idx));
let t2 = thread::spawn(move || s2.take(idx));
let r1 = t1.join().expect("thread 1 should not panic");
let r2 = t2.join().expect("thread 2 should not panic");
assert!(
r1.is_none() || r2.is_none(),
"both threads should not have removed the value"
);
assert_eq!(
r1.or(r2),
Some(1),
"one thread should have removed the value"
);
assert!(slab.get(idx).is_none());
});
}
#[test]
fn racy_take_local() {
run_model("racy_take_local", || {
let slab = Arc::new(Slab::new());
let idx = slab.insert(1).expect("insert");
assert_eq!(slab.get(idx).unwrap(), 1);
let s = slab.clone();
let t2 = thread::spawn(move || s.take(idx));
let r1 = slab.take(idx);
let r2 = t2.join().expect("thread 2 should not panic");
assert!(
r1.is_none() || r2.is_none(),
"both threads should not have removed the value"
);
assert!(
r1.or(r2).is_some(),
"one thread should have removed the value"
);
assert!(slab.get(idx).is_none());
});
}
#[test]
fn concurrent_insert_take() {
run_model("concurrent_insert_remove", || {
let slab = Arc::new(Slab::new());
let pair = Arc::new((Mutex::new(None), Condvar::new()));
let slab2 = slab.clone();
let pair2 = pair.clone();
let remover = thread::spawn(move || {
let (lock, cvar) = &*pair2;
for i in 0..2 {
test_println!("--- remover i={} ---", i);
let mut next = lock.lock().unwrap();
while next.is_none() {
next = cvar.wait(next).unwrap();
}
let key = next.take().unwrap();
assert_eq!(slab2.take(key), Some(i));
cvar.notify_one();
}
});
let (lock, cvar) = &*pair;
for i in 0..2 {
test_println!("--- inserter i={} ---", i);
let key = slab.insert(i).expect("insert");
let mut next = lock.lock().unwrap();
*next = Some(key);
cvar.notify_one();
// Wait for the item to be removed.
while next.is_some() {
next = cvar.wait(next).unwrap();
}
assert!(slab.get(key).is_none());
}
remover.join().unwrap();
})
}
#[test]
fn take_remote_and_reuse() {
run_model("take_remote_and_reuse", || {
let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
let idx1 = slab.insert(1).expect("insert");
let idx2 = slab.insert(2).expect("insert");
let idx3 = slab.insert(3).expect("insert");
let idx4 = slab.insert(4).expect("insert");
assert_eq!(slab.get(idx1).unwrap(), 1, "slab: {:#?}", slab);
assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
assert_eq!(slab.get(idx4).unwrap(), 4, "slab: {:#?}", slab);
let s = slab.clone();
let t1 = thread::spawn(move || {
assert_eq!(s.take(idx1), Some(1), "slab: {:#?}", s);
});
let idx1 = slab.insert(5).expect("insert");
t1.join().expect("thread 1 should not panic");
assert_eq!(slab.get(idx1).unwrap(), 5, "slab: {:#?}", slab);
assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
assert_eq!(slab.get(idx4).unwrap(), 4, "slab: {:#?}", slab);
});
}
fn store_when_free<C: crate::Config>(slab: &Arc<Slab<usize, C>>, t: usize) -> usize {
loop {
test_println!("try store {:?}", t);
if let Some(key) = slab.insert(t) {
test_println!("inserted at {:#x}", key);
return key;
}
test_println!("retrying; slab is full...");
thread::yield_now();
}
}
struct TinierConfig;
impl crate::Config for TinierConfig {
const INITIAL_PAGE_SIZE: usize = 2;
const MAX_PAGES: usize = 1;
}
#[test]
fn concurrent_remove_remote_and_reuse() {
let mut model = loom::model::Builder::new();
model.max_branches = 100000;
run_builder("concurrent_remove_remote_and_reuse", model, || {
let slab = Arc::new(Slab::new_with_config::<TinierConfig>());
let idx1 = slab.insert(1).unwrap();
let idx2 = slab.insert(2).unwrap();
assert_eq!(slab.get(idx1).unwrap(), 1, "slab: {:#?}", slab);
assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
let s = slab.clone();
let s2 = slab.clone();
let t1 = thread::spawn(move || {
s.take(idx1).expect("must remove");
});
let t2 = thread::spawn(move || {
s2.take(idx2).expect("must remove");
});
let idx3 = store_when_free(&slab, 3);
t1.join().expect("thread 1 should not panic");
t2.join().expect("thread 2 should not panic");
assert!(slab.get(idx1).is_none(), "slab: {:#?}", slab);
assert!(slab.get(idx2).is_none(), "slab: {:#?}", slab);
assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
});
}
struct SetDropped {
val: usize,
dropped: std::sync::Arc<AtomicBool>,
}
struct AssertDropped {
dropped: std::sync::Arc<AtomicBool>,
}
impl AssertDropped {
fn new(val: usize) -> (Self, SetDropped) {
let dropped = std::sync::Arc::new(AtomicBool::new(false));
let val = SetDropped {
val,
dropped: dropped.clone(),
};
(Self { dropped }, val)
}
fn assert_dropped(&self) {
assert!(
self.dropped.load(Ordering::SeqCst),
"value should have been dropped!"
);
}
}
impl Drop for SetDropped {
fn drop(&mut self) {
self.dropped.store(true, Ordering::SeqCst);
}
}
#[test]
fn remove_local() {
run_model("remove_local", || {
let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
let slab2 = slab.clone();
let (dropped, item) = AssertDropped::new(1);
let idx = slab.insert(item).expect("insert");
let guard = slab.get(idx).unwrap();
assert!(slab.remove(idx));
let t1 = thread::spawn(move || {
let g = slab2.get(idx);
drop(g);
});
assert!(slab.get(idx).is_none());
t1.join().expect("thread 1 should not panic");
drop(guard);
assert!(slab.get(idx).is_none());
dropped.assert_dropped();
})
}
#[test]
fn remove_remote() {
run_model("remove_remote", || {
let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
let slab2 = slab.clone();
let (dropped, item) = AssertDropped::new(1);
let idx = slab.insert(item).expect("insert");
assert!(slab.remove(idx));
let t1 = thread::spawn(move || {
let g = slab2.get(idx);
drop(g);
});
t1.join().expect("thread 1 should not panic");
assert!(slab.get(idx).is_none());
dropped.assert_dropped();
});
}
#[test]
fn remove_remote_during_insert() {
run_model("remove_remote_during_insert", || {
let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
let slab2 = slab.clone();
let (dropped, item) = AssertDropped::new(1);
let idx = slab.insert(item).expect("insert");
let t1 = thread::spawn(move || {
let g = slab2.get(idx);
assert_ne!(g.as_ref().map(|v| v.val), Some(2));
drop(g);
});
let (_, item) = AssertDropped::new(2);
assert!(slab.remove(idx));
let idx2 = slab.insert(item).expect("insert");
t1.join().expect("thread 1 should not panic");
assert!(slab.get(idx).is_none());
assert!(slab.get(idx2).is_some());
dropped.assert_dropped();
});
}
#[test]
fn unique_iter() {
run_model("unique_iter", || {
let mut slab = Arc::new(Slab::new());
let s = slab.clone();
let t1 = thread::spawn(move || {
s.insert(1).expect("insert");
s.insert(2).expect("insert");
});
let s = slab.clone();
let t2 = thread::spawn(move || {
s.insert(3).expect("insert");
s.insert(4).expect("insert");
});
t1.join().expect("thread 1 should not panic");
t2.join().expect("thread 2 should not panic");
let slab = Arc::get_mut(&mut slab).expect("other arcs should be dropped");
let items: Vec<_> = slab.unique_iter().map(|&i| i).collect();
assert!(items.contains(&1), "items: {:?}", items);
assert!(items.contains(&2), "items: {:?}", items);
assert!(items.contains(&3), "items: {:?}", items);
assert!(items.contains(&4), "items: {:?}", items);
});
}
#[test]
fn custom_page_sz() {
let mut model = loom::model::Builder::new();
model.max_branches = 100000;
model.check(|| {
let slab = Slab::<usize>::new_with_config::<TinyConfig>();
for i in 0..1024usize {
test_println!("{}", i);
let k = slab.insert(i).expect("insert");
let v = slab.get(k).expect("get");
assert_eq!(v, i, "slab: {:#?}", slab);
}
});
}
#[test]
fn max_refs() {
struct LargeGenConfig;
// Configure the slab with a very large number of bits for the generation
// counter. That way, there will be very few bits for the ref count left
// over, and this test won't have to malloc millions of references.
impl crate::cfg::Config for LargeGenConfig {
const INITIAL_PAGE_SIZE: usize = 2;
const MAX_THREADS: usize = 32;
const MAX_PAGES: usize = 2;
}
let mut model = loom::model::Builder::new();
model.max_branches = 100000;
model.check(|| {
let slab = Slab::new_with_config::<LargeGenConfig>();
let key = slab.insert("hello world").unwrap();
let max = crate::page::slot::RefCount::<LargeGenConfig>::MAX;
// Create the maximum number of concurrent references to the entry.
let mut refs = (0..max)
.map(|_| slab.get(key).unwrap())
// Store the refs in a vec so they don't get dropped immediately.
.collect::<Vec<_>>();
assert!(slab.get(key).is_none());
// After dropping a ref, we should now be able to access the slot again.
drop(refs.pop());
let ref1 = slab.get(key);
assert!(ref1.is_some());
// Ref1 should max out the number of references again.
assert!(slab.get(key).is_none());
})
}
mod free_list_reuse {
use super::*;
struct TinyConfig;
impl crate::cfg::Config for TinyConfig {
const INITIAL_PAGE_SIZE: usize = 2;
}
#[test]
fn local_remove() {
run_model("free_list_reuse::local_remove", || {
let slab = Slab::new_with_config::<TinyConfig>();
let t1 = slab.insert("hello").expect("insert");
let t2 = slab.insert("world").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t1).1,
0,
"1st slot should be on 0th page"
);
assert_eq!(
crate::page::indices::<TinyConfig>(t2).1,
0,
"2nd slot should be on 0th page"
);
let t3 = slab.insert("earth").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t3).1,
1,
"3rd slot should be on 1st page"
);
slab.remove(t2);
let t4 = slab.insert("universe").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t4).1,
0,
"2nd slot should be reused (0th page)"
);
slab.remove(t1);
let _ = slab.insert("goodbye").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t4).1,
0,
"1st slot should be reused (0th page)"
);
});
}
#[test]
fn local_take() {
run_model("free_list_reuse::local_take", || {
let slab = Slab::new_with_config::<TinyConfig>();
let t1 = slab.insert("hello").expect("insert");
let t2 = slab.insert("world").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t1).1,
0,
"1st slot should be on 0th page"
);
assert_eq!(
crate::page::indices::<TinyConfig>(t2).1,
0,
"2nd slot should be on 0th page"
);
let t3 = slab.insert("earth").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t3).1,
1,
"3rd slot should be on 1st page"
);
assert_eq!(slab.take(t2), Some("world"));
let t4 = slab.insert("universe").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t4).1,
0,
"2nd slot should be reused (0th page)"
);
assert_eq!(slab.take(t1), Some("hello"));
let _ = slab.insert("goodbye").expect("insert");
assert_eq!(
crate::page::indices::<TinyConfig>(t4).1,
0,
"1st slot should be reused (0th page)"
);
});
}
}
#[test]
fn vacant_entry() {
run_model("vacant_entry", || {
let slab = Arc::new(Slab::new());
let entry = slab.vacant_entry().unwrap();
let key: usize = entry.key();
let slab2 = slab.clone();
let t1 = thread::spawn(move || {
test_dbg!(slab2.get(key));
});
entry.insert("hello world");
t1.join().unwrap();
assert_eq!(slab.get(key).expect("get"), "hello world");
});
}
#[test]
fn vacant_entry_2() {
run_model("vacant_entry_2", || {
let slab = Arc::new(Slab::new());
let entry = slab.vacant_entry().unwrap();
let key: usize = entry.key();
let slab2 = slab.clone();
let slab3 = slab.clone();
let t1 = thread::spawn(move || {
test_dbg!(slab2.get(key));
});
entry.insert("hello world");
let t2 = thread::spawn(move || {
test_dbg!(slab3.get(key));
});
t1.join().unwrap();
t2.join().unwrap();
assert_eq!(slab.get(key).expect("get"), "hello world");
});
}
#[test]
fn vacant_entry_remove() {
run_model("vacant_entry_remove", || {
let slab = Arc::new(Slab::new());
let entry = slab.vacant_entry().unwrap();
let key: usize = entry.key();
let slab2 = slab.clone();
let t1 = thread::spawn(move || {
assert!(!slab2.remove(key));
});
t1.join().unwrap();
entry.insert("hello world");
assert_eq!(slab.get(key).expect("get"), "hello world");
});
}
#[test]
fn owned_entry_send_out_of_local() {
run_model("owned_entry_send_out_of_local", || {
let slab = Arc::new(Slab::<alloc::Track<String>>::new());
let key1 = slab
.insert(alloc::Track::new(String::from("hello")))
.expect("insert item 1");
let key2 = slab
.insert(alloc::Track::new(String::from("goodbye")))
.expect("insert item 2");
let item1 = slab.clone().get_owned(key1).expect("get key1");
let item2 = slab.clone().get_owned(key2).expect("get key2");
let slab2 = slab.clone();
test_dbg!(slab.remove(key1));
let t1 = thread::spawn(move || {
assert_eq!(item1.get_ref(), &String::from("hello"));
drop(item1);
});
let t2 = thread::spawn(move || {
assert_eq!(item2.get_ref(), &String::from("goodbye"));
test_dbg!(slab2.remove(key2));
drop(item2);
});
t1.join().unwrap();
t2.join().unwrap();
assert!(slab.get(key1).is_none());
assert!(slab.get(key2).is_none());
});
}
#[test]
fn owned_entrys_outlive_slab() {
run_model("owned_entrys_outlive_slab", || {
let slab = Arc::new(Slab::<alloc::Track<String>>::new());
let key1 = slab
.insert(alloc::Track::new(String::from("hello")))
.expect("insert item 1");
let key2 = slab
.insert(alloc::Track::new(String::from("goodbye")))
.expect("insert item 2");
let item1_1 = slab.clone().get_owned(key1).expect("get key1");
let item1_2 = slab.clone().get_owned(key1).expect("get key1 again");
let item2 = slab.clone().get_owned(key2).expect("get key2");
drop(slab);
let t1 = thread::spawn(move || {
assert_eq!(item1_1.get_ref(), &String::from("hello"));
drop(item1_1);
});
let t2 = thread::spawn(move || {
assert_eq!(item2.get_ref(), &String::from("goodbye"));
drop(item2);
});
t1.join().unwrap();
t2.join().unwrap();
assert_eq!(item1_2.get_ref(), &String::from("hello"));
});
}
#[test]
fn owned_entry_ping_pong() {
run_model("owned_entry_ping_pong", || {
let slab = Arc::new(Slab::<alloc::Track<String>>::new());
let key1 = slab
.insert(alloc::Track::new(String::from("hello")))
.expect("insert item 1");
let key2 = slab
.insert(alloc::Track::new(String::from("world")))
.expect("insert item 2");
let item1 = slab.clone().get_owned(key1).expect("get key1");
let slab2 = slab.clone();
let slab3 = slab.clone();
let t1 = thread::spawn(move || {
assert_eq!(item1.get_ref(), &String::from("hello"));
slab2.remove(key1);
item1
});
let t2 = thread::spawn(move || {
let item2 = slab3.clone().get_owned(key2).unwrap();
assert_eq!(item2.get_ref(), &String::from("world"));
slab3.remove(key1);
item2
});
let item1 = t1.join().unwrap();
let item2 = t2.join().unwrap();
assert_eq!(item1.get_ref(), &String::from("hello"));
assert_eq!(item2.get_ref(), &String::from("world"));
});
}
#[test]
fn owned_entry_drop_from_other_threads() {
run_model("owned_entry_drop_from_other_threads", || {
let slab = Arc::new(Slab::<alloc::Track<String>>::new());
let key1 = slab
.insert(alloc::Track::new(String::from("hello")))
.expect("insert item 1");
let item1 = slab.clone().get_owned(key1).expect("get key1");
let slab2 = slab.clone();
let t1 = thread::spawn(move || {
let slab = slab2.clone();
let key2 = slab
.insert(alloc::Track::new(String::from("goodbye")))
.expect("insert item 1");
let item2 = slab.clone().get_owned(key2).expect("get key1");
let t2 = thread::spawn(move || {
assert_eq!(item2.get_ref(), &String::from("goodbye"));
test_dbg!(slab2.remove(key1));
drop(item2)
});
assert_eq!(item1.get_ref(), &String::from("hello"));
test_dbg!(slab.remove(key2));
drop(item1);
(t2, key2)
});
let (t2, key2) = t1.join().unwrap();
test_dbg!(slab.get(key1));
test_dbg!(slab.get(key2));
t2.join().unwrap();
assert!(slab.get(key1).is_none());
assert!(slab.get(key2).is_none());
});
}
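
The slab models exercise the matching deferred-release rule: `remove()` reports success immediately while outstanding guards keep the value alive, and `take()` hands the value back directly. A brief non-loom sketch of those calls:

use sharded_slab::Slab;

fn main() {
    let slab: Slab<&'static str> = Slab::new();

    let key = slab.insert("hello world").expect("slab should not be full");
    assert_eq!(*slab.get(key).expect("just inserted"), "hello world");

    // Keep a guard alive across the removal.
    let guard = slab.get(key).unwrap();
    assert!(slab.remove(key)); // the slot is marked for removal...
    assert_eq!(*guard, "hello world"); // ...but the guard still reads the value
    drop(guard);
    assert!(slab.get(key).is_none()); // now the stale key no longer resolves

    // `take` removes the entry and returns the value in one step.
    let key = slab.insert("goodbye").unwrap();
    assert_eq!(slab.take(key), Some("goodbye"));
}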

75
vendor/sharded-slab/src/tests/mod.rs vendored Normal file
View File

@@ -0,0 +1,75 @@
mod idx {
use crate::{
cfg,
page::{self, slot},
Pack, Tid,
};
use proptest::prelude::*;
proptest! {
#[test]
#[cfg_attr(loom, ignore)]
fn tid_roundtrips(tid in 0usize..Tid::<cfg::DefaultConfig>::BITS) {
let tid = Tid::<cfg::DefaultConfig>::from_usize(tid);
let packed = tid.pack(0);
assert_eq!(tid, Tid::from_packed(packed));
}
#[test]
#[cfg_attr(loom, ignore)]
fn idx_roundtrips(
tid in 0usize..Tid::<cfg::DefaultConfig>::BITS,
gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS,
addr in 0usize..page::Addr::<cfg::DefaultConfig>::BITS,
) {
let tid = Tid::<cfg::DefaultConfig>::from_usize(tid);
let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
let addr = page::Addr::<cfg::DefaultConfig>::from_usize(addr);
let packed = tid.pack(gen.pack(addr.pack(0)));
assert_eq!(addr, page::Addr::from_packed(packed));
assert_eq!(gen, slot::Generation::from_packed(packed));
assert_eq!(tid, Tid::from_packed(packed));
}
}
}
pub(crate) mod util {
#[cfg(loom)]
use std::sync::atomic::{AtomicUsize, Ordering};
pub(crate) struct TinyConfig;
impl crate::Config for TinyConfig {
const INITIAL_PAGE_SIZE: usize = 4;
}
#[cfg(loom)]
pub(crate) fn run_model(name: &'static str, f: impl Fn() + Sync + Send + 'static) {
run_builder(name, loom::model::Builder::new(), f)
}
#[cfg(loom)]
pub(crate) fn run_builder(
name: &'static str,
builder: loom::model::Builder,
f: impl Fn() + Sync + Send + 'static,
) {
let iters = AtomicUsize::new(1);
builder.check(move || {
test_println!(
"\n------------ running test {}; iteration {} ------------\n",
name,
iters.fetch_add(1, Ordering::SeqCst)
);
f()
});
}
}
#[cfg(not(loom))]
mod custom_config;
#[cfg(loom)]
mod loom_pool;
#[cfg(loom)]
mod loom_slab;
#[cfg(not(loom))]
mod properties;

244
vendor/sharded-slab/src/tests/properties.rs vendored Normal file
View File

@@ -0,0 +1,244 @@
//! This module contains property-based tests against the public API:
//! * API never panics.
//! * Active entries cannot be overridden until removed.
//! * The slab doesn't produce overlapping keys.
//! * The slab doesn't leave "lost" keys.
//! * `get()`, `get_owned`, and `contains()` are consistent.
//! * `RESERVED_BITS` are actually not used.
//!
//! The test is supposed to be deterministic, so it doesn't spawn real threads
//! and uses `tid::with()` to override the TID for the current thread.
use std::{ops::Range, sync::Arc};
use indexmap::IndexMap;
use proptest::prelude::*;
use crate::{tid, Config, DefaultConfig, Slab};
const THREADS: Range<usize> = 1..4;
const ACTIONS: Range<usize> = 1..1000;
#[derive(Debug, Clone)]
struct Action {
tid: usize,
kind: ActionKind,
}
#[derive(Debug, Clone)]
enum ActionKind {
Insert,
VacantEntry,
RemoveRandom(usize), // key
RemoveExistent(usize), // seed
TakeRandom(usize), // key
TakeExistent(usize), // seed
GetRandom(usize), // key
GetExistent(usize), // seed
}
prop_compose! {
fn action_strategy()(tid in THREADS, kind in action_kind_strategy()) -> Action {
Action { tid, kind }
}
}
fn action_kind_strategy() -> impl Strategy<Value = ActionKind> {
prop_oneof![
1 => Just(ActionKind::Insert),
1 => Just(ActionKind::VacantEntry),
1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveRandom),
1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveExistent),
1 => prop::num::usize::ANY.prop_map(ActionKind::TakeRandom),
1 => prop::num::usize::ANY.prop_map(ActionKind::TakeExistent),
// Produce `GetRandom` and `GetExistent` more often.
5 => prop::num::usize::ANY.prop_map(ActionKind::GetRandom),
5 => prop::num::usize::ANY.prop_map(ActionKind::GetExistent),
]
}
/// Stores active entries (added and not yet removed).
#[derive(Default)]
struct Active {
// Use `IndexMap` to preserve determinism.
map: IndexMap<usize, u32>,
prev_value: u32,
}
impl Active {
fn next_value(&mut self) -> u32 {
self.prev_value += 1;
self.prev_value
}
fn get(&self, key: usize) -> Option<u32> {
self.map.get(&key).copied()
}
fn get_any(&self, seed: usize) -> Option<(usize, u32)> {
if self.map.is_empty() {
return None;
}
let index = seed % self.map.len();
self.map.get_index(index).map(|(k, v)| (*k, *v))
}
fn insert(&mut self, key: usize, value: u32) {
assert_eq!(
self.map.insert(key, value),
None,
"keys of active entries must be unique"
);
}
fn remove(&mut self, key: usize) -> Option<u32> {
self.map.swap_remove(&key)
}
fn remove_any(&mut self, seed: usize) -> Option<(usize, u32)> {
if self.map.is_empty() {
return None;
}
let index = seed % self.map.len();
self.map.swap_remove_index(index)
}
fn drain(&mut self) -> impl Iterator<Item = (usize, u32)> + '_ {
self.map.drain(..)
}
}
fn used_bits<C: Config>(key: usize) -> usize {
assert_eq!(
C::RESERVED_BITS + Slab::<u32, C>::USED_BITS,
std::mem::size_of::<usize>() * 8
);
key & ((!0) >> C::RESERVED_BITS)
}
fn apply_action<C: Config>(
slab: &Arc<Slab<u32, C>>,
active: &mut Active,
action: ActionKind,
) -> Result<(), TestCaseError> {
match action {
ActionKind::Insert => {
let value = active.next_value();
let key = slab.insert(value).expect("unexpectedly exhausted slab");
prop_assert_eq!(used_bits::<C>(key), key);
active.insert(key, value);
}
ActionKind::VacantEntry => {
let value = active.next_value();
let entry = slab.vacant_entry().expect("unexpectedly exhausted slab");
let key = entry.key();
prop_assert_eq!(used_bits::<C>(key), key);
entry.insert(value);
active.insert(key, value);
}
ActionKind::RemoveRandom(key) => {
let used_key = used_bits::<C>(key);
prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
prop_assert_eq!(slab.remove(key), active.remove(used_key).is_some());
}
ActionKind::RemoveExistent(seed) => {
if let Some((key, _value)) = active.remove_any(seed) {
prop_assert!(slab.contains(key));
prop_assert!(slab.remove(key));
}
}
ActionKind::TakeRandom(key) => {
let used_key = used_bits::<C>(key);
prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
prop_assert_eq!(slab.take(key), active.remove(used_key));
}
ActionKind::TakeExistent(seed) => {
if let Some((key, value)) = active.remove_any(seed) {
prop_assert!(slab.contains(key));
prop_assert_eq!(slab.take(key), Some(value));
}
}
ActionKind::GetRandom(key) => {
let used_key = used_bits::<C>(key);
prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
prop_assert_eq!(slab.get(key).map(|e| *e), active.get(used_key));
prop_assert_eq!(
slab.clone().get_owned(key).map(|e| *e),
active.get(used_key)
);
}
ActionKind::GetExistent(seed) => {
if let Some((key, value)) = active.get_any(seed) {
prop_assert!(slab.contains(key));
prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
}
}
}
Ok(())
}
fn run<C: Config>(actions: Vec<Action>) -> Result<(), TestCaseError> {
let mut slab = Arc::new(Slab::new_with_config::<C>());
let mut active = Active::default();
// Apply all actions.
for action in actions {
// Override the TID for the current thread instead of using multiple real threads
// to preserve determinism. We're not checking concurrency issues here, they should be
// covered by loom tests anyway. Thus, it's fine to run all actions consequently.
tid::with(action.tid, || {
apply_action::<C>(&slab, &mut active, action.kind)
})?;
}
// Ensure the slab contains all remaining entries.
let mut expected_values = Vec::new();
for (key, value) in active.drain() {
prop_assert!(slab.contains(key));
prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
expected_values.push(value);
}
expected_values.sort();
// Ensure `unique_iter()` returns all remaining entries.
let slab = Arc::get_mut(&mut slab).unwrap();
let mut actual_values = slab.unique_iter().copied().collect::<Vec<_>>();
actual_values.sort();
prop_assert_eq!(actual_values, expected_values);
Ok(())
}
proptest! {
#[test]
fn default_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
run::<DefaultConfig>(actions)?;
}
#[test]
fn custom_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
run::<CustomConfig>(actions)?;
}
}
struct CustomConfig;
#[cfg(target_pointer_width = "64")]
impl Config for CustomConfig {
const INITIAL_PAGE_SIZE: usize = 32;
const MAX_PAGES: usize = 15;
const MAX_THREADS: usize = 256;
const RESERVED_BITS: usize = 24;
}
#[cfg(target_pointer_width = "32")]
impl Config for CustomConfig {
const INITIAL_PAGE_SIZE: usize = 16;
const MAX_PAGES: usize = 6;
const MAX_THREADS: usize = 128;
const RESERVED_BITS: usize = 12;
}
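
The file above is a model test: random `Action`s are applied both to the real `Slab` and to a simple `IndexMap`-backed model, and the two must agree at every step. The recipe ports to other structures; a trimmed skeleton with a deliberately trivial stand-in, just to show the shape:

use proptest::prelude::*;

#[derive(Debug, Clone)]
enum Op {
    Push(u32),
    Pop,
}

fn op_strategy() -> impl Strategy<Value = Op> {
    prop_oneof![
        2 => any::<u32>().prop_map(Op::Push),
        1 => Just(Op::Pop),
    ]
}

proptest! {
    // Apply every operation to both the system under test and a reference
    // model, then check that they agree; the same shape as `run::<C>()` above.
    #[test]
    fn stack_matches_model(ops in prop::collection::vec(op_strategy(), 1..100)) {
        let mut sut: Vec<u32> = Vec::new();   // stand-in for the real structure
        let mut model: Vec<u32> = Vec::new(); // reference model
        for op in ops {
            match op {
                Op::Push(v) => {
                    sut.push(v);
                    model.push(v);
                }
                Op::Pop => prop_assert_eq!(sut.pop(), model.pop()),
            }
        }
        prop_assert_eq!(sut, model);
    }
}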

210
vendor/sharded-slab/src/tid.rs vendored Normal file
View File

@@ -0,0 +1,210 @@
use crate::{
cfg::{self, CfgPrivate},
page,
sync::{
atomic::{AtomicUsize, Ordering},
lazy_static, thread_local, Mutex,
},
Pack,
};
use std::{
cell::{Cell, UnsafeCell},
collections::VecDeque,
fmt,
marker::PhantomData,
};
/// Uniquely identifies a thread.
pub(crate) struct Tid<C> {
id: usize,
_not_send: PhantomData<UnsafeCell<()>>,
_cfg: PhantomData<fn(C)>,
}
#[derive(Debug)]
struct Registration(Cell<Option<usize>>);
struct Registry {
next: AtomicUsize,
free: Mutex<VecDeque<usize>>,
}
lazy_static! {
static ref REGISTRY: Registry = Registry {
next: AtomicUsize::new(0),
free: Mutex::new(VecDeque::new()),
};
}
thread_local! {
static REGISTRATION: Registration = Registration::new();
}
// === impl Tid ===
impl<C: cfg::Config> Pack<C> for Tid<C> {
const LEN: usize = C::MAX_SHARDS.trailing_zeros() as usize + 1;
type Prev = page::Addr<C>;
#[inline(always)]
fn as_usize(&self) -> usize {
self.id
}
#[inline(always)]
fn from_usize(id: usize) -> Self {
Self {
id,
_not_send: PhantomData,
_cfg: PhantomData,
}
}
}
impl<C: cfg::Config> Tid<C> {
#[inline]
pub(crate) fn current() -> Self {
REGISTRATION
.try_with(Registration::current)
.unwrap_or_else(|_| Self::poisoned())
}
pub(crate) fn is_current(self) -> bool {
REGISTRATION
.try_with(|r| self == r.current::<C>())
.unwrap_or(false)
}
#[inline(always)]
pub fn new(id: usize) -> Self {
Self::from_usize(id)
}
}
impl<C> Tid<C> {
#[cold]
fn poisoned() -> Self {
Self {
id: std::usize::MAX,
_not_send: PhantomData,
_cfg: PhantomData,
}
}
/// Returns true if the local thread ID was accessed while unwinding.
pub(crate) fn is_poisoned(&self) -> bool {
self.id == std::usize::MAX
}
}
impl<C> fmt::Debug for Tid<C> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.is_poisoned() {
f.debug_tuple("Tid")
.field(&format_args!("<poisoned>"))
.finish()
} else {
f.debug_tuple("Tid")
.field(&format_args!("{}", self.id))
.finish()
}
}
}
impl<C> PartialEq for Tid<C> {
fn eq(&self, other: &Self) -> bool {
self.id == other.id
}
}
impl<C> Eq for Tid<C> {}
impl<C: cfg::Config> Clone for Tid<C> {
fn clone(&self) -> Self {
*self
}
}
impl<C: cfg::Config> Copy for Tid<C> {}
// === impl Registration ===
impl Registration {
fn new() -> Self {
Self(Cell::new(None))
}
#[inline(always)]
fn current<C: cfg::Config>(&self) -> Tid<C> {
if let Some(tid) = self.0.get().map(Tid::new) {
return tid;
}
self.register()
}
#[cold]
fn register<C: cfg::Config>(&self) -> Tid<C> {
let id = REGISTRY
.free
.lock()
.ok()
.and_then(|mut free| {
if free.len() > 1 {
free.pop_front()
} else {
None
}
})
.unwrap_or_else(|| {
let id = REGISTRY.next.fetch_add(1, Ordering::AcqRel);
if id > Tid::<C>::BITS {
panic_in_drop!(
"creating a new thread ID ({}) would exceed the \
maximum number of thread ID bits specified in {} \
({})",
id,
std::any::type_name::<C>(),
Tid::<C>::BITS,
);
}
id
});
self.0.set(Some(id));
Tid::new(id)
}
}
// Reusing thread IDs doesn't work under loom, since this `Drop` impl results in
// an access to a `loom` lazy_static while the test is shutting down, which
// panics. T_T
// Just skip TID reuse and use loom's lazy_static macro to ensure we have a
// clean initial TID on every iteration, instead.
#[cfg(not(all(loom, any(feature = "loom", test))))]
impl Drop for Registration {
fn drop(&mut self) {
use std::sync::PoisonError;
if let Some(id) = self.0.get() {
let mut free_list = REGISTRY.free.lock().unwrap_or_else(PoisonError::into_inner);
free_list.push_back(id);
}
}
}
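/// Test-only helper: temporarily overrides the calling thread's cached thread
/// ID, runs `f`, and restores the previous registration when the `Guard` is
/// dropped. The property tests use it to simulate several threads
/// deterministically without spawning any.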
#[cfg(all(test, not(loom)))]
pub(crate) fn with<R>(tid: usize, f: impl FnOnce() -> R) -> R {
struct Guard(Option<usize>);
impl Drop for Guard {
fn drop(&mut self) {
REGISTRATION.with(|r| r.0.set(self.0.take()));
}
}
let prev = REGISTRATION.with(|r| r.0.replace(Some(tid)));
let _guard = Guard(prev);
f()
}