Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

View File

@@ -0,0 +1,78 @@
//! Ensures that a custom config behaves as the default config, until limits are reached.
//! Prevents regression after #80.
use crate::{cfg::CfgPrivate, Config, Slab};
// Marker type carrying the custom slab parameters exercised by these tests.
struct CustomConfig;

// On 64-bit targets there are enough index bits for a larger configuration.
#[cfg(target_pointer_width = "64")]
impl Config for CustomConfig {
    const INITIAL_PAGE_SIZE: usize = 32;
    const MAX_PAGES: usize = 15;
    const MAX_THREADS: usize = 256;
    const RESERVED_BITS: usize = 24;
}

// Smaller constants for 32-bit (and other non-64-bit) targets.
#[cfg(not(target_pointer_width = "64"))]
impl Config for CustomConfig {
    const INITIAL_PAGE_SIZE: usize = 16;
    const MAX_PAGES: usize = 6;
    const MAX_THREADS: usize = 128;
    const RESERVED_BITS: usize = 12;
}
// We should repeat actions several times to detect invalid lifecycle changes.
const ITERS: u64 = 5;

/// Asserts that two slabs hold exactly the same multiset of values.
///
/// Both slabs are drained via `unique_iter`; the collected contents are
/// sorted before comparison so that insertion order does not matter.
#[track_caller]
fn slab_eq(mut lhs: Slab<u64, impl Config>, mut rhs: Slab<u64, impl Config>) {
    let mut left: Vec<_> = lhs.unique_iter().collect();
    left.sort_unstable();
    let mut right: Vec<_> = rhs.unique_iter().collect();
    right.sort_unstable();
    assert_eq!(left, right);
}
/// Calls `insert(); remove()` multiple times to detect invalid releasing.
/// Initially, it revealed bugs in the `Slot::release_with()` implementation.
#[test]
fn insert_remove() {
    eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
    let default_slab = Slab::<u64, _>::new();
    let custom_slab = Slab::<u64, _>::new_with_config::<CustomConfig>();
    // Insert then immediately remove the same value in each slab, repeatedly.
    for value in 0..=ITERS {
        let key = default_slab.insert(value).unwrap();
        assert!(default_slab.remove(key));
        let key = custom_slab.insert(value).unwrap();
        assert!(custom_slab.remove(key));
    }
    // Both slabs should end up with identical (empty) contents.
    slab_eq(custom_slab, default_slab);
}
/// Calls `get()` multiple times to detect invalid ref counting.
/// Initially, it revealed bugs in the `Slot::get()` implementation.
#[test]
fn double_get() {
    eprintln!("bits={}; config={:#?}", usize::BITS, CustomConfig::debug());
    let default_slab = Slab::<u64, _>::new();
    let custom_slab = Slab::<u64, _>::new_with_config::<CustomConfig>();
    for value in 0..=ITERS {
        // Two overlapping reads of the same slot, then a removal, for the
        // default configuration...
        let key = default_slab.insert(value).unwrap();
        assert!(default_slab.get(key).is_some());
        assert!(default_slab.get(key).is_some());
        assert!(default_slab.remove(key));
        // ...and the same sequence for the custom configuration.
        let key = custom_slab.insert(value).unwrap();
        assert!(custom_slab.get(key).is_some());
        assert!(custom_slab.get(key).is_some());
        assert!(custom_slab.remove(key));
    }
    slab_eq(custom_slab, default_slab);
}

View File

@@ -0,0 +1,641 @@
use super::util::*;
use crate::{clear::Clear, sync::alloc, Pack, Pool};
use loom::{
sync::{
atomic::{AtomicBool, Ordering},
Condvar, Mutex,
},
thread,
};
use std::sync::Arc;
/// Shared observer recording whether a pool entry has been cleared and/or
/// dropped.
#[derive(Default, Debug)]
struct State {
    is_dropped: AtomicBool,
    is_cleared: AtomicBool,
    id: usize,
}

impl State {
    // Asserts the entry has been cleared but never dropped.
    fn assert_clear(&self) {
        assert!(!self.is_dropped.load(Ordering::SeqCst));
        assert!(self.is_cleared.load(Ordering::SeqCst));
    }
    // Asserts the entry has been neither cleared nor dropped.
    fn assert_not_clear(&self) {
        assert!(!self.is_dropped.load(Ordering::SeqCst));
        assert!(!self.is_cleared.load(Ordering::SeqCst));
    }
}

// Equality is by `id` only; the atomic flags are deliberately ignored.
impl PartialEq for State {
    fn eq(&self, other: &State) -> bool {
        self.id.eq(&other.id)
    }
}
/// Pool value whose `Clear` impl and `Drop` impl each record themselves in
/// the shared `State`, letting tests assert that the pool clears values
/// without dropping them.
#[derive(Default, Debug)]
struct DontDropMe(Arc<State>);

// Delegates equality to the shared state's `id`.
impl PartialEq for DontDropMe {
    fn eq(&self, other: &DontDropMe) -> bool {
        self.0.eq(&other.0)
    }
}

impl DontDropMe {
    // Returns the observer handle alongside the value to place in the pool.
    fn new(id: usize) -> (Arc<State>, Self) {
        let state = Arc::new(State {
            is_dropped: AtomicBool::new(false),
            is_cleared: AtomicBool::new(false),
            id,
        });
        (state.clone(), Self(state))
    }
}

impl Drop for DontDropMe {
    fn drop(&mut self) {
        test_println!("-> DontDropMe drop: dropping data {:?}", self.0.id);
        self.0.is_dropped.store(true, Ordering::SeqCst)
    }
}

impl Clear for DontDropMe {
    fn clear(&mut self) {
        test_println!("-> DontDropMe clear: clearing data {:?}", self.0.id);
        self.0.is_cleared.store(true, Ordering::SeqCst);
    }
}
// Clearing a pool slot must invoke `Clear::clear` on the stored value
// without ever running its destructor.
#[test]
fn dont_drop() {
    run_model("dont_drop", || {
        let pool: Pool<DontDropMe> = Pool::new();
        let (item1, value) = DontDropMe::new(1);
        test_println!("-> dont_drop: Inserting into pool {}", item1.id);
        let idx = pool
            .create_with(move |item| *item = value)
            .expect("create_with");
        // Nothing should have happened to the value yet.
        item1.assert_not_clear();
        test_println!("-> dont_drop: clearing idx: {}", idx);
        pool.clear(idx);
        // Cleared, but not dropped.
        item1.assert_clear();
    });
}
// While a read guard is outstanding, `clear` marks the slot (returning
// `true`) but the value must not actually be cleared until the last guard
// is dropped.
#[test]
fn concurrent_create_with_clear() {
    run_model("concurrent_create_with_clear", || {
        let pool: Arc<Pool<DontDropMe>> = Arc::new(Pool::new());
        // Mutex + Condvar pair used to rendezvous with the spawned thread.
        let pair = Arc::new((Mutex::new(None), Condvar::new()));
        let (item1, value) = DontDropMe::new(1);
        let idx1 = pool
            .create_with(move |item| *item = value)
            .expect("create_with");
        let p = pool.clone();
        let pair2 = pair.clone();
        let test_value = item1.clone();
        let t1 = thread::spawn(move || {
            let (lock, cvar) = &*pair2;
            test_println!("-> making get request");
            assert_eq!(p.get(idx1).unwrap().0.id, test_value.id);
            // Signal the main thread that the get has completed.
            let mut next = lock.lock().unwrap();
            *next = Some(());
            cvar.notify_one();
        });
        test_println!("-> making get request");
        let guard = pool.get(idx1);
        let (lock, cvar) = &*pair;
        let mut next = lock.lock().unwrap();
        // wait until we have a guard on the other thread.
        while next.is_none() {
            next = cvar.wait(next).unwrap();
        }
        // the item should be marked (clear returns true)...
        assert!(pool.clear(idx1));
        // ...but the value shouldn't be removed yet.
        item1.assert_not_clear();
        t1.join().expect("thread 1 unable to join");
        // Dropping the last guard finally performs the deferred clear.
        drop(guard);
        item1.assert_clear();
    })
}
// Two threads racing to `clear` the same slot: exactly one must win, and
// the value must end up cleared (not dropped).
#[test]
fn racy_clear() {
    run_model("racy_clear", || {
        let pool = Arc::new(Pool::new());
        let (item, value) = DontDropMe::new(1);
        let idx = pool
            .create_with(move |item| *item = value)
            .expect("create_with");
        assert_eq!(pool.get(idx).unwrap().0.id, item.id);
        let p = pool.clone();
        let t2 = thread::spawn(move || p.clear(idx));
        let r1 = pool.clear(idx);
        let r2 = t2.join().expect("thread 2 should not panic");
        test_println!("r1: {}, r2: {}", r1, r2);
        // At most one clear may report success...
        assert!(
            !(r1 && r2),
            "Both threads should not have cleared the value"
        );
        // ...and at least one must.
        assert!(r1 || r2, "One thread should have removed the value");
        assert!(pool.get(idx).is_none());
        item.assert_clear();
    })
}
// After clearing a slot, a subsequent `create_with` must reuse the same
// slot index (generation bits aside) and keep the `String`'s previously
// grown capacity.
#[test]
fn clear_local_and_reuse() {
    // Fixed: the loom model name previously said "take_remote_and_reuse",
    // copy-pasted from the slab tests; it now matches this test's name.
    run_model("clear_local_and_reuse", || {
        let pool = Arc::new(Pool::new_with_config::<TinyConfig>());
        let idx1 = pool
            .create_with(|item: &mut String| {
                item.push_str("hello world");
            })
            .expect("create_with");
        let idx2 = pool
            .create_with(|item| item.push_str("foo"))
            .expect("create_with");
        let idx3 = pool
            .create_with(|item| item.push_str("bar"))
            .expect("create_with");
        assert_eq!(pool.get(idx1).unwrap(), String::from("hello world"));
        assert_eq!(pool.get(idx2).unwrap(), String::from("foo"));
        assert_eq!(pool.get(idx3).unwrap(), String::from("bar"));
        // Strip the generation bits so slot indices can be compared across
        // a clear/reuse cycle.
        let first = idx1 & (!crate::page::slot::Generation::<TinyConfig>::MASK);
        assert!(pool.clear(idx1));
        let idx1 = pool
            .create_with(move |item| item.push_str("h"))
            .expect("create_with");
        let second = idx1 & (!crate::page::slot::Generation::<TinyConfig>::MASK);
        assert_eq!(first, second);
        // The reused `String` retains the capacity from "hello world".
        assert!(pool.get(idx1).unwrap().capacity() >= 11);
    })
}
// A live mutable `create` guard must make the slot invisible to `get`
// from other threads.
#[test]
fn create_mut_guard_prevents_access() {
    run_model("create_mut_guard_prevents_access", || {
        let pool = Arc::new(Pool::<String>::new());
        let guard = pool.create().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        thread::spawn(move || {
            // The guard is still held here, so the slot must not be visible.
            assert!(pool2.get(key).is_none());
        })
        .join()
        .unwrap();
    });
}

// A reader racing against a mutable guard's write + drop must never
// observe a broken state (the `get` may return either `None` or the value).
#[test]
fn create_mut_guard() {
    run_model("create_mut_guard", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.create().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        guard.push_str("Hello world");
        drop(guard);
        t1.join().unwrap();
    });
}
// Like `create_mut_guard`, but with a second reader spawned after the
// write and before the guard is dropped.
#[test]
fn create_mut_guard_2() {
    run_model("create_mut_guard_2", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.create().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        guard.push_str("Hello world");
        let t2 = thread::spawn(move || {
            test_dbg!(pool3.get(key));
        });
        drop(guard);
        t1.join().unwrap();
        t2.join().unwrap();
    });
}

// Downgrading the mutable guard to a read guard (instead of dropping it)
// must publish the written value while racing readers run.
#[test]
fn create_mut_guard_downgrade() {
    run_model("create_mut_guard_downgrade", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.create().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        guard.push_str("Hello world");
        let guard = guard.downgrade();
        let t2 = thread::spawn(move || {
            test_dbg!(pool3.get(key));
        });
        t1.join().unwrap();
        t2.join().unwrap();
        // The downgraded guard still reads the value written before the
        // downgrade.
        assert_eq!(guard, "Hello world".to_owned());
    });
}
// After downgrading to a read guard, a racing `clear` must not invalidate
// the guard; the slot only becomes unavailable once the guard is dropped.
#[test]
fn create_mut_guard_downgrade_clear() {
    run_model("create_mut_guard_downgrade_clear", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.create().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        guard.push_str("Hello world");
        let guard = guard.downgrade();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        let t2 = thread::spawn(move || {
            test_dbg!(pool3.clear(key));
        });
        // The read guard remains valid while the clear races.
        assert_eq!(guard, "Hello world".to_owned());
        drop(guard);
        t1.join().unwrap();
        t2.join().unwrap();
        assert!(pool.get(key).is_none());
    });
}

// Downgrade while another thread concurrently clears the same slot; the
// downgraded guard must still see the written value until dropped.
#[test]
fn create_mut_downgrade_during_clear() {
    run_model("create_mut_downgrade_during_clear", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.create().unwrap();
        let key: usize = guard.key();
        guard.push_str("Hello world");
        let pool2 = pool.clone();
        let guard = guard.downgrade();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.clear(key));
        });
        t1.join().unwrap();
        assert_eq!(guard, "Hello world".to_owned());
        drop(guard);
        assert!(pool.get(key).is_none());
    });
}
// Owned guards (`get_owned`) are `Send`: they may be moved to and dropped
// on other threads while clears race, and each guard keeps its value alive.
#[test]
fn ownedref_send_out_of_local() {
    run_model("ownedref_send_out_of_local", || {
        let pool = Arc::new(Pool::<alloc::Track<String>>::new());
        let key1 = pool
            .create_with(|item| item.get_mut().push_str("hello"))
            .expect("create item 1");
        let key2 = pool
            .create_with(|item| item.get_mut().push_str("goodbye"))
            .expect("create item 2");
        let item1 = pool.clone().get_owned(key1).expect("get key1");
        let item2 = pool.clone().get_owned(key2).expect("get key2");
        let pool2 = pool.clone();
        // Clear key1 while `item1` still holds a reference to it.
        test_dbg!(pool.clear(key1));
        let t1 = thread::spawn(move || {
            assert_eq!(item1.get_ref(), &String::from("hello"));
            drop(item1);
        });
        let t2 = thread::spawn(move || {
            assert_eq!(item2.get_ref(), &String::from("goodbye"));
            test_dbg!(pool2.clear(key2));
            drop(item2);
        });
        t1.join().unwrap();
        t2.join().unwrap();
        // Both slots are gone once all owned guards are dropped.
        assert!(pool.get(key1).is_none());
        assert!(pool.get(key2).is_none());
    });
}

// Owned guards must remain usable even after the pool's own `Arc` handle
// has been dropped.
#[test]
fn ownedrefs_outlive_pool() {
    run_model("ownedrefs_outlive_pool", || {
        let pool = Arc::new(Pool::<alloc::Track<String>>::new());
        let key1 = pool
            .create_with(|item| item.get_mut().push_str("hello"))
            .expect("create item 1");
        let key2 = pool
            .create_with(|item| item.get_mut().push_str("goodbye"))
            .expect("create item 2");
        let item1_1 = pool.clone().get_owned(key1).expect("get key1");
        let item1_2 = pool.clone().get_owned(key1).expect("get key1 again");
        let item2 = pool.clone().get_owned(key2).expect("get key2");
        // Drop the test's handle; the guards keep the pool alive.
        drop(pool);
        let t1 = thread::spawn(move || {
            assert_eq!(item1_1.get_ref(), &String::from("hello"));
            drop(item1_1);
        });
        let t2 = thread::spawn(move || {
            assert_eq!(item2.get_ref(), &String::from("goodbye"));
            drop(item2);
        });
        t1.join().unwrap();
        t2.join().unwrap();
        assert_eq!(item1_2.get_ref(), &String::from("hello"));
    });
}
// Owned guards returned from spawned threads back to the main thread must
// still dereference correctly after racing clears.
#[test]
fn ownedref_ping_pong() {
    run_model("ownedref_ping_pong", || {
        let pool = Arc::new(Pool::<alloc::Track<String>>::new());
        let key1 = pool
            .create_with(|item| item.get_mut().push_str("hello"))
            .expect("create item 1");
        let key2 = pool
            .create_with(|item| item.get_mut().push_str("world"))
            .expect("create item 2");
        let item1 = pool.clone().get_owned(key1).expect("get key1");
        let pool2 = pool.clone();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            assert_eq!(item1.get_ref(), &String::from("hello"));
            pool2.clear(key1);
            item1
        });
        let t2 = thread::spawn(move || {
            let item2 = pool3.clone().get_owned(key2).unwrap();
            assert_eq!(item2.get_ref(), &String::from("world"));
            // NOTE(review): both threads clear `key1` here — presumably
            // intentional (two racing clears of the same slot); confirm
            // against upstream if `key2` was meant.
            pool3.clear(key1);
            item2
        });
        let item1 = t1.join().unwrap();
        let item2 = t2.join().unwrap();
        assert_eq!(item1.get_ref(), &String::from("hello"));
        assert_eq!(item2.get_ref(), &String::from("world"));
    });
}

// Owned guards may be created on one thread and dropped on another, in
// nested spawns, while clears race on both keys.
#[test]
fn ownedref_drop_from_other_threads() {
    run_model("ownedref_drop_from_other_threads", || {
        let pool = Arc::new(Pool::<alloc::Track<String>>::new());
        let key1 = pool
            .create_with(|item| item.get_mut().push_str("hello"))
            .expect("create item 1");
        let item1 = pool.clone().get_owned(key1).expect("get key1");
        let pool2 = pool.clone();
        let t1 = thread::spawn(move || {
            let pool = pool2.clone();
            let key2 = pool
                .create_with(|item| item.get_mut().push_str("goodbye"))
                .expect("create item 1");
            let item2 = pool.clone().get_owned(key2).expect("get key1");
            let t2 = thread::spawn(move || {
                assert_eq!(item2.get_ref(), &String::from("goodbye"));
                test_dbg!(pool2.clear(key1));
                drop(item2)
            });
            assert_eq!(item1.get_ref(), &String::from("hello"));
            test_dbg!(pool.clear(key2));
            drop(item1);
            (t2, key2)
        });
        let (t2, key2) = t1.join().unwrap();
        test_dbg!(pool.get(key1));
        test_dbg!(pool.get(key2));
        t2.join().unwrap();
        // Once every guard is dropped, both cleared slots are unavailable.
        assert!(pool.get(key1).is_none());
        assert!(pool.get(key2).is_none());
    });
}
// Owned-guard variant of `create_mut_guard`: a reader races the write +
// drop of an owned mutable guard.
#[test]
fn create_owned_mut_guard() {
    run_model("create_owned_mut_guard", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        guard.push_str("Hello world");
        drop(guard);
        t1.join().unwrap();
    });
}
// Owned mutable guards are `Send`: the guard is moved into a second
// thread, written, and dropped there while a reader races on a third.
#[test]
fn create_owned_mut_guard_send() {
    // Fixed: the loom model name previously reused "create_owned_mut_guard",
    // colliding with the test above; it now matches this test's name.
    run_model("create_owned_mut_guard_send", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        let t2 = thread::spawn(move || {
            guard.push_str("Hello world");
            drop(guard);
        });
        t1.join().unwrap();
        t2.join().unwrap();
    });
}
// Owned-guard variant of `create_mut_guard_2`: two readers race, one
// spawned before the write and one after.
#[test]
fn create_owned_mut_guard_2() {
    run_model("create_owned_mut_guard_2", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        guard.push_str("Hello world");
        let t2 = thread::spawn(move || {
            test_dbg!(pool3.get(key));
        });
        drop(guard);
        t1.join().unwrap();
        t2.join().unwrap();
    });
}

// Downgrading an owned mutable guard publishes the value: the second
// reader (spawned after the downgrade) must observe "Hello world".
#[test]
fn create_owned_mut_guard_downgrade() {
    run_model("create_owned_mut_guard_downgrade", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        guard.push_str("Hello world");
        let key: usize = guard.key();
        let pool2 = pool.clone();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        let guard = guard.downgrade();
        let t2 = thread::spawn(move || {
            assert_eq!(pool3.get(key).unwrap(), "Hello world".to_owned());
        });
        t1.join().unwrap();
        t2.join().unwrap();
        assert_eq!(guard, "Hello world".to_owned());
    });
}
// After downgrading an owned mutable guard, a racing `clear` must not
// invalidate the read guard; the slot disappears only after the drop.
#[test]
fn create_owned_mut_guard_downgrade_then_clear() {
    run_model("create_owned_mut_guard_downgrade_then_clear", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        let key: usize = guard.key();
        let pool2 = pool.clone();
        guard.push_str("Hello world");
        let guard = guard.downgrade();
        let pool3 = pool.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.get(key));
        });
        let t2 = thread::spawn(move || {
            test_dbg!(pool3.clear(key));
        });
        assert_eq!(guard, "Hello world".to_owned());
        drop(guard);
        t1.join().unwrap();
        t2.join().unwrap();
        assert!(pool.get(key).is_none());
    });
}

// Owned-guard variant of `create_mut_downgrade_during_clear`.
#[test]
fn create_owned_mut_downgrade_during_clear() {
    run_model("create_owned_mut_downgrade_during_clear", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        let key: usize = guard.key();
        guard.push_str("Hello world");
        let pool2 = pool.clone();
        let guard = guard.downgrade();
        let t1 = thread::spawn(move || {
            test_dbg!(pool2.clear(key));
        });
        t1.join().unwrap();
        assert_eq!(guard, "Hello world".to_owned());
        drop(guard);
        assert!(pool.get(key).is_none());
    });
}
// The downgrade itself happens on a spawned thread while another thread
// clears and the main thread reads.
// NOTE(review): the fn name has a typo ("thead"); the model name below
// spells "thread" correctly. Left unrenamed to keep test filters stable.
#[test]
fn create_mut_downgrade_during_clear_by_other_thead() {
    run_model("create_mut_downgrade_during_clear_by_other_thread", || {
        let pool = Arc::new(Pool::<String>::new());
        let mut guard = pool.clone().create_owned().unwrap();
        let key: usize = guard.key();
        guard.push_str("Hello world");
        let pool2 = pool.clone();
        let t1 = thread::spawn(move || {
            let guard = guard.downgrade();
            assert_eq!(guard, "Hello world".to_owned());
            drop(guard);
        });
        let t2 = thread::spawn(move || {
            test_dbg!(pool2.clear(key));
        });
        test_dbg!(pool.get(key));
        t1.join().unwrap();
        t2.join().unwrap();
    });
}

View File

@@ -0,0 +1,760 @@
use super::util::*;
use crate::sync::alloc;
use crate::Slab;
use loom::sync::{Condvar, Mutex};
use loom::thread;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
// Each thread inserts, reads, and takes its own entries; `take` must
// return the value and leave the slot empty, with no cross-thread
// interference.
#[test]
fn take_local() {
    run_model("take_local", || {
        let slab = Arc::new(Slab::new());
        let s = slab.clone();
        let t1 = thread::spawn(move || {
            let idx = s.insert(1).expect("insert");
            assert_eq!(s.get(idx).unwrap(), 1);
            assert_eq!(s.take(idx), Some(1));
            assert!(s.get(idx).is_none());
            let idx = s.insert(2).expect("insert");
            assert_eq!(s.get(idx).unwrap(), 2);
            assert_eq!(s.take(idx), Some(2));
            assert!(s.get(idx).is_none());
        });
        let s = slab.clone();
        let t2 = thread::spawn(move || {
            let idx = s.insert(3).expect("insert");
            assert_eq!(s.get(idx).unwrap(), 3);
            assert_eq!(s.take(idx), Some(3));
            assert!(s.get(idx).is_none());
            let idx = s.insert(4).expect("insert");
            assert_eq!(s.get(idx).unwrap(), 4);
            assert_eq!(s.take(idx), Some(4));
            assert!(s.get(idx).is_none());
        });
        // The main thread interleaves its own insert/take pairs.
        let s = slab;
        let idx1 = s.insert(5).expect("insert");
        assert_eq!(s.get(idx1).unwrap(), 5);
        let idx2 = s.insert(6).expect("insert");
        assert_eq!(s.get(idx2).unwrap(), 6);
        assert_eq!(s.take(idx1), Some(5));
        assert!(s.get(idx1).is_none());
        assert_eq!(s.get(idx2).unwrap(), 6);
        assert_eq!(s.take(idx2), Some(6));
        assert!(s.get(idx2).is_none());
        t1.join().expect("thread 1 should not panic");
        t2.join().expect("thread 2 should not panic");
    });
}
// Entries inserted on the main thread are taken from other threads;
// untouched entries must remain readable afterwards.
#[test]
fn take_remote() {
    run_model("take_remote", || {
        let slab = Arc::new(Slab::new());
        let idx1 = slab.insert(1).expect("insert");
        assert_eq!(slab.get(idx1).unwrap(), 1);
        let idx2 = slab.insert(2).expect("insert");
        assert_eq!(slab.get(idx2).unwrap(), 2);
        let idx3 = slab.insert(3).expect("insert");
        assert_eq!(slab.get(idx3).unwrap(), 3);
        let s = slab.clone();
        let t1 = thread::spawn(move || {
            assert_eq!(s.get(idx2).unwrap(), 2);
            assert_eq!(s.take(idx2), Some(2));
        });
        let s = slab.clone();
        let t2 = thread::spawn(move || {
            assert_eq!(s.get(idx3).unwrap(), 3);
            assert_eq!(s.take(idx3), Some(3));
        });
        t1.join().expect("thread 1 should not panic");
        t2.join().expect("thread 2 should not panic");
        // idx1 was never taken and must still be present.
        assert_eq!(slab.get(idx1).unwrap(), 1);
        assert!(slab.get(idx2).is_none());
        assert!(slab.get(idx3).is_none());
    });
}
// Two threads race to `take` the same slot: exactly one must receive the
// value.
#[test]
fn racy_take() {
    run_model("racy_take", || {
        let slab = Arc::new(Slab::new());
        let idx = slab.insert(1).expect("insert");
        assert_eq!(slab.get(idx).unwrap(), 1);
        let s1 = slab.clone();
        let s2 = slab.clone();
        let t1 = thread::spawn(move || s1.take(idx));
        let t2 = thread::spawn(move || s2.take(idx));
        let r1 = t1.join().expect("thread 1 should not panic");
        let r2 = t2.join().expect("thread 2 should not panic");
        assert!(
            r1.is_none() || r2.is_none(),
            "both threads should not have removed the value"
        );
        assert_eq!(
            r1.or(r2),
            Some(1),
            "one thread should have removed the value"
        );
        assert!(slab.get(idx).is_none());
    });
}

// Same race, but one contender is the main (inserting) thread itself.
#[test]
fn racy_take_local() {
    run_model("racy_take_local", || {
        let slab = Arc::new(Slab::new());
        let idx = slab.insert(1).expect("insert");
        assert_eq!(slab.get(idx).unwrap(), 1);
        let s = slab.clone();
        let t2 = thread::spawn(move || s.take(idx));
        let r1 = slab.take(idx);
        let r2 = t2.join().expect("thread 2 should not panic");
        assert!(
            r1.is_none() || r2.is_none(),
            "both threads should not have removed the value"
        );
        assert!(
            r1.or(r2).is_some(),
            "one thread should have removed the value"
        );
        assert!(slab.get(idx).is_none());
    });
}
// A producer/consumer pair handing keys through a Mutex + Condvar: each
// inserted key is taken by the remover thread before the next insert.
#[test]
fn concurrent_insert_take() {
    // Fixed: the loom model name previously said "concurrent_insert_remove",
    // which did not match this test's name.
    run_model("concurrent_insert_take", || {
        let slab = Arc::new(Slab::new());
        let pair = Arc::new((Mutex::new(None), Condvar::new()));
        let slab2 = slab.clone();
        let pair2 = pair.clone();
        let remover = thread::spawn(move || {
            let (lock, cvar) = &*pair2;
            for i in 0..2 {
                test_println!("--- remover i={} ---", i);
                // Wait for the inserter to publish a key.
                let mut next = lock.lock().unwrap();
                while next.is_none() {
                    next = cvar.wait(next).unwrap();
                }
                let key = next.take().unwrap();
                assert_eq!(slab2.take(key), Some(i));
                cvar.notify_one();
            }
        });
        let (lock, cvar) = &*pair;
        for i in 0..2 {
            test_println!("--- inserter i={} ---", i);
            let key = slab.insert(i).expect("insert");
            let mut next = lock.lock().unwrap();
            *next = Some(key);
            cvar.notify_one();
            // Wait for the item to be removed.
            while next.is_some() {
                next = cvar.wait(next).unwrap();
            }
            assert!(slab.get(key).is_none());
        }
        remover.join().unwrap();
    })
}
// While another thread takes an entry, the main thread inserts a new one;
// with `TinyConfig` the freed slot can be reused, and all other entries
// must remain intact.
#[test]
fn take_remote_and_reuse() {
    run_model("take_remote_and_reuse", || {
        let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
        let idx1 = slab.insert(1).expect("insert");
        let idx2 = slab.insert(2).expect("insert");
        let idx3 = slab.insert(3).expect("insert");
        let idx4 = slab.insert(4).expect("insert");
        assert_eq!(slab.get(idx1).unwrap(), 1, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx4).unwrap(), 4, "slab: {:#?}", slab);
        let s = slab.clone();
        let t1 = thread::spawn(move || {
            assert_eq!(s.take(idx1), Some(1), "slab: {:#?}", s);
        });
        // Races with the remote `take` above.
        let idx1 = slab.insert(5).expect("insert");
        t1.join().expect("thread 1 should not panic");
        assert_eq!(slab.get(idx1).unwrap(), 5, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx4).unwrap(), 4, "slab: {:#?}", slab);
    });
}
/// Inserts `t` into the slab, yielding and retrying until a slot frees up.
///
/// Returns the key of the slot the value finally landed in.
fn store_when_free<C: crate::Config>(slab: &Arc<Slab<usize, C>>, t: usize) -> usize {
    loop {
        test_println!("try store {:?}", t);
        match slab.insert(t) {
            Some(key) => {
                test_println!("inserted at {:#x}", key);
                return key;
            }
            // Slab is at capacity; let other threads make progress.
            None => {
                test_println!("retrying; slab is full...");
                thread::yield_now();
            }
        }
    }
}
// Minimal capacity: a single page with an initial size of 2, so the slab
// fills up after very few insertions.
struct TinierConfig;
impl crate::Config for TinierConfig {
    const INITIAL_PAGE_SIZE: usize = 2;
    const MAX_PAGES: usize = 1;
}
// Two threads take the only two slots of a `TinierConfig` slab while the
// main thread spins (`store_when_free`) until one of the freed slots can
// be reused for a new value.
#[test]
fn concurrent_remove_remote_and_reuse() {
    let mut model = loom::model::Builder::new();
    // This model has a large state space; raise loom's branch limit.
    model.max_branches = 100000;
    run_builder("concurrent_remove_remote_and_reuse", model, || {
        let slab = Arc::new(Slab::new_with_config::<TinierConfig>());
        let idx1 = slab.insert(1).unwrap();
        let idx2 = slab.insert(2).unwrap();
        assert_eq!(slab.get(idx1).unwrap(), 1, "slab: {:#?}", slab);
        assert_eq!(slab.get(idx2).unwrap(), 2, "slab: {:#?}", slab);
        let s = slab.clone();
        let s2 = slab.clone();
        let t1 = thread::spawn(move || {
            s.take(idx1).expect("must remove");
        });
        let t2 = thread::spawn(move || {
            s2.take(idx2).expect("must remove");
        });
        let idx3 = store_when_free(&slab, 3);
        t1.join().expect("thread 1 should not panic");
        // Fixed: this expect-message previously said "thread 1".
        t2.join().expect("thread 2 should not panic");
        assert!(slab.get(idx1).is_none(), "slab: {:#?}", slab);
        assert!(slab.get(idx2).is_none(), "slab: {:#?}", slab);
        assert_eq!(slab.get(idx3).unwrap(), 3, "slab: {:#?}", slab);
    });
}
/// Slab value that flips a shared flag when dropped.
struct SetDropped {
    val: usize,
    dropped: std::sync::Arc<AtomicBool>,
}

/// Test-side handle for asserting that the paired `SetDropped` was dropped.
struct AssertDropped {
    dropped: std::sync::Arc<AtomicBool>,
}

impl AssertDropped {
    // Builds the observer and the value to insert, sharing one flag.
    fn new(val: usize) -> (Self, SetDropped) {
        let dropped = std::sync::Arc::new(AtomicBool::new(false));
        let val = SetDropped {
            val,
            dropped: dropped.clone(),
        };
        (Self { dropped }, val)
    }
    // Panics if the value has not (yet) been dropped.
    fn assert_dropped(&self) {
        assert!(
            self.dropped.load(Ordering::SeqCst),
            "value should have been dropped!"
        );
    }
}

impl Drop for SetDropped {
    fn drop(&mut self) {
        self.dropped.store(true, Ordering::SeqCst);
    }
}
// `remove` while a local read guard is held: the slot becomes invisible
// immediately, but the value is dropped only after the guard goes away.
#[test]
fn remove_local() {
    run_model("remove_local", || {
        let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
        let slab2 = slab.clone();
        let (dropped, item) = AssertDropped::new(1);
        let idx = slab.insert(item).expect("insert");
        let guard = slab.get(idx).unwrap();
        assert!(slab.remove(idx));
        let t1 = thread::spawn(move || {
            let g = slab2.get(idx);
            drop(g);
        });
        assert!(slab.get(idx).is_none());
        t1.join().expect("thread 1 should not panic");
        // The last guard is dropped here, releasing the value.
        drop(guard);
        assert!(slab.get(idx).is_none());
        dropped.assert_dropped();
    })
}
// `remove` racing against a remote `get`: whatever the interleaving, the
// value must end up dropped and the slot empty.
#[test]
fn remove_remote() {
    run_model("remove_remote", || {
        let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
        let slab2 = slab.clone();
        let (dropped, item) = AssertDropped::new(1);
        let idx = slab.insert(item).expect("insert");
        assert!(slab.remove(idx));
        let t1 = thread::spawn(move || {
            let g = slab2.get(idx);
            drop(g);
        });
        t1.join().expect("thread 1 should not panic");
        assert!(slab.get(idx).is_none());
        dropped.assert_dropped();
    });
}

// A remote `get` racing a remove + reinsert must never observe the new
// value through the old key.
#[test]
fn remove_remote_during_insert() {
    run_model("remove_remote_during_insert", || {
        let slab = Arc::new(Slab::new_with_config::<TinyConfig>());
        let slab2 = slab.clone();
        let (dropped, item) = AssertDropped::new(1);
        let idx = slab.insert(item).expect("insert");
        let t1 = thread::spawn(move || {
            let g = slab2.get(idx);
            // The old key must not resolve to the newly inserted value (2).
            assert_ne!(g.as_ref().map(|v| v.val), Some(2));
            drop(g);
        });
        let (_, item) = AssertDropped::new(2);
        assert!(slab.remove(idx));
        let idx2 = slab.insert(item).expect("insert");
        t1.join().expect("thread 1 should not panic");
        assert!(slab.get(idx).is_none());
        assert!(slab.get(idx2).is_some());
        dropped.assert_dropped();
    });
}
// After concurrent inserts from two threads, `unique_iter` (requiring
// exclusive access via `Arc::get_mut`) must yield all four values.
#[test]
fn unique_iter() {
    run_model("unique_iter", || {
        let mut slab = Arc::new(Slab::new());
        let s = slab.clone();
        let t1 = thread::spawn(move || {
            s.insert(1).expect("insert");
            s.insert(2).expect("insert");
        });
        let s = slab.clone();
        let t2 = thread::spawn(move || {
            s.insert(3).expect("insert");
            s.insert(4).expect("insert");
        });
        t1.join().expect("thread 1 should not panic");
        t2.join().expect("thread 2 should not panic");
        // Both worker handles are gone, so exclusive access is available.
        let slab = Arc::get_mut(&mut slab).expect("other arcs should be dropped");
        let items: Vec<_> = slab.unique_iter().map(|&i| i).collect();
        assert!(items.contains(&1), "items: {:?}", items);
        assert!(items.contains(&2), "items: {:?}", items);
        assert!(items.contains(&3), "items: {:?}", items);
        assert!(items.contains(&4), "items: {:?}", items);
    });
}
// Inserting many values into a tiny-paged slab forces repeated page
// growth; every inserted value must remain readable at its key.
#[test]
fn custom_page_sz() {
    let mut model = loom::model::Builder::new();
    // Single-threaded but long-running; raise loom's branch limit.
    model.max_branches = 100000;
    model.check(|| {
        let slab = Slab::<usize>::new_with_config::<TinyConfig>();
        for i in 0..1024usize {
            test_println!("{}", i);
            let k = slab.insert(i).expect("insert");
            let v = slab.get(k).expect("get");
            assert_eq!(v, i, "slab: {:#?}", slab);
        }
    });
}
// When a slot's reference count saturates, further `get`s must fail until
// an existing guard is dropped.
#[test]
fn max_refs() {
    struct LargeGenConfig;
    // Configure the slab with a very large number of bits for the generation
    // counter. That way, there will be very few bits for the ref count left
    // over, and this test won't have to malloc millions of references.
    impl crate::cfg::Config for LargeGenConfig {
        const INITIAL_PAGE_SIZE: usize = 2;
        const MAX_THREADS: usize = 32;
        const MAX_PAGES: usize = 2;
    }
    let mut model = loom::model::Builder::new();
    model.max_branches = 100000;
    model.check(|| {
        let slab = Slab::new_with_config::<LargeGenConfig>();
        let key = slab.insert("hello world").unwrap();
        let max = crate::page::slot::RefCount::<LargeGenConfig>::MAX;
        // Create the maximum number of concurrent references to the entry.
        let mut refs = (0..max)
            .map(|_| slab.get(key).unwrap())
            // Store the refs in a vec so they don't get dropped immediately.
            .collect::<Vec<_>>();
        // The refcount is saturated, so this `get` must fail.
        assert!(slab.get(key).is_none());
        // After dropping a ref, we should now be able to access the slot again.
        drop(refs.pop());
        let ref1 = slab.get(key);
        assert!(ref1.is_some());
        // Ref1 should max out the number of references again.
        assert!(slab.get(key).is_none());
    })
}
// Verifies that freed slots go back on the local free list and are reused
// before new pages are allocated, for both `remove` and `take`.
mod free_list_reuse {
    use super::*;
    struct TinyConfig;
    impl crate::cfg::Config for TinyConfig {
        const INITIAL_PAGE_SIZE: usize = 2;
    }

    #[test]
    fn local_remove() {
        run_model("free_list_reuse::local_remove", || {
            let slab = Slab::new_with_config::<TinyConfig>();
            let t1 = slab.insert("hello").expect("insert");
            let t2 = slab.insert("world").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t1).1,
                0,
                "1st slot should be on 0th page"
            );
            assert_eq!(
                crate::page::indices::<TinyConfig>(t2).1,
                0,
                "2nd slot should be on 0th page"
            );
            let t3 = slab.insert("earth").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t3).1,
                1,
                "3rd slot should be on 1st page"
            );
            slab.remove(t2);
            let t4 = slab.insert("universe").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t4).1,
                0,
                "2nd slot should be reused (0th page)"
            );
            slab.remove(t1);
            // Fixed: this assertion previously re-checked `t4`'s page index
            // (vacuously true); it now checks the key returned by the new
            // insert, which is what "1st slot should be reused" means.
            let t5 = slab.insert("goodbye").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t5).1,
                0,
                "1st slot should be reused (0th page)"
            );
        });
    }

    #[test]
    fn local_take() {
        run_model("free_list_reuse::local_take", || {
            let slab = Slab::new_with_config::<TinyConfig>();
            let t1 = slab.insert("hello").expect("insert");
            let t2 = slab.insert("world").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t1).1,
                0,
                "1st slot should be on 0th page"
            );
            assert_eq!(
                crate::page::indices::<TinyConfig>(t2).1,
                0,
                "2nd slot should be on 0th page"
            );
            let t3 = slab.insert("earth").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t3).1,
                1,
                "3rd slot should be on 1st page"
            );
            assert_eq!(slab.take(t2), Some("world"));
            let t4 = slab.insert("universe").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t4).1,
                0,
                "2nd slot should be reused (0th page)"
            );
            assert_eq!(slab.take(t1), Some("hello"));
            // Fixed: same vacuous-assertion bug as in `local_remove`; check
            // the freshly inserted key rather than `t4` again.
            let t5 = slab.insert("goodbye").expect("insert");
            assert_eq!(
                crate::page::indices::<TinyConfig>(t5).1,
                0,
                "1st slot should be reused (0th page)"
            );
        });
    }
}
// A reserved-but-unfilled vacant entry races a reader; after `insert` the
// value must be visible at the reserved key.
#[test]
fn vacant_entry() {
    run_model("vacant_entry", || {
        let slab = Arc::new(Slab::new());
        let entry = slab.vacant_entry().unwrap();
        let key: usize = entry.key();
        let slab2 = slab.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(slab2.get(key));
        });
        entry.insert("hello world");
        t1.join().unwrap();
        assert_eq!(slab.get(key).expect("get"), "hello world");
    });
}

// Same as above, with a second reader spawned after the insert.
#[test]
fn vacant_entry_2() {
    run_model("vacant_entry_2", || {
        let slab = Arc::new(Slab::new());
        let entry = slab.vacant_entry().unwrap();
        let key: usize = entry.key();
        let slab2 = slab.clone();
        let slab3 = slab.clone();
        let t1 = thread::spawn(move || {
            test_dbg!(slab2.get(key));
        });
        entry.insert("hello world");
        let t2 = thread::spawn(move || {
            test_dbg!(slab3.get(key));
        });
        t1.join().unwrap();
        t2.join().unwrap();
        assert_eq!(slab.get(key).expect("get"), "hello world");
    });
}

// `remove` on a still-vacant entry must fail and must not prevent the
// later insert from landing.
#[test]
fn vacant_entry_remove() {
    run_model("vacant_entry_remove", || {
        let slab = Arc::new(Slab::new());
        let entry = slab.vacant_entry().unwrap();
        let key: usize = entry.key();
        let slab2 = slab.clone();
        let t1 = thread::spawn(move || {
            assert!(!slab2.remove(key));
        });
        t1.join().unwrap();
        entry.insert("hello world");
        assert_eq!(slab.get(key).expect("get"), "hello world");
    });
}
// Slab counterpart of the pool's `ownedref_send_out_of_local`: owned
// guards are moved to other threads and dropped there while removes race.
#[test]
fn owned_entry_send_out_of_local() {
    run_model("owned_entry_send_out_of_local", || {
        let slab = Arc::new(Slab::<alloc::Track<String>>::new());
        let key1 = slab
            .insert(alloc::Track::new(String::from("hello")))
            .expect("insert item 1");
        let key2 = slab
            .insert(alloc::Track::new(String::from("goodbye")))
            .expect("insert item 2");
        let item1 = slab.clone().get_owned(key1).expect("get key1");
        let item2 = slab.clone().get_owned(key2).expect("get key2");
        let slab2 = slab.clone();
        // Remove key1 while `item1` still holds a reference to it.
        test_dbg!(slab.remove(key1));
        let t1 = thread::spawn(move || {
            assert_eq!(item1.get_ref(), &String::from("hello"));
            drop(item1);
        });
        let t2 = thread::spawn(move || {
            assert_eq!(item2.get_ref(), &String::from("goodbye"));
            test_dbg!(slab2.remove(key2));
            drop(item2);
        });
        t1.join().unwrap();
        t2.join().unwrap();
        assert!(slab.get(key1).is_none());
        assert!(slab.get(key2).is_none());
    });
}

// Owned guards must remain usable after the slab's own `Arc` handle has
// been dropped.
#[test]
fn owned_entrys_outlive_slab() {
    run_model("owned_entrys_outlive_slab", || {
        let slab = Arc::new(Slab::<alloc::Track<String>>::new());
        let key1 = slab
            .insert(alloc::Track::new(String::from("hello")))
            .expect("insert item 1");
        let key2 = slab
            .insert(alloc::Track::new(String::from("goodbye")))
            .expect("insert item 2");
        let item1_1 = slab.clone().get_owned(key1).expect("get key1");
        let item1_2 = slab.clone().get_owned(key1).expect("get key1 again");
        let item2 = slab.clone().get_owned(key2).expect("get key2");
        // Drop the test's handle; the guards keep the slab alive.
        drop(slab);
        let t1 = thread::spawn(move || {
            assert_eq!(item1_1.get_ref(), &String::from("hello"));
            drop(item1_1);
        });
        let t2 = thread::spawn(move || {
            assert_eq!(item2.get_ref(), &String::from("goodbye"));
            drop(item2);
        });
        t1.join().unwrap();
        t2.join().unwrap();
        assert_eq!(item1_2.get_ref(), &String::from("hello"));
    });
}
#[test]
fn owned_entry_ping_pong() {
    // Owned guards are moved into spawned threads and returned through the
    // join handles; the guarded values must remain readable on the joining
    // thread despite concurrent `remove()` calls.
    run_model("owned_entry_ping_pong", || {
        let slab = Arc::new(Slab::<alloc::Track<String>>::new());
        let key1 = slab
            .insert(alloc::Track::new(String::from("hello")))
            .expect("insert item 1");
        let key2 = slab
            .insert(alloc::Track::new(String::from("world")))
            .expect("insert item 2");
        let item1 = slab.clone().get_owned(key1).expect("get key1");
        let slab2 = slab.clone();
        let slab3 = slab.clone();
        let t1 = thread::spawn(move || {
            assert_eq!(item1.get_ref(), &String::from("hello"));
            slab2.remove(key1);
            item1
        });
        let t2 = thread::spawn(move || {
            let item2 = slab3.clone().get_owned(key2).unwrap();
            assert_eq!(item2.get_ref(), &String::from("world"));
            // NOTE(review): this removes `key1` as well, racing t1's removal
            // of the same key — presumably deliberate (two racing removals of
            // a guarded entry), but confirm `key2` was not intended here.
            slab3.remove(key1);
            item2
        });
        // The guards travel back to this thread via the join handles.
        let item1 = t1.join().unwrap();
        let item2 = t2.join().unwrap();
        assert_eq!(item1.get_ref(), &String::from("hello"));
        assert_eq!(item2.get_ref(), &String::from("world"));
    });
}
#[test]
fn owned_entry_drop_from_other_threads() {
    // Owned guards may be dropped on threads other than the one that created
    // them; whichever drop happens last must release the slot.
    run_model("owned_entry_drop_from_other_threads", || {
        let slab = Arc::new(Slab::<alloc::Track<String>>::new());
        let key1 = slab
            .insert(alloc::Track::new(String::from("hello")))
            .expect("insert item 1");
        let item1 = slab.clone().get_owned(key1).expect("get key1");
        let slab2 = slab.clone();
        let t1 = thread::spawn(move || {
            let slab = slab2.clone();
            // Insert and guard a second entry from inside this thread.
            let key2 = slab
                .insert(alloc::Track::new(String::from("goodbye")))
                .expect("insert item 1");
            let item2 = slab.clone().get_owned(key2).expect("get key1");
            // t2 removes key1 and drops key2's guard...
            let t2 = thread::spawn(move || {
                assert_eq!(item2.get_ref(), &String::from("goodbye"));
                test_dbg!(slab2.remove(key1));
                drop(item2)
            });
            // ...while this thread removes key2 and drops key1's guard, so
            // each entry's removal and final guard-drop land on different
            // threads.
            assert_eq!(item1.get_ref(), &String::from("hello"));
            test_dbg!(slab.remove(key2));
            drop(item1);
            (t2, key2)
        });
        let (t2, key2) = t1.join().unwrap();
        // These reads race t2; the results are only logged, not asserted.
        test_dbg!(slab.get(key1));
        test_dbg!(slab.get(key2));
        t2.join().unwrap();
        // After t2 completes, both entries must have been fully released.
        assert!(slab.get(key1).is_none());
        assert!(slab.get(key2).is_none());
    });
}

75
vendor/sharded-slab/src/tests/mod.rs vendored Normal file
View File

@@ -0,0 +1,75 @@
mod idx {
    //! Property tests for packing and unpacking the components of a slab
    //! index: thread id (`Tid`), slot generation, and in-page address.
    use crate::{
        cfg,
        page::{self, slot},
        Pack, Tid,
    };
    use proptest::prelude::*;

    proptest! {
        // Packing a thread id into an index and unpacking it again must be
        // lossless for every representable tid.
        #[test]
        #[cfg_attr(loom, ignore)] // not run under loom builds
        fn tid_roundtrips(tid in 0usize..Tid::<cfg::DefaultConfig>::BITS) {
            let tid = Tid::<cfg::DefaultConfig>::from_usize(tid);
            let packed = tid.pack(0);
            assert_eq!(tid, Tid::from_packed(packed));
        }

        // Packing all three index components together must round-trip each
        // of them independently of the others.
        #[test]
        #[cfg_attr(loom, ignore)] // not run under loom builds
        fn idx_roundtrips(
            tid in 0usize..Tid::<cfg::DefaultConfig>::BITS,
            gen in 0usize..slot::Generation::<cfg::DefaultConfig>::BITS,
            addr in 0usize..page::Addr::<cfg::DefaultConfig>::BITS,
        ) {
            let tid = Tid::<cfg::DefaultConfig>::from_usize(tid);
            let gen = slot::Generation::<cfg::DefaultConfig>::from_usize(gen);
            let addr = page::Addr::<cfg::DefaultConfig>::from_usize(addr);
            // Nest the packs, then verify each component can be recovered
            // from the combined word.
            let packed = tid.pack(gen.pack(addr.pack(0)));
            assert_eq!(addr, page::Addr::from_packed(packed));
            assert_eq!(gen, slot::Generation::from_packed(packed));
            assert_eq!(tid, Tid::from_packed(packed));
        }
    }
}
pub(crate) mod util {
    //! Shared helpers for the test modules.
    #[cfg(loom)]
    use std::sync::atomic::{AtomicUsize, Ordering};

    /// A `Config` with a tiny initial page size, so tests exercise page
    /// growth without inserting large numbers of entries.
    pub(crate) struct TinyConfig;

    impl crate::Config for TinyConfig {
        const INITIAL_PAGE_SIZE: usize = 4;
    }

    /// Runs `f` as a loom model with a default `Builder`.
    #[cfg(loom)]
    pub(crate) fn run_model(name: &'static str, f: impl Fn() + Sync + Send + 'static) {
        run_builder(name, loom::model::Builder::new(), f)
    }

    /// Runs `f` as a loom model using the provided `builder`, printing a
    /// banner with the test name and iteration number before each explored
    /// execution.
    #[cfg(loom)]
    pub(crate) fn run_builder(
        name: &'static str,
        builder: loom::model::Builder,
        f: impl Fn() + Sync + Send + 'static,
    ) {
        // Counts how many times the closure has been executed by the model.
        let iters = AtomicUsize::new(1);
        builder.check(move || {
            test_println!(
                "\n------------ running test {}; iteration {} ------------\n",
                name,
                iters.fetch_add(1, Ordering::SeqCst)
            );
            f()
        });
    }
}
// Deterministic tests, compiled only for ordinary (non-loom) builds.
#[cfg(not(loom))]
mod custom_config;
// Concurrency tests, compiled only when model-checking under loom.
#[cfg(loom)]
mod loom_pool;
#[cfg(loom)]
mod loom_slab;
// Property-based tests; also non-loom only.
#[cfg(not(loom))]
mod properties;

View File

@@ -0,0 +1,244 @@
//! This module contains property-based tests against the public API:
//! * API never panics.
//! * Active entries cannot be overwritten until removed.
//! * The slab doesn't produce overlapping keys.
//! * The slab doesn't leave "lost" keys.
//! * `get()`, `get_owned`, and `contains()` are consistent.
//! * `RESERVED_BITS` are actually not used.
//!
//! The test is supposed to be deterministic, so it doesn't spawn real threads
//! and uses `tid::with()` to override the TID for the current thread.
use std::{ops::Range, sync::Arc};
use indexmap::IndexMap;
use proptest::prelude::*;
use crate::{tid, Config, DefaultConfig, Slab};
const THREADS: Range<usize> = 1..4;
const ACTIONS: Range<usize> = 1..1000;
/// A single randomly generated operation to apply to the slab, together with
/// the simulated thread id it should run under.
#[derive(Debug, Clone)]
struct Action {
    // Thread id to impersonate via `tid::with()` when applying the action.
    tid: usize,
    kind: ActionKind,
}
/// The kind of slab operation to perform.
///
/// `*Random` variants carry an arbitrary key (which may or may not belong to
/// a live entry); `*Existent` variants carry a seed used to pick one of the
/// currently active entries.
#[derive(Debug, Clone)]
enum ActionKind {
    Insert,
    VacantEntry,
    RemoveRandom(usize),   // key
    RemoveExistent(usize), // seed
    TakeRandom(usize),     // key
    TakeExistent(usize),   // seed
    GetRandom(usize),      // key
    GetExistent(usize),    // seed
}
prop_compose! {
    // Generates an `Action` pairing a random thread id with a random
    // operation kind.
    fn action_strategy()(tid in THREADS, kind in action_kind_strategy()) -> Action {
        Action { tid, kind }
    }
}
/// Strategy producing one random `ActionKind`, with read-only actions
/// weighted more heavily than mutating ones.
fn action_kind_strategy() -> impl Strategy<Value = ActionKind> {
    prop_oneof![
        1 => Just(ActionKind::Insert),
        1 => Just(ActionKind::VacantEntry),
        1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveRandom),
        1 => prop::num::usize::ANY.prop_map(ActionKind::RemoveExistent),
        1 => prop::num::usize::ANY.prop_map(ActionKind::TakeRandom),
        1 => prop::num::usize::ANY.prop_map(ActionKind::TakeExistent),
        // Produce `GetRandom` and `GetExistent` more often.
        5 => prop::num::usize::ANY.prop_map(ActionKind::GetRandom),
        5 => prop::num::usize::ANY.prop_map(ActionKind::GetExistent),
    ]
}
/// Stores active entries (added and not yet removed).
///
/// Acts as the reference model the real slab is checked against.
#[derive(Default)]
struct Active {
    // Use `IndexMap` to preserve determinism.
    map: IndexMap<usize, u32>,
    // Last value handed out by `next_value()`; values are unique per run.
    prev_value: u32,
}
impl Active {
    /// Hands out the next value in a strictly increasing sequence, so every
    /// inserted entry carries a distinct value.
    fn next_value(&mut self) -> u32 {
        self.prev_value += 1;
        self.prev_value
    }

    /// Looks up the value of an active entry by key.
    fn get(&self, key: usize) -> Option<u32> {
        self.map.get(&key).map(|value| *value)
    }

    /// Picks an arbitrary active entry, selected deterministically by `seed`,
    /// without removing it.
    fn get_any(&self, seed: usize) -> Option<(usize, u32)> {
        match self.map.len() {
            0 => None,
            len => self
                .map
                .get_index(seed % len)
                .map(|(key, value)| (*key, *value)),
        }
    }

    /// Records a newly inserted entry, asserting its key was not already live.
    fn insert(&mut self, key: usize, value: u32) {
        let previous = self.map.insert(key, value);
        assert_eq!(previous, None, "keys of active entries must be unique");
    }

    /// Removes an entry by key, returning its value if it was active.
    fn remove(&mut self, key: usize) -> Option<u32> {
        self.map.swap_remove(&key)
    }

    /// Removes an arbitrary active entry, selected deterministically by
    /// `seed`, returning the removed pair.
    fn remove_any(&mut self, seed: usize) -> Option<(usize, u32)> {
        match self.map.len() {
            0 => None,
            len => self.map.swap_remove_index(seed % len),
        }
    }

    /// Drains all remaining active entries in their current index order.
    fn drain(&mut self) -> impl Iterator<Item = (usize, u32)> + '_ {
        self.map.drain(..)
    }
}
/// Masks `key` down to the bits the slab actually uses.
///
/// Also asserts that the config's `RESERVED_BITS` plus the slab's
/// `USED_BITS` exactly cover a machine word, so reserved bits can never be
/// interpreted as part of a key.
fn used_bits<C: Config>(key: usize) -> usize {
    assert_eq!(
        C::RESERVED_BITS + Slab::<u32, C>::USED_BITS,
        // Idiomatic replacement for `std::mem::size_of::<usize>() * 8`;
        // the crate already uses `usize::BITS` elsewhere.
        usize::BITS as usize
    );
    // Zero out the top `RESERVED_BITS` bits of the key.
    key & ((!0) >> C::RESERVED_BITS)
}
/// Applies a single action to both the real slab and the `Active` model,
/// asserting at each step that the two agree.
///
/// Returns a `TestCaseError` (via the `prop_assert*` macros) on any mismatch.
fn apply_action<C: Config>(
    slab: &Arc<Slab<u32, C>>,
    active: &mut Active,
    action: ActionKind,
) -> Result<(), TestCaseError> {
    match action {
        ActionKind::Insert => {
            let value = active.next_value();
            let key = slab.insert(value).expect("unexpectedly exhausted slab");
            // The returned key must not touch any reserved bits.
            prop_assert_eq!(used_bits::<C>(key), key);
            active.insert(key, value);
        }
        ActionKind::VacantEntry => {
            let value = active.next_value();
            let entry = slab.vacant_entry().expect("unexpectedly exhausted slab");
            let key = entry.key();
            prop_assert_eq!(used_bits::<C>(key), key);
            entry.insert(value);
            active.insert(key, value);
        }
        ActionKind::RemoveRandom(key) => {
            // An arbitrary key must behave identically to its masked form:
            // reserved bits are ignored by the slab.
            let used_key = used_bits::<C>(key);
            prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
            prop_assert_eq!(slab.remove(key), active.remove(used_key).is_some());
        }
        ActionKind::RemoveExistent(seed) => {
            // Remove an entry that is known to be live; both ops must succeed.
            if let Some((key, _value)) = active.remove_any(seed) {
                prop_assert!(slab.contains(key));
                prop_assert!(slab.remove(key));
            }
        }
        ActionKind::TakeRandom(key) => {
            let used_key = used_bits::<C>(key);
            prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
            // `take` must return exactly what the model says was stored.
            prop_assert_eq!(slab.take(key), active.remove(used_key));
        }
        ActionKind::TakeExistent(seed) => {
            if let Some((key, value)) = active.remove_any(seed) {
                prop_assert!(slab.contains(key));
                prop_assert_eq!(slab.take(key), Some(value));
            }
        }
        ActionKind::GetRandom(key) => {
            let used_key = used_bits::<C>(key);
            prop_assert_eq!(slab.get(key).map(|e| *e), slab.get(used_key).map(|e| *e));
            // `get`, `get_owned`, and the model must all agree.
            prop_assert_eq!(slab.get(key).map(|e| *e), active.get(used_key));
            prop_assert_eq!(
                slab.clone().get_owned(key).map(|e| *e),
                active.get(used_key)
            );
        }
        ActionKind::GetExistent(seed) => {
            if let Some((key, value)) = active.get_any(seed) {
                prop_assert!(slab.contains(key));
                prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
                prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
            }
        }
    }
    Ok(())
}
/// Runs a full generated scenario against a slab with config `C`, then
/// verifies that the slab's final contents match the `Active` model exactly,
/// both via `get`/`get_owned` and via `unique_iter()`.
fn run<C: Config>(actions: Vec<Action>) -> Result<(), TestCaseError> {
    let mut slab = Arc::new(Slab::new_with_config::<C>());
    let mut active = Active::default();
    // Apply all actions.
    for action in actions {
        // Override the TID for the current thread instead of using multiple real threads
        // to preserve determinism. We're not checking concurrency issues here, they should be
        // covered by loom tests anyway. Thus, it's fine to run all actions consequently.
        tid::with(action.tid, || {
            apply_action::<C>(&slab, &mut active, action.kind)
        })?;
    }
    // Ensure the slab contains all remaining entries.
    let mut expected_values = Vec::new();
    for (key, value) in active.drain() {
        prop_assert!(slab.contains(key));
        prop_assert_eq!(slab.get(key).map(|e| *e), Some(value));
        prop_assert_eq!(slab.clone().get_owned(key).map(|e| *e), Some(value));
        expected_values.push(value);
    }
    expected_values.sort();
    // Ensure `unique_iter()` returns all remaining entries.
    // `Arc::get_mut` requires this to be the only remaining handle to the
    // slab, so all guards from the loop above must already be dropped.
    let slab = Arc::get_mut(&mut slab).unwrap();
    let mut actual_values = slab.unique_iter().copied().collect::<Vec<_>>();
    actual_values.sort();
    prop_assert_eq!(actual_values, expected_values);
    Ok(())
}
proptest! {
    // The scenario must hold under the default configuration...
    #[test]
    fn default_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
        run::<DefaultConfig>(actions)?;
    }

    // ...and under a custom configuration that reserves key bits.
    #[test]
    fn custom_config(actions in prop::collection::vec(action_strategy(), ACTIONS)) {
        run::<CustomConfig>(actions)?;
    }
}
/// A non-default configuration exercising `RESERVED_BITS`; kept in sync with
/// the config declared in `tests/custom_config.rs`.
struct CustomConfig;

#[cfg(target_pointer_width = "64")]
impl Config for CustomConfig {
    const INITIAL_PAGE_SIZE: usize = 32;
    const MAX_PAGES: usize = 15;
    const MAX_THREADS: usize = 256;
    const RESERVED_BITS: usize = 24;
}
// Gate on `not(64)` rather than `32`, matching `custom_config.rs`: with
// `#[cfg(target_pointer_width = "32")]` no impl would exist on other
// (e.g. 16-bit) targets and `CustomConfig` would fail to compile there.
#[cfg(not(target_pointer_width = "64"))]
impl Config for CustomConfig {
    const INITIAL_PAGE_SIZE: usize = 16;
    const MAX_PAGES: usize = 6;
    const MAX_THREADS: usize = 128;
    const RESERVED_BITS: usize = 12;
}