Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

397
vendor/zerocopy/src/byte_slice.rs vendored Normal file

@@ -0,0 +1,397 @@
// Copyright 2024 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
//! Traits for types that encapsulate a `[u8]`.
//!
//! These traits are used to bound the `B` parameter of [`Ref`].
use core::{
cell,
ops::{Deref, DerefMut},
};
// For each trait polyfill, as soon as the corresponding feature is stable, the
// polyfill import will be unused because method/function resolution will prefer
// the inherent method/function over a trait method/function. Thus, we suppress
// the `unused_imports` warning.
//
// See the documentation on `util::polyfills` for more information.
#[allow(unused_imports)]
use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
#[cfg(doc)]
use crate::Ref;
/// A mutable or immutable reference to a byte slice.
///
/// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
/// implemented for various special reference types such as
/// [`Ref<[u8]>`](core::cell::Ref) and [`RefMut<[u8]>`](core::cell::RefMut).
///
/// # Safety
///
/// Implementations of `ByteSlice` must promise that their implementations of
/// [`Deref`] and [`DerefMut`] are "stable". In particular, given `B: ByteSlice`
/// and `b: B`, two calls, each to either `b.deref()` or `b.deref_mut()`, must
/// return a byte slice with the same address and length. This must hold even if
/// the two calls are separated by an arbitrary sequence of calls to methods on
/// `ByteSlice`, [`ByteSliceMut`], [`IntoByteSlice`], or [`IntoByteSliceMut`],
/// or on their super-traits. This does *not* need to hold if the two calls are
/// separated by any method calls, field accesses, or field modifications *other
/// than* those from these traits.
///
/// Note that this also implies that, given `b: B`, the address and length
/// cannot be modified via objects other than `b`, either on the same thread or
/// on another thread.
pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized {}
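// A minimal usage sketch (hypothetical helper, not part of the vendored
// crate): code generic over `B: ByteSlice` works uniformly for `&[u8]`,
// `&mut [u8]`, `cell::Ref<'_, [u8]>`, and `cell::RefMut<'_, [u8]>`.
#[allow(dead_code)]
fn first_two<B: ByteSlice>(bytes: B) -> Option<[u8; 2]> {
    // Per the stability invariant above, repeated derefs observe a byte
    // slice with the same address and length.
    let slice: &[u8] = &bytes;
    Some([*slice.first()?, *slice.get(1)?])
}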
/// A mutable reference to a byte slice.
///
/// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
/// a byte slice, and is implemented for various special reference types such as
/// `RefMut<[u8]>`.
///
/// `ByteSliceMut` is a shorthand for [`ByteSlice`] and [`DerefMut`].
pub trait ByteSliceMut: ByteSlice + DerefMut {}
impl<B: ByteSlice + DerefMut> ByteSliceMut for B {}
/// A [`ByteSlice`] which can be copied without violating dereference stability.
///
/// # Safety
///
/// If `B: CopyableByteSlice`, then the dereference stability properties
/// required by [`ByteSlice`] (see that trait's safety documentation) do not
/// only hold regarding two calls to `b.deref()` or `b.deref_mut()`, but also
/// hold regarding `c.deref()` or `c.deref_mut()`, where `c` is produced by
/// copying `b`.
pub unsafe trait CopyableByteSlice: ByteSlice + Copy + CloneableByteSlice {}
/// A [`ByteSlice`] which can be cloned without violating dereference stability.
///
/// # Safety
///
/// If `B: CloneableByteSlice`, then the dereference stability properties
/// required by [`ByteSlice`] (see that trait's safety documentation) do not
/// only hold regarding two calls to `b.deref()` or `b.deref_mut()`, but also
/// hold regarding `c.deref()` or `c.deref_mut()`, where `c` is produced by
/// `b.clone()`, `b.clone().clone()`, etc.
pub unsafe trait CloneableByteSlice: ByteSlice + Clone {}
/// A [`ByteSlice`] that can be split in two.
///
/// # Safety
///
/// Unsafe code may depend for its soundness on the assumption that `split_at`
/// and `split_at_unchecked` are implemented correctly. In particular, given `B:
/// SplitByteSlice` and `b: B`, if `b.deref()` returns a byte slice with address
/// `addr` and length `len`, then if `split <= len`, both of these
/// invocations:
/// - `b.split_at(split)`
/// - `b.split_at_unchecked(split)`
///
/// ...will return `(first, second)` such that:
/// - `first`'s address is `addr` and its length is `split`
/// - `second`'s address is `addr + split` and its length is `len - split`
pub unsafe trait SplitByteSlice: ByteSlice {
/// Attempts to split `self` at the midpoint.
///
/// `s.split_at(mid)` returns `Ok((s[..mid], s[mid..]))` if `mid <=
/// s.deref().len()` and otherwise returns `Err(s)`.
///
/// # Safety
///
/// Unsafe code may rely on this function correctly implementing the above
/// functionality.
#[inline]
fn split_at(self, mid: usize) -> Result<(Self, Self), Self> {
if mid <= self.deref().len() {
// SAFETY: Above, we ensure that `mid <= self.deref().len()`. By
// invariant on `ByteSlice`, a supertrait of `SplitByteSlice`,
// `.deref()` is guaranteed to be "stable"; i.e., it will always
// dereference to a byte slice of the same address and length. Thus,
// we can be sure that the above precondition remains satisfied
// through the call to `split_at_unchecked`.
unsafe { Ok(self.split_at_unchecked(mid)) }
} else {
Err(self)
}
}
/// Splits the slice at the midpoint, possibly omitting bounds checks.
///
/// `s.split_at_unchecked(mid)` returns `s[..mid]` and `s[mid..]`.
///
/// # Safety
///
/// `mid` must not be greater than `self.deref().len()`.
///
/// # Panics
///
/// Implementations of this method may choose to perform a bounds check and
/// panic if `mid > self.deref().len()`. They may also panic for any other
/// reason. Since it is optional, callers must not rely on this behavior for
/// soundness.
#[must_use]
unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self);
}
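// A minimal usage sketch (hypothetical helper, not part of the vendored
// crate): zero-copy framing with `split_at`, which preserves the concrete
// slice type `B` for both halves.
#[allow(dead_code)]
fn split_header<B: SplitByteSlice>(packet: B) -> Result<(B, B), B> {
    const HEADER_LEN: usize = 4;
    // Returns `Err(packet)` if fewer than `HEADER_LEN` bytes are present.
    packet.split_at(HEADER_LEN)
}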
/// A shorthand for [`SplitByteSlice`] and [`ByteSliceMut`].
pub trait SplitByteSliceMut: SplitByteSlice + ByteSliceMut {}
impl<B: SplitByteSlice + ByteSliceMut> SplitByteSliceMut for B {}
#[allow(clippy::missing_safety_doc)] // There's a `Safety` section on `into_byte_slice`.
/// A [`ByteSlice`] that conveys no ownership, and so can be converted into a
/// byte slice.
///
/// Some `ByteSlice` types (notably, the standard library's [`Ref`] type) convey
/// ownership, and so they cannot soundly be moved by-value into a byte slice
/// type (`&[u8]`). Some methods in this crate's API (such as [`Ref::into_ref`])
/// are only compatible with `ByteSlice` types without these ownership
/// semantics.
///
/// [`Ref`]: core::cell::Ref
pub unsafe trait IntoByteSlice<'a>: ByteSlice {
/// Converts `self` into a `&[u8]`.
///
/// # Safety
///
/// The returned reference has the same address and length as `self.deref()`
/// and `self.deref_mut()`.
///
/// Note that, combined with the safety invariant on [`ByteSlice`], this
/// safety invariant implies that the returned reference is "stable" in the
/// sense described in the `ByteSlice` docs.
fn into_byte_slice(self) -> &'a [u8];
}
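// Illustrative note (not from the vendored crate): `&[u8]` implements
// `IntoByteSlice`, but `cell::Ref<'_, [u8]>` cannot. The guard owns a
// dynamic `RefCell` borrow, so returning a `&'a [u8]` would let the bytes
// be borrowed beyond the guard's lifetime.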
#[allow(clippy::missing_safety_doc)] // There's a `Safety` section on `into_byte_slice_mut`.
/// A [`ByteSliceMut`] that conveys no ownership, and so can be converted into a
/// mutable byte slice.
///
/// Some `ByteSliceMut` types (notably, the standard library's [`RefMut`] type)
/// convey ownership, and so they cannot soundly be moved by-value into a byte
/// slice type (`&mut [u8]`). Some methods in this crate's API (such as
/// [`Ref::into_mut`]) are only compatible with `ByteSliceMut` types without
/// these ownership semantics.
///
/// [`RefMut`]: core::cell::RefMut
pub unsafe trait IntoByteSliceMut<'a>: IntoByteSlice<'a> + ByteSliceMut {
/// Converts `self` into a `&mut [u8]`.
///
/// # Safety
///
/// The returned reference has the same address and length as `self.deref()`
/// and `self.deref_mut()`.
///
/// Note that, combined with the safety invariant on [`ByteSlice`], this
/// safety invariant implies that the returned reference is "stable" in the
/// sense described in the `ByteSlice` docs.
fn into_byte_slice_mut(self) -> &'a mut [u8];
}
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl ByteSlice for &[u8] {}
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl CopyableByteSlice for &[u8] {}
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl CloneableByteSlice for &[u8] {}
// SAFETY: `split_at_unchecked` is implemented below via
// `<[u8]>::get_unchecked`, which is documented to return the `..mid` and
// `mid..` subslices, correctly splitting `self` at the given `mid` point.
unsafe impl SplitByteSlice for &[u8] {
#[inline]
unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) {
// SAFETY: By contract on caller, `mid` is not greater than
// `bytes.len()`.
unsafe { (<[u8]>::get_unchecked(self, ..mid), <[u8]>::get_unchecked(self, mid..)) }
}
}
// SAFETY: See inline.
unsafe impl<'a> IntoByteSlice<'a> for &'a [u8] {
#[inline(always)]
fn into_byte_slice(self) -> &'a [u8] {
// SAFETY: It would be patently insane to implement `<Deref for
// &[u8]>::deref` as anything other than `fn deref(&self) -> &[u8] {
// *self }`. Assuming this holds, then `self` is stable as required by
// `into_byte_slice`.
self
}
}
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl ByteSlice for &mut [u8] {}
// SAFETY: `split_at_unchecked` is implemented below via raw-pointer
// arithmetic and `from_raw_parts_mut`; the inline SAFETY comments justify
// that it correctly splits `self` into two slices at the given `mid`
// point.
unsafe impl SplitByteSlice for &mut [u8] {
#[inline]
unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) {
use core::slice::from_raw_parts_mut;
// `l_ptr` is non-null, because `self` is non-null, by invariant on
// `&mut [u8]`.
let l_ptr = self.as_mut_ptr();
// SAFETY: By contract on caller, `mid` is not greater than
// `self.len()`.
let r_ptr = unsafe { l_ptr.add(mid) };
let l_len = mid;
// SAFETY: By contract on caller, `mid` is not greater than
// `self.len()`.
//
// FIXME(#67): Remove this allow. See NumExt for more details.
#[allow(unstable_name_collisions)]
let r_len = unsafe { self.len().unchecked_sub(mid) };
// SAFETY: These invocations of `from_raw_parts_mut` satisfy its
// documented safety preconditions [1]:
// - The data `l_ptr` and `r_ptr` are valid for both reads and writes of
// `l_len` and `r_len` bytes, respectively, and they are trivially
// aligned. In particular:
// - The entire memory range of each slice is contained within a
// single allocated object, since `l_ptr` and `r_ptr` are both
// derived from within the address range of `self`.
// - Both `l_ptr` and `r_ptr` are non-null and trivially aligned.
// `self` is non-null by invariant on `&mut [u8]`, and the
// operations that derive `l_ptr` and `r_ptr` from `self` do not
// nullify either pointer.
// - The data `l_ptr` and `r_ptr` point to `l_len` and `r_len`,
// respectively, consecutive properly initialized values of type `u8`.
// This is true for `self` by invariant on `&mut [u8]`, and remains
// true for these two sub-slices of `self`.
// - The memory referenced by the returned slice cannot be accessed
// through any other pointer (not derived from the return value) for
// the duration of lifetime `'a`, because:
// - `split_at_unchecked` consumes `self` (which is not `Copy`),
// - `split_at_unchecked` does not exfiltrate any references to this
// memory, besides those references returned below,
// - the returned slices are non-overlapping.
// - The individual sizes of the sub-slices of `self` are no larger than
// `isize::MAX`, because their combined sizes are no larger than
// `isize::MAX`, by invariant on `self`.
//
// [1] https://doc.rust-lang.org/std/slice/fn.from_raw_parts_mut.html#safety
unsafe { (from_raw_parts_mut(l_ptr, l_len), from_raw_parts_mut(r_ptr, r_len)) }
}
}
// SAFETY: See inline.
unsafe impl<'a> IntoByteSlice<'a> for &'a mut [u8] {
#[inline(always)]
fn into_byte_slice(self) -> &'a [u8] {
// SAFETY: It would be patently insane to implement `<Deref for &mut
// [u8]>::deref` as anything other than `fn deref(&self) -> &[u8] {
// *self }`. Assuming this holds, then `self` is stable as required by
// `into_byte_slice`.
self
}
}
// SAFETY: See inline.
unsafe impl<'a> IntoByteSliceMut<'a> for &'a mut [u8] {
#[inline(always)]
fn into_byte_slice_mut(self) -> &'a mut [u8] {
// SAFETY: It would be patently insane to implement `<DerefMut for &mut
// [u8]>::deref` as anything other than `fn deref_mut(&mut self) -> &mut
// [u8] { *self }`. Assuming this holds, then `self` is stable as
// required by `into_byte_slice_mut`.
self
}
}
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl ByteSlice for cell::Ref<'_, [u8]> {}
// SAFETY: This delegates to the stdlib implementation of `Ref::map_split`, which is
// assumed to be correct, and `SplitByteSlice::split_at_unchecked`, which is
// documented to correctly split `self` into two slices at the given `mid`
// point.
unsafe impl SplitByteSlice for cell::Ref<'_, [u8]> {
#[inline]
unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) {
cell::Ref::map_split(self, |slice|
// SAFETY: By precondition on caller, `mid` is not greater than
// `slice.len()`.
unsafe {
SplitByteSlice::split_at_unchecked(slice, mid)
})
}
}
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl ByteSlice for cell::RefMut<'_, [u8]> {}
// SAFETY: This delegates to the stdlib implementation of `RefMut::map_split`, which
// is assumed to be correct, and `SplitByteSlice::split_at_unchecked`, which is
// documented to correctly split `self` into two slices at the given `mid`
// point.
unsafe impl SplitByteSlice for cell::RefMut<'_, [u8]> {
#[inline]
unsafe fn split_at_unchecked(self, mid: usize) -> (Self, Self) {
cell::RefMut::map_split(self, |slice|
// SAFETY: By precondition on caller, `mid` is not greater than
// `slice.len()`.
unsafe {
SplitByteSlice::split_at_unchecked(slice, mid)
})
}
}
#[cfg(kani)]
mod proofs {
use super::*;
fn any_vec() -> Vec<u8> {
let len = kani::any();
kani::assume(len <= isize::MAX as usize);
vec![0u8; len]
}
#[kani::proof]
fn prove_split_at_unchecked() {
let v = any_vec();
let slc = v.as_slice();
let mid = kani::any();
kani::assume(mid <= slc.len());
let (l, r) = unsafe { slc.split_at_unchecked(mid) };
assert_eq!(l.len() + r.len(), slc.len());
let slc: *const _ = slc;
let l: *const _ = l;
let r: *const _ = r;
assert_eq!(slc.cast::<u8>(), l.cast::<u8>());
assert_eq!(unsafe { slc.cast::<u8>().add(mid) }, r.cast::<u8>());
let mut v = any_vec();
let slc = v.as_mut_slice();
let len = slc.len();
let mid = kani::any();
kani::assume(mid <= slc.len());
let (l, r) = unsafe { slc.split_at_unchecked(mid) };
assert_eq!(l.len() + r.len(), len);
let l: *mut _ = l;
let r: *mut _ = r;
let slc: *mut _ = slc;
assert_eq!(slc.cast::<u8>(), l.cast::<u8>());
assert_eq!(unsafe { slc.cast::<u8>().add(mid) }, r.cast::<u8>());
}
}

1530
vendor/zerocopy/src/byteorder.rs vendored Normal file

File diff suppressed because it is too large

211
vendor/zerocopy/src/deprecated.rs vendored Normal file

@@ -0,0 +1,211 @@
// Copyright 2024 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
//! Deprecated items. These are kept separate so that they don't clutter up
//! other modules.
use super::*;
impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: KnownLayout + Immutable + ?Sized,
{
#[deprecated(since = "0.8.0", note = "renamed to `Ref::from_bytes`")]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new(bytes: B) -> Option<Ref<B, T>> {
Self::from_bytes(bytes).ok()
}
}
impl<B, T> Ref<B, T>
where
B: SplitByteSlice,
T: KnownLayout + Immutable + ?Sized,
{
#[deprecated(since = "0.8.0", note = "renamed to `Ref::from_prefix`")]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
Self::from_prefix(bytes).ok()
}
}
impl<B, T> Ref<B, T>
where
B: SplitByteSlice,
T: KnownLayout + Immutable + ?Sized,
{
#[deprecated(since = "0.8.0", note = "renamed to `Ref::from_suffix`")]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
Self::from_suffix(bytes).ok()
}
}
impl<B, T> Ref<B, T>
where
B: ByteSlice,
T: Unaligned + KnownLayout + Immutable + ?Sized,
{
#[deprecated(
since = "0.8.0",
note = "use `Ref::from_bytes`; for `T: Unaligned`, the returned `CastError` implements `Into<SizeError>`"
)]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
Self::from_bytes(bytes).ok()
}
}
impl<B, T> Ref<B, T>
where
B: SplitByteSlice,
T: Unaligned + KnownLayout + Immutable + ?Sized,
{
#[deprecated(
since = "0.8.0",
note = "use `Ref::from_prefix`; for `T: Unaligned`, the returned `CastError` implements `Into<SizeError>`"
)]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
Self::from_prefix(bytes).ok()
}
}
impl<B, T> Ref<B, T>
where
B: SplitByteSlice,
T: Unaligned + KnownLayout + Immutable + ?Sized,
{
#[deprecated(
since = "0.8.0",
note = "use `Ref::from_suffix`; for `T: Unaligned`, the returned `CastError` implements `Into<SizeError>`"
)]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
Self::from_suffix(bytes).ok()
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
T: Immutable,
{
#[deprecated(since = "0.8.0", note = "`Ref::from_bytes` now supports slices")]
#[doc(hidden)]
#[inline(always)]
pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
Self::from_bytes(bytes).ok()
}
}
impl<B, T> Ref<B, [T]>
where
B: ByteSlice,
T: Unaligned + Immutable,
{
#[deprecated(
since = "0.8.0",
note = "`Ref::from_bytes` now supports slices; for `T: Unaligned`, the returned `CastError` implements `Into<SizeError>`"
)]
#[doc(hidden)]
#[inline(always)]
pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
Ref::from_bytes(bytes).ok()
}
}
impl<'a, B, T> Ref<B, [T]>
where
B: 'a + IntoByteSlice<'a>,
T: FromBytes + Immutable,
{
#[deprecated(since = "0.8.0", note = "`Ref::into_ref` now supports slices")]
#[doc(hidden)]
#[inline(always)]
pub fn into_slice(self) -> &'a [T] {
Ref::into_ref(self)
}
}
impl<'a, B, T> Ref<B, [T]>
where
B: 'a + IntoByteSliceMut<'a>,
T: FromBytes + IntoBytes + Immutable,
{
#[deprecated(since = "0.8.0", note = "`Ref::into_mut` now supports slices")]
#[doc(hidden)]
#[inline(always)]
pub fn into_mut_slice(self) -> &'a mut [T] {
Ref::into_mut(self)
}
}
impl<B, T> Ref<B, [T]>
where
B: SplitByteSlice,
T: Immutable,
{
#[deprecated(since = "0.8.0", note = "replaced by `Ref::from_prefix_with_elems`")]
#[must_use = "has no side effects"]
#[doc(hidden)]
#[inline(always)]
pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
Ref::from_prefix_with_elems(bytes, count).ok()
}
#[deprecated(since = "0.8.0", note = "replaced by `Ref::from_suffix_with_elems`")]
#[must_use = "has no side effects"]
#[doc(hidden)]
#[inline(always)]
pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
Ref::from_suffix_with_elems(bytes, count).ok()
}
}
impl<B, T> Ref<B, [T]>
where
B: SplitByteSlice,
T: Unaligned + Immutable,
{
#[deprecated(
since = "0.8.0",
note = "use `Ref::from_prefix_with_elems`; for `T: Unaligned`, the returned `CastError` implements `Into<SizeError>`"
)]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
Ref::from_prefix_with_elems(bytes, count).ok()
}
#[deprecated(
since = "0.8.0",
note = "use `Ref::from_suffix_with_elems`; for `T: Unaligned`, the returned `CastError` implements `Into<SizeError>`"
)]
#[doc(hidden)]
#[must_use = "has no side effects"]
#[inline(always)]
pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
Ref::from_suffix_with_elems(bytes, count).ok()
}
}
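// A minimal migration sketch (hypothetical function, not part of the
// vendored crate): the 0.8 replacements return `Result` where these shims
// returned `Option`, so `.ok()` recovers the pre-0.8 shape.
#[allow(dead_code)]
fn migration_sketch(bytes: &[u8]) {
    // Pre-0.8: `Ref::<_, [u8; 2]>::new(bytes)`.
    let _whole: Option<Ref<&[u8], [u8; 2]>> = Ref::from_bytes(bytes).ok();
    // Pre-0.8: `Ref::<_, [u8; 2]>::new_from_prefix(bytes)`.
    let _prefix: Option<(Ref<&[u8], [u8; 2]>, &[u8])> = Ref::from_prefix(bytes).ok();
}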

125
vendor/zerocopy/src/doctests.rs vendored Normal file

@@ -0,0 +1,125 @@
// Copyright 2025 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
#![cfg(feature = "derive")] // Required for derives on `SliceDst`
#![allow(dead_code)]
//! Our UI test framework, built on the `trybuild` crate, does not support
//! testing for post-monomorphization errors. Instead, we use doctests, which
//! are able to test for post-monomorphization errors.
use crate::*;
#[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
#[repr(C)]
#[allow(missing_debug_implementations, missing_copy_implementations)]
pub struct SliceDst<T, U> {
pub t: T,
pub u: [U],
}
#[allow(clippy::must_use_candidate, clippy::missing_inline_in_public_items, clippy::todo)]
impl<T: FromBytes + IntoBytes, U: FromBytes + IntoBytes> SliceDst<T, U> {
pub fn new() -> &'static SliceDst<T, U> {
todo!()
}
pub fn new_mut() -> &'static mut SliceDst<T, U> {
todo!()
}
}
/// We require that the alignment of the destination type is not larger than the
/// alignment of the source type.
///
/// ```compile_fail,E0080
/// let increase_alignment: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
/// ```
///
/// ```compile_fail,E0080
/// let mut src = [0u8; 2];
/// let increase_alignment: &mut u16 = zerocopy::transmute_mut!(&mut src);
/// ```
enum TransmuteRefMutAlignmentIncrease {}
/// We require that the size of the destination type is not larger than the size
/// of the source type.
///
/// ```compile_fail,E0080
/// let increase_size: &[u8; 2] = zerocopy::transmute_ref!(&0u8);
/// ```
///
/// ```compile_fail,E0080
/// let mut src = 0u8;
/// let increase_size: &mut [u8; 2] = zerocopy::transmute_mut!(&mut src);
/// ```
enum TransmuteRefMutSizeIncrease {}
/// We require that the size of the destination type is not smaller than the
/// size of the source type.
///
/// ```compile_fail,E0080
/// let decrease_size: &u8 = zerocopy::transmute_ref!(&[0u8; 2]);
/// ```
///
/// ```compile_fail,E0080
/// let mut src = [0u8; 2];
/// let decrease_size: &mut u8 = zerocopy::transmute_mut!(&mut src);
/// ```
enum TransmuteRefMutSizeDecrease {}
/// It's not possible in the general case to increase the trailing slice offset
/// during a reference transmutation - some pointer metadata values would not be
/// supportable, and so such a transmutation would be fallible.
///
/// ```compile_fail,E0080
/// use zerocopy::doctests::SliceDst;
/// let src: &SliceDst<u8, u8> = SliceDst::new();
/// let increase_offset: &SliceDst<[u8; 2], u8> = zerocopy::transmute_ref!(src);
/// ```
///
/// ```compile_fail,E0080
/// use zerocopy::doctests::SliceDst;
/// let src: &mut SliceDst<u8, u8> = SliceDst::new_mut();
/// let increase_offset: &mut SliceDst<[u8; 2], u8> = zerocopy::transmute_mut!(src);
/// ```
enum TransmuteRefMutDstOffsetIncrease {}
/// Reference transmutes are not possible when the difference between the source
/// and destination types' trailing slice offsets is not a multiple of the
/// destination type's trailing slice element size.
///
/// ```compile_fail,E0080
/// use zerocopy::doctests::SliceDst;
/// let src: &SliceDst<[u8; 3], [u8; 2]> = SliceDst::new();
/// let _: &SliceDst<[u8; 2], [u8; 2]> = zerocopy::transmute_ref!(src);
/// ```
///
/// ```compile_fail,E0080
/// use zerocopy::doctests::SliceDst;
/// let src: &mut SliceDst<[u8; 3], [u8; 2]> = SliceDst::new_mut();
/// let _: &mut SliceDst<[u8; 2], [u8; 2]> = zerocopy::transmute_mut!(src);
/// ```
enum TransmuteRefMutDstOffsetNotMultiple {}
/// Reference transmutes are not possible when the source's trailing slice
/// element size is not a multiple of the destination's.
///
/// ```compile_fail,E0080
/// use zerocopy::doctests::SliceDst;
/// let src: &SliceDst<(), [u8; 3]> = SliceDst::new();
/// let _: &SliceDst<(), [u8; 2]> = zerocopy::transmute_ref!(src);
/// ```
///
/// ```compile_fail,E0080
/// use zerocopy::doctests::SliceDst;
/// let src: &mut SliceDst<(), [u8; 3]> = SliceDst::new_mut();
/// let _: &mut SliceDst<(), [u8; 2]> = zerocopy::transmute_mut!(src);
/// ```
enum TransmuteRefMutDstElemSizeNotMultiple {}
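/// An illustrative counterpart (hypothetical, not from the vendored crate):
/// the restrictions above are one-directional. A transmute that preserves
/// size and *decreases* alignment compiles:
///
/// ```
/// let src: u16 = 0x0102;
/// let _decrease_alignment: &[u8; 2] = zerocopy::transmute_ref!(&src);
/// ```
enum TransmuteRefAlignmentDecrease {}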

1081
vendor/zerocopy/src/error.rs vendored Normal file

File diff suppressed because it is too large

2081
vendor/zerocopy/src/impls.rs vendored Normal file

File diff suppressed because it is too large

2066
vendor/zerocopy/src/layout.rs vendored Normal file

File diff suppressed because it is too large

6786
vendor/zerocopy/src/lib.rs vendored Normal file

File diff suppressed because it is too large

1623
vendor/zerocopy/src/macros.rs vendored Normal file

File diff suppressed because it is too large

747
vendor/zerocopy/src/pointer/inner.rs vendored Normal file

@@ -0,0 +1,747 @@
// Copyright 2024 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
use core::{marker::PhantomData, mem, ops::Range, ptr::NonNull};
pub use _def::PtrInner;
#[allow(unused_imports)]
use crate::util::polyfills::NumExt as _;
use crate::{
layout::{CastType, MetadataCastError},
util::AsAddress,
AlignmentError, CastError, KnownLayout, MetadataOf, SizeError, SplitAt,
};
mod _def {
use super::*;
/// The inner pointer stored inside a [`Ptr`][crate::Ptr].
///
/// `PtrInner<'a, T>` is [covariant] in `'a` and invariant in `T`.
///
/// [covariant]: https://doc.rust-lang.org/reference/subtyping.html
#[allow(missing_debug_implementations)]
pub struct PtrInner<'a, T>
where
T: ?Sized,
{
/// # Invariants
///
/// 0. If `ptr`'s referent is not zero sized, then `ptr` has valid
/// provenance for its referent, which is entirely contained in some
/// Rust allocation, `A`.
/// 1. If `ptr`'s referent is not zero sized, `A` is guaranteed to live
/// for at least `'a`.
///
/// # Postconditions
///
/// By virtue of these invariants, code may assume the following, which
/// are logical implications of the invariants:
/// - `ptr`'s referent is not larger than `isize::MAX` bytes \[1\]
/// - `ptr`'s referent does not wrap around the address space \[1\]
///
/// \[1\] Per <https://doc.rust-lang.org/1.85.0/std/ptr/index.html#allocated-object>:
///
/// For any allocated object with `base` address, `size`, and a set of
/// `addresses`, the following are guaranteed:
/// ...
/// - `size <= isize::MAX`
///
/// As a consequence of these guarantees, given any address `a` within
/// the set of addresses of an allocated object:
/// ...
/// - It is guaranteed that, given `o = a - base` (i.e., the offset of
/// `a` within the allocated object), `base + o` will not wrap around
/// the address space (in other words, will not overflow `usize`)
ptr: NonNull<T>,
// SAFETY: `&'a UnsafeCell<T>` is covariant in `'a` and invariant in `T`
// [1]. We use this construction rather than the equivalent `&mut T`,
// because our MSRV of 1.65 prohibits `&mut` types in const contexts.
//
// [1] https://doc.rust-lang.org/1.81.0/reference/subtyping.html#variance
_marker: PhantomData<&'a core::cell::UnsafeCell<T>>,
}
impl<'a, T: 'a + ?Sized> Copy for PtrInner<'a, T> {}
impl<'a, T: 'a + ?Sized> Clone for PtrInner<'a, T> {
#[inline(always)]
fn clone(&self) -> PtrInner<'a, T> {
// SAFETY: None of the invariants on `ptr` are affected by having
// multiple copies of a `PtrInner`.
*self
}
}
impl<'a, T: 'a + ?Sized> PtrInner<'a, T> {
/// Constructs a `Ptr` from a [`NonNull`].
///
/// # Safety
///
/// The caller promises that:
///
/// 0. If `ptr`'s referent is not zero sized, then `ptr` has valid
/// provenance for its referent, which is entirely contained in some
/// Rust allocation, `A`.
/// 1. If `ptr`'s referent is not zero sized, `A` is guaranteed to live
/// for at least `'a`.
#[inline(always)]
#[must_use]
pub const unsafe fn new(ptr: NonNull<T>) -> PtrInner<'a, T> {
// SAFETY: The caller has promised to satisfy all safety invariants
// of `PtrInner`.
Self { ptr, _marker: PhantomData }
}
/// Converts this `PtrInner<T>` to a [`NonNull<T>`].
///
/// Note that this method does not consume `self`. The caller should
/// watch out for `unsafe` code which uses the returned `NonNull` in a
/// way that violates the safety invariants of `self`.
#[inline(always)]
#[must_use]
pub const fn as_non_null(&self) -> NonNull<T> {
self.ptr
}
}
}
impl<'a, T: ?Sized> PtrInner<'a, T> {
/// Constructs a `PtrInner` from a reference.
#[inline]
pub(crate) fn from_ref(ptr: &'a T) -> Self {
let ptr = NonNull::from(ptr);
// SAFETY:
// 0. If `ptr`'s referent is not zero sized, then `ptr`, by invariant on
// `&'a T` [1], has valid provenance for its referent, which is
// entirely contained in some Rust allocation, `A`.
// 1. If `ptr`'s referent is not zero sized, then `A`, by invariant on
// `&'a T`, is guaranteed to live for at least `'a`.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety:
//
// For all types, `T: ?Sized`, and for all `t: &T` or `t: &mut T`,
// when such values cross an API boundary, the following invariants
// must generally be upheld:
// ...
// - if `size_of_val(t) > 0`, then `t` is dereferenceable for
// `size_of_val(t)` many bytes
//
// If `t` points at address `a`, being “dereferenceable” for N bytes
// means that the memory range `[a, a + N)` is all contained within a
// single allocated object.
unsafe { Self::new(ptr) }
}
/// Constructs a `PtrInner` from a mutable reference.
#[inline]
pub(crate) fn from_mut(ptr: &'a mut T) -> Self {
let ptr = NonNull::from(ptr);
// SAFETY:
// 0. If `ptr`'s referent is not zero sized, then `ptr`, by invariant on
// `&'a mut T` [1], has valid provenance for its referent, which is
// entirely contained in some Rust allocation, `A`.
// 1. If `ptr`'s referent is not zero sized, then `A`, by invariant on
// `&'a mut T`, is guaranteed to live for at least `'a`.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety:
//
// For all types, `T: ?Sized`, and for all `t: &T` or `t: &mut T`,
// when such values cross an API boundary, the following invariants
// must generally be upheld:
// ...
// - if `size_of_val(t) > 0`, then `t` is dereferenceable for
// `size_of_val(t)` many bytes
//
// If `t` points at address `a`, being “dereferenceable” for N bytes
// means that the memory range `[a, a + N)` is all contained within a
// single allocated object.
unsafe { Self::new(ptr) }
}
#[must_use]
#[inline(always)]
pub fn cast_sized<U>(self) -> PtrInner<'a, U>
where
T: Sized,
{
static_assert!(T, U => mem::size_of::<T>() >= mem::size_of::<U>());
// SAFETY: By the preceding assert, `U` is no larger than `T`, which is
// the size of `self`'s referent.
unsafe { self.cast() }
}
/// # Safety
///
/// `U` must not be larger than the size of `self`'s referent.
#[must_use]
#[inline(always)]
pub unsafe fn cast<U>(self) -> PtrInner<'a, U> {
let ptr = self.as_non_null().cast::<U>();
// SAFETY: The caller promises that `U` is no larger than `self`'s
// referent. Thus, `ptr` addresses a subset of the bytes addressed by
// `self`.
//
// 0. By invariant on `self`, if `self`'s referent is not zero sized,
// then `self` has valid provenance for its referent, which is
// entirely contained in some Rust allocation, `A`. Thus, the same
// holds of `ptr`.
// 1. By invariant on `self`, if `self`'s referent is not zero sized,
// then `A` is guaranteed to live for at least `'a`.
unsafe { PtrInner::new(ptr) }
}
}
#[allow(clippy::needless_lifetimes)]
impl<'a, T> PtrInner<'a, T>
where
T: ?Sized + KnownLayout,
{
/// Extracts the metadata of this `ptr`.
pub(crate) fn meta(self) -> MetadataOf<T> {
let meta = T::pointer_to_metadata(self.as_non_null().as_ptr());
// SAFETY: By invariant on `PtrInner`, `self.as_non_null()` addresses no
// more than `isize::MAX` bytes.
unsafe { MetadataOf::new_unchecked(meta) }
}
/// Produces a `PtrInner` with the same address and provenance as `self` but
/// the given `meta`.
///
/// # Safety
///
/// The caller promises that if `self`'s referent is not zero sized, then
/// a pointer constructed from its address with the given `meta` metadata
/// will address a subset of the allocation pointed to by `self`.
#[inline]
pub(crate) unsafe fn with_meta(self, meta: T::PointerMetadata) -> Self
where
T: KnownLayout,
{
let raw = T::raw_from_ptr_len(self.as_non_null().cast(), meta);
// SAFETY:
//
// Lemma 0: `raw` either addresses zero bytes, or addresses a subset of
// the allocation pointed to by `self` and has the same
// provenance as `self`. Proof: `raw` is constructed using
// provenance-preserving operations, and the caller has
// promised that, if `self`'s referent is not zero-sized, the
// resulting pointer addresses a subset of the allocation
// pointed to by `self`.
//
// 0. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not
// zero sized, then `ptr` is derived from some valid Rust allocation,
// `A`.
// 1. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not
// zero sized, then `ptr` has valid provenance for `A`.
// 2. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not
// zero sized, then `ptr` addresses a byte range which is entirely
// contained in `A`.
// 3. Per Lemma 0 and by invariant on `self`, `ptr` addresses a byte
// range whose length fits in an `isize`.
// 4. Per Lemma 0 and by invariant on `self`, `ptr` addresses a byte
// range which does not wrap around the address space.
// 5. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not
// zero sized, then `A` is guaranteed to live for at least `'a`.
unsafe { PtrInner::new(raw) }
}
pub(crate) fn as_bytes(self) -> PtrInner<'a, [u8]> {
let ptr = self.as_non_null();
let bytes = match T::size_of_val_raw(ptr) {
Some(bytes) => bytes,
// SAFETY: `KnownLayout::size_of_val_raw` promises to always
// return `Some` so long as the resulting size fits in a
// `usize`. By invariant on `PtrInner`, `self` refers to a range
// of bytes whose size fits in an `isize`, which implies that it
// also fits in a `usize`.
None => unsafe { core::hint::unreachable_unchecked() },
};
let ptr = core::ptr::slice_from_raw_parts_mut(ptr.cast::<u8>().as_ptr(), bytes);
// SAFETY: `ptr` has the same address as `ptr = self.as_non_null()`,
// which is non-null by construction.
let ptr = unsafe { NonNull::new_unchecked(ptr) };
// SAFETY: `ptr` points to `bytes` `u8`s starting at the same address as
// `self`'s referent. Since `bytes` is the length of `self`'s referent,
// `ptr` addresses the same byte range as `self`. Thus, by invariant on
// `self` (as a `PtrInner`):
//
// 0. If `ptr`'s referent is not zero sized, then `ptr` has valid
// provenance for its referent, which is entirely contained in some
// Rust allocation, `A`.
// 1. If `ptr`'s referent is not zero sized, `A` is guaranteed to live
// for at least `'a`.
unsafe { PtrInner::new(ptr) }
}
}
#[allow(clippy::needless_lifetimes)]
impl<'a, T> PtrInner<'a, T>
where
T: ?Sized + KnownLayout<PointerMetadata = usize>,
{
/// Splits `T` in two.
///
/// # Safety
///
/// The caller promises that:
/// - `l_len.get() <= self.meta()`.
///
/// ## (Non-)Overlap
///
/// Given `let (left, right) = ptr.split_at(l_len)`, it is guaranteed that
/// `left` and `right` are contiguous and non-overlapping if
/// `l_len.padding_needed_for() == 0`. This is true for all `[T]`.
///
/// If `l_len.padding_needed_for() != 0`, then the left pointer will overlap
/// the right pointer to satisfy `T`'s padding requirements.
pub(crate) unsafe fn split_at_unchecked(
self,
l_len: crate::util::MetadataOf<T>,
) -> (Self, PtrInner<'a, [T::Elem]>)
where
T: SplitAt,
{
let l_len = l_len.get();
// SAFETY: The caller promises that `l_len.get() <= self.meta()`.
// Trivially, `0 <= l_len`.
let left = unsafe { self.with_meta(l_len) };
let right = self.trailing_slice();
// SAFETY: The caller promises that `l_len <= self.meta()`. Trivially,
// `self.meta() <= self.meta()`.
let right = unsafe { right.slice_unchecked(l_len..self.meta().get()) };
// SAFETY: If `l_len.padding_needed_for() == 0`, then `left` and `right`
// are non-overlapping. Proof: `left` is constructed from `self` with
// `l_len` as its (exclusive) upper bound. If `l_len.padding_needed_for()
// == 0`, then `left` requires no trailing padding following its final
// element. Since `right` is constructed from `self`'s trailing slice with
// `l_len` as its (inclusive) lower bound, no byte is referred to by both
// pointers.
//
// Conversely, if `l_len.padding_needed_for() == N`, where `N > 0`, then
// `left` requires `N` bytes of trailing padding following its final
// element. Since `right` is constructed from the trailing slice of `self`
// with `l_len` as its (inclusive) lower bound, the first `N` bytes of
// `right` are aliased by `left`.
(left, right)
}
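// Illustrative note (hypothetical type, not from the vendored crate): for
// `T = [u8]`, `padding_needed_for()` is always 0, so the halves never
// overlap. By contrast, for `#[repr(C)] struct Dst { align: u32, trailing:
// [u8] }` (alignment 4, trailing slice at offset 4), splitting at `l_len =
// 1` gives `left` an unpadded size of 5 bytes, padded to 8; those 3
// padding bytes are the first 3 bytes of `right`.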
/// Produces the trailing slice of `self`.
pub(crate) fn trailing_slice(self) -> PtrInner<'a, [T::Elem]>
where
T: SplitAt,
{
let offset = crate::trailing_slice_layout::<T>().offset;
let bytes = self.as_non_null().cast::<u8>().as_ptr();
// SAFETY:
// - By invariant on `T: KnownLayout`, `T::LAYOUT` describes `T`'s
// layout. `offset` is the offset of the trailing slice within `T`,
// which is by definition in-bounds or one byte past the end of any
// `T`, regardless of metadata. By invariant on `PtrInner`, `self`
// (and thus `bytes`) points to a byte range of size `<= isize::MAX`,
// and so `offset <= isize::MAX`. Since `size_of::<u8>() == 1`,
// `offset * size_of::<u8>() <= isize::MAX`.
// - If `offset > 0`, then by invariant on `PtrInner`, `self` (and thus
// `bytes`) points to a byte range entirely contained within the same
// allocated object as `self`. As explained above, this offset results
// in a pointer to or one byte past the end of this allocated object.
let bytes = unsafe { bytes.add(offset) };
// SAFETY: By the preceding safety argument, `bytes` is within or one
// byte past the end of the same allocated object as `self`, which
// ensures that it is non-null.
let bytes = unsafe { NonNull::new_unchecked(bytes) };
let ptr = KnownLayout::raw_from_ptr_len(bytes, self.meta().get());
// SAFETY:
// 0. If `ptr`'s referent is not zero sized, then `ptr` is derived from
// some valid Rust allocation, `A`, because `ptr` is derived from
// the same allocated object as `self`.
// 1. If `ptr`'s referent is not zero sized, then `ptr` has valid
// provenance for `A` because `raw` is derived from the same
// allocated object as `self` via provenance-preserving operations.
// 2. If `ptr`'s referent is not zero sized, then `ptr` addresses a byte
// range which is entirely contained in `A`, by previous safety proof
// on `bytes`.
// 3. `ptr` addresses a byte range whose length fits in an `isize`, by
// consequence of #2.
// 4. `ptr` addresses a byte range which does not wrap around the
// address space, by consequence of #2.
// 5. If `ptr`'s referent is not zero sized, then `A` is guaranteed to
// live for at least `'a`, because `ptr` is derived from `self`.
unsafe { PtrInner::new(ptr) }
}
}
#[allow(clippy::needless_lifetimes)]
impl<'a, T> PtrInner<'a, [T]> {
/// Creates a pointer which addresses the given `range` of self.
///
/// # Safety
///
/// `range` is a valid range (`start <= end`) and `end <= self.meta()`.
pub(crate) unsafe fn slice_unchecked(self, range: Range<usize>) -> Self {
let base = self.as_non_null().cast::<T>().as_ptr();
// SAFETY: The caller promises that `start <= end <= self.meta()`. By
// invariant, if `self`'s referent is not zero-sized, then `self` refers
// to a byte range which is contained within a single allocation, which
// is no more than `isize::MAX` bytes long, and which does not wrap
// around the address space. Thus, this pointer arithmetic remains
// in-bounds of the same allocation, and does not wrap around the
// address space. The offset (in bytes) does not overflow `isize`.
//
// If `self`'s referent is zero-sized, then these conditions are
// trivially satisfied.
let base = unsafe { base.add(range.start) };
// SAFETY: The caller promises that `start <= end`, and so this will not
// underflow.
#[allow(unstable_name_collisions)]
let len = unsafe { range.end.unchecked_sub(range.start) };
let ptr = core::ptr::slice_from_raw_parts_mut(base, len);
// SAFETY: By invariant, `self`'s referent is either a ZST or lives
// entirely in an allocation. `ptr` points inside of or one byte past
// the end of that referent. Thus, in either case, `ptr` is non-null.
let ptr = unsafe { NonNull::new_unchecked(ptr) };
// SAFETY:
//
// Lemma 0: `ptr` addresses a subset of the bytes addressed by `self`,
// and has the same provenance. Proof: The caller guarantees
// that `start <= end <= self.meta()`. Thus, `base` is
// in-bounds of `self`, and `base + (end - start)` is also
// in-bounds of self. Finally, `ptr` is constructed using
// provenance-preserving operations.
//
// 0. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not
// zero sized, then `ptr` has valid provenance for its referent,
// which is entirely contained in some Rust allocation, `A`.
// 1. Per Lemma 0 and by invariant on `self`, if `ptr`'s referent is not
// zero sized, then `A` is guaranteed to live for at least `'a`.
unsafe { PtrInner::new(ptr) }
}
/// Iteratively projects each element of `PtrInner<[T]>` as a `PtrInner<T>`.
pub(crate) fn iter(&self) -> impl Iterator<Item = PtrInner<'a, T>> {
// FIXME(#429): Once `NonNull::cast` documents that it preserves
// provenance, cite those docs.
let base = self.as_non_null().cast::<T>().as_ptr();
(0..self.meta().get()).map(move |i| {
// FIXME(https://github.com/rust-lang/rust/issues/74265): Use
// `NonNull::get_unchecked_mut`.
// SAFETY: If the following conditions are not satisfied,
// `pointer::add` may induce Undefined Behavior [1]:
//
// > - The computed offset, `count * size_of::<T>()` bytes, must not
// > overflow `isize`.
// > - If the computed offset is non-zero, then `self` must be
// > derived from a pointer to some allocated object, and the
// > entire memory range between `self` and the result must be in
// > bounds of that allocated object. In particular, this range
// > must not “wrap around” the edge of the address space.
//
// [1] https://doc.rust-lang.org/std/primitive.pointer.html#method.add
//
// We satisfy both of these conditions here:
// - By invariant on `Ptr`, `self` addresses a byte range whose
// length fits in an `isize`. Since `elem` is contained in `self`,
// the computed offset of `elem` must fit within `isize`.
// - If the computed offset is non-zero, then this means that the
// referent is not zero-sized. In this case, `base` points to an
// allocated object (by invariant on `self`). Thus:
// - By contract, `self.meta()` accurately reflects the number of
// elements in the slice. `i` is in bounds of `self.meta()` by
// construction, and so the result of this addition cannot
// overflow past the end of the allocation referred to by `self`.
// - By invariant on `Ptr`, `self` addresses a byte range which
// does not wrap around the address space. Since `elem` is
// contained in `self`, the computed offset of `elem` must not wrap
// around the address space.
//
// FIXME(#429): Once `pointer::add` documents that it preserves
// provenance, cite those docs.
let elem = unsafe { base.add(i) };
// SAFETY: `elem` must not be null. `base` is constructed from a
// `NonNull` pointer, and the addition that produces `elem` must not
// overflow or wrap around, so `elem >= base > 0`.
//
// FIXME(#429): Once `NonNull::new_unchecked` documents that it
// preserves provenance, cite those docs.
let elem = unsafe { NonNull::new_unchecked(elem) };
// SAFETY: The safety invariants of `Ptr::new` (see definition) are
// satisfied:
// 0. If `elem`'s referent is not zero sized, then `elem` has valid
// provenance for its referent, because it derived from `self`
// using a series of provenance-preserving operations, and
// because `self` has valid provenance for its referent. By the
// same argument, `elem`'s referent is entirely contained within
// the same allocated object as `self`'s referent.
// 1. If `elem`'s referent is not zero sized, then the allocation of
// `elem` is guaranteed to live for at least `'a`, because `elem`
// is entirely contained in `self`, which lives for at least `'a`
// by invariant on `Ptr`.
unsafe { PtrInner::new(elem) }
})
}
}
impl<'a, T, const N: usize> PtrInner<'a, [T; N]> {
/// Casts this pointer-to-array into a slice.
///
/// # Safety
///
/// Callers may assume that the returned `PtrInner` references the same
/// address and length as `self`.
#[allow(clippy::wrong_self_convention)]
pub(crate) fn as_slice(self) -> PtrInner<'a, [T]> {
let start = self.as_non_null().cast::<T>().as_ptr();
let slice = core::ptr::slice_from_raw_parts_mut(start, N);
// SAFETY: `slice` is not null, because it is derived from `start`
// which is non-null.
let slice = unsafe { NonNull::new_unchecked(slice) };
// SAFETY: Lemma: In the following safety arguments, note that `slice`
// is derived from `self` in two steps: first, by casting `self: [T; N]`
// to `start: T`, then by constructing a pointer to a slice starting at
// `start` of length `N`. As a result, `slice` references exactly the
// same allocation as `self`, if any.
//
// 0. By the above lemma, if `slice`'s referent is not zero sized, then
// `slice` has the same referent as `self`. By invariant on `self`,
// this referent is entirely contained within some allocation, `A`.
// Because `slice` was constructed using provenance-preserving
// operations, it has provenance for its entire referent.
// 1. By the above lemma, if `slice`'s referent is not zero sized, then
// `A` is guaranteed to live for at least `'a`, because it is derived
// from the same allocation as `self`, which, by invariant on `Ptr`,
// lives for at least `'a`.
unsafe { PtrInner::new(slice) }
}
}
impl<'a> PtrInner<'a, [u8]> {
/// Attempts to cast `self` to a `U` using the given cast type.
///
/// If `U` is a slice DST and pointer metadata (`meta`) is provided, then
/// the cast will only succeed if it would produce an object with the given
/// metadata.
///
/// Returns `None` if the resulting `U` would be invalidly-aligned, if no
/// `U` can fit in `self`, or if the provided pointer metadata describes an
/// invalid instance of `U`. On success, returns a pointer to the
/// largest-possible `U` which fits in `self`.
///
/// # Safety
///
/// The caller may assume that this implementation is correct, and may rely
/// on that assumption for the soundness of their code. In particular, the
/// caller may assume that, if `try_cast_into` returns `Some((ptr,
/// remainder))`, then `ptr` and `remainder` refer to non-overlapping byte
/// ranges within `self`, and that `ptr` and `remainder` entirely cover
/// `self`. Finally:
/// - If this is a prefix cast, `ptr` has the same address as `self`.
/// - If this is a suffix cast, `remainder` has the same address as `self`.
#[inline]
pub(crate) fn try_cast_into<U>(
self,
cast_type: CastType,
meta: Option<U::PointerMetadata>,
) -> Result<(PtrInner<'a, U>, PtrInner<'a, [u8]>), CastError<Self, U>>
where
U: 'a + ?Sized + KnownLayout,
{
// PANICS: By invariant, the byte range addressed by
// `self.as_non_null()` does not wrap around the address space. This
// implies that the sum of the address (represented as a `usize`) and
// length do not overflow `usize`, as required by
// `validate_cast_and_convert_metadata`. Thus, this call to
// `validate_cast_and_convert_metadata` will only panic if `U` is a DST
// whose trailing slice element is zero-sized.
let maybe_metadata = MetadataOf::<U>::validate_cast_and_convert_metadata(
AsAddress::addr(self.as_non_null().as_ptr()),
self.meta(),
cast_type,
meta,
);
let (elems, split_at) = match maybe_metadata {
Ok((elems, split_at)) => (elems, split_at),
Err(MetadataCastError::Alignment) => {
// SAFETY: Since `validate_cast_and_convert_metadata` returned
// an alignment error, `U` must have an alignment requirement
// greater than one.
let err = unsafe { AlignmentError::<_, U>::new_unchecked(self) };
return Err(CastError::Alignment(err));
}
Err(MetadataCastError::Size) => return Err(CastError::Size(SizeError::new(self))),
};
// SAFETY: `validate_cast_and_convert_metadata` promises to return
// `split_at <= self.meta()`.
//
// Lemma 0: `l_slice` and `r_slice` are non-overlapping. Proof: By
// contract on `PtrInner::split_at_unchecked`, the produced `PtrInner`s
// are always non-overlapping if `self` is a `[T]`; here it is a `[u8]`.
let (l_slice, r_slice) = unsafe { self.split_at_unchecked(split_at) };
let (target, remainder) = match cast_type {
CastType::Prefix => (l_slice, r_slice),
CastType::Suffix => (r_slice, l_slice),
};
let base = target.as_non_null().cast::<u8>();
let ptr = U::raw_from_ptr_len(base, elems.get());
// SAFETY:
// 0. By invariant, if `target`'s referent is not zero sized, then
// `target` has provenance valid for some Rust allocation, `A`.
// Because `ptr` is derived from `target` via provenance-preserving
// operations, `ptr` will also have provenance valid for its entire
// referent.
// 1. `validate_cast_and_convert_metadata` promises that the object
// described by `elems` and `split_at` lives at a byte range which is
// a subset of the input byte range. Thus, by invariant, if
// `target`'s referent is not zero sized, then `target` refers to an
// allocation which is guaranteed to live for at least `'a`, and thus
// so does `ptr`.
Ok((unsafe { PtrInner::new(ptr) }, remainder))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::*;
#[test]
fn test_meta() {
let arr = [1; 16];
let dst = <[u8]>::ref_from_bytes(&arr[..]).unwrap();
let ptr = PtrInner::from_ref(dst);
assert_eq!(ptr.meta().get(), 16);
// SAFETY: 8 is less than 16
let ptr = unsafe { ptr.with_meta(8) };
assert_eq!(ptr.meta().get(), 8);
}
#[test]
fn test_split_at() {
fn test_split_at<const OFFSET: usize, const BUFFER_SIZE: usize>() {
#[derive(FromBytes, KnownLayout, SplitAt, Immutable)]
#[repr(C)]
struct SliceDst<const OFFSET: usize> {
prefix: [u8; OFFSET],
trailing: [u8],
}
let n: usize = BUFFER_SIZE - OFFSET;
let arr = [1; BUFFER_SIZE];
let dst = SliceDst::<OFFSET>::ref_from_bytes(&arr[..]).unwrap();
let ptr = PtrInner::from_ref(dst);
for i in 0..=n {
assert_eq!(ptr.meta().get(), n);
// SAFETY: `i` is in bounds by construction.
let i = unsafe { MetadataOf::new_unchecked(i) };
// SAFETY: `i` is in bounds by construction.
let (l, r) = unsafe { ptr.split_at_unchecked(i) };
// SAFETY: Points to a valid value by construction.
#[allow(clippy::undocumented_unsafe_blocks, clippy::as_conversions)]
// Clippy false positive
let l_sum: usize = l
.trailing_slice()
.iter()
.map(|ptr| unsafe { core::ptr::read_unaligned(ptr.as_non_null().as_ptr()) }
as usize)
.sum();
// SAFETY: Points to a valid value by construction.
#[allow(clippy::undocumented_unsafe_blocks, clippy::as_conversions)]
// Clippy false positive
let r_sum: usize = r
.iter()
.map(|ptr| unsafe { core::ptr::read_unaligned(ptr.as_non_null().as_ptr()) }
as usize)
.sum();
assert_eq!(l_sum, i.get());
assert_eq!(r_sum, n - i.get());
assert_eq!(l_sum + r_sum, n);
}
}
test_split_at::<0, 16>();
test_split_at::<1, 17>();
test_split_at::<2, 18>();
}
#[test]
fn test_trailing_slice() {
fn test_trailing_slice<const OFFSET: usize, const BUFFER_SIZE: usize>() {
#[derive(FromBytes, KnownLayout, SplitAt, Immutable)]
#[repr(C)]
struct SliceDst<const OFFSET: usize> {
prefix: [u8; OFFSET],
trailing: [u8],
}
let n: usize = BUFFER_SIZE - OFFSET;
let arr = [1; BUFFER_SIZE];
let dst = SliceDst::<OFFSET>::ref_from_bytes(&arr[..]).unwrap();
let ptr = PtrInner::from_ref(dst);
assert_eq!(ptr.meta().get(), n);
let trailing = ptr.trailing_slice();
assert_eq!(trailing.meta().get(), n);
assert_eq!(
// SAFETY: We assume this to be sound for the sake of this test,
// which will fail, here, in miri, if the safety precondition of
// `offset_of` is not satisfied.
unsafe {
#[allow(clippy::as_conversions)]
let offset = (trailing.as_non_null().as_ptr() as *mut u8)
.offset_from(ptr.as_non_null().as_ptr() as *mut _);
offset
},
isize::try_from(OFFSET).unwrap(),
);
// SAFETY: Points to a valid value by construction.
#[allow(clippy::undocumented_unsafe_blocks, clippy::as_conversions)]
// Clippy false positive
let trailing: usize =
trailing
.iter()
.map(|ptr| unsafe { core::ptr::read_unaligned(ptr.as_non_null().as_ptr()) }
as usize)
.sum();
assert_eq!(trailing, n);
}
test_trailing_slice::<0, 16>();
test_trailing_slice::<1, 17>();
test_trailing_slice::<2, 18>();
}
}

253
vendor/zerocopy/src/pointer/invariant.rs vendored Normal file

@@ -0,0 +1,253 @@
// Copyright 2024 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
#![allow(missing_copy_implementations, missing_debug_implementations)]
//! The parameterized invariants of a [`Ptr`][super::Ptr].
//!
//! Invariants are encoded as ([`Aliasing`], [`Alignment`], [`Validity`])
//! triples implementing the [`Invariants`] trait.
/// The invariants of a [`Ptr`][super::Ptr].
pub trait Invariants: Sealed {
type Aliasing: Aliasing;
type Alignment: Alignment;
type Validity: Validity;
}
impl<A: Aliasing, AA: Alignment, V: Validity> Invariants for (A, AA, V) {
type Aliasing = A;
type Alignment = AA;
type Validity = V;
}
/// The aliasing invariant of a [`Ptr`][super::Ptr].
///
/// All aliasing invariants must permit reading from the bytes of a pointer's
/// referent which are not covered by [`UnsafeCell`]s.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
pub trait Aliasing: Sealed {
/// Is `Self` [`Exclusive`]?
#[doc(hidden)]
const IS_EXCLUSIVE: bool;
}
/// The alignment invariant of a [`Ptr`][super::Ptr].
pub trait Alignment: Sealed {}
/// The validity invariant of a [`Ptr`][super::Ptr].
///
/// # Safety
///
/// In this section, we will use `Ptr<T, V>` as a shorthand for `Ptr<T, I:
/// Invariants<Validity = V>>` for brevity.
///
/// Each `V: Validity` defines a set of bit values which may appear in the
/// referent of a `Ptr<T, V>`, denoted `S(T, V)`. Each `V: Validity`, in its
/// documentation, provides a definition of `S(T, V)` which must be valid for
/// all `T: ?Sized`. Any `V: Validity` must guarantee that this set is only a
/// function of the *bit validity* of the referent type, `T`, and not of any
/// other property of `T`. As a consequence, given `V: Validity`, `T`, and `U`
/// where `T` and `U` have the same bit validity, `S(T, V) = S(U, V)`.
///
/// It is guaranteed that the referent of any `ptr: Ptr<T, V>` is a member of
/// `S(T, V)`. Unsafe code must ensure that this guarantee will be upheld for
/// any existing `Ptr`s or any `Ptr`s that that code creates.
///
/// An important implication of this guarantee is that it restricts what
/// transmutes are sound, where "transmute" is used in this context to refer to
/// changing the referent type or validity invariant of a `Ptr`, as either
/// change may change the set of bit values permitted to appear in the referent.
/// In particular, the following are necessary (but not sufficient) conditions
/// in order for a transmute from `src: Ptr<T, V>` to `dst: Ptr<U, W>` to be
/// sound:
/// - If `S(T, V) = S(U, W)`, then no restrictions apply; otherwise,
/// - If `dst` permits mutation of its referent (e.g. via `Exclusive` aliasing
/// or interior mutation under `Shared` aliasing), then it must hold that
/// `S(T, V) ⊇ S(U, W)` - in other words, the transmute must not expand the
/// set of allowed referent bit patterns. A violation of this requirement
/// would permit using `dst` to write `x` where `x ∈ S(U, W)` but `x ∉ S(T,
/// V)`, which would violate the guarantee that `src`'s referent may only
/// contain values in `S(T, V)`.
/// - If the referent may be mutated without going through `dst` while `dst` is
/// live (e.g. via interior mutation on a `Shared`-aliased `Ptr` or `&`
/// reference), then it must hold that `S(T, V) ⊆ S(U, W)` - in other words,
/// the transmute must not shrink the set of allowed referent bit patterns. A
/// violation of this requirement would permit using `src` or another
/// mechanism (e.g. a `&` reference used to derive `src`) to write `x` where
/// `x ∈ S(T, V)` but `x ∉ S(U, W)`, which would violate the guarantee that
/// `dst`'s referent may only contain values in `S(U, W)`.
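///
/// For example (an illustrative sketch, not a normative rule): take `T =
/// bool`, `U = u8`, and `V = W = Valid`. `S(bool, Valid) = {0, 1}` is a strict
/// subset of `S(u8, Valid)` (all 256 byte values), so transmuting `src:
/// Ptr<bool, Valid>` into a mutating `dst: Ptr<u8, Valid>` violates the second
/// condition above: writing `2u8` through `dst` would place a value outside
/// `S(bool, Valid)` in `src`'s referent.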
pub unsafe trait Validity: Sealed {}
/// An [`Aliasing`] invariant which is either [`Shared`] or [`Exclusive`].
///
/// # Safety
///
/// Given `A: Reference`, callers may assume that either `A = Shared` or `A =
/// Exclusive`.
pub trait Reference: Aliasing + Sealed {}
/// The `Ptr<'a, T>` adheres to the aliasing rules of a `&'a T`.
///
/// The referent of a shared-aliased `Ptr` may be concurrently referenced by any
/// number of shared-aliased `Ptr` or `&T` references, or by any number of
/// `Ptr<U>` or `&U` references as permitted by `T`'s library safety invariants,
/// and may not be concurrently referenced by any exclusively-aliased `Ptr`s or
/// `&mut` references. The referent must not be mutated, except via
/// [`UnsafeCell`]s, and only when permitted by `T`'s library safety invariants.
///
/// [`UnsafeCell`]: core::cell::UnsafeCell
pub enum Shared {}
impl Aliasing for Shared {
const IS_EXCLUSIVE: bool = false;
}
impl Reference for Shared {}
/// The `Ptr<'a, T>` adheres to the aliasing rules of a `&'a mut T`.
///
/// The referent of an exclusively-aliased `Ptr` may not be concurrently
/// referenced by any other `Ptr`s or references, and may not be accessed (read
/// or written) other than via this `Ptr`.
pub enum Exclusive {}
impl Aliasing for Exclusive {
const IS_EXCLUSIVE: bool = true;
}
impl Reference for Exclusive {}
/// It is unknown whether the pointer is aligned.
pub enum Unaligned {}
impl Alignment for Unaligned {}
/// The referent is aligned: for `Ptr<T>`, the referent's address is a multiple
/// of `T`'s alignment.
pub enum Aligned {}
impl Alignment for Aligned {}
/// Any bit pattern is allowed in the `Ptr`'s referent, including uninitialized
/// bytes.
pub enum Uninit {}
// SAFETY: `Uninit`'s validity is well-defined for all `T: ?Sized`, and is not a
// function of any property of `T` other than its bit validity (in fact, it's
// not even a property of `T`'s bit validity, but this is more than we are
// required to uphold).
unsafe impl Validity for Uninit {}
/// The byte ranges initialized in `T` are also initialized in the referent of a
/// `Ptr<T>`.
///
/// Formally: uninitialized bytes may only be present in `Ptr<T>`'s referent
/// where they are guaranteed to be present in `T`. This is a dynamic property:
/// if, at a particular byte offset, a valid enum discriminant is set, the
/// subsequent bytes may only have uninitialized bytes as specified by the
/// corresponding enum.
///
/// Formally, given `len = size_of_val_raw(ptr)`, at every byte offset, `b`, in
/// the range `[0, len)`:
/// - If, in any instance `t: T` of length `len`, the byte at offset `b` in `t`
/// is initialized, then the byte at offset `b` within `*ptr` must be
/// initialized.
/// - Let `c` be the contents of the byte range `[0, b)` in `*ptr`. Let `S` be
/// the subset of valid instances of `T` of length `len` which contain `c` in
/// the offset range `[0, b)`. If, in any instance of `t: T` in `S`, the byte
/// at offset `b` in `t` is initialized, then the byte at offset `b` in `*ptr`
/// must be initialized.
///
/// Pragmatically, this means that if `*ptr` is guaranteed to contain an enum
/// type at a particular offset, and the enum discriminant stored in `*ptr`
/// corresponds to a valid variant of that enum type, then it is guaranteed
/// that the appropriate bytes of `*ptr` are initialized as defined by that
/// variant's bit validity (although note that the variant may contain another
/// enum type, in which case the same rules apply depending on the state of
/// its discriminant, and so on recursively).
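///
/// For example (an illustrative sketch): with a referent of type `enum E {
/// A(u8), B(MaybeUninit<u8>) }`, once the discriminant bytes stored in the
/// referent select `E::A`, the payload byte must be initialized; if they
/// select `E::B`, the payload byte may remain uninitialized. `AsInitialized`
/// guarantees exactly the initialization implied by the discriminant that is
/// actually stored, and nothing more.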
pub enum AsInitialized {}
// SAFETY: `AsInitialized`'s validity is well-defined for all `T: ?Sized`, and
// is not a function of any property of `T` other than its bit validity.
unsafe impl Validity for AsInitialized {}
/// The byte ranges in the referent are fully initialized. In other words, if
/// the referent is `N` bytes long, then it contains a bit-valid `[u8; N]`.
pub enum Initialized {}
// SAFETY: `Initialized`'s validity is well-defined for all `T: ?Sized`, and is
// not a function of any property of `T` other than its bit validity (in fact,
// it's not even a property of `T`'s bit validity, but this is more than we are
// required to uphold).
unsafe impl Validity for Initialized {}
/// The referent of a `Ptr<T>` is valid for `T`, upholding bit validity and any
/// library safety invariants.
pub enum Valid {}
// SAFETY: `Valid`'s validity is well-defined for all `T: ?Sized`, and is not a
// function of any property of `T` other than its bit validity.
unsafe impl Validity for Valid {}
/// # Safety
///
/// `DT: CastableFrom<ST, SV, DV>` is sound if `SV = DV = Uninit` or `SV = DV =
/// Initialized`.
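///
/// Note that `SV = DV = Valid` is not on this list: which bit patterns are
/// `Valid` depends on the referent type, so `Valid`-ity is not, in general,
/// preserved by a cast that changes the referent type.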
pub unsafe trait CastableFrom<ST: ?Sized, SV, DV> {}
// SAFETY: `SV = DV = Uninit`.
unsafe impl<ST: ?Sized, DT: ?Sized> CastableFrom<ST, Uninit, Uninit> for DT {}
// SAFETY: `SV = DV = Initialized`.
unsafe impl<ST: ?Sized, DT: ?Sized> CastableFrom<ST, Initialized, Initialized> for DT {}
/// [`Ptr`](crate::Ptr) referents that permit unsynchronized read operations.
///
/// `T: Read<A, R>` implies that a pointer to `T` with aliasing `A` permits
/// unsynchronized read operations. This can be because `A` is [`Exclusive`] or
/// because `T` does not permit interior mutation.
///
/// # Safety
///
/// `T: Read<A, R>` if either of the following conditions holds:
/// - `A` is [`Exclusive`]
/// - `T` implements [`Immutable`](crate::Immutable)
///
/// As a consequence, if `T: Read<A, R>`, then any `Ptr<T, (A, ...)>` is
/// permitted to perform unsynchronized reads from its referent.
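///
/// For example, `u32: Read<Shared, BecauseImmutable>` holds because `u32:
/// Immutable`, whereas `Cell<u32>` is not `Immutable` and so is `Read` only
/// under `Exclusive` aliasing (via `BecauseExclusive`).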
pub trait Read<A: Aliasing, R> {}
impl<A: Aliasing, T: ?Sized + crate::Immutable> Read<A, BecauseImmutable> for T {}
impl<T: ?Sized> Read<Exclusive, BecauseExclusive> for T {}
/// Unsynchronized reads are permitted because only one live [`Ptr`](crate::Ptr)
/// or reference may exist to the referent bytes at a time.
#[derive(Copy, Clone, Debug)]
#[doc(hidden)]
pub enum BecauseExclusive {}
/// Unsynchronized reads are permitted because no live [`Ptr`](crate::Ptr)s or
/// references permit interior mutation.
#[derive(Copy, Clone, Debug)]
#[doc(hidden)]
pub enum BecauseImmutable {}
use sealed::Sealed;
mod sealed {
use super::*;
pub trait Sealed {}
impl Sealed for Shared {}
impl Sealed for Exclusive {}
impl Sealed for Unaligned {}
impl Sealed for Aligned {}
impl Sealed for Uninit {}
impl Sealed for AsInitialized {}
impl Sealed for Initialized {}
impl Sealed for Valid {}
impl<A: Sealed, AA: Sealed, V: Sealed> Sealed for (A, AA, V) {}
impl Sealed for BecauseImmutable {}
impl Sealed for BecauseExclusive {}
}

40
vendor/zerocopy/src/pointer/mod.rs vendored Normal file
View File

@@ -0,0 +1,40 @@
// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
//! Abstractions over raw pointers.
mod inner;
#[doc(hidden)]
pub mod invariant;
mod ptr;
mod transmute;
#[doc(hidden)]
pub use {inner::PtrInner, transmute::*};
#[doc(hidden)]
pub use {
invariant::{BecauseExclusive, BecauseImmutable, Read},
ptr::Ptr,
};
/// A shorthand for a maybe-valid, maybe-aligned reference. Used as the argument
/// to [`TryFromBytes::is_bit_valid`].
///
/// [`TryFromBytes::is_bit_valid`]: crate::TryFromBytes::is_bit_valid
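///
/// For example, with both defaults, `Maybe<'a, T>` denotes a shared,
/// possibly-unaligned pointer whose referent is only known to be
/// `Initialized` - that is, the weakest pointer that an `is_bit_valid`
/// implementation must be prepared to accept.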
pub type Maybe<'a, T, Aliasing = invariant::Shared, Alignment = invariant::Unaligned> =
Ptr<'a, T, (Aliasing, Alignment, invariant::Initialized)>;
/// Checks if the referent is zeroed.
pub(crate) fn is_zeroed<T, I>(ptr: Ptr<'_, T, I>) -> bool
where
T: crate::Immutable + crate::KnownLayout,
I: invariant::Invariants<Validity = invariant::Initialized>,
I::Aliasing: invariant::Reference,
{
ptr.as_bytes::<BecauseImmutable>().as_ref().iter().all(|&byte| byte == 0)
}

1460
vendor/zerocopy/src/pointer/ptr.rs vendored Normal file

File diff suppressed because it is too large

479
vendor/zerocopy/src/pointer/transmute.rs vendored Normal file
View File

@@ -0,0 +1,479 @@
// Copyright 2025 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
use core::{
cell::{Cell, UnsafeCell},
mem::{ManuallyDrop, MaybeUninit},
num::Wrapping,
};
use crate::{
pointer::{invariant::*, PtrInner},
FromBytes, Immutable, IntoBytes, Unalign,
};
/// Transmutations which are sound to attempt, conditional on validating the bit
/// validity of the destination type.
///
/// If a `Ptr` transmutation is `TryTransmuteFromPtr`, then it is sound to
/// perform that transmutation so long as some additional mechanism is used to
/// validate that the referent is bit-valid for the destination type. That
/// validation mechanism could be a type bound (such as `TransmuteFrom`) or a
/// runtime validity check.
///
/// # Safety
///
/// ## Post-conditions
///
/// Given `Dst: TryTransmuteFromPtr<Src, A, SV, DV, _>`, callers may assume the
/// following:
///
/// Given `src: Ptr<'a, Src, (A, _, SV)>`, if the referent of `src` is
/// `DV`-valid for `Dst`, then it is sound to transmute `src` into `dst: Ptr<'a,
/// Dst, (A, Unaligned, DV)>` by preserving pointer address and metadata.
///
/// ## Pre-conditions
///
/// Given `src: Ptr<Src, (A, _, SV)>` and `dst: Ptr<Dst, (A, Unaligned, DV)>`,
/// `Dst: TryTransmuteFromPtr<Src, A, SV, DV, _>` is sound if all of the
/// following hold:
/// - Forwards transmutation: Either of the following hold:
/// - So long as `dst` is active, no mutation of `dst`'s referent is allowed
/// except via `dst` itself
/// - The set of `DV`-valid `Dst`s is a superset of the set of `SV`-valid
/// `Src`s
/// - Reverse transmutation: Either of the following hold:
/// - `dst` does not permit mutation of its referent
/// - The set of `DV`-valid `Dst`s is a subset of the set of `SV`-valid `Src`s
/// - No safe code, given access to `src` and `dst`, can cause undefined
/// behavior: Any of the following hold:
/// - `A` is `Exclusive`
/// - `Src: Immutable` and `Dst: Immutable`
/// - It is sound for safe code to operate on a `&Src` and `&Dst` which
/// reference the same byte range at the same time
///
/// ## Proof
///
/// Given:
/// - `src: Ptr<'a, Src, (A, _, SV)>`
/// - `src`'s referent is `DV`-valid for `Dst`
/// - `Dst: SizeEq<Src>`
///
/// We are trying to prove that it is sound to perform a pointer address- and
/// metadata-preserving transmute from `src` to a `dst: Ptr<'a, Dst, (A,
/// Unaligned, DV)>`. We need to prove that such a transmute does not violate
/// any of `src`'s invariants, and that it satisfies all invariants of the
/// destination `Ptr` type.
///
/// First, all of `src`'s `PtrInner` invariants are upheld. `src`'s address and
/// metadata are unchanged, so:
/// - If its referent is not zero sized, then it still has valid provenance for
/// its referent, which is still entirely contained in some Rust allocation,
/// `A`
/// - If its referent is not zero sized, `A` is guaranteed to live for at least
/// `'a`
///
/// Since `Dst: SizeEq<Src>`, and since `dst` has the same address and metadata
/// as `src`, `dst` addresses the same byte range as `src`. `dst` also has the
/// same lifetime as `src`. Therefore, all of the `PtrInner` invariants
/// mentioned above also hold for `dst`.
///
/// Second, since `src`'s address is unchanged, it still satisfies its
/// alignment. Since `dst`'s alignment is `Unaligned`, it trivially satisfies
/// its alignment.
///
/// Third, aliasing is either `Exclusive` or `Shared`:
/// - If it is `Exclusive`, then both `src` and `dst` satisfy `Exclusive`
/// aliasing trivially: since `src` and `dst` have the same lifetime, `src` is
/// inaccessible so long as `dst` is alive, and no other live `Ptr`s or
/// references may reference the same referent.
/// - If it is `Shared`, then either:
/// - `Src: Immutable` and `Dst: Immutable`, and so `UnsafeCell`s trivially
/// cover the same byte ranges in both types.
/// - It is explicitly sound for safe code to operate on a `&Src` and a `&Dst`
/// pointing to the same byte range at the same time.
///
/// Fourth, `src`'s validity is satisfied. By invariant, `src`'s referent began
/// as an `SV`-valid `Src`. It is guaranteed to remain so, as either of the
/// following hold:
/// - `dst` does not permit mutation of its referent.
/// - The set of `DV`-valid `Dst`s is a superset of the set of `SV`-valid
/// `Src`s. Thus, any value written via `dst` is guaranteed to be `SV`-valid
/// for `Src`.
///
/// Fifth, `dst`'s validity is satisfied. It is a given of this proof that the
/// referent is `DV`-valid for `Dst`. It is guaranteed to remain so, as either
/// of the following hold:
/// - So long as `dst` is active, no mutation of the referent is allowed except
/// via `dst` itself.
/// - The set of `DV`-valid `Dst`s is a superset of the set of `SV`-valid
/// `Src`s. Thus, any value written via `src` is guaranteed to be a `DV`-valid
/// `Dst`.
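///
/// ## Illustration
///
/// A non-normative sketch: consider `Src = u8`, `Dst = bool`, `SV =
/// Initialized`, and `DV = Valid`. Reverse transmutation holds because every
/// `Valid` `bool` (`0` or `1`) is also an `Initialized` `u8`, and, under
/// `Exclusive` aliasing, forwards transmutation holds because no mutation of
/// `dst`'s referent is possible except via `dst` itself. The remaining
/// obligation - verifying that the referent byte actually is `0` or `1` - is
/// exactly the validation that `TryTransmuteFromPtr` defers to its caller.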
pub unsafe trait TryTransmuteFromPtr<Src: ?Sized, A: Aliasing, SV: Validity, DV: Validity, R>:
SizeEq<Src>
{
}
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum BecauseMutationCompatible {}
// SAFETY:
// - Forwards transmutation: By `Dst: MutationCompatible<Src, A, SV, DV, _>`, we
// know that at least one of the following holds:
// - So long as `dst: Ptr<Dst>` is active, no mutation of its referent is
// allowed except via `dst` itself if either of the following hold:
// - Aliasing is `Exclusive`, in which case, so long as the `Dst` `Ptr`
// exists, no mutation is permitted except via that `Ptr`
// - Aliasing is `Shared`, `Src: Immutable`, and `Dst: Immutable`, in which
// case no mutation is possible via either `Ptr`
// - `Dst: TransmuteFrom<Src, SV, DV>`. Since `Dst: SizeEq<Src>`, this bound
//   guarantees that the set of `DV`-valid `Dst`s is a superset of the set of
// `SV`-valid `Src`s.
// - Reverse transmutation: `Src: TransmuteFrom<Dst, DV, SV>`. Since `Dst:
// SizeEq<Src>`, this guarantees that the set of `DV`-valid `Dst`s is a subset
// of the set of `SV`-valid `Src`s.
// - No safe code, given access to `src` and `dst`, can cause undefined
// behavior: By `Dst: MutationCompatible<Src, A, SV, DV, _>`, at least one of
// the following holds:
// - `A` is `Exclusive`
// - `Src: Immutable` and `Dst: Immutable`
// - `Dst: InvariantsEq<Src>`, which guarantees that `Src` and `Dst` have the
// same invariants, and have `UnsafeCell`s covering the same byte ranges
unsafe impl<Src, Dst, SV, DV, A, R>
TryTransmuteFromPtr<Src, A, SV, DV, (BecauseMutationCompatible, R)> for Dst
where
A: Aliasing,
SV: Validity,
DV: Validity,
Src: TransmuteFrom<Dst, DV, SV> + ?Sized,
Dst: MutationCompatible<Src, A, SV, DV, R> + SizeEq<Src> + ?Sized,
{
}
// SAFETY:
// - Forwards transmutation: Since aliasing is `Shared` and `Src: Immutable`,
// `src` does not permit mutation of its referent.
// - Reverse transmutation: Since aliasing is `Shared` and `Dst: Immutable`,
// `dst` does not permit mutation of its referent.
// - No safe code, given access to `src` and `dst`, can cause undefined
// behavior: `Src: Immutable` and `Dst: Immutable`
unsafe impl<Src, Dst, SV, DV> TryTransmuteFromPtr<Src, Shared, SV, DV, BecauseImmutable> for Dst
where
SV: Validity,
DV: Validity,
Src: Immutable + ?Sized,
Dst: Immutable + SizeEq<Src> + ?Sized,
{
}
/// Denotes that `src: Ptr<Src, (A, _, SV)>` and `dst: Ptr<Self, (A, _, DV)>`,
/// referencing the same referent at the same time, cannot be used by safe code
/// to break library safety invariants of `Src` or `Self`.
///
/// # Safety
///
/// At least one of the following must hold:
/// - `Src: Read<A, _>` and `Self: Read<A, _>`
/// - `Self: InvariantsEq<Src>`, and, for some `V`:
///   - `Self: TransmuteFrom<Src, V, V>`
///   - `Src: TransmuteFrom<Self, V, V>`
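///
/// For example, the `BecauseRead` impl below covers the common cases:
/// aliasing is `Exclusive`, or aliasing is `Shared` and both types are
/// `Immutable`. The `InvariantsEq` route instead covers wrappers like
/// `ManuallyDrop<T>` and `Wrapping<T>`, where safe code can already hold a
/// `&T` and a reference to the wrapper over the same bytes at the same time.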
pub unsafe trait MutationCompatible<Src: ?Sized, A: Aliasing, SV, DV, R> {}
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum BecauseRead {}
// SAFETY: `Src: Read<A, _>` and `Dst: Read<A, _>`.
unsafe impl<Src: ?Sized, Dst: ?Sized, A: Aliasing, SV: Validity, DV: Validity, R, S>
MutationCompatible<Src, A, SV, DV, (BecauseRead, (R, S))> for Dst
where
Src: Read<A, R>,
Dst: Read<A, S>,
{
}
/// Denotes that two types have the same invariants.
///
/// # Safety
///
/// It is sound for safe code to operate on a `&T` and a `&Self` pointing to the
/// same referent at the same time - no such safe code can cause undefined
/// behavior.
pub unsafe trait InvariantsEq<T: ?Sized> {}
// SAFETY: Trivially sound to have multiple `&T` pointing to the same referent.
unsafe impl<T: ?Sized> InvariantsEq<T> for T {}
// SAFETY: `Dst: InvariantsEq<Src> + TransmuteFrom<Src, SV, DV>`, and `Src:
// TransmuteFrom<Dst, DV, SV>`.
unsafe impl<Src: ?Sized, Dst: ?Sized, A: Aliasing, SV: Validity, DV: Validity>
MutationCompatible<Src, A, SV, DV, BecauseInvariantsEq> for Dst
where
Src: TransmuteFrom<Dst, DV, SV>,
Dst: TransmuteFrom<Src, SV, DV> + InvariantsEq<Src>,
{
}
pub(crate) enum BecauseInvariantsEq {}
macro_rules! unsafe_impl_invariants_eq {
($tyvar:ident => $t:ty, $u:ty) => {{
crate::util::macros::__unsafe();
// SAFETY: The caller promises that this is sound.
unsafe impl<$tyvar> InvariantsEq<$t> for $u {}
// SAFETY: The caller promises that this is sound.
unsafe impl<$tyvar> InvariantsEq<$u> for $t {}
}};
}
impl_transitive_transmute_from!(T => MaybeUninit<T> => T => Wrapping<T>);
impl_transitive_transmute_from!(T => Wrapping<T> => T => MaybeUninit<T>);
// SAFETY: `ManuallyDrop<T>` has the same size and bit validity as `T` [1], and
// implements `Deref<Target = T>` [2]. Thus, it is already possible for safe
// code to obtain a `&T` and a `&ManuallyDrop<T>` to the same referent at the
// same time.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
//
// `ManuallyDrop<T>` is guaranteed to have the same layout and bit
// validity as `T`
//
// [2] https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html#impl-Deref-for-ManuallyDrop%3CT%3E
unsafe impl<T: ?Sized> InvariantsEq<T> for ManuallyDrop<T> {}
// SAFETY: See previous safety comment.
unsafe impl<T: ?Sized> InvariantsEq<ManuallyDrop<T>> for T {}
/// Transmutations which are always sound.
///
/// `TransmuteFromPtr` is a shorthand for [`TryTransmuteFromPtr`] and
/// [`TransmuteFrom`].
///
/// # Safety
///
/// `Dst: TransmuteFromPtr<Src, A, SV, DV, _>` is equivalent to `Dst:
/// TryTransmuteFromPtr<Src, A, SV, DV, _> + TransmuteFrom<Src, SV, DV>`.
pub unsafe trait TransmuteFromPtr<Src: ?Sized, A: Aliasing, SV: Validity, DV: Validity, R>:
TryTransmuteFromPtr<Src, A, SV, DV, R> + TransmuteFrom<Src, SV, DV>
{
}
// SAFETY: The `where` bounds are equivalent to the safety invariant on
// `TransmuteFromPtr`.
unsafe impl<Src: ?Sized, Dst: ?Sized, A: Aliasing, SV: Validity, DV: Validity, R>
TransmuteFromPtr<Src, A, SV, DV, R> for Dst
where
Dst: TransmuteFrom<Src, SV, DV> + TryTransmuteFromPtr<Src, A, SV, DV, R>,
{
}
/// Denotes that any `SV`-valid `Src` may soundly be transmuted into a
/// `DV`-valid `Self`.
///
/// # Safety
///
/// Given `src: Ptr<Src, (_, _, SV)>` and `dst: Ptr<Dst, (_, _, DV)>`, if the
/// referents of `src` and `dst` are the same size, then the set of bit patterns
/// allowed to appear in `src`'s referent must be a subset of the set allowed to
/// appear in `dst`'s referent.
///
/// If the referents are not the same size, then `Dst: TransmuteFrom<Src, SV,
/// DV>` conveys no safety guarantee.
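///
/// For example, under this contract, a hypothetical `u8: TransmuteFrom<bool,
/// Valid, Valid>` impl would be sound, since every `Valid` `bool` bit pattern
/// (`0` or `1`) is also a `Valid` `u8`. The impls below are instead mostly
/// blanket impls phrased in terms of the `Initialized` and `Uninit`
/// validities.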
pub unsafe trait TransmuteFrom<Src: ?Sized, SV, DV> {}
/// # Safety
///
/// `T` and `Self` must have the same vtable kind (`Sized`, slice DST, `dyn`,
/// etc) and have the same size. In particular:
/// - If `T: Sized` and `Self: Sized`, then their sizes must be equal
/// - If `T: ?Sized` and `Self: ?Sized`, then it must be the case that, given
/// any `t: PtrInner<'_, T>`, `<Self as SizeEq<T>>::cast_from_raw(t)` produces
/// a pointer which addresses the same number of bytes as `t`. *Note that it
/// is **not** guaranteed that an `as` cast preserves referent size: it may be
/// the case that `cast_from_raw` modifies the pointer's metadata in order to
/// preserve referent size, which an `as` cast does not do.*
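///
/// For example (a hypothetical impl, not one provided below): a
/// `SizeEq<[u16]>` impl for `[u8]` would have to double the element count
/// carried in the pointer metadata inside `cast_from_raw`, whereas an `as`
/// cast would preserve the count and thereby halve the referent size.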
pub unsafe trait SizeEq<T: ?Sized> {
fn cast_from_raw(t: PtrInner<'_, T>) -> PtrInner<'_, Self>;
}
// SAFETY: `T` trivially has the same size and vtable kind as `T`, and since
// `*mut T -> *mut T` pointer casts are no-ops, this cast trivially preserves
// referent size (when `T: ?Sized`).
unsafe impl<T: ?Sized> SizeEq<T> for T {
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, T>) -> PtrInner<'_, T> {
t
}
}
// SAFETY: Since `Src: IntoBytes`, the set of valid `Src`'s is the set of
// initialized bit patterns, which is exactly the set allowed in the referent of
// any `Initialized` `Ptr`.
unsafe impl<Src, Dst> TransmuteFrom<Src, Valid, Initialized> for Dst
where
Src: IntoBytes + ?Sized,
Dst: ?Sized,
{
}
// SAFETY: Since `Dst: FromBytes`, any initialized bit pattern may appear in the
// referent of a `Ptr<Dst, (_, _, Valid)>`. This is exactly equal to the set of
// bit patterns which may appear in the referent of any `Initialized` `Ptr`.
unsafe impl<Src, Dst> TransmuteFrom<Src, Initialized, Valid> for Dst
where
Src: ?Sized,
Dst: FromBytes + ?Sized,
{
}
// FIXME(#2354): This seems like a smell - the soundness of this bound has
// nothing to do with `Src` or `Dst` - we're basically just saying `[u8; N]` is
// transmutable into `[u8; N]`.
// SAFETY: The set of allowed bit patterns in the referent of any `Initialized`
// `Ptr` is the same regardless of referent type.
unsafe impl<Src, Dst> TransmuteFrom<Src, Initialized, Initialized> for Dst
where
Src: ?Sized,
Dst: ?Sized,
{
}
// FIXME(#2354): This seems like a smell - the soundness of this bound has
// nothing to do with `Dst` - we're basically just saying that any type is
// transmutable into `MaybeUninit<[u8; N]>`.
// SAFETY: A `Dst` with validity `Uninit` permits any byte sequence, and
// therefore can be transmuted from any value.
unsafe impl<Src, Dst, V> TransmuteFrom<Src, V, Uninit> for Dst
where
Src: ?Sized,
Dst: ?Sized,
V: Validity,
{
}
// SAFETY:
// - `ManuallyDrop<T>` has the same size as `T` [1]
// - `ManuallyDrop<T>` has the same validity as `T` [1]
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
//
// `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
// `T`
const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(T: ?Sized => ManuallyDrop<T>) };
// SAFETY:
// - `Unalign<T>` promises to have the same size as `T`.
// - `Unalign<T>` promises to have the same validity as `T`.
const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(T => Unalign<T>) };
// SAFETY: `Unalign<T>` promises to have the same size and validity as `T`.
// Given `u: &Unalign<T>`, it is already possible to obtain `let t =
// u.try_deref().unwrap()`. Because `Unalign<T>` has the same size as `T`, the
// returned `&T` must point to the same referent as `u`, and thus it must be
// sound for these two references to exist at the same time since it's already
// possible for safe code to get into this state.
const _: () = unsafe { unsafe_impl_invariants_eq!(T => T, Unalign<T>) };
// SAFETY:
// - `Wrapping<T>` has the same size as `T` [1].
// - `Wrapping<T>` has only one field, which is `pub` [2]. We are also
//   guaranteed per [1] that `Wrapping<T>` has the same layout as `T`. The only
// way for both of these to be true simultaneously is for `Wrapping<T>` to
// have the same bit validity as `T`. In particular, in order to change the
// bit validity, one of the following would need to happen:
// - `Wrapping` could change its `repr`, but this would violate the layout
// guarantee.
// - `Wrapping` could add or change its fields, but this would be a
// stability-breaking change.
//
// [1] Per https://doc.rust-lang.org/1.85.0/core/num/struct.Wrapping.html#layout-1:
//
// `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
//
// [2] Definition from https://doc.rust-lang.org/1.85.0/core/num/struct.Wrapping.html:
//
// ```
// #[repr(transparent)]
// pub struct Wrapping<T>(pub T);
// ```
const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(T => Wrapping<T>) };
// SAFETY: By the preceding safety proof, `Wrapping<T>` and `T` have the same
// layout and bit validity. Since a `Wrapping<T>`'s `T` field is `pub`, given
// `w: &Wrapping<T>`, it's possible to do `let t = &w.0`, which means that it's
// already possible for safe code to obtain a `&Wrapping<T>` and a `&T` pointing
// to the same referent at the same time. Thus, this must be sound.
const _: () = unsafe { unsafe_impl_invariants_eq!(T => T, Wrapping<T>) };
// SAFETY:
// - `UnsafeCell<T>` has the same size as `T` [1].
// - Per [1], `UnsafeCell<T>` has the same bit validity as `T`. Technically the
// term "representation" doesn't guarantee this, but the subsequent sentence
// in the documentation makes it clear that this is the intention.
//
// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
// `UnsafeCell<T>` has the same in-memory representation as its inner type
// `T`. A consequence of this guarantee is that it is possible to convert
// between `T` and `UnsafeCell<T>`.
const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(T: ?Sized => UnsafeCell<T>) };
// SAFETY:
// - `Cell<T>` has the same size as `T` [1].
// - Per [1], `Cell<T>` has the same bit validity as `T`. Technically the term
// "representation" doesn't guarantee this, but it does promise to have the
// "same memory layout and caveats as `UnsafeCell<T>`." The `UnsafeCell` docs
// [2] make it clear that bit validity is the intention even if that phrase
// isn't used.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/cell/struct.Cell.html#memory-layout:
//
// `Cell<T>` has the same memory layout and caveats as `UnsafeCell<T>`. In
// particular, this means that `Cell<T>` has the same in-memory representation
// as its inner type `T`.
//
// [2] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
// `UnsafeCell<T>` has the same in-memory representation as its inner type
// `T`. A consequence of this guarantee is that it is possible to convert
// between `T` and `UnsafeCell<T>`.
const _: () = unsafe { unsafe_impl_for_transparent_wrapper!(T: ?Sized => Cell<T>) };
impl_transitive_transmute_from!(T: ?Sized => Cell<T> => T => UnsafeCell<T>);
impl_transitive_transmute_from!(T: ?Sized => UnsafeCell<T> => T => Cell<T>);
// SAFETY: `MaybeUninit<T>` has no validity requirements. Currently this is not
// explicitly guaranteed, but it's obvious from `MaybeUninit`'s documentation
// that this is the intention:
// https://doc.rust-lang.org/1.85.0/core/mem/union.MaybeUninit.html
unsafe impl<T> TransmuteFrom<T, Uninit, Valid> for MaybeUninit<T> {}
// SAFETY: `MaybeUninit<T>` has the same size as `T` [1].
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
//
// `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
// `T`
unsafe impl<T> SizeEq<T> for MaybeUninit<T> {
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, T>) -> PtrInner<'_, MaybeUninit<T>> {
// SAFETY: Per preceding safety comment, `MaybeUninit<T>` and `T` have
// the same size, and so this cast preserves referent size.
unsafe { cast!(t) }
}
}
// SAFETY: See previous safety comment.
unsafe impl<T> SizeEq<MaybeUninit<T>> for T {
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, MaybeUninit<T>>) -> PtrInner<'_, T> {
// SAFETY: Per preceding safety comment, `MaybeUninit<T>` and `T` have
// the same size, and so this cast preserves referent size.
unsafe { cast!(t) }
}
}

1175
vendor/zerocopy/src/ref.rs vendored Normal file

File diff suppressed because it is too large

904
vendor/zerocopy/src/split_at.rs vendored Normal file
View File

@@ -0,0 +1,904 @@
// Copyright 2025 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
use super::*;
use crate::pointer::invariant::{Aligned, Exclusive, Invariants, Shared, Valid};
/// Types that can be split in two.
///
/// This trait generalizes Rust's existing support for splitting slices to
/// support slices and slice-based dynamically-sized types ("slice DSTs").
///
/// # Implementation
///
/// **Do not implement this trait yourself!** Instead, use
/// [`#[derive(SplitAt)]`][derive]; e.g.:
///
/// ```
/// # use zerocopy_derive::{SplitAt, KnownLayout};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct<T: ?Sized> {
/// # /*
/// ...,
/// # */
/// // `SplitAt` types must have at least one field.
/// field: T,
/// }
/// ```
///
/// This derive performs a sophisticated, compile-time safety analysis to
/// determine whether a type is `SplitAt`.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
///
/// You must not rely on the `#[doc(hidden)]` internals of `SplitAt`. Future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::SplitAt")]
#[cfg_attr(
not(feature = "derive"),
doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.SplitAt.html"),
)]
#[cfg_attr(
zerocopy_diagnostic_on_unimplemented_1_78_0,
diagnostic::on_unimplemented(note = "Consider adding `#[derive(SplitAt)]` to `{Self}`")
)]
// # Safety
//
// The trailing slice is well-aligned for its element type. `Self` is `[T]`, or
// a `repr(C)` or `repr(transparent)` slice DST.
pub unsafe trait SplitAt: KnownLayout<PointerMetadata = usize> {
/// The element type of the trailing slice.
type Elem;
#[doc(hidden)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized;
/// Unsafely splits `self` in two.
///
/// # Safety
///
/// The caller promises that `l_len` is not greater than the length of
/// `self`'s trailing slice.
#[inline]
#[must_use]
unsafe fn split_at_unchecked(&self, l_len: usize) -> Split<&Self> {
// SAFETY: By precondition on the caller, `l_len <= self.len()`.
unsafe { Split::<&Self>::new(self, l_len) }
}
/// Attempts to split `self` in two.
///
/// Returns `None` if `l_len` is greater than the length of `self`'s
/// trailing slice.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
/// #[repr(C)]
/// struct Packet {
/// length: u8,
/// body: [u8],
/// }
///
/// // These bytes encode a `Packet`.
/// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::ref_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at(packet.length as usize).unwrap();
///
/// // Use the `Immutable` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_immutable();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
/// ```
#[inline]
#[must_use = "has no side effects"]
fn split_at(&self, l_len: usize) -> Option<Split<&Self>> {
MetadataOf::new_in_bounds(self, l_len).map(
#[inline(always)]
|l_len| {
// SAFETY: We have ensured that `l_len <= self.len()` (by
// post-condition on `MetadataOf::new_in_bounds`)
unsafe { Split::new(self, l_len.get()) }
},
)
}
/// Unsafely splits `self` in two.
///
/// # Safety
///
/// The caller promises that `l_len` is not greater than the length of
/// `self`'s trailing slice.
#[inline]
#[must_use]
unsafe fn split_at_mut_unchecked(&mut self, l_len: usize) -> Split<&mut Self> {
// SAFETY: By precondition on the caller, `l_len <= self.len()`.
unsafe { Split::<&mut Self>::new(self, l_len) }
}
/// Attempts to split `self` in two.
///
/// Returns `None` if `l_len` is greater than the length of `self`'s
/// trailing slice. If the given `l_len` would result in [the trailing
/// padding](KnownLayout#slice-dst-layout) of the left portion overlapping
/// the right portion, that overlap is instead detected when the returned
/// [`Split`] is converted into its parts (e.g., by
/// [`Split::via_runtime_check`]).
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
/// #[repr(C)]
/// struct Packet<B: ?Sized> {
/// length: u8,
/// body: B,
/// }
///
/// // These bytes encode a `Packet`.
/// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// {
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at_mut(packet.length as usize).unwrap();
///
/// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_into_bytes();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
///
/// rest.fill(0);
/// }
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
/// ```
#[inline]
fn split_at_mut(&mut self, l_len: usize) -> Option<Split<&mut Self>> {
MetadataOf::new_in_bounds(self, l_len).map(
#[inline(always)]
|l_len| {
// SAFETY: We have ensured that `l_len <= self.len()` (by
// post-condition on `MetadataOf::new_in_bounds`)
unsafe { Split::new(self, l_len.get()) }
},
)
}
}
// SAFETY: `[T]`'s trailing slice is `[T]`, which is trivially aligned.
unsafe impl<T> SplitAt for [T] {
type Elem = T;
#[inline]
#[allow(dead_code)]
fn only_derive_is_allowed_to_implement_this_trait()
where
Self: Sized,
{
}
}
/// A `T` that has been split into two possibly-overlapping parts.
///
/// For some dynamically sized types, the padding that appears after the
/// trailing slice field [is a dynamic function of the trailing slice
/// length](KnownLayout#slice-dst-layout). If `T` is split at a length that
/// requires trailing padding, the trailing padding of the left part of the
/// split `T` will overlap the right part. If `T` is a mutable reference or
/// permits interior mutation, you must ensure that the left and right parts do
/// not overlap. You can do this at zero cost using
/// [`Self::via_immutable`], [`Self::via_into_bytes`], or
/// [`Self::via_unaligned`], or with a dynamic check by using
/// [`Self::via_runtime_check`].
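///
/// For example (mirroring `test_split_at_overlapping` below): in a
/// `#[repr(C, align(2))]` slice DST with a one-byte prefix and a `[u8]`
/// trailing slice, splitting at an even index leaves one byte of trailing
/// padding in the left part, and that padding byte overlaps the first element
/// of the right part; splits at odd indices are strictly non-overlapping.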
#[derive(Debug)]
pub struct Split<T> {
/// A pointer to the source slice DST.
source: T,
/// The length of the future left half of `source`.
///
/// # Safety
///
/// If `source` is a pointer to a slice DST, `l_len` is no greater than
/// `source`'s length.
l_len: usize,
}
impl<T> Split<T> {
/// Produces a `Split` of `source` with `l_len`.
///
/// # Safety
///
/// `l_len` is no greater than `source`'s length.
#[inline(always)]
unsafe fn new(source: T, l_len: usize) -> Self {
Self { source, l_len }
}
}
impl<'a, T> Split<&'a T>
where
T: ?Sized + SplitAt,
{
#[inline(always)]
fn into_ptr(self) -> Split<Ptr<'a, T, (Shared, Aligned, Valid)>> {
let source = Ptr::from_ref(self.source);
// SAFETY: `Ptr::from_ref(self.source)` points to exactly `self.source`
// and thus maintains the invariants of `self` with respect to `l_len`.
unsafe { Split::new(source, self.l_len) }
}
/// Produces the split parts of `self`, using [`Immutable`] to ensure that
/// it is sound to have concurrent references to both parts.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, Immutable)]
/// #[repr(C)]
/// struct Packet {
/// length: u8,
/// body: [u8],
/// }
///
/// // These bytes encode a `Packet`.
/// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::ref_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at(packet.length as usize).unwrap();
///
/// // Use the `Immutable` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_immutable();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_immutable(self) -> (&'a T, &'a [T::Elem])
where
T: Immutable,
{
let (l, r) = self.into_ptr().via_immutable();
(l.as_ref(), r.as_ref())
}
/// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
/// it is sound to have concurrent references to both parts.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Packet<B: ?Sized> {
/// length: u8,
/// body: B,
/// }
///
/// // These bytes encode a `Packet`.
/// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::<[u8]>::ref_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at(packet.length as usize).unwrap();
///
/// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_into_bytes();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_into_bytes(self) -> (&'a T, &'a [T::Elem])
where
T: IntoBytes,
{
let (l, r) = self.into_ptr().via_into_bytes();
(l.as_ref(), r.as_ref())
}
/// Produces the split parts of `self`, using [`Unaligned`] to ensure that
/// it is sound to have concurrent references to both parts.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Unaligned)]
/// #[repr(C)]
/// struct Packet {
/// length: u8,
/// body: [u8],
/// }
///
/// // These bytes encode a `Packet`.
/// let bytes = &[4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::ref_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at(packet.length as usize).unwrap();
///
/// // Use the `Unaligned` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_unaligned();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_unaligned(self) -> (&'a T, &'a [T::Elem])
where
T: Unaligned,
{
let (l, r) = self.into_ptr().via_unaligned();
(l.as_ref(), r.as_ref())
}
/// Produces the split parts of `self`, using a dynamic check to ensure that
/// it is sound to have concurrent references to both parts. You should
/// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or
/// [`Self::via_unaligned`], which have no runtime cost.
///
/// Note that this check is overly conservative if `T` is [`Immutable`]; for
/// some types, this check will reject some splits which
/// [`Self::via_immutable`] will accept.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes, IntoBytes, network_endian::U16};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, Immutable, Debug)]
/// #[repr(C, align(2))]
/// struct Packet {
/// length: U16,
/// body: [u8],
/// }
///
/// // These bytes encode a `Packet`.
/// let bytes = [
/// 4u16.to_be(),
/// 1u16.to_be(),
/// 2u16.to_be(),
/// 3u16.to_be(),
/// 4u16.to_be()
/// ];
///
/// let packet = Packet::ref_from_bytes(bytes.as_bytes()).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [0, 1, 0, 2, 0, 3, 0, 4]);
///
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at(packet.length.into()).unwrap();
///
/// // Use a dynamic check to prove that it's okay to return concurrent
/// // references to `packet` and `rest`.
/// let (packet, rest) = split.via_runtime_check().unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [0, 1, 0, 2]);
/// assert_eq!(rest, [0, 3, 0, 4]);
///
/// // Attempt to split `packet` at `length - 1`.
/// let idx = packet.length.get() - 1;
/// let split = packet.split_at(idx as usize).unwrap();
///
/// // Attempt (and fail) to use a dynamic check to prove that it's okay
/// // to return concurrent references to `packet` and `rest`. Note that
/// // this is a case of `via_runtime_check` being overly conservative.
/// // Although the left and right parts indeed overlap, the `Immutable`
/// // bound ensures that concurrently referencing these overlapping
/// // parts is sound.
/// assert!(split.via_runtime_check().is_err());
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_runtime_check(self) -> Result<(&'a T, &'a [T::Elem]), Self> {
match self.into_ptr().via_runtime_check() {
Ok((l, r)) => Ok((l.as_ref(), r.as_ref())),
Err(s) => Err(s.into_ref()),
}
}
/// Unsafely produces the split parts of `self`.
///
/// # Safety
///
/// If `T` permits interior mutation, the trailing padding bytes of the left
/// portion must not overlap the right portion. For some dynamically sized
/// types, the padding that appears after the trailing slice field [is a
/// dynamic function of the trailing slice
/// length](KnownLayout#slice-dst-layout). Thus, for some types, this
/// condition is dependent on the length of the left portion.
#[must_use = "has no side effects"]
#[inline(always)]
pub unsafe fn via_unchecked(self) -> (&'a T, &'a [T::Elem]) {
// SAFETY: The aliasing of `self.into_ptr()` is not `Exclusive`, but the
// caller has promised that if `T` permits interior mutation then the
// left and right portions of `self` split at `l_len` do not overlap.
let (l, r) = unsafe { self.into_ptr().via_unchecked() };
(l.as_ref(), r.as_ref())
}
}
impl<'a, T> Split<&'a mut T>
where
T: ?Sized + SplitAt,
{
#[inline(always)]
fn into_ptr(self) -> Split<Ptr<'a, T, (Exclusive, Aligned, Valid)>> {
let source = Ptr::from_mut(self.source);
// SAFETY: `Ptr::from_mut(self.source)` points to exactly `self.source`,
// and thus maintains the invariants of `self` with respect to `l_len`.
unsafe { Split::new(source, self.l_len) }
}
/// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
/// it is sound to have concurrent references to both parts.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes)]
/// #[repr(C)]
/// struct Packet<B: ?Sized> {
/// length: u8,
/// body: B,
/// }
///
/// // These bytes encode a `Packet`.
/// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// {
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at_mut(packet.length as usize).unwrap();
///
/// // Use the `IntoBytes` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_into_bytes();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
///
/// rest.fill(0);
/// }
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_into_bytes(self) -> (&'a mut T, &'a mut [T::Elem])
where
T: IntoBytes,
{
let (l, r) = self.into_ptr().via_into_bytes();
(l.as_mut(), r.as_mut())
}
/// Produces the split parts of `self`, using [`Unaligned`] to ensure that
/// it is sound to have concurrent references to both parts.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Unaligned)]
/// #[repr(C)]
/// struct Packet<B: ?Sized> {
/// length: u8,
/// body: B,
/// }
///
/// // These bytes encode a `Packet`.
/// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// {
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at_mut(packet.length as usize).unwrap();
///
/// // Use the `Unaligned` bound on `Packet` to prove that it's okay to
/// // return concurrent references to `packet` and `rest`.
/// let (packet, rest) = split.via_unaligned();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
///
/// rest.fill(0);
/// }
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_unaligned(self) -> (&'a mut T, &'a mut [T::Elem])
where
T: Unaligned,
{
let (l, r) = self.into_ptr().via_unaligned();
(l.as_mut(), r.as_mut())
}
/// Produces the split parts of `self`, using a dynamic check to ensure that
/// it is sound to have concurrent references to both parts. You should
/// prefer using [`Self::via_into_bytes`] or [`Self::via_unaligned`], which
/// have no runtime cost.
///
/// # Examples
///
/// ```
/// use zerocopy::{SplitAt, FromBytes};
/// # use zerocopy_derive::*;
///
/// #[derive(SplitAt, FromBytes, KnownLayout, IntoBytes, Debug)]
/// #[repr(C)]
/// struct Packet<B: ?Sized> {
/// length: u8,
/// body: B,
/// }
///
/// // These bytes encode a `Packet`.
/// let mut bytes = &mut [4, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
///
/// let packet = Packet::<[u8]>::mut_from_bytes(bytes).unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
///
/// {
/// // Attempt to split `packet` at `length`.
/// let split = packet.split_at_mut(packet.length as usize).unwrap();
///
/// // Use a dynamic check to prove that it's okay to return concurrent
/// // references to `packet` and `rest`.
/// let (packet, rest) = split.via_runtime_check().unwrap();
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4]);
/// assert_eq!(rest, [5, 6, 7, 8, 9]);
///
/// rest.fill(0);
/// }
///
/// assert_eq!(packet.length, 4);
/// assert_eq!(packet.body, [1, 2, 3, 4, 0, 0, 0, 0, 0]);
/// ```
#[must_use = "has no side effects"]
#[inline(always)]
pub fn via_runtime_check(self) -> Result<(&'a mut T, &'a mut [T::Elem]), Self> {
match self.into_ptr().via_runtime_check() {
Ok((l, r)) => Ok((l.as_mut(), r.as_mut())),
Err(s) => Err(s.into_mut()),
}
}
/// Unsafely produces the split parts of `self`.
///
/// # Safety
///
/// The trailing padding bytes of the left portion must not overlap the
/// right portion. For some dynamically sized types, the padding that
/// appears after the trailing slice field [is a dynamic function of the
/// trailing slice length](KnownLayout#slice-dst-layout). Thus, for some
/// types, this condition is dependent on the length of the left portion.
#[must_use = "has no side effects"]
#[inline(always)]
pub unsafe fn via_unchecked(self) -> (&'a mut T, &'a mut [T::Elem]) {
// SAFETY: The aliasing of `self.into_ptr()` is `Exclusive`, and the
// caller has promised that the left and right portions of `self` split
// at `l_len` do not overlap.
let (l, r) = unsafe { self.into_ptr().via_unchecked() };
(l.as_mut(), r.as_mut())
}
}
impl<'a, T, I> Split<Ptr<'a, T, I>>
where
T: ?Sized + SplitAt,
I: Invariants<Alignment = Aligned, Validity = Valid>,
{
fn into_ref(self) -> Split<&'a T>
where
I: Invariants<Aliasing = Shared>,
{
// SAFETY: `self.source.as_ref()` points to exactly the same referent as
// `self.source` and thus maintains the invariants of `self` with
// respect to `l_len`.
unsafe { Split::new(self.source.as_ref(), self.l_len) }
}
fn into_mut(self) -> Split<&'a mut T>
where
I: Invariants<Aliasing = Exclusive>,
{
// SAFETY: `self.source.as_mut()` points to exactly the same referent as
// `self.source` and thus maintains the invariants of `self` with
// respect to `l_len`.
unsafe { Split::new(self.source.unify_invariants().as_mut(), self.l_len) }
}
/// Produces the length of `self`'s left part.
#[inline(always)]
fn l_len(&self) -> MetadataOf<T> {
// SAFETY: By invariant on `Split`, `self.l_len` is not greater than the
// length of `self.source`.
unsafe { MetadataOf::<T>::new_unchecked(self.l_len) }
}
/// Produces the split parts of `self`, using [`Immutable`] to ensure that
/// it is sound to have concurrent references to both parts.
#[inline(always)]
fn via_immutable(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
where
T: Immutable,
I: Invariants<Aliasing = Shared>,
{
// SAFETY: `Aliasing = Shared` and `T: Immutable`.
unsafe { self.via_unchecked() }
}
/// Produces the split parts of `self`, using [`IntoBytes`] to ensure that
/// it is sound to have concurrent references to both parts.
#[inline(always)]
fn via_into_bytes(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
where
T: IntoBytes,
{
// SAFETY: By `T: IntoBytes`, `T` has no padding for any length.
// Consequently, `T` can be split into non-overlapping parts at any
// index.
unsafe { self.via_unchecked() }
}
/// Produces the split parts of `self`, using [`Unaligned`] to ensure that
/// it is sound to have concurrent references to both parts.
#[inline(always)]
fn via_unaligned(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>)
where
T: Unaligned,
{
// SAFETY: By `T: SplitAt + Unaligned`, `T` is either a slice or a
// `repr(C)` or `repr(transparent)` slice DST that is well-aligned at
// any address and length. If `T` is a slice DST with alignment 1,
// `repr(C)` or `repr(transparent)` ensures that no padding is placed
// after the final element of the trailing slice. Consequently, `T` can
// be split into strictly non-overlapping parts at any index.
unsafe { self.via_unchecked() }
}
/// Produces the split parts of `self`, using a dynamic check to ensure that
/// it is sound to have concurrent references to both parts. You should
/// prefer using [`Self::via_immutable`], [`Self::via_into_bytes`], or
/// [`Self::via_unaligned`], which have no runtime cost.
#[inline(always)]
fn via_runtime_check(self) -> Result<(Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>), Self> {
let l_len = self.l_len();
// FIXME(#1290): Once we require `KnownLayout` on all fields, add an
// `IS_IMMUTABLE` associated const, and add `T::IS_IMMUTABLE ||` to the
// below check.
if l_len.padding_needed_for() == 0 {
// SAFETY: By `T: SplitAt`, `T` is either `[T]`, or a `repr(C)` or
// `repr(transparent)` slice DST, for which the trailing padding
// needed to accommodate `l_len` trailing elements is
// `l_len.padding_needed_for()`. If no trailing padding is required,
// the left and right parts are strictly non-overlapping.
Ok(unsafe { self.via_unchecked() })
} else {
Err(self)
}
}
/// Unsafely produces the split parts of `self`.
///
/// # Safety
///
/// The caller promises that if `I::Aliasing` is [`Exclusive`] or `T`
/// permits interior mutation, then `l_len.padding_needed_for() == 0`.
#[inline(always)]
unsafe fn via_unchecked(self) -> (Ptr<'a, T, I>, Ptr<'a, [T::Elem], I>) {
let l_len = self.l_len();
let inner = self.source.as_inner();
// SAFETY: By invariant on `Self::l_len`, `l_len` is not greater than
// the length of `inner`'s trailing slice.
let (left, right) = unsafe { inner.split_at_unchecked(l_len) };
// Lemma 0: `left` and `right` conform to the aliasing invariant
// `I::Aliasing`. Proof: If `I::Aliasing` is `Exclusive` or `T` permits
// interior mutation, the caller promises that `l_len.padding_needed_for()
// == 0`. Consequently, by post-condition on `PtrInner::split_at_unchecked`,
// there is no trailing padding after `left`'s final element that would
// overlap into `right`. If `I::Aliasing` is `Shared` and `T` forbids interior
// mutation, then overlap between their referents is permissible.
// SAFETY:
// 0. `left` conforms to the aliasing invariant of `I::Aliasing`, by Lemma 0.
// 1. `left` conforms to the alignment invariant of `I::Alignment`, because
// the referents of `left` and `Self` have the same address and type
// (and, thus, alignment requirement).
// 2. `left` conforms to the validity invariant of `I::Validity`, because
// neither the type nor the bytes of `left`'s referent have been changed.
let left = unsafe { Ptr::from_inner(left) };
// SAFETY:
// 0. `right` conforms to the aliasing invariant of `I::Aliasing`, by Lemma
// 0.
// 1. `right` conforms to the alignment invariant of `I::Alignment`, because
// if `ptr` has `I::Alignment = Aligned`, then by invariant on `T:
// SplitAt`, the trailing slice of `ptr` (from which `right` is derived)
// will also be well-aligned.
// 2. `right` conforms to the validity invariant of `I::Validity`,
// because `right: [T::Elem]` is derived from the trailing slice of
// `ptr`, which, by contract on `T: SplitAt::Elem`, has type
// `[T::Elem]`. The `left` part cannot be used to invalidate `right`,
// because the caller promises that if `I::Aliasing` is `Exclusive`
// or `T` permits interior mutation, then `l_len.padding_needed_for()
// == 0` and thus the parts will be non-overlapping.
let right = unsafe { Ptr::from_inner(right) };
(left, right)
}
}
#[cfg(test)]
mod tests {
#[cfg(feature = "derive")]
#[test]
fn test_split_at() {
use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};
#[derive(FromBytes, KnownLayout, SplitAt, IntoBytes, Immutable, Debug)]
#[repr(C)]
struct SliceDst<const OFFSET: usize> {
prefix: [u8; OFFSET],
trailing: [u8],
}
#[allow(clippy::as_conversions)]
fn test_split_at<const OFFSET: usize, const BUFFER_SIZE: usize>() {
// Test `split_at`
let n: usize = BUFFER_SIZE - OFFSET;
let arr = [1; BUFFER_SIZE];
let dst = SliceDst::<OFFSET>::ref_from_bytes(&arr[..]).unwrap();
for i in 0..=n {
let (l, r) = dst.split_at(i).unwrap().via_runtime_check().unwrap();
let l_sum: u8 = l.trailing.iter().sum();
let r_sum: u8 = r.iter().sum();
assert_eq!(l_sum, i as u8);
assert_eq!(r_sum, (n - i) as u8);
assert_eq!(l_sum + r_sum, n as u8);
}
// Test `split_at_mut`
let n: usize = BUFFER_SIZE - OFFSET;
let mut arr = [1; BUFFER_SIZE];
let dst = SliceDst::<OFFSET>::mut_from_bytes(&mut arr[..]).unwrap();
for i in 0..=n {
let (l, r) = dst.split_at_mut(i).unwrap().via_runtime_check().unwrap();
let l_sum: u8 = l.trailing.iter().sum();
let r_sum: u8 = r.iter().sum();
assert_eq!(l_sum, i as u8);
assert_eq!(r_sum, (n - i) as u8);
assert_eq!(l_sum + r_sum, n as u8);
}
}
test_split_at::<0, 16>();
test_split_at::<1, 17>();
test_split_at::<2, 18>();
}
#[cfg(feature = "derive")]
#[test]
#[allow(clippy::as_conversions)]
fn test_split_at_overlapping() {
use crate::{FromBytes, Immutable, IntoBytes, KnownLayout, SplitAt};
#[derive(FromBytes, KnownLayout, SplitAt, Immutable)]
#[repr(C, align(2))]
struct SliceDst {
prefix: u8,
trailing: [u8],
}
const N: usize = 16;
let arr = [1u16; N];
let dst = SliceDst::ref_from_bytes(arr.as_bytes()).unwrap();
for i in 0..N {
let split = dst.split_at(i).unwrap().via_runtime_check();
if i % 2 == 1 {
assert!(split.is_ok());
} else {
assert!(split.is_err());
}
}
}
}

1335
vendor/zerocopy/src/util/macro_util.rs vendored Normal file

File diff suppressed because it is too large

941
vendor/zerocopy/src/util/macros.rs vendored Normal file

@@ -0,0 +1,941 @@
// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
/// Unsafely implements trait(s) for a type.
///
/// # Safety
///
/// The trait impl must be sound.
///
/// When implementing `TryFromBytes`:
/// - If no `is_bit_valid` impl is provided, then it must be valid for
/// `is_bit_valid` to unconditionally return `true`. In other words, it must
/// be the case that any initialized sequence of bytes constitutes a valid
/// instance of `$ty`.
/// - If an `is_bit_valid` impl is provided, then the impl of `is_bit_valid`
/// must only return `true` if its argument refers to a valid `$ty`.
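///
/// For illustration, a hypothetical invocation (`Foo` and `Wrapper` are
/// invented for this example and are not types in this crate):
///
/// ```rust,ignore
/// // SAFETY: `Foo` contains no `UnsafeCell`s.
/// unsafe_impl!(Foo: Immutable);
/// // SAFETY: `Wrapper<T>` is a `#[repr(transparent)]` wrapper around `T`,
/// // and so contains `UnsafeCell`s exactly when `T` does.
/// unsafe_impl!(T: ?Sized + Immutable => Immutable for Wrapper<T>);
/// ```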
macro_rules! unsafe_impl {
// Implement `$trait` for `$ty` with no bounds.
($(#[$attr:meta])* $ty:ty: $trait:ident $(; |$candidate:ident| $is_bit_valid:expr)?) => {{
crate::util::macros::__unsafe();
$(#[$attr])*
// SAFETY: The caller promises that this is sound.
unsafe impl $trait for $ty {
unsafe_impl!(@method $trait $(; |$candidate| $is_bit_valid)?);
}
}};
// Implement all `$traits` for `$ty` with no bounds.
//
// The two arms below exist so that we can apply
// N attributes to each of M trait implementations.
// The simple solution of:
//
// ($(#[$attrs:meta])* $ty:ty: $($traits:ident),*) => {
// $( unsafe_impl!( $(#[$attrs])* $ty: $traits ) );*
// }
//
// won't work: the macro processor sees that the outer repetition
// contains both $attrs and $traits and expects them to match the same
// number of fragments.
//
// To solve this we must:
// 1. Pack the attributes into a single token tree fragment we can match over.
// 2. Expand the traits.
// 3. Unpack and expand the attributes.
($(#[$attrs:meta])* $ty:ty: $($traits:ident),*) => {
unsafe_impl!(@impl_traits_with_packed_attrs { $(#[$attrs])* } $ty: $($traits),*)
};
(@impl_traits_with_packed_attrs $attrs:tt $ty:ty: $($traits:ident),*) => {{
$( unsafe_impl!(@unpack_attrs $attrs $ty: $traits); )*
}};
(@unpack_attrs { $(#[$attrs:meta])* } $ty:ty: $traits:ident) => {
unsafe_impl!($(#[$attrs])* $ty: $traits);
};
// This arm is identical to the following one, except it contains a
// preceding `const`. If we attempt to handle these with a single arm, there
// is an inherent ambiguity between `const` (the keyword) and `const` (the
// ident match for `$tyvar:ident`).
//
// To explain how this works, consider the following invocation:
//
// unsafe_impl!(const N: usize, T: ?Sized + Copy => Clone for Foo<T>);
//
// In this invocation, here are the assignments to meta-variables:
//
// |---------------|------------|
// | Meta-variable | Assignment |
// |---------------|------------|
// | $constname | N |
// | $constty | usize |
// | $tyvar | T |
// | $optbound | Sized |
// | $bound | Copy |
// | $trait | Clone |
// | $ty | Foo<T> |
// |---------------|------------|
//
// The following arm has the same behavior with the exception of the lack of
// support for a leading `const` parameter.
(
$(#[$attr:meta])*
const $constname:ident : $constty:ident $(,)?
$($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
=> $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)?
) => {
unsafe_impl!(
@inner
$(#[$attr])*
@const $constname: $constty,
$($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
=> $trait for $ty $(; |$candidate| $is_bit_valid)?
);
};
(
$(#[$attr:meta])*
$($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
=> $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)?
) => {{
unsafe_impl!(
@inner
$(#[$attr])*
$($tyvar $(: $(? $optbound +)* + $($bound +)*)?,)*
=> $trait for $ty $(; |$candidate| $is_bit_valid)?
);
}};
(
@inner
$(#[$attr:meta])*
$(@const $constname:ident : $constty:ident,)*
$($tyvar:ident $(: $(? $optbound:ident +)* + $($bound:ident +)* )?,)*
=> $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)?
) => {{
crate::util::macros::__unsafe();
$(#[$attr])*
#[allow(non_local_definitions)]
// SAFETY: The caller promises that this is sound.
unsafe impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),* $(, const $constname: $constty,)*> $trait for $ty {
unsafe_impl!(@method $trait $(; |$candidate| $is_bit_valid)?);
}
}};
(@method TryFromBytes ; |$candidate:ident| $is_bit_valid:expr) => {
#[allow(clippy::missing_inline_in_public_items, dead_code)]
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn only_derive_is_allowed_to_implement_this_trait() {}
#[inline]
fn is_bit_valid<AA: crate::pointer::invariant::Reference>($candidate: Maybe<'_, Self, AA>) -> bool {
$is_bit_valid
}
};
(@method TryFromBytes) => {
#[allow(clippy::missing_inline_in_public_items)]
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn only_derive_is_allowed_to_implement_this_trait() {}
#[inline(always)] fn is_bit_valid<AA: crate::pointer::invariant::Reference>(_: Maybe<'_, Self, AA>) -> bool { true }
};
(@method $trait:ident) => {
#[allow(clippy::missing_inline_in_public_items, dead_code)]
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn only_derive_is_allowed_to_implement_this_trait() {}
};
(@method $trait:ident; |$_candidate:ident| $_is_bit_valid:expr) => {
compile_error!("Can't provide `is_bit_valid` impl for trait other than `TryFromBytes`");
};
}
/// Implements `$trait` for `$ty` where `$ty: TransmuteFrom<$repr>` (and
/// vice-versa).
///
/// Calling this macro is safe; the internals of the macro emit appropriate
/// trait bounds which ensure that the given impl is sound.
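///
/// For illustration, a sketch of an invocation; `Wrapping<T>` stands in for
/// any transparent wrapper with the same bit validity as `T`:
///
/// ```rust,ignore
/// impl_for_transmute_from!(T: FromBytes => FromBytes for Wrapping<T>[<T>]);
/// ```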
macro_rules! impl_for_transmute_from {
(
$(#[$attr:meta])*
$($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?)?
=> $trait:ident for $ty:ty [$($unsafe_cell:ident)? <$repr:ty>]
) => {
const _: () = {
$(#[$attr])*
#[allow(non_local_definitions)]
// SAFETY: `is_trait<T, R>` (defined and used below) requires `T:
// TransmuteFrom<R>`, `R: TransmuteFrom<T>`, and `R: $trait`. It is
// called using `$ty` and `$repr`, ensuring that `$ty` and `$repr`
// have equivalent bit validity, and ensuring that `$repr: $trait`.
// The supported traits - `TryFromBytes`, `FromZeros`, `FromBytes`,
// and `IntoBytes` - are defined only in terms of the bit validity
// of a type. Therefore, `$repr: $trait` ensures that `$ty: $trait`
// is sound.
unsafe impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?)?> $trait for $ty {
#[allow(dead_code, clippy::missing_inline_in_public_items)]
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn only_derive_is_allowed_to_implement_this_trait() {
use crate::pointer::{*, invariant::Valid};
impl_for_transmute_from!(@assert_is_supported_trait $trait);
fn is_trait<T, R>()
where
T: TransmuteFrom<R, Valid, Valid> + ?Sized,
R: TransmuteFrom<T, Valid, Valid> + ?Sized,
R: $trait,
{
}
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn f<$($tyvar $(: $(? $optbound +)* $($bound +)*)?)?>() {
is_trait::<$ty, $repr>();
}
}
impl_for_transmute_from!(
@is_bit_valid
$(<$tyvar $(: $(? $optbound +)* $($bound +)*)?>)?
$trait for $ty [$($unsafe_cell)? <$repr>]
);
}
};
};
(@assert_is_supported_trait TryFromBytes) => {};
(@assert_is_supported_trait FromZeros) => {};
(@assert_is_supported_trait FromBytes) => {};
(@assert_is_supported_trait IntoBytes) => {};
(
@is_bit_valid
$(<$tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?>)?
TryFromBytes for $ty:ty [UnsafeCell<$repr:ty>]
) => {
#[inline]
fn is_bit_valid<A: crate::pointer::invariant::Reference>(candidate: Maybe<'_, Self, A>) -> bool {
let c: Maybe<'_, Self, crate::pointer::invariant::Exclusive> = candidate.into_exclusive_or_pme();
let c: Maybe<'_, $repr, _> = c.transmute::<_, _, (_, (_, (BecauseExclusive, BecauseExclusive)))>();
// SAFETY: This macro ensures that `$repr` and `Self` have the same
// size and bit validity. Thus, a bit-valid instance of `$repr` is
// also a bit-valid instance of `Self`.
<$repr as TryFromBytes>::is_bit_valid(c)
}
};
(
@is_bit_valid
$(<$tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?>)?
TryFromBytes for $ty:ty [<$repr:ty>]
) => {
#[inline]
fn is_bit_valid<A: crate::pointer::invariant::Reference>(candidate: $crate::Maybe<'_, Self, A>) -> bool {
// SAFETY: This macro ensures that `$repr` and `Self` have the same
// size and bit validity. Thus, a bit-valid instance of `$repr` is
// also a bit-valid instance of `Self`.
<$repr as TryFromBytes>::is_bit_valid(candidate.transmute())
}
};
(
@is_bit_valid
$(<$tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?>)?
$trait:ident for $ty:ty [$($unsafe_cell:ident)? <$repr:ty>]
) => {
// Trait other than `TryFromBytes`; no `is_bit_valid` impl.
};
}
/// Implements a trait for a type, bounding on each member of the power set of
/// a set of type variables. This is useful for implementing traits for tuples
/// or `fn` types.
///
/// The last argument is the name of a macro which will be called in every
/// `impl` block, and is expected to expand to the name of the type for which to
/// implement the trait.
///
/// For example, the invocation:
/// ```ignore
/// unsafe_impl_for_power_set!(A, B => Foo for type!(...))
/// ```
/// ...expands to:
/// ```ignore
/// unsafe impl Foo for type!() { ... }
/// unsafe impl<B> Foo for type!(B) { ... }
/// unsafe impl<A, B> Foo for type!(A, B) { ... }
/// ```
macro_rules! unsafe_impl_for_power_set {
(
$first:ident $(, $rest:ident)* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...)
$(; |$candidate:ident| $is_bit_valid:expr)?
) => {
unsafe_impl_for_power_set!(
$($rest),* $(-> $ret)? => $trait for $macro!(...)
$(; |$candidate| $is_bit_valid)?
);
unsafe_impl_for_power_set!(
@impl $first $(, $rest)* $(-> $ret)? => $trait for $macro!(...)
$(; |$candidate| $is_bit_valid)?
);
};
(
$(-> $ret:ident)? => $trait:ident for $macro:ident!(...)
$(; |$candidate:ident| $is_bit_valid:expr)?
) => {
unsafe_impl_for_power_set!(
@impl $(-> $ret)? => $trait for $macro!(...)
$(; |$candidate| $is_bit_valid)?
);
};
(
@impl $($vars:ident),* $(-> $ret:ident)? => $trait:ident for $macro:ident!(...)
$(; |$candidate:ident| $is_bit_valid:expr)?
) => {
unsafe_impl!(
$($vars,)* $($ret)? => $trait for $macro!($($vars),* $(-> $ret)?)
$(; |$candidate| $is_bit_valid)?
);
};
}
/// Expands to an `Option<extern "C" fn>` type with the given argument types and
/// return type. Designed for use with `unsafe_impl_for_power_set`.
macro_rules! opt_extern_c_fn {
($($args:ident),* -> $ret:ident) => { Option<extern "C" fn($($args),*) -> $ret> };
}
/// Expands to an `Option<unsafe extern "C" fn>` type with the given argument
/// types and return type. Designed for use with `unsafe_impl_for_power_set`.
macro_rules! opt_unsafe_extern_c_fn {
($($args:ident),* -> $ret:ident) => { Option<unsafe extern "C" fn($($args),*) -> $ret> };
}
/// Expands to an `Option<fn>` type with the given argument types and return
/// type. Designed for use with `unsafe_impl_for_power_set`.
macro_rules! opt_fn {
($($args:ident),* -> $ret:ident) => { Option<fn($($args),*) -> $ret> };
}
/// Expands to an `Option<unsafe fn>` type with the given argument types and
/// return type. Designed for use with `unsafe_impl_for_power_set`.
macro_rules! opt_unsafe_fn {
($($args:ident),* -> $ret:ident) => { Option<unsafe fn($($args),*) -> $ret> };
}
/// Implements trait(s) for a type or verifies the given implementation by
/// referencing an existing (derived) implementation.
///
/// This macro exists so that we can provide zerocopy-derive as an optional
/// dependency and still get the benefit of using its derives to validate that
/// our trait impls are sound.
///
/// When compiling without `--cfg 'feature = "derive"'` and without `--cfg test`,
/// `impl_or_verify!` emits the provided trait impl. When compiling with either
/// of those cfgs, it is expected that the type in question is deriving the
/// traits instead. In this case, `impl_or_verify!` emits code which validates
/// that the given trait impl is at least as restrictive as the impl emitted
/// by the custom derive. This has the effect of confirming that the impl which
/// is emitted when the `derive` feature is disabled is actually sound (on the
/// assumption that the impl emitted by the custom derive is sound).
///
/// The caller is still required to provide a safety comment (e.g. using the
/// `const _: () = unsafe { ... }` construct). The reason for this restriction is that, while
/// `impl_or_verify!` can guarantee that the provided impl is sound when it is
/// compiled with the appropriate cfgs, there is no way to guarantee that it is
/// ever compiled with those cfgs. In particular, it would be possible to
/// accidentally place an `impl_or_verify!` call in a context that is only ever
/// compiled when the `derive` feature is disabled. If that were to happen,
/// there would be nothing to prevent an unsound trait impl from being emitted.
/// Requiring a safety comment reduces the likelihood of emitting an unsound
/// impl in this case, and also provides useful documentation for readers of the
/// code.
///
/// Finally, if a `TryFromBytes::is_bit_valid` impl is provided, it must adhere
/// to the safety preconditions of [`unsafe_impl!`].
///
/// ## Example
///
/// ```rust,ignore
/// // Note that these derives are gated by `feature = "derive"`
/// #[cfg_attr(any(feature = "derive", test), derive(FromZeros, FromBytes, IntoBytes, Unaligned))]
/// #[repr(transparent)]
/// struct Wrapper<T>(T);
///
/// const _: () = unsafe {
/// /// SAFETY:
/// /// `Wrapper<T>` is `repr(transparent)`, so it is sound to implement any
/// /// zerocopy trait if `T` implements that trait.
/// impl_or_verify!(T: FromZeros => FromZeros for Wrapper<T>);
/// impl_or_verify!(T: FromBytes => FromBytes for Wrapper<T>);
/// impl_or_verify!(T: IntoBytes => IntoBytes for Wrapper<T>);
/// impl_or_verify!(T: Unaligned => Unaligned for Wrapper<T>);
/// }
/// ```
macro_rules! impl_or_verify {
// The following two match arms follow the same pattern as their
// counterparts in `unsafe_impl!`; see the documentation on those arms for
// more details.
(
const $constname:ident : $constty:ident $(,)?
$($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
=> $trait:ident for $ty:ty
) => {
impl_or_verify!(@impl { unsafe_impl!(
const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty
); });
impl_or_verify!(@verify $trait, {
impl<const $constname: $constty, $($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
});
};
(
$($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),*
=> $trait:ident for $ty:ty $(; |$candidate:ident| $is_bit_valid:expr)?
) => {
impl_or_verify!(@impl { unsafe_impl!(
$($tyvar $(: $(? $optbound +)* $($bound +)*)?),* => $trait for $ty
$(; |$candidate| $is_bit_valid)?
); });
impl_or_verify!(@verify $trait, {
impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?),*> Subtrait for $ty {}
});
};
(@impl $impl_block:tt) => {
#[cfg(not(any(feature = "derive", test)))]
{ $impl_block };
};
(@verify $trait:ident, $impl_block:tt) => {
#[cfg(any(feature = "derive", test))]
{
// On some toolchains, `Subtrait` triggers the `dead_code` lint
// because it is implemented but never used.
#[allow(dead_code)]
trait Subtrait: $trait {}
$impl_block
};
};
}
/// Implements `KnownLayout` for a sized type.
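///
/// For illustration, hypothetical invocations (the exact list of types this
/// crate registers may differ):
///
/// ```rust,ignore
/// impl_known_layout!(u8, i8, bool);
/// impl_known_layout!(T => Option<T>);
/// impl_known_layout!(const N: usize, T => [T; N]);
/// ```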
macro_rules! impl_known_layout {
($(const $constvar:ident : $constty:ty, $tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => {
$(impl_known_layout!(@inner const $constvar: $constty, $tyvar $(: ?$optbound)? => $ty);)*
};
($($tyvar:ident $(: ?$optbound:ident)? => $ty:ty),* $(,)?) => {
$(impl_known_layout!(@inner , $tyvar $(: ?$optbound)? => $ty);)*
};
($($(#[$attrs:meta])* $ty:ty),*) => { $(impl_known_layout!(@inner , => $(#[$attrs])* $ty);)* };
(@inner $(const $constvar:ident : $constty:ty)? , $($tyvar:ident $(: ?$optbound:ident)?)? => $(#[$attrs:meta])* $ty:ty) => {
const _: () = {
use core::ptr::NonNull;
#[allow(non_local_definitions)]
$(#[$attrs])*
// SAFETY: Delegates safety to `DstLayout::for_type`.
unsafe impl<$($tyvar $(: ?$optbound)?)? $(, const $constvar : $constty)?> KnownLayout for $ty {
#[allow(clippy::missing_inline_in_public_items)]
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {}
type PointerMetadata = ();
// SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are
// identical because `CoreMaybeUninit<T>` has the same size and
// alignment as `T` [1], and `CoreMaybeUninit` admits
// uninitialized bytes in all positions.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
//
// `MaybeUninit<T>` is guaranteed to have the same size,
// alignment, and ABI as `T`
type MaybeUninit = core::mem::MaybeUninit<Self>;
const LAYOUT: crate::DstLayout = crate::DstLayout::for_type::<$ty>();
// SAFETY: `.cast` preserves address and provenance.
//
// FIXME(#429): Add documentation to `.cast` that promises that
// it preserves provenance.
#[inline(always)]
fn raw_from_ptr_len(bytes: NonNull<u8>, _meta: ()) -> NonNull<Self> {
bytes.cast::<Self>()
}
#[inline(always)]
fn pointer_to_metadata(_ptr: *mut Self) -> () {
}
}
};
};
}
/// Implements `KnownLayout` for a type in terms of the implementation of
/// another type with the same representation.
///
/// # Safety
///
/// - `$ty` and `$repr` must have the same:
/// - Fixed prefix size
/// - Alignment
/// - (For DSTs) trailing slice element size
/// - It must be valid to perform an `as` cast from `*mut $repr` to `*mut $ty`,
/// and this operation must preserve referent size (i.e., `size_of_val_raw`).
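///
/// For illustration, an invocation in the spirit of how this crate wraps
/// transparent types (a sketch, not an exhaustive list):
///
/// ```rust,ignore
/// unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
/// ```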
macro_rules! unsafe_impl_known_layout {
($($tyvar:ident: ?Sized + KnownLayout =>)? #[repr($repr:ty)] $ty:ty) => {{
use core::ptr::NonNull;
crate::util::macros::__unsafe();
#[allow(non_local_definitions)]
// SAFETY: The caller promises that this is sound.
unsafe impl<$($tyvar: ?Sized + KnownLayout)?> KnownLayout for $ty {
#[allow(clippy::missing_inline_in_public_items, dead_code)]
#[cfg_attr(all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), coverage(off))]
fn only_derive_is_allowed_to_implement_this_trait() {}
type PointerMetadata = <$repr as KnownLayout>::PointerMetadata;
type MaybeUninit = <$repr as KnownLayout>::MaybeUninit;
const LAYOUT: DstLayout = <$repr as KnownLayout>::LAYOUT;
// SAFETY: All operations preserve address and provenance. Caller
// has promised that the `as` cast preserves size.
//
// FIXME(#429): Add documentation to `NonNull::new_unchecked` that
// it preserves provenance.
#[inline(always)]
fn raw_from_ptr_len(bytes: NonNull<u8>, meta: <$repr as KnownLayout>::PointerMetadata) -> NonNull<Self> {
#[allow(clippy::as_conversions)]
let ptr = <$repr>::raw_from_ptr_len(bytes, meta).as_ptr() as *mut Self;
// SAFETY: `ptr` was converted from `bytes`, which is non-null.
unsafe { NonNull::new_unchecked(ptr) }
}
#[inline(always)]
fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata {
#[allow(clippy::as_conversions)]
let ptr = ptr as *mut $repr;
<$repr>::pointer_to_metadata(ptr)
}
}
}};
}
/// Uses `align_of` to confirm that a type or set of types have alignment 1.
///
/// Note that `align_of<T>` requires `T: Sized`, so this macro doesn't work for
/// unsized types.
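///
/// For illustration, a hypothetical invocation (each of these types has
/// alignment 1):
///
/// ```rust,ignore
/// assert_unaligned!(u8, i8, ());
/// ```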
macro_rules! assert_unaligned {
($($tys:ty),*) => {
$(
// We only compile this assertion under `cfg(test)` to avoid taking
// an extra non-dev dependency (and making this crate more expensive
// to compile for our dependents).
#[cfg(test)]
static_assertions::const_assert_eq!(core::mem::align_of::<$tys>(), 1);
)*
};
}
/// Emits a function definition as either `const fn` or `fn` depending on
/// whether the current toolchain version supports `const fn` with generic trait
/// bounds.
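///
/// For illustration, a hypothetical invocation (the function is emitted as
/// `const fn` only on new-enough toolchains):
///
/// ```rust,ignore
/// maybe_const_trait_bounded_fn!(
///     /// Doubles `n`.
///     pub(crate) const fn double(n: usize) -> usize { n * 2 }
/// );
/// ```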
macro_rules! maybe_const_trait_bounded_fn {
// This case handles both `self` methods (where `self` is by value) and
// non-method functions. Each `$args` may optionally be followed by `:
// $arg_tys:ty`, which can be omitted for `self`.
($(#[$attr:meta])* $vis:vis const fn $name:ident($($args:ident $(: $arg_tys:ty)?),* $(,)?) $(-> $ret_ty:ty)? $body:block) => {
#[cfg(zerocopy_generic_bounds_in_const_fn_1_61_0)]
$(#[$attr])* $vis const fn $name($($args $(: $arg_tys)?),*) $(-> $ret_ty)? $body
#[cfg(not(zerocopy_generic_bounds_in_const_fn_1_61_0))]
$(#[$attr])* $vis fn $name($($args $(: $arg_tys)?),*) $(-> $ret_ty)? $body
};
}
/// Either panic (if the current Rust toolchain supports panicking in `const
/// fn`) or evaluate a constant that will cause an array indexing error whose
/// error message will include the format string.
///
/// The type that this expression evaluates to must be `Copy`, or else the
/// non-panicking desugaring will fail to compile.
macro_rules! const_panic {
(@non_panic $($_arg:tt)+) => {{
// This will type check to whatever type is expected based on the call
// site.
let panic: [_; 0] = [];
// This will always fail (since we're indexing into an array of size 0).
#[allow(unconditional_panic)]
panic[0]
}};
($($arg:tt)+) => {{
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
panic!($($arg)+);
#[cfg(not(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
const_panic!(@non_panic $($arg)+)
}};
}
/// Either assert (if the current Rust toolchain supports panicking in `const
/// fn`) or evaluate the expression and, if it evaluates to `false`, call
/// `const_panic!`. This is used in place of `assert!` in const contexts to
/// accommodate old toolchains.
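///
/// For illustration, a hypothetical use in a `const fn` (the `align`
/// variable is invented for the example):
///
/// ```rust,ignore
/// const_assert!(align.is_power_of_two(), "align must be a power of two");
/// ```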
macro_rules! const_assert {
($e:expr) => {{
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
assert!($e);
#[cfg(not(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
{
let e = $e;
if !e {
let _: () = const_panic!(@non_panic concat!("assertion failed: ", stringify!($e)));
}
}
}};
($e:expr, $($args:tt)+) => {{
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
assert!($e, $($args)+);
#[cfg(not(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
{
let e = $e;
if !e {
let _: () = const_panic!(@non_panic concat!("assertion failed: ", stringify!($e), ": ", stringify!($($args)*)), $($args)*);
}
}
}};
}
/// Like `const_assert!`, but relative to `debug_assert!`.
macro_rules! const_debug_assert {
($e:expr $(, $msg:expr)?) => {{
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
debug_assert!($e $(, $msg)?);
#[cfg(not(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
{
// Use this (rather than `#[cfg(debug_assertions)]`) to ensure that
// `$e` is always compiled even if it will never be evaluated at
// runtime.
if cfg!(debug_assertions) {
let e = $e;
if !e {
let _: () = const_panic!(@non_panic concat!("assertion failed: ", stringify!($e) $(, ": ", $msg)?));
}
}
}
}}
}
/// Either invoke `unreachable!()` or `loop {}` depending on whether the Rust
/// toolchain supports panicking in `const fn`.
macro_rules! const_unreachable {
() => {{
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
unreachable!();
#[cfg(not(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
loop {}
}};
}
/// Asserts at compile time that `$condition` is true for `Self` or the given
/// `$tyvar`s. Unlike `const_assert`, this is *strictly* a compile-time check;
/// it cannot be evaluated in a runtime context. The condition is checked after
/// monomorphization and, upon failure, emits a compile error.
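///
/// For illustration, a hypothetical use inside of a method:
///
/// ```rust,ignore
/// static_assert!(Self: Sized => core::mem::size_of::<Self>() > 0, "unexpected ZST");
/// ```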
macro_rules! static_assert {
(Self $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )? => $condition:expr $(, $args:tt)*) => {{
trait StaticAssert {
const ASSERT: bool;
}
impl<T $(: $(? $optbound +)* $($bound +)*)?> StaticAssert for T {
const ASSERT: bool = {
const_assert!($condition $(, $args)*);
$condition
};
}
const_assert!(<Self as StaticAssert>::ASSERT);
}};
($($tyvar:ident $(: $(? $optbound:ident $(+)?)* $($bound:ident $(+)?)* )?),* => $condition:expr $(, $args:tt)*) => {{
trait StaticAssert {
const ASSERT: bool;
}
// NOTE: We use `PhantomData` so we can support unsized types.
impl<$($tyvar $(: $(? $optbound +)* $($bound +)*)?,)*> StaticAssert for ($(core::marker::PhantomData<$tyvar>,)*) {
const ASSERT: bool = {
const_assert!($condition $(, $args)*);
$condition
};
}
const_assert!(<($(core::marker::PhantomData<$tyvar>,)*) as StaticAssert>::ASSERT);
}};
}
/// Assert at compile time that `tyvar` does not have a zero-sized DST
/// component.
macro_rules! static_assert_dst_is_not_zst {
($tyvar:ident) => {{
use crate::KnownLayout;
static_assert!($tyvar: ?Sized + KnownLayout => {
let dst_is_zst = match $tyvar::LAYOUT.size_info {
crate::SizeInfo::Sized { .. } => false,
crate::SizeInfo::SliceDst(TrailingSliceLayout { elem_size, .. }) => {
elem_size == 0
}
};
!dst_is_zst
}, "cannot call this method on a dynamically-sized type whose trailing slice element is zero-sized");
}}
}
/// # Safety
///
/// The caller must ensure that the cast does not grow the size of the referent.
/// Both preserving and shrinking the size of the referent are acceptable.
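///
/// For illustration, a sketch of a size-preserving use (`t` is a
/// hypothetical `PtrInner<'_, T>`):
///
/// ```rust,ignore
/// // SAFETY: `ManuallyDrop<T>` is guaranteed to have the same size as `T`,
/// // so this cast preserves referent size.
/// let md: PtrInner<'_, ManuallyDrop<T>> = unsafe { cast!(t) };
/// ```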
macro_rules! cast {
($p:expr) => {{
let ptr: crate::pointer::PtrInner<'_, _> = $p;
let ptr = ptr.as_non_null();
let ptr = ptr.as_ptr();
#[allow(clippy::as_conversions)]
let ptr = ptr as *mut _;
#[allow(unused_unsafe)]
// SAFETY: `NonNull::as_ptr` returns a non-null pointer, so the argument
// to `NonNull::new_unchecked` is also non-null.
let ptr = unsafe { core::ptr::NonNull::new_unchecked(ptr) };
// SAFETY: The caller promises that the cast preserves or shrinks
// referent size. By invariant on `$p: PtrInner` (guaranteed by type
// annotation above), `$p` refers to a byte range entirely contained
// inside of a single allocation, has provenance for that whole byte
// range, and will not outlive the allocation. All of these conditions
// are preserved when preserving or shrinking referent size.
crate::pointer::PtrInner::new(ptr)
}};
}
/// Implements `TransmuteFrom` and `SizeEq` for `T` and `$wrapper<T>`.
///
/// # Safety
///
/// `T` and `$wrapper<T>` must have the same bit validity, and must have the
/// same size in the sense of `SizeEq`.
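///
/// For illustration, a sketch of an invocation, mirroring how transparent
/// wrappers such as `Wrapping<T>` are typically handled:
///
/// ```rust,ignore
/// // SAFETY: `Wrapping<T>` is a `#[repr(transparent)]` wrapper around `T`,
/// // so it has the same bit validity and size as `T`.
/// unsafe_impl_for_transparent_wrapper!(T => Wrapping<T>);
/// ```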
macro_rules! unsafe_impl_for_transparent_wrapper {
(T $(: ?$optbound:ident)? => $wrapper:ident<T>) => {{
crate::util::macros::__unsafe();
use crate::pointer::{TransmuteFrom, PtrInner, SizeEq, invariant::Valid};
// SAFETY: The caller promises that `T` and `$wrapper<T>` have the same
// bit validity.
unsafe impl<T $(: ?$optbound)?> TransmuteFrom<T, Valid, Valid> for $wrapper<T> {}
// SAFETY: See previous safety comment.
unsafe impl<T $(: ?$optbound)?> TransmuteFrom<$wrapper<T>, Valid, Valid> for T {}
// SAFETY: The caller promises that `T` and `$wrapper<T>` satisfy
// `SizeEq`.
unsafe impl<T $(: ?$optbound)?> SizeEq<T> for $wrapper<T> {
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, T>) -> PtrInner<'_, $wrapper<T>> {
// SAFETY: See previous safety comment.
unsafe { cast!(t) }
}
}
// SAFETY: See previous safety comment.
unsafe impl<T $(: ?$optbound)?> SizeEq<$wrapper<T>> for T {
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, $wrapper<T>>) -> PtrInner<'_, T> {
// SAFETY: See previous safety comment.
unsafe { cast!(t) }
}
}
}};
}
macro_rules! impl_transitive_transmute_from {
($($tyvar:ident $(: ?$optbound:ident)?)? => $t:ty => $u:ty => $v:ty) => {
const _: () = {
use crate::pointer::{TransmuteFrom, PtrInner, SizeEq, invariant::Valid};
// SAFETY: Since `$u: SizeEq<$t>` and `$v: SizeEq<$u>`, this impl is
// transitively sound.
unsafe impl<$($tyvar $(: ?$optbound)?)?> SizeEq<$t> for $v
where
$u: SizeEq<$t>,
$v: SizeEq<$u>,
{
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, $t>) -> PtrInner<'_, $v> {
let u = <$u as SizeEq<_>>::cast_from_raw(t);
<$v as SizeEq<_>>::cast_from_raw(u)
}
}
// SAFETY: Since `$u: TransmuteFrom<$t, Valid, Valid>`, it is sound
// to transmute a bit-valid `$t` to a bit-valid `$u`. Since `$v:
// TransmuteFrom<$u, Valid, Valid>`, it is sound to transmute that
// bit-valid `$u` to a bit-valid `$v`.
unsafe impl<$($tyvar $(: ?$optbound)?)?> TransmuteFrom<$t, Valid, Valid> for $v
where
$u: TransmuteFrom<$t, Valid, Valid>,
$v: TransmuteFrom<$u, Valid, Valid>,
{}
};
};
}
#[rustfmt::skip]
macro_rules! impl_size_eq {
($t:ty, $u:ty) => {
const _: () = {
use crate::{KnownLayout, pointer::{PtrInner, SizeEq}};
static_assert!(=> {
let t = <$t as KnownLayout>::LAYOUT;
let u = <$u as KnownLayout>::LAYOUT;
t.align.get() >= u.align.get() && match (t.size_info, u.size_info) {
(SizeInfo::Sized { size: t }, SizeInfo::Sized { size: u }) => t == u,
(
SizeInfo::SliceDst(TrailingSliceLayout { offset: t_offset, elem_size: t_elem_size }),
SizeInfo::SliceDst(TrailingSliceLayout { offset: u_offset, elem_size: u_elem_size })
) => t_offset == u_offset && t_elem_size == u_elem_size,
_ => false,
}
});
// SAFETY: See inline.
unsafe impl SizeEq<$t> for $u {
#[inline(always)]
fn cast_from_raw(t: PtrInner<'_, $t>) -> PtrInner<'_, $u> {
// SAFETY: We've asserted that their
// `KnownLayout::LAYOUT.size_info`s are equal, and so this
// cast is guaranteed to preserve address and referent size.
// It trivially preserves provenance.
unsafe { cast!(t) }
}
}
// SAFETY: See previous safety comment.
unsafe impl SizeEq<$u> for $t {
#[inline(always)]
fn cast_from_raw(u: PtrInner<'_, $u>) -> PtrInner<'_, $t> {
// SAFETY: See previous safety comment.
unsafe { cast!(u) }
}
}
};
};
}
/// Invokes `$blk` in a context in which `$src<$t>` and `$dst<$u>` implement
/// `SizeEq`.
///
/// This macro emits code which implements `SizeEq`, and ensures that the impl
/// is sound via PME.
///
/// # Safety
///
/// Inside of `$blk`, the caller must only use `$src` and `$dst` as `$src<$t>`
/// and `$dst<$u>`. The caller must not use `$src` or `$dst` to wrap any other
/// types.
macro_rules! unsafe_with_size_eq {
(<$src:ident<$t:ident>, $dst:ident<$u:ident>> $blk:expr) => {{
crate::util::macros::__unsafe();
use crate::{KnownLayout, pointer::PtrInner};
#[repr(transparent)]
struct $src<T: ?Sized>(T);
#[repr(transparent)]
struct $dst<U: ?Sized>(U);
// SAFETY: Since `$src<T>` is a `#[repr(transparent)]` wrapper around
// `T`, it has the same bit validity and size as `T`.
unsafe_impl_for_transparent_wrapper!(T: ?Sized => $src<T>);
// SAFETY: Since `$dst<T>` is a `#[repr(transparent)]` wrapper around
// `T`, it has the same bit validity and size as `T`.
unsafe_impl_for_transparent_wrapper!(T: ?Sized => $dst<T>);
// SAFETY: `$src<T>` is a `#[repr(transparent)]` wrapper around `T` with
// no added semantics.
unsafe impl<T: ?Sized> InvariantsEq<$src<T>> for T {}
// SAFETY: `$dst<T>` is a `#[repr(transparent)]` wrapper around `T` with
// no added semantics.
unsafe impl<T: ?Sized> InvariantsEq<$dst<T>> for T {}
// SAFETY: See inline for the soundness of this impl when
// `cast_from_raw` is actually instantiated (otherwise, PMEs may not be
// triggered).
//
// We manually instantiate `cast_from_raw` below to ensure that this PME
// can be triggered, and the caller promises not to use `$src` and
// `$dst` with any wrapped types other than `$t` and `$u` respectively.
unsafe impl<T: ?Sized, U: ?Sized> SizeEq<$src<T>> for $dst<U>
where
T: KnownLayout<PointerMetadata = usize>,
U: KnownLayout<PointerMetadata = usize>,
{
fn cast_from_raw(src: PtrInner<'_, $src<T>>) -> PtrInner<'_, Self> {
// SAFETY: `crate::layout::cast_from_raw` promises to satisfy
// the safety invariants of `SizeEq::cast_from_raw`, or to
// generate a PME. Since `$src<T>` and `$dst<U>` are
// `#[repr(transparent)]` wrappers around `T` and `U`
// respectively, a `cast_from_raw` impl which satisfies the
// conditions for casting from `NonNull<T>` to `NonNull<U>` also
// satisfies the conditions for casting from `NonNull<$src<T>>`
// to `NonNull<$dst<U>>`.
// SAFETY: By the preceding safety comment, this cast preserves
// referent size.
let src: PtrInner<'_, T> = unsafe { cast!(src) };
let dst: PtrInner<'_, U> = crate::layout::cast_from_raw(src);
// SAFETY: By the preceding safety comment, this cast preserves
// referent size.
unsafe { cast!(dst) }
}
}
// See safety comment on the preceding `unsafe impl` block for an
// explanation of why we need this block.
if 1 == 0 {
let ptr = <$t as KnownLayout>::raw_dangling();
#[allow(unused_unsafe)]
// SAFETY: This call is never executed.
let ptr = unsafe { crate::pointer::PtrInner::new(ptr) };
#[allow(unused_unsafe)]
// SAFETY: This call is never executed.
let ptr = unsafe { cast!(ptr) };
let _ = <$dst<$u> as SizeEq<$src<$t>>>::cast_from_raw(ptr);
}
impl_for_transmute_from!(T: ?Sized + TryFromBytes => TryFromBytes for $src<T>[<T>]);
impl_for_transmute_from!(T: ?Sized + FromBytes => FromBytes for $src<T>[<T>]);
impl_for_transmute_from!(T: ?Sized + FromZeros => FromZeros for $src<T>[<T>]);
impl_for_transmute_from!(T: ?Sized + IntoBytes => IntoBytes for $src<T>[<T>]);
impl_for_transmute_from!(U: ?Sized + TryFromBytes => TryFromBytes for $dst<U>[<U>]);
impl_for_transmute_from!(U: ?Sized + FromBytes => FromBytes for $dst<U>[<U>]);
impl_for_transmute_from!(U: ?Sized + FromZeros => FromZeros for $dst<U>[<U>]);
impl_for_transmute_from!(U: ?Sized + IntoBytes => IntoBytes for $dst<U>[<U>]);
// SAFETY: `$src<T>` is a `#[repr(transparent)]` wrapper around `T`, and
// so permits interior mutation exactly when `T` does.
unsafe_impl!(T: ?Sized + Immutable => Immutable for $src<T>);
// SAFETY: `$dst<T>` is a `#[repr(transparent)]` wrapper around `T`, and
// so permits interior mutation exactly when `T` does.
unsafe_impl!(T: ?Sized + Immutable => Immutable for $dst<T>);
$blk
}};
}
/// A no-op `unsafe fn` for use in macro expansions.
///
/// Calling this function in a macro expansion ensures that the macro's caller
/// must wrap the call in `unsafe { ... }`.
pub(crate) const unsafe fn __unsafe() {}

827
vendor/zerocopy/src/util/mod.rs vendored Normal file

@@ -0,0 +1,827 @@
// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
#[macro_use]
pub(crate) mod macros;
#[doc(hidden)]
pub mod macro_util;
use core::{
marker::PhantomData,
mem::{self, ManuallyDrop},
num::NonZeroUsize,
ptr::NonNull,
};
use super::*;
/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}
impl<T: ?Sized> Default for SendSyncPhantomData<T> {
fn default() -> SendSyncPhantomData<T> {
SendSyncPhantomData(PhantomData)
}
}
impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> {
fn eq(&self, other: &Self) -> bool {
self.0.eq(&other.0)
}
}
impl<T: ?Sized> Eq for SendSyncPhantomData<T> {}
pub(crate) trait AsAddress {
fn addr(self) -> usize;
}
impl<T: ?Sized> AsAddress for &T {
#[inline(always)]
fn addr(self) -> usize {
let ptr: *const T = self;
AsAddress::addr(ptr)
}
}
impl<T: ?Sized> AsAddress for &mut T {
#[inline(always)]
fn addr(self) -> usize {
let ptr: *const T = self;
AsAddress::addr(ptr)
}
}
impl<T: ?Sized> AsAddress for NonNull<T> {
#[inline(always)]
fn addr(self) -> usize {
AsAddress::addr(self.as_ptr())
}
}
impl<T: ?Sized> AsAddress for *const T {
#[inline(always)]
fn addr(self) -> usize {
// FIXME(#181), FIXME(https://github.com/rust-lang/rust/issues/95228):
// Use `.addr()` instead of `as usize` once it's stable, and get rid of
// this `allow`. Currently, `as usize` is the only way to accomplish
// this.
#[allow(clippy::as_conversions)]
#[cfg_attr(
__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
allow(lossy_provenance_casts)
)]
return self.cast::<()>() as usize;
}
}
impl<T: ?Sized> AsAddress for *mut T {
#[inline(always)]
fn addr(self) -> usize {
let ptr: *const T = self;
AsAddress::addr(ptr)
}
}
/// Validates that `t` is aligned to `align_of::<U>()`.
#[inline(always)]
pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> {
// `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
// turn guarantees that this mod operation will not panic.
#[allow(clippy::arithmetic_side_effects)]
let remainder = t.addr() % mem::align_of::<U>();
if remainder == 0 {
Ok(())
} else {
// SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`.
// That's only possible if `align_of::<U>() > 1`.
Err(unsafe { AlignmentError::new_unchecked(()) })
}
}
/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that align is a power of two; there are no guarantees
/// on the answer it gives if this is not the case.
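///
/// For example (values chosen for illustration): `padding_needed_for(5, 4)`
/// is `3`, since the next multiple of 4 is 8, while `padding_needed_for(8, 8)`
/// is `0`, since 8 is already a multiple of 8.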
#[cfg_attr(
kani,
kani::requires(len <= isize::MAX as usize),
kani::requires(align.is_power_of_two()),
kani::ensures(|&p| (len + p) % align.get() == 0),
// Ensures that we add the minimum required padding.
kani::ensures(|&p| p < align.get()),
)]
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
#[cfg(kani)]
#[kani::proof_for_contract(padding_needed_for)]
fn proof() {
padding_needed_for(kani::any(), kani::any());
}
// Abstractly, we want to compute:
// align - (len % align),
// while also handling the case where len % align == 0 (in which case the
// padding is 0, not align).
// Because align is a power of two, len % align = len & (align-1).
// The subtraction align - 1 below can't underflow because align is nonzero.
#[allow(clippy::arithmetic_side_effects)]
let mask = align.get() - 1;
// To efficiently subtract this value from align, we can use the bitwise
// complement. Note that ((!len) & (align-1)) gives us a number that, added
// to (len & (align-1)), sums to align-1. So subtracting 1 from len before
// taking the complement effectively subtracts (len % align) from align.
// Quick inspection of cases shows that this also handles the case where
// len % align = 0 correctly: (len-1) % align then equals align-1, so the
// complement mod align will be 0, as desired.
//
// The following reasoning can be verified quickly by an SMT solver
// supporting the theory of bitvectors:
// ```smtlib
// ; Naive implementation of padding
// (define-fun padding1 (
// (len (_ BitVec 32))
// (align (_ BitVec 32))) (_ BitVec 32)
// (ite
// (= (_ bv0 32) (bvand len (bvsub align (_ bv1 32))))
// (_ bv0 32)
// (bvsub align (bvand len (bvsub align (_ bv1 32))))))
//
// ; The implementation below
// (define-fun padding2 (
// (len (_ BitVec 32))
// (align (_ BitVec 32))) (_ BitVec 32)
// (bvand (bvnot (bvsub len (_ bv1 32))) (bvsub align (_ bv1 32))))
//
// (define-fun is-power-of-two ((x (_ BitVec 32))) Bool
// (= (_ bv0 32) (bvand x (bvsub x (_ bv1 32)))))
//
// (declare-const len (_ BitVec 32))
// (declare-const align (_ BitVec 32))
// ; Search for a case where align is a power of two and padding2 disagrees with padding1
// (assert (and (is-power-of-two align)
// (not (= (padding1 len align) (padding2 len align)))))
// (simplify (padding1 (_ bv300 32) (_ bv32 32))) ; 20
// (simplify (padding2 (_ bv300 32) (_ bv32 32))) ; 20
// (simplify (padding1 (_ bv322 32) (_ bv32 32))) ; 30
// (simplify (padding2 (_ bv322 32) (_ bv32 32))) ; 30
// (simplify (padding1 (_ bv8 32) (_ bv8 32))) ; 0
// (simplify (padding2 (_ bv8 32) (_ bv8 32))) ; 0
// (check-sat) ; unsat, also works for 64-bit bitvectors
// ```
!(len.wrapping_sub(1)) & mask
}
/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in this
/// case, it will produce nonsense results.
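///
/// For example (values chosen for illustration):
/// `round_down_to_next_multiple_of_alignment(13, 4)` is `12`, and
/// `round_down_to_next_multiple_of_alignment(12, 4)` is `12`.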
#[inline(always)]
#[cfg_attr(
kani,
kani::requires(align.is_power_of_two()),
kani::ensures(|&m| m <= n && m % align.get() == 0),
// Guarantees that `m` is the *largest* value such that `m % align == 0`.
kani::ensures(|&m| {
// If this `checked_add` fails, then the next multiple would wrap
// around, which trivially satisfies the "largest value" requirement.
m.checked_add(align.get()).map(|next_mul| next_mul > n).unwrap_or(true)
})
)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
n: usize,
align: NonZeroUsize,
) -> usize {
#[cfg(kani)]
#[kani::proof_for_contract(round_down_to_next_multiple_of_alignment)]
fn proof() {
round_down_to_next_multiple_of_alignment(kani::any(), kani::any());
}
let align = align.get();
#[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
debug_assert!(align.is_power_of_two());
// Subtraction can't underflow because `align.get() >= 1`.
#[allow(clippy::arithmetic_side_effects)]
let mask = !(align - 1);
n & mask
}
pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
if a.get() < b.get() {
b
} else {
a
}
}
pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
if a.get() > b.get() {
b
} else {
a
}
}
/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
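///
/// For illustration, a hypothetical call:
///
/// ```rust,ignore
/// let src = [1u8, 2];
/// let mut dst = [0u8; 4];
/// // SAFETY: `src.len() == 2 <= 4 == dst.len()`.
/// unsafe { copy_unchecked(&src[..], &mut dst[..]) };
/// assert_eq!(dst, [1, 2, 0, 0]);
/// ```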
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
debug_assert!(src.len() <= dst.len());
// SAFETY: This invocation satisfies the safety contract of
// copy_nonoverlapping [1]:
// - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
// - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the
// caller has promised that `src.len() <= dst.len()`
// - `src` and `dst` are, trivially, properly aligned
// - the region of memory beginning at `src` with a size of `src.len()`
// bytes does not overlap with the region of memory beginning at `dst`
// with the same size, because `dst` is derived from an exclusive
// reference.
unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
};
}
/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
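///
/// For illustration, a hypothetical call:
///
/// ```rust,ignore
/// // SAFETY: `[u8; 4]` and `u32` have the same size, and any initialized
/// // sequence of four bytes is a valid `u32`.
/// let n: u32 = unsafe { transmute_unchecked([0u8, 0, 0, 1]) };
/// ```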
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());
#[repr(C)]
union Transmute<Src, Dst> {
src: ManuallyDrop<Src>,
dst: ManuallyDrop<Dst>,
}
// SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
// fields both start at the same offset and the types of those fields are
// transparent wrappers around `Src` and `Dst` [1]. Consequently,
// initializing `Transmute` with `src` and then reading out `dst` is
// equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
// to `Dst` is valid because — by contract on the caller — `src` is a valid
// instance of `Dst`.
//
// [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
//
// `ManuallyDrop<T>` is guaranteed to have the same layout and bit
// validity as `T`, and is subject to the same layout optimizations as
// `T`.
//
// [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
//
// Effectively, writing to and then reading from a union with the C
// representation is analogous to a transmute from the type used for
// writing to the type used for reading.
unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}
/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
meta: T::PointerMetadata,
allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, AllocError>
where
T: ?Sized + crate::KnownLayout,
{
let size = match T::size_for_metadata(meta) {
Some(size) => size,
None => return Err(AllocError),
};
let align = T::LAYOUT.align.get();
// On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
// which sufficiently-large allocations (those which, when rounded up to the
// alignment, overflow `isize`) are not rejected, which can cause undefined
// behavior. See #64 for details.
//
// FIXME(#67): Once our MSRV is > 1.64.0, remove this assertion.
#[allow(clippy::as_conversions)]
let max_alloc = (isize::MAX as usize).saturating_sub(align);
if size > max_alloc {
return Err(AllocError);
}
// FIXME(https://github.com/rust-lang/rust/issues/55724): Use
// `Layout::repeat` once it's stabilized.
let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;
let ptr = if layout.size() != 0 {
// SAFETY: By contract on the caller, `allocate` is either
// `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
// check ensures their shared safety precondition: that the supplied
// layout is not zero-sized [1].
//
// [1] Per https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
//
// This function is unsafe because undefined behavior can result if
// the caller does not ensure that layout has non-zero size.
let ptr = unsafe { allocate(layout) };
match NonNull::new(ptr) {
Some(ptr) => ptr,
None => return Err(AllocError),
}
} else {
let align = T::LAYOUT.align.get();
// We use `transmute` instead of an `as` cast since Miri (with strict
// provenance enabled) notices and complains that an `as` cast creates a
// pointer with no provenance. Miri isn't smart enough to realize that
// we're only executing this branch when we're constructing a zero-sized
// `Box`, which doesn't require provenance.
//
// SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
// bits of a `usize` are initialized.
#[allow(unknown_lints)] // For `integer_to_ptr_transmutes`
#[allow(clippy::useless_transmute, integer_to_ptr_transmutes)]
let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
// SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
// `NonZeroUsize`, which is guaranteed to be non-zero.
//
// `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
// zero, but it does require a non-null dangling pointer for its
// allocation.
//
// FIXME(https://github.com/rust-lang/rust/issues/95228): Use
// `std::ptr::without_provenance` once it's stable. That may optimize
// better. As written, Rust may assume that this consumes "exposed"
// provenance, and thus Rust may have to assume that this may consume
// provenance from any pointer whose provenance has been exposed.
unsafe { NonNull::new_unchecked(dangling) }
};
let ptr = T::raw_from_ptr_len(ptr, meta);
// FIXME(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
// include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
// case (in which we manually construct a dangling pointer) and to justify
// why `Box` is safe to drop (it's because `allocate` uses the system
// allocator).
#[allow(clippy::undocumented_unsafe_blocks)]
Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}
mod len_of {
use super::*;
/// A witness type for metadata of a valid instance of `&T`.
pub(crate) struct MetadataOf<T: ?Sized + KnownLayout> {
/// # Safety
///
/// The size of an instance of `&T` with the given metadata is not
/// larger than `isize::MAX`.
meta: T::PointerMetadata,
_p: PhantomData<T>,
}
impl<T: ?Sized + KnownLayout> Copy for MetadataOf<T> {}
impl<T: ?Sized + KnownLayout> Clone for MetadataOf<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T: ?Sized> MetadataOf<T>
where
T: KnownLayout,
{
/// Returns `None` if `meta` is greater than `t`'s metadata.
#[inline(always)]
pub(crate) fn new_in_bounds(t: &T, meta: usize) -> Option<Self>
where
T: KnownLayout<PointerMetadata = usize>,
{
if meta <= Ptr::from_ref(t).len() {
// SAFETY: We have checked that `meta` is not greater than `t`'s
// metadata, which, by invariant on `&T`, addresses no more than
// `isize::MAX` bytes [1][2].
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/primitive.reference.html#safety:
//
// For all types, `T: ?Sized`, and for all `t: &T` or `t:
// &mut T`, when such values cross an API boundary, the
// following invariants must generally be upheld:
//
// * `t` is non-null
// * `t` is aligned to `align_of_val(t)`
// * if `size_of_val(t) > 0`, then `t` is dereferenceable for
// `size_of_val(t)` many bytes
//
// If `t` points at address `a`, being "dereferenceable" for
// N bytes means that the memory range `[a, a + N)` is all
// contained within a single allocated object.
//
// [2] Per https://doc.rust-lang.org/1.85.0/std/ptr/index.html#allocated-object:
//
// For any allocated object with `base` address, `size`, and
// a set of `addresses`, the following are guaranteed:
// - For all addresses `a` in `addresses`, `a` is in the
// range `base .. (base + size)` (note that this requires
// `a < base + size`, not `a <= base + size`)
// - `base` is not equal to [`null()`] (i.e., the address
// with the numerical value 0)
// - `base + size <= usize::MAX`
// - `size <= isize::MAX`
Some(unsafe { Self::new_unchecked(meta) })
} else {
None
}
}
/// # Safety
///
/// The size of an instance of `&T` with the given metadata is not
/// larger than `isize::MAX`.
pub(crate) unsafe fn new_unchecked(meta: T::PointerMetadata) -> Self {
// SAFETY: The caller has promised that the size of an instance of
// `&T` with the given metadata is not larger than `isize::MAX`.
Self { meta, _p: PhantomData }
}
pub(crate) fn get(&self) -> T::PointerMetadata
where
T::PointerMetadata: Copy,
{
self.meta
}
#[inline]
pub(crate) fn padding_needed_for(&self) -> usize
where
T: KnownLayout<PointerMetadata = usize>,
{
let trailing_slice_layout = crate::trailing_slice_layout::<T>();
// SAFETY: By invariant on `self`, a `&T` with metadata `self.meta`
// describes an object of size `<= isize::MAX`. This computes the
// size of such a `&T` without any trailing padding, and so neither
// the multiplication nor the addition will overflow.
//
// FIXME(#67): Remove this allow. See NumExt for more details.
#[allow(unstable_name_collisions, clippy::incompatible_msrv)]
let unpadded_size = unsafe {
let trailing_size = self.meta.unchecked_mul(trailing_slice_layout.elem_size);
trailing_size.unchecked_add(trailing_slice_layout.offset)
};
util::padding_needed_for(unpadded_size, T::LAYOUT.align)
}
#[inline(always)]
pub(crate) fn validate_cast_and_convert_metadata(
addr: usize,
bytes_len: MetadataOf<[u8]>,
cast_type: CastType,
meta: Option<T::PointerMetadata>,
) -> Result<(MetadataOf<T>, MetadataOf<[u8]>), MetadataCastError> {
let layout = match meta {
None => T::LAYOUT,
// This can return `None` if the metadata describes an object
// which can't fit in an `isize`.
Some(meta) => {
let size = match T::size_for_metadata(meta) {
Some(size) => size,
None => return Err(MetadataCastError::Size),
};
DstLayout {
align: T::LAYOUT.align,
size_info: crate::SizeInfo::Sized { size },
statically_shallow_unpadded: false,
}
}
};
// Lemma 0: By contract on `validate_cast_and_convert_metadata`, if
// the result is `Ok(..)`, then a `&T` with `elems` trailing slice
// elements is no larger in size than `bytes_len.get()`.
let (elems, split_at) =
layout.validate_cast_and_convert_metadata(addr, bytes_len.get(), cast_type)?;
let elems = T::PointerMetadata::from_elem_count(elems);
// For a slice DST type, if `meta` is `Some(elems)`, then we
// synthesize `layout` to describe a sized type whose size is equal
// to the size of the instance that we are asked to cast. For sized
// types, `validate_cast_and_convert_metadata` returns `elems == 0`.
// Thus, in this case, we need to use the `elems` passed by the
// caller, not the one returned by
// `validate_cast_and_convert_metadata`.
//
// Lemma 1: A `&T` with `elems` trailing slice elements is no larger
// in size than `bytes_len.get()`. Proof:
// - If `meta` is `None`, then `elems` satisfies this condition by
// Lemma 0.
// - If `meta` is `Some(meta)`, then `layout` describes an object
// whose size is equal to the size of an `&T` with `meta`
// metadata. By Lemma 0, that size is not larger than
// `bytes_len.get()`.
//
// Lemma 2: A `&T` with `elems` trailing slice elements is no larger
// than `isize::MAX` bytes. Proof: By Lemma 1, a `&T` with metadata
// `elems` is not larger in size than `bytes_len.get()`. By
// invariant on `MetadataOf<[u8]>`, a `&[u8]` with metadata
// `bytes_len` is not larger than `isize::MAX`. Because
// `size_of::<u8>()` is `1`, a `&[u8]` with metadata `bytes_len` has
// size `bytes_len.get()` bytes. Therefore, a `&T` with metadata
// `elems` has size not larger than `isize::MAX`.
let elems = meta.unwrap_or(elems);
// SAFETY: See Lemma 2.
let elems = unsafe { MetadataOf::new_unchecked(elems) };
// SAFETY: Let `size` be the size of a `&T` with metadata `elems`.
// By post-condition on `validate_cast_and_convert_metadata`, one of
// the following conditions holds:
// - `split_at == size`, in which case, by Lemma 2, `split_at <=
// isize::MAX`. Since `size_of::<u8>() == 1`, a `[u8]` with
// `split_at` elems has size not larger than `isize::MAX`.
// - `split_at == bytes_len - size`. Since `bytes_len:
// MetadataOf<[u8]>`, and since `size` is non-negative, `split_at`
// addresses no more bytes than `bytes_len` does. Since
// `bytes_len: MetadataOf<[u8]>`, `bytes_len` describes a `[u8]`
// which has no more than `isize::MAX` bytes, and thus so does
// `split_at`.
let split_at = unsafe { MetadataOf::<[u8]>::new_unchecked(split_at) };
Ok((elems, split_at))
}
}
}
pub(crate) use len_of::MetadataOf;
/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave FIXME comments sprinkled
/// throughout the codebase to update to the new pattern once it's stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
use core::ptr::{self, NonNull};
// A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
// MSRV is 1.70, when that function was stabilized.
//
// The `#[allow(unused)]` is necessary because, on sufficiently recent
// toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
// method rather than to this trait, and so this trait is considered unused.
//
// FIXME(#67): Once our MSRV is 1.70, remove this.
#[allow(unused)]
pub(crate) trait NonNullExt<T> {
fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
}
impl<T> NonNullExt<T> for NonNull<T> {
// NOTE on coverage: this will never be tested in nightly since it's a
// polyfill for a feature which has been stabilized on our nightly
// toolchain.
#[cfg_attr(
all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
coverage(off)
)]
#[inline(always)]
fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
// SAFETY: `ptr` is converted from `data`, which is non-null.
unsafe { NonNull::new_unchecked(ptr) }
}
}
// A polyfill for the numeric `unchecked_*` methods that we can use until
// methods like `usize::unchecked_sub` are stabilized.
//
// The `#[allow(unused)]` is necessary because, on sufficiently recent
// toolchain versions, calls like `n.unchecked_sub(rhs)` resolve to the
// inherent methods rather than to this trait, and so this trait is
// considered unused.
//
// FIXME(#67): Once our MSRV is high enough, remove this.
#[allow(unused)]
pub(crate) trait NumExt {
/// Add without checking for overflow.
///
/// # Safety
///
/// The caller promises that the addition will not overflow.
unsafe fn unchecked_add(self, rhs: Self) -> Self;
/// Subtract without checking for underflow.
///
/// # Safety
///
/// The caller promises that the subtraction will not underflow.
unsafe fn unchecked_sub(self, rhs: Self) -> Self;
/// Multiply without checking for overflow.
///
/// # Safety
///
/// The caller promises that the multiplication will not overflow.
unsafe fn unchecked_mul(self, rhs: Self) -> Self;
}
// NOTE on coverage: these will never be tested in nightly since they're
// polyfills for a feature which has been stabilized on our nightly
// toolchain.
impl NumExt for usize {
#[cfg_attr(
all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
coverage(off)
)]
#[inline(always)]
unsafe fn unchecked_add(self, rhs: usize) -> usize {
match self.checked_add(rhs) {
Some(x) => x,
None => {
// SAFETY: The caller promises that the addition will not
// overflow.
unsafe { core::hint::unreachable_unchecked() }
}
}
}
#[cfg_attr(
all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
coverage(off)
)]
#[inline(always)]
unsafe fn unchecked_sub(self, rhs: usize) -> usize {
match self.checked_sub(rhs) {
Some(x) => x,
None => {
// SAFETY: The caller promises that the subtraction will not
// underflow.
unsafe { core::hint::unreachable_unchecked() }
}
}
}
#[cfg_attr(
all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
coverage(off)
)]
#[inline(always)]
unsafe fn unchecked_mul(self, rhs: usize) -> usize {
match self.checked_mul(rhs) {
Some(x) => x,
None => {
// SAFETY: The caller promises that the multiplication will
// not overflow.
unsafe { core::hint::unreachable_unchecked() }
}
}
}
}
}
#[cfg(test)]
pub(crate) mod testutil {
use crate::*;
/// A `T` which is aligned to at least `align_of::<A>()`.
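///
/// A usage sketch mirroring the tests in this crate (`AU64` below has
/// alignment 8, so the wrapper guarantees that `a.t` is 8-aligned):
///
/// ```rust,ignore
/// let a: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
/// // Because the wrapper is 8-aligned, dereferencing succeeds.
/// assert!(a.t.try_deref().is_ok());
/// ```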
#[derive(Default)]
pub(crate) struct Align<T, A> {
pub(crate) t: T,
_a: [A; 0],
}
impl<T: Default, A> Align<T, A> {
pub(crate) fn set_default(&mut self) {
self.t = T::default();
}
}
impl<T, A> Align<T, A> {
pub(crate) const fn new(t: T) -> Align<T, A> {
Align { t, _a: [] }
}
}
/// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
///
/// It must be the case that `align_of::<T>() < align_of::<A>()` in order
/// for this type to work properly.
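///
/// A sketch of the guarantee (illustrative; it relies on the `repr(C)`
/// layout described in the field comments below):
///
/// ```rust,ignore
/// let f = ForceUnalign::<[u8; 8], AU64>::new([0u8; 8]);
/// // `f.t` is placed at offset 1 within an 8-aligned struct, so its address
/// // cannot be a multiple of `align_of::<AU64>() == 8`.
/// assert_ne!(&f.t as *const _ as usize % 8, 0);
/// ```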
#[repr(C)]
pub(crate) struct ForceUnalign<T: Unaligned, A> {
// The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
// placed at the minimum offset that guarantees its alignment. If
// `align_of::<T>() < align_of::<A>()`, then that offset will be
// guaranteed *not* to satisfy `align_of::<A>()`.
//
// Note that we need `T: Unaligned` in order to guarantee that there is
// no padding between `_u` and `t`.
_u: u8,
pub(crate) t: T,
_a: [A; 0],
}
impl<T: Unaligned, A> ForceUnalign<T, A> {
pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
ForceUnalign { _u: 0, t, _a: [] }
}
}
// A `u64` with alignment 8.
//
// Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
// contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
#[derive(
KnownLayout,
Immutable,
FromBytes,
IntoBytes,
Eq,
PartialEq,
Ord,
PartialOrd,
Default,
Debug,
Copy,
Clone,
)]
#[repr(C, align(8))]
pub(crate) struct AU64(pub(crate) u64);
impl AU64 {
// Converts this `AU64` to bytes using this platform's endianness.
pub(crate) fn to_bytes(self) -> [u8; 8] {
crate::transmute!(self)
}
}
impl Display for AU64 {
#[cfg_attr(
all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
coverage(off)
)]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(&self.0, f)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_round_down_to_next_multiple_of_alignment() {
fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
let mul = n / align.get();
mul * align.get()
}
for align in [1, 2, 4, 8, 16] {
for n in 0..256 {
let align = NonZeroUsize::new(align).unwrap();
let want = alt_impl(n, align);
let got = round_down_to_next_multiple_of_alignment(n, align);
assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
}
}
}
#[rustversion::since(1.57.0)]
#[test]
#[should_panic]
fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
}
}

752
vendor/zerocopy/src/wrappers.rs vendored Normal file

@@ -0,0 +1,752 @@
// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.
use core::{fmt, hash::Hash};
use super::*;
/// A type with no alignment requirement.
///
/// An `Unalign` wraps a `T`, removing any alignment requirement. `Unalign<T>`
/// has the same size and bit validity as `T`, but not necessarily the same
/// alignment [or ABI]. This is useful if a type with an alignment requirement
/// needs to be read from a chunk of memory which provides no alignment
/// guarantees.
///
/// Since `Unalign` has no alignment requirement, the inner `T` may not be
/// properly aligned in memory. There are five ways to access the inner `T`:
/// - by value, using [`get`] or [`into_inner`]
/// - by reference inside of a callback, using [`update`]
/// - fallibly by reference, using [`try_deref`] or [`try_deref_mut`]; these can
/// fail if the `Unalign` does not satisfy `T`'s alignment requirement at
/// runtime
/// - unsafely by reference, using [`deref_unchecked`] or
/// [`deref_mut_unchecked`]; it is the caller's responsibility to ensure that
/// the `Unalign` satisfies `T`'s alignment requirement
/// - (where `T: Unaligned`) infallibly by reference, using [`Deref::deref`] or
/// [`DerefMut::deref_mut`]
///
/// [or ABI]: https://github.com/google/zerocopy/issues/164
/// [`get`]: Unalign::get
/// [`into_inner`]: Unalign::into_inner
/// [`update`]: Unalign::update
/// [`try_deref`]: Unalign::try_deref
/// [`try_deref_mut`]: Unalign::try_deref_mut
/// [`deref_unchecked`]: Unalign::deref_unchecked
/// [`deref_mut_unchecked`]: Unalign::deref_mut_unchecked
///
/// # Example
///
/// In this example, we need `EthernetFrame` to have no alignment requirement -
/// and thus implement [`Unaligned`]. `EtherType` is `#[repr(u16)]` and so
/// cannot implement `Unaligned`. We use `Unalign` to relax `EtherType`'s
/// alignment requirement so that `EthernetFrame` has no alignment requirement
/// and can implement `Unaligned`.
///
/// ```rust
/// use zerocopy::*;
/// # use zerocopy_derive::*;
/// # #[derive(FromBytes, KnownLayout, Immutable, Unaligned)] #[repr(C)] struct Mac([u8; 6]);
///
/// # #[derive(PartialEq, Copy, Clone, Debug)]
/// #[derive(TryFromBytes, KnownLayout, Immutable)]
/// #[repr(u16)]
/// enum EtherType {
/// Ipv4 = 0x0800u16.to_be(),
/// Arp = 0x0806u16.to_be(),
/// Ipv6 = 0x86DDu16.to_be(),
/// # /*
/// ...
/// # */
/// }
///
/// #[derive(TryFromBytes, KnownLayout, Immutable, Unaligned)]
/// #[repr(C)]
/// struct EthernetFrame {
/// src: Mac,
/// dst: Mac,
/// ethertype: Unalign<EtherType>,
/// payload: [u8],
/// }
///
/// let bytes = &[
/// # 0, 1, 2, 3, 4, 5,
/// # 6, 7, 8, 9, 10, 11,
/// # /*
/// ...
/// # */
/// 0x86, 0xDD, // EtherType
/// 0xDE, 0xAD, 0xBE, 0xEF // Payload
/// ][..];
///
/// // PANICS: Guaranteed not to panic because `bytes` is of the right
/// // length, has the right contents, and `EthernetFrame` has no
/// // alignment requirement.
/// let packet = EthernetFrame::try_ref_from_bytes(&bytes).unwrap();
///
/// assert_eq!(packet.ethertype.get(), EtherType::Ipv6);
/// assert_eq!(packet.payload, [0xDE, 0xAD, 0xBE, 0xEF]);
/// ```
///
/// # Safety
///
/// `Unalign<T>` is guaranteed to have the same size and bit validity as `T`,
/// and to have [`UnsafeCell`]s covering the same byte ranges as `T`.
/// `Unalign<T>` is guaranteed to have alignment 1.
// NOTE: This type is sound to use with types that need to be dropped. The
// reason is that the compiler-generated drop code automatically moves all
// values to aligned memory slots before dropping them in-place. This is not
// well-documented, but it's hinted at in places like [1] and [2]. However, this
// also means that `T` must be `Sized`; unless something changes, we can never
// support unsized `T`. [3]
//
// [1] https://github.com/rust-lang/rust/issues/54148#issuecomment-420529646
// [2] https://github.com/google/zerocopy/pull/126#discussion_r1018512323
// [3] https://github.com/google/zerocopy/issues/209
#[allow(missing_debug_implementations)]
#[derive(Default, Copy)]
#[cfg_attr(any(feature = "derive", test), derive(Immutable, FromBytes, IntoBytes, Unaligned))]
#[repr(C, packed)]
pub struct Unalign<T>(T);
// We do not use `derive(KnownLayout)` on `Unalign`, because the derive is not
// smart enough to realize that `Unalign<T>` is always sized and thus emits a
// `KnownLayout` impl bounded on `T: KnownLayout`. This is overly restrictive.
impl_known_layout!(T => Unalign<T>);
// SAFETY:
// - `Unalign<T>` promises to have alignment 1, and so we don't require that `T:
// Unaligned`.
// - `Unalign<T>` has the same bit validity as `T`, and so it is `FromZeros`,
// `FromBytes`, or `IntoBytes` exactly when `T` is as well.
// - `Immutable`: `Unalign<T>` has the same fields as `T`, so it contains
// `UnsafeCell`s exactly when `T` does.
// - `TryFromBytes`: `Unalign<T>` has the same bit validity as `T`, so
// `T::is_bit_valid` is a sound implementation of `is_bit_valid`.
#[allow(unused_unsafe)] // Unused when `feature = "derive"`.
const _: () = unsafe {
impl_or_verify!(T => Unaligned for Unalign<T>);
impl_or_verify!(T: Immutable => Immutable for Unalign<T>);
impl_or_verify!(
T: TryFromBytes => TryFromBytes for Unalign<T>;
|c| T::is_bit_valid(c.transmute())
);
impl_or_verify!(T: FromZeros => FromZeros for Unalign<T>);
impl_or_verify!(T: FromBytes => FromBytes for Unalign<T>);
impl_or_verify!(T: IntoBytes => IntoBytes for Unalign<T>);
};
// Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be
// aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound
// is not sufficient to implement `Clone` for `Unalign`.
impl<T: Copy> Clone for Unalign<T> {
#[inline(always)]
fn clone(&self) -> Unalign<T> {
*self
}
}
impl<T> Unalign<T> {
/// Constructs a new `Unalign`.
#[inline(always)]
pub const fn new(val: T) -> Unalign<T> {
Unalign(val)
}
/// Consumes `self`, returning the inner `T`.
#[inline(always)]
pub const fn into_inner(self) -> T {
// SAFETY: Since `Unalign` is `#[repr(C, packed)]`, it has the same size
// and bit validity as `T`.
//
// We do this instead of just destructuring in order to prevent
// `Unalign`'s `Drop::drop` from being run, since dropping is not
// supported in `const fn`s.
//
// FIXME(https://github.com/rust-lang/rust/issues/73255): Destructure
// instead of using unsafe.
unsafe { crate::util::transmute_unchecked(self) }
}
/// Attempts to return a reference to the wrapped `T`, failing if `self` is
/// not properly aligned.
///
/// If `self` does not satisfy `align_of::<T>()`, then `try_deref` returns
/// `Err`.
///
/// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
/// may prefer [`Deref::deref`], which is infallible.
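///
/// # Example
///
/// A minimal sketch using a type with alignment 1, for which `try_deref`
/// always succeeds:
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let u = Unalign::new(7u8);
/// // `u8` has alignment 1, so any location satisfies it.
/// assert_eq!(*u.try_deref().unwrap(), 7);
/// ```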
#[inline(always)]
pub fn try_deref(&self) -> Result<&T, AlignmentError<&Self, T>> {
let inner = Ptr::from_ref(self).transmute();
match inner.try_into_aligned() {
Ok(aligned) => Ok(aligned.as_ref()),
Err(err) => Err(err.map_src(|src| src.into_unalign().as_ref())),
}
}
/// Attempts to return a mutable reference to the wrapped `T`, failing if
/// `self` is not properly aligned.
///
/// If `self` does not satisfy `align_of::<T>()`, then `try_deref` returns
/// `Err`.
///
/// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
/// callers may prefer [`DerefMut::deref_mut`], which is infallible.
#[inline(always)]
pub fn try_deref_mut(&mut self) -> Result<&mut T, AlignmentError<&mut Self, T>> {
let inner = Ptr::from_mut(self).transmute::<_, _, (_, (_, _))>();
match inner.try_into_aligned() {
Ok(aligned) => Ok(aligned.as_mut()),
Err(err) => Err(err.map_src(|src| src.into_unalign().as_mut())),
}
}
/// Returns a reference to the wrapped `T` without checking alignment.
///
/// If `T: Unaligned`, then `Unalign<T>` implements [`Deref`], and callers
/// may prefer [`Deref::deref`], which is safe.
///
/// # Safety
///
/// The caller must guarantee that `self` satisfies `align_of::<T>()`.
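///
/// # Example
///
/// A minimal sketch where the alignment requirement is trivially met:
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let u = Unalign::new(7u8);
/// // SAFETY: `u8` has alignment 1, which `u` trivially satisfies.
/// let r = unsafe { u.deref_unchecked() };
/// assert_eq!(*r, 7);
/// ```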
#[inline(always)]
pub const unsafe fn deref_unchecked(&self) -> &T {
// SAFETY: `Unalign<T>` is `repr(C, packed)` around a single `T`, so there
// is a valid `T`
// at the same memory location as `self`. It has no alignment guarantee,
// but the caller has promised that `self` is properly aligned, so we
// know that it is sound to create a reference to `T` at this memory
// location.
//
// We use `mem::transmute` instead of `&*self.get_ptr()` because
// dereferencing pointers is not stable in `const` on our current MSRV
// (1.56 as of this writing).
unsafe { mem::transmute(self) }
}
/// Returns a mutable reference to the wrapped `T` without checking
/// alignment.
///
/// If `T: Unaligned`, then `Unalign<T>` implements [`DerefMut`], and
/// callers may prefer [`DerefMut::deref_mut`], which is safe.
///
/// # Safety
///
/// The caller must guarantee that `self` satisfies `align_of::<T>()`.
#[inline(always)]
pub unsafe fn deref_mut_unchecked(&mut self) -> &mut T {
// SAFETY: `self.get_mut_ptr()` returns a raw pointer to a valid `T` at
// the same memory location as `self`. It has no alignment guarantee,
// but the caller has promised that `self` is properly aligned, so we
// know that the pointer itself is aligned, and thus that it is sound to
// create a reference to a `T` at this memory location.
unsafe { &mut *self.get_mut_ptr() }
}
/// Gets an unaligned raw pointer to the inner `T`.
///
/// # Safety
///
/// The returned raw pointer is not necessarily aligned to
/// `align_of::<T>()`. Most functions which operate on raw pointers require
/// those pointers to be aligned, so calling those functions with the result
/// of `get_ptr` will result in undefined behavior if alignment is not
/// guaranteed using some out-of-band mechanism. In general, the only
/// functions which are safe to call with this pointer are those which are
/// explicitly documented as being sound to use with an unaligned pointer,
/// such as [`read_unaligned`].
///
/// Even if the caller is permitted to mutate `self` (e.g. they have
/// ownership or a mutable borrow), it is not guaranteed to be sound to
/// write through the returned pointer. If writing is required, prefer
/// [`get_mut_ptr`] instead.
///
/// [`read_unaligned`]: core::ptr::read_unaligned
/// [`get_mut_ptr`]: Unalign::get_mut_ptr
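///
/// # Example
///
/// A minimal sketch of reading through the returned pointer with
/// [`read_unaligned`]:
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let u = Unalign::new(0x0102_0304u32);
/// // SAFETY: `read_unaligned` supports unaligned pointers, and `get_ptr`
/// // points to a valid, initialized `u32`.
/// let v = unsafe { core::ptr::read_unaligned(u.get_ptr()) };
/// assert_eq!(v, 0x0102_0304);
/// ```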
#[inline(always)]
pub const fn get_ptr(&self) -> *const T {
ptr::addr_of!(self.0)
}
/// Gets an unaligned mutable raw pointer to the inner `T`.
///
/// # Safety
///
/// The returned raw pointer is not necessarily aligned to
/// `align_of::<T>()`. Most functions which operate on raw pointers require
/// those pointers to be aligned, so calling those functions with the result
/// of `get_mut_ptr` will result in undefined behavior if alignment is not
/// guaranteed using some out-of-band mechanism. In general, the only
/// functions which are safe to call with this pointer are those which are
/// explicitly documented as being sound to use with an unaligned pointer,
/// such as [`read_unaligned`].
///
/// [`read_unaligned`]: core::ptr::read_unaligned
// FIXME(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
#[inline(always)]
pub fn get_mut_ptr(&mut self) -> *mut T {
ptr::addr_of_mut!(self.0)
}
/// Sets the inner `T`, dropping the previous value.
// FIXME(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
#[inline(always)]
pub fn set(&mut self, t: T) {
*self = Unalign::new(t);
}
/// Updates the inner `T` by calling a function on it.
///
/// If [`T: Unaligned`], then `Unalign<T>` implements [`DerefMut`], and that
/// impl should be preferred over this method when performing updates, as it
/// will usually be faster and more ergonomic.
///
/// For large types, this method may be expensive, as it requires copying
/// `2 * size_of::<T>()` bytes. \[1\]
///
/// \[1\] Since the inner `T` may not be aligned, it would not be sound to
/// invoke `f` on it directly. Instead, `update` moves it into a
/// properly-aligned location in the local stack frame, calls `f` on it, and
/// then moves it back to its original location in `self`.
///
/// [`T: Unaligned`]: Unaligned
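///
/// # Example
///
/// A minimal usage sketch:
///
/// ```rust
/// use zerocopy::Unalign;
///
/// let mut u = Unalign::new(100u32);
/// let doubled = u.update(|x| {
///     *x *= 2;
///     *x
/// });
/// assert_eq!(u.get(), 200);
/// assert_eq!(doubled, 200);
/// ```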
#[inline]
pub fn update<O, F: FnOnce(&mut T) -> O>(&mut self, f: F) -> O {
if mem::align_of::<T>() == 1 {
// While we advise callers to use `DerefMut` when `T: Unaligned`,
// not all callers will be able to guarantee `T: Unaligned` in all
// cases. In particular, callers who are themselves providing an API
// which is generic over `T` may sometimes be called by *their*
// callers with `T` such that `align_of::<T>() == 1`, but cannot
// guarantee this in the general case. Thus, this optimization may
// sometimes be helpful.
// SAFETY: Since `T`'s alignment is 1, `self` satisfies its
// alignment by definition.
let t = unsafe { self.deref_mut_unchecked() };
return f(t);
}
// On drop, this moves `copy` out of itself and uses `ptr::write` to
// overwrite `slf`.
struct WriteBackOnDrop<T> {
copy: ManuallyDrop<T>,
slf: *mut Unalign<T>,
}
impl<T> Drop for WriteBackOnDrop<T> {
fn drop(&mut self) {
// SAFETY: We never use `copy` again as required by
// `ManuallyDrop::take`.
let copy = unsafe { ManuallyDrop::take(&mut self.copy) };
// SAFETY: `slf` is the raw pointer value of `self`. We know it
// is valid for writes and properly aligned because `self` is a
// mutable reference, which guarantees both of these properties.
unsafe { ptr::write(self.slf, Unalign::new(copy)) };
}
}
// SAFETY: We know that `self` is valid for reads, properly aligned, and
// points to an initialized `Unalign<T>` because it is a mutable
// reference, which guarantees all of these properties.
//
// Since `T: !Copy`, it would be unsound in the general case to allow
// both the original `Unalign<T>` and the copy to be used by safe code.
// We guarantee that the copy is used to overwrite the original in the
// `Drop::drop` impl of `WriteBackOnDrop`. So long as this `drop` is
// called before any other safe code executes, soundness is upheld.
// While this method can terminate in two ways (by returning normally or
// by unwinding due to a panic in `f`), in both cases, `write_back` is
// dropped - and its `drop` called - before any other safe code can
// execute.
let copy = unsafe { ptr::read(self) }.into_inner();
let mut write_back = WriteBackOnDrop { copy: ManuallyDrop::new(copy), slf: self };
let ret = f(&mut write_back.copy);
drop(write_back);
ret
}
}
impl<T: Copy> Unalign<T> {
/// Gets a copy of the inner `T`.
// FIXME(https://github.com/rust-lang/rust/issues/57349): Make this `const`.
#[inline(always)]
pub fn get(&self) -> T {
let Unalign(val) = *self;
val
}
}
impl<T: Unaligned> Deref for Unalign<T> {
type Target = T;
#[inline(always)]
fn deref(&self) -> &T {
Ptr::from_ref(self).transmute().bikeshed_recall_aligned().as_ref()
}
}
impl<T: Unaligned> DerefMut for Unalign<T> {
#[inline(always)]
fn deref_mut(&mut self) -> &mut T {
Ptr::from_mut(self).transmute::<_, _, (_, (_, _))>().bikeshed_recall_aligned().as_mut()
}
}
impl<T: Unaligned + PartialOrd> PartialOrd<Unalign<T>> for Unalign<T> {
#[inline(always)]
fn partial_cmp(&self, other: &Unalign<T>) -> Option<Ordering> {
PartialOrd::partial_cmp(self.deref(), other.deref())
}
}
impl<T: Unaligned + Ord> Ord for Unalign<T> {
#[inline(always)]
fn cmp(&self, other: &Unalign<T>) -> Ordering {
Ord::cmp(self.deref(), other.deref())
}
}
impl<T: Unaligned + PartialEq> PartialEq<Unalign<T>> for Unalign<T> {
#[inline(always)]
fn eq(&self, other: &Unalign<T>) -> bool {
PartialEq::eq(self.deref(), other.deref())
}
}
impl<T: Unaligned + Eq> Eq for Unalign<T> {}
impl<T: Unaligned + Hash> Hash for Unalign<T> {
#[inline(always)]
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.deref().hash(state);
}
}
impl<T: Unaligned + Debug> Debug for Unalign<T> {
#[inline(always)]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Debug::fmt(self.deref(), f)
}
}
impl<T: Unaligned + Display> Display for Unalign<T> {
#[inline(always)]
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Display::fmt(self.deref(), f)
}
}
/// A wrapper type to construct uninitialized instances of `T`.
///
/// `MaybeUninit` is identical to the [standard library
/// `MaybeUninit`][core-maybe-uninit] type except that it supports unsized
/// types.
///
/// # Layout
///
/// The same layout guarantees and caveats apply to `MaybeUninit<T>` as apply to
/// the [standard library `MaybeUninit`][core-maybe-uninit] with one exception:
/// for `T: !Sized`, there is no single value for `T`'s size. Instead, for such
/// types, the following are guaranteed:
/// - Every [valid size][valid-size] for `T` is a valid size for
/// `MaybeUninit<T>` and vice versa
/// - Given `t: *const T` and `m: *const MaybeUninit<T>` with identical fat
/// pointer metadata, `t` and `m` address the same number of bytes (and
/// likewise for `*mut`)
///
/// [core-maybe-uninit]: core::mem::MaybeUninit
/// [valid-size]: crate::KnownLayout#what-is-a-valid-size
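///
/// # Example
///
/// A minimal round-trip sketch (illustrative; this type is `#[doc(hidden)]`
/// and not part of the crate's documented API surface):
///
/// ```rust,ignore
/// let uninit = MaybeUninit::new(42i32);
/// // SAFETY: `uninit` was constructed from an initialized, bit-valid value.
/// let val = unsafe { uninit.assume_init() };
/// assert_eq!(val, 42);
/// ```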
#[repr(transparent)]
#[doc(hidden)]
pub struct MaybeUninit<T: ?Sized + KnownLayout>(
// SAFETY: `MaybeUninit<T>` has the same size as `T`, because (by invariant
// on `T::MaybeUninit`) `T::MaybeUninit` has `T::LAYOUT` identical to `T`,
// and because (invariant on `T::LAYOUT`) we can trust that `LAYOUT`
// accurately reflects the layout of `T`. By invariant on `T::MaybeUninit`,
// it admits uninitialized bytes in all positions. Because `MaybeUninit` is
// marked `repr(transparent)`, these properties additionally hold true for
// `Self`.
T::MaybeUninit,
);
#[doc(hidden)]
impl<T: ?Sized + KnownLayout> MaybeUninit<T> {
/// Constructs a `MaybeUninit<T>` initialized with the given value.
#[inline(always)]
pub fn new(val: T) -> Self
where
T: Sized,
Self: Sized,
{
// SAFETY: It is valid to transmute `val` to `MaybeUninit<T>` because it
// is both valid to transmute `val` to `T::MaybeUninit`, and it is valid
// to transmute from `T::MaybeUninit` to `MaybeUninit<T>`.
//
// First, it is valid to transmute `val` to `T::MaybeUninit` because, by
// invariant on `T::MaybeUninit`:
// - For `T: Sized`, `T` and `T::MaybeUninit` have the same size.
// - All byte sequences of the correct size are valid values of
// `T::MaybeUninit`.
//
// Second, it is additionally valid to transmute from `T::MaybeUninit`
// to `MaybeUninit<T>`, because `MaybeUninit<T>` is a
// `repr(transparent)` wrapper around `T::MaybeUninit`.
//
// These two transmutes are collapsed into one so we don't need to add a
// `T::MaybeUninit: Sized` bound to this function's `where` clause.
unsafe { crate::util::transmute_unchecked(val) }
}
/// Constructs an uninitialized `MaybeUninit<T>`.
#[must_use]
#[inline(always)]
pub fn uninit() -> Self
where
T: Sized,
Self: Sized,
{
let uninit = CoreMaybeUninit::<T>::uninit();
// SAFETY: It is valid to transmute from `CoreMaybeUninit<T>` to
// `MaybeUninit<T>` since they both admit uninitialized bytes in all
// positions, and they have the same size (i.e., that of `T`).
//
// `MaybeUninit<T>` has the same size as `T`, because (by invariant on
// `T::MaybeUninit`) `T::MaybeUninit` has `T::LAYOUT` identical to `T`,
// and because (invariant on `T::LAYOUT`) we can trust that `LAYOUT`
// accurately reflects the layout of `T`.
//
// `CoreMaybeUninit<T>` has the same size as `T` [1] and admits
// uninitialized bytes in all positions.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
//
// `MaybeUninit<T>` is guaranteed to have the same size, alignment,
// and ABI as `T`
unsafe { crate::util::transmute_unchecked(uninit) }
}
/// Creates a `Box<MaybeUninit<T>>`.
///
/// This function is useful for allocating large, uninitialized values on the
/// heap without ever creating a temporary instance of `Self` on the stack.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
#[cfg(feature = "alloc")]
#[inline]
pub fn new_boxed_uninit(meta: T::PointerMetadata) -> Result<Box<Self>, AllocError> {
// SAFETY: `alloc::alloc::alloc` is a valid argument of
// `new_box`. The referent of the pointer returned by `alloc` (and,
// consequently, the `Box` derived from it) is a valid instance of
// `Self`, because `Self` is `MaybeUninit` and thus admits arbitrary
// (un)initialized bytes.
unsafe { crate::util::new_box(meta, alloc::alloc::alloc) }
}
/// Extracts the value from the `MaybeUninit<T>` container.
///
/// # Safety
///
/// The caller must ensure that `self` is in a bit-valid state. Depending
/// on subsequent use, it may also need to be in a library-valid state.
#[inline(always)]
pub unsafe fn assume_init(self) -> T
where
T: Sized,
Self: Sized,
{
// SAFETY: The caller guarantees that `self` is in a bit-valid state.
unsafe { crate::util::transmute_unchecked(self) }
}
}
impl<T: ?Sized + KnownLayout> fmt::Debug for MaybeUninit<T> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad(core::any::type_name::<Self>())
}
}
#[cfg(test)]
mod tests {
use core::panic::AssertUnwindSafe;
use super::*;
use crate::util::testutil::*;
#[test]
fn test_unalign() {
// Test methods that don't depend on alignment.
let mut u = Unalign::new(AU64(123));
assert_eq!(u.get(), AU64(123));
assert_eq!(u.into_inner(), AU64(123));
assert_eq!(u.get_ptr(), <*const _>::cast::<AU64>(&u));
assert_eq!(u.get_mut_ptr(), <*mut _>::cast::<AU64>(&mut u));
u.set(AU64(321));
assert_eq!(u.get(), AU64(321));
// Test methods that depend on alignment (when alignment is satisfied).
let mut u: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
assert_eq!(u.t.try_deref().unwrap(), &AU64(123));
assert_eq!(u.t.try_deref_mut().unwrap(), &mut AU64(123));
// SAFETY: The `Align<_, AU64>` guarantees proper alignment.
assert_eq!(unsafe { u.t.deref_unchecked() }, &AU64(123));
// SAFETY: The `Align<_, AU64>` guarantees proper alignment.
assert_eq!(unsafe { u.t.deref_mut_unchecked() }, &mut AU64(123));
*u.t.try_deref_mut().unwrap() = AU64(321);
assert_eq!(u.t.get(), AU64(321));
// Test methods that depend on alignment (when alignment is not
// satisfied).
let mut u: ForceUnalign<_, AU64> = ForceUnalign::new(Unalign::new(AU64(123)));
assert!(matches!(u.t.try_deref(), Err(AlignmentError { .. })));
assert!(matches!(u.t.try_deref_mut(), Err(AlignmentError { .. })));
// Test methods that depend on `T: Unaligned`.
let mut u = Unalign::new(123u8);
assert_eq!(u.try_deref(), Ok(&123));
assert_eq!(u.try_deref_mut(), Ok(&mut 123));
assert_eq!(u.deref(), &123);
assert_eq!(u.deref_mut(), &mut 123);
*u = 21;
assert_eq!(u.get(), 21);
// Test that some `Unalign` functions and methods are `const`.
const _UNALIGN: Unalign<u64> = Unalign::new(0);
const _UNALIGN_PTR: *const u64 = _UNALIGN.get_ptr();
const _U64: u64 = _UNALIGN.into_inner();
// Make sure all code is considered "used".
//
// FIXME(https://github.com/rust-lang/rust/issues/104084): Remove this
// attribute.
#[allow(dead_code)]
const _: () = {
let x: Align<_, AU64> = Align::new(Unalign::new(AU64(123)));
// Make sure that `deref_unchecked` is `const`.
//
// SAFETY: The `Align<_, AU64>` guarantees proper alignment.
let au64 = unsafe { x.t.deref_unchecked() };
match au64 {
AU64(123) => {}
_ => const_unreachable!(),
}
};
}
#[test]
fn test_unalign_update() {
let mut u = Unalign::new(AU64(123));
u.update(|a| a.0 += 1);
assert_eq!(u.get(), AU64(124));
// Test that, even if the callback panics, the original is still
// correctly overwritten. Use a `Box` so that Miri is more likely to
// catch any unsoundness (which would likely result in two `Box`es for
// the same heap object, which is the sort of thing that Miri would
// probably catch).
let mut u = Unalign::new(Box::new(AU64(123)));
let res = std::panic::catch_unwind(AssertUnwindSafe(|| {
u.update(|a| {
a.0 += 1;
panic!();
})
}));
assert!(res.is_err());
assert_eq!(u.into_inner(), Box::new(AU64(124)));
// Test the align_of::<T>() == 1 optimization.
let mut u = Unalign::new([0u8, 1]);
u.update(|a| a[0] += 1);
assert_eq!(u.get(), [1u8, 1]);
}
#[test]
fn test_unalign_copy_clone() {
// Test that `Copy` and `Clone` do not cause soundness issues. This test
// is mainly meant to exercise UB that would be caught by Miri.
// `u.t` is definitely not validly-aligned for `AU64`'s alignment of 8.
let u = ForceUnalign::<_, AU64>::new(Unalign::new(AU64(123)));
#[allow(clippy::clone_on_copy)]
let v = u.t.clone();
let w = u.t;
assert_eq!(u.t.get(), v.get());
assert_eq!(u.t.get(), w.get());
assert_eq!(v.get(), w.get());
}
#[test]
fn test_unalign_trait_impls() {
let zero = Unalign::new(0u8);
let one = Unalign::new(1u8);
assert!(zero < one);
assert_eq!(PartialOrd::partial_cmp(&zero, &one), Some(Ordering::Less));
assert_eq!(Ord::cmp(&zero, &one), Ordering::Less);
assert_ne!(zero, one);
assert_eq!(zero, zero);
assert!(!PartialEq::eq(&zero, &one));
assert!(PartialEq::eq(&zero, &zero));
fn hash<T: Hash>(t: &T) -> u64 {
let mut h = std::collections::hash_map::DefaultHasher::new();
t.hash(&mut h);
h.finish()
}
assert_eq!(hash(&zero), hash(&0u8));
assert_eq!(hash(&one), hash(&1u8));
assert_eq!(format!("{:?}", zero), format!("{:?}", 0u8));
assert_eq!(format!("{:?}", one), format!("{:?}", 1u8));
assert_eq!(format!("{}", zero), format!("{}", 0u8));
assert_eq!(format!("{}", one), format!("{}", 1u8));
}
#[test]
#[allow(clippy::as_conversions)]
fn test_maybe_uninit() {
// int
{
let input = 42;
let uninit = MaybeUninit::new(input);
// SAFETY: `uninit` is in an initialized state
let output = unsafe { uninit.assume_init() };
assert_eq!(input, output);
}
// thin ref
{
let input = 42;
let uninit = MaybeUninit::new(&input);
// SAFETY: `uninit` is in an initialized state
let output = unsafe { uninit.assume_init() };
assert_eq!(&input as *const _, output as *const _);
assert_eq!(input, *output);
}
// wide ref
{
let input = [1, 2, 3, 4];
let uninit = MaybeUninit::new(&input[..]);
// SAFETY: `uninit` is in an initialized state
let output = unsafe { uninit.assume_init() };
assert_eq!(&input[..] as *const _, output as *const _);
assert_eq!(input, *output);
}
}
}