Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/bumpalo/src/alloc.rs vendored Normal file

@@ -0,0 +1,794 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unstable_name_collisions)]
#![allow(dead_code)]
#![allow(deprecated)]
//! Memory allocation APIs
use core::cmp;
use core::fmt;
use core::mem;
use core::ptr::{self, NonNull};
use core::usize;
pub use core::alloc::{Layout, LayoutErr};
fn new_layout_err() -> LayoutErr {
    // An alignment of 3 is not a power of two, so this call always fails,
    // giving us a `LayoutErr` value to hand back to callers.
    Layout::from_size_align(1, 3).unwrap_err()
}
pub fn handle_alloc_error(layout: Layout) -> ! {
panic!("encountered allocation error: {:?}", layout)
}
pub trait UnstableLayoutMethods {
fn padding_needed_for(&self, align: usize) -> usize;
fn repeat(&self, n: usize) -> Result<(Layout, usize), LayoutErr>;
fn array<T>(n: usize) -> Result<Layout, LayoutErr>;
}
impl UnstableLayoutMethods for Layout {
fn padding_needed_for(&self, align: usize) -> usize {
let len = self.size();
// Rounded up value is:
// len_rounded_up = (len + align - 1) & !(align - 1);
// and then we return the padding difference: `len_rounded_up - len`.
//
// We use modular arithmetic throughout:
//
// 1. align is guaranteed to be > 0, so align - 1 is always
// valid.
//
// 2. `len + align - 1` can overflow by at most `align - 1`,
// so the &-mask with `!(align - 1)` will ensure that in the
// case of overflow, `len_rounded_up` will itself be 0.
// Thus the returned padding, when added to `len`, yields 0,
// which trivially satisfies the alignment `align`.
//
// (Of course, attempts to allocate blocks of memory whose
// size and padding overflow in the above manner should cause
// the allocator to yield an error anyway.)
let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
len_rounded_up.wrapping_sub(len)
}
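// Worked example (ours, not part of the vendored source): for `len = 10`
// and `align = 4`, `len_rounded_up = (10 + 4 - 1) & !(4 - 1) = 12`, so
// the padding returned is `12 - 10 = 2` bytes.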
fn repeat(&self, n: usize) -> Result<(Layout, usize), LayoutErr> {
let padded_size = self
.size()
.checked_add(self.padding_needed_for(self.align()))
.ok_or_else(new_layout_err)?;
let alloc_size = padded_size.checked_mul(n).ok_or_else(new_layout_err)?;
unsafe {
// self.align is already known to be valid and alloc_size has been
// padded already.
Ok((
Layout::from_size_align_unchecked(alloc_size, self.align()),
padded_size,
))
}
}
fn array<T>(n: usize) -> Result<Layout, LayoutErr> {
Layout::new::<T>().repeat(n).map(|(k, offs)| {
debug_assert!(offs == mem::size_of::<T>());
k
})
}
}
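// Worked example (ours, not part of the vendored source): for `T = u16`
// (size 2, align 2), `padding_needed_for(2)` is 0, so `repeat(4)` computes
// `padded_size = 2` and `alloc_size = 8`; `array::<u16>(4)` therefore
// yields an 8-byte layout with alignment 2 and an element stride of 2.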
/// Represents the combination of a starting address and
/// a total capacity of the returned block.
// #[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Debug)]
pub struct Excess(pub NonNull<u8>, pub usize);
fn size_align<T>() -> (usize, usize) {
(mem::size_of::<T>(), mem::align_of::<T>())
}
/// The `AllocErr` error indicates an allocation failure
/// that may be due to resource exhaustion or to
/// something wrong when combining the given input arguments with this
/// allocator.
// #[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct AllocErr;
// (we need this for downstream impl of trait Error)
// #[unstable(feature = "allocator_api", issue = "32838")]
impl fmt::Display for AllocErr {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("memory allocation failed")
}
}
/// The `CannotReallocInPlace` error is used when `grow_in_place` or
/// `shrink_in_place` were unable to reuse the given memory block for
/// a requested layout.
// #[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct CannotReallocInPlace;
// #[unstable(feature = "allocator_api", issue = "32838")]
impl CannotReallocInPlace {
pub fn description(&self) -> &str {
"cannot reallocate allocator's memory in place"
}
}
// (we need this for downstream impl of trait Error)
// #[unstable(feature = "allocator_api", issue = "32838")]
impl fmt::Display for CannotReallocInPlace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
/// An implementation of `Alloc` can allocate, reallocate, and
/// deallocate arbitrary blocks of data described via `Layout`.
///
/// Some of the methods require that a memory block be *currently
/// allocated* via an allocator. This means that:
///
/// * the starting address for that memory block was previously
/// returned by a previous call to an allocation method (`alloc`,
/// `alloc_zeroed`, `alloc_excess`, `alloc_one`, `alloc_array`) or
/// reallocation method (`realloc`, `realloc_excess`, or
/// `realloc_array`), and
///
/// * the memory block has not been subsequently deallocated, where
/// blocks are deallocated either by being passed to a deallocation
/// method (`dealloc`, `dealloc_one`, `dealloc_array`) or by being
/// passed to a reallocation method (see above) that returns `Ok`.
///
/// A note regarding zero-sized types and zero-sized layouts: many
/// methods in the `Alloc` trait state that allocation requests
/// must be non-zero size, or else undefined behavior can result.
///
/// * However, some higher-level allocation methods (`alloc_one`,
/// `alloc_array`) are well-defined on zero-sized types and can
/// optionally support them: it is left up to the implementor
/// whether to return `Err`, or to return `Ok` with some pointer.
///
/// * If an `Alloc` implementation chooses to return `Ok` in this
/// case (i.e. the pointer denotes a zero-sized inaccessible block)
/// then that returned pointer must be considered "currently
/// allocated". On such an allocator, *all* methods that take
/// currently-allocated pointers as inputs must accept these
/// zero-sized pointers, *without* causing undefined behavior.
///
/// * In other words, if a zero-sized pointer can flow out of an
/// allocator, then that allocator must likewise accept that pointer
/// flowing back into its deallocation and reallocation methods.
///
/// Some of the methods require that a layout *fit* a memory block.
/// What it means for a layout to "fit" a memory block (or
/// equivalently, for a memory block to "fit" a layout) is that the
/// following two conditions must hold:
///
/// 1. The block's starting address must be aligned to `layout.align()`.
///
/// 2. The block's size must fall in the range `[use_min, use_max]`, where:
///
/// * `use_min` is `self.usable_size(layout).0`, and
///
/// * `use_max` is the capacity that was (or would have been)
/// returned when (if) the block was allocated via a call to
/// `alloc_excess` or `realloc_excess`.
///
/// Note that:
///
/// * the size of the layout most recently used to allocate the block
/// is guaranteed to be in the range `[use_min, use_max]`, and
///
/// * a lower-bound on `use_max` can be safely approximated by a call to
/// `usable_size`.
///
/// * if a layout `k` fits a memory block (denoted by `ptr`)
/// currently allocated via an allocator `a`, then it is legal to
/// use that layout to deallocate it, i.e. `a.dealloc(ptr, k);`.
///
/// # Unsafety
///
/// The `Alloc` trait is an `unsafe` trait for a number of reasons, and
/// implementors must ensure that they adhere to these contracts:
///
/// * Pointers returned from allocation functions must point to valid memory and
/// retain their validity until at least the instance of `Alloc` is dropped
/// itself.
///
/// * `Layout` queries and calculations in general must be correct. Callers of
/// this trait are allowed to rely on the contracts defined on each method,
/// and implementors must ensure such contracts remain true.
///
/// Note that this list may get tweaked over time as clarifications are made in
/// the future.
// #[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe trait Alloc {
// (Note: some existing allocators have unspecified but well-defined
// behavior in response to a zero-size allocation request; e.g. in C,
// `malloc` of 0 will either return a null pointer or a unique pointer,
// but will not have arbitrary undefined behavior.
// However, in jemalloc for example, `mallocx(0)` is documented as
// undefined behavior.)
/// Returns a pointer meeting the size and alignment guarantees of
/// `layout`.
///
/// If this method returns an `Ok(addr)`, then the `addr` returned
/// will be a non-null address pointing to a block of storage
/// suitable for holding an instance of `layout`.
///
/// The returned block of storage may or may not have its contents
/// initialized. (Extension subtraits might restrict this
/// behavior, e.g. to ensure initialization to particular sets of
/// bit patterns.)
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure that `layout` has non-zero size.
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g. guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `layout` does not meet the allocator's size or alignment
/// constraints.
///
/// Implementations are encouraged to return `Err` on memory
/// exhaustion rather than panicking or aborting, but this is not
/// a strict requirement. (Specifically: it is *legal* to
/// implement this trait atop an underlying native allocation
/// library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr>;
/// Deallocate the memory referenced by `ptr`.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must denote a block of memory currently allocated via
/// this allocator,
///
/// * `layout` must *fit* that block of memory,
///
/// * In addition to fitting the block of memory `layout`, the
/// alignment of the `layout` must match the alignment used
/// to allocate that block of memory.
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout);
// == ALLOCATOR-SPECIFIC QUANTITIES AND LIMITS ==
// usable_size
/// Returns bounds on the guaranteed usable size of a successful
/// allocation created with the specified `layout`.
///
/// In particular, if one has a memory block allocated via a given
/// allocator `a` and layout `k` where `a.usable_size(k)` returns
/// `(l, u)`, then one can pass that block to `a.dealloc()` with a
/// layout in the size range `[l, u]`.
///
/// (All implementors of `usable_size` must ensure that
/// `l <= k.size() <= u`)
///
/// Both the lower- and upper-bounds (`l` and `u` respectively)
/// are provided, because an allocator based on size classes could
/// misbehave if one attempts to deallocate a block without
/// providing a correct value for its size (i.e., one within the
/// range `[l, u]`).
///
/// Clients who wish to make use of excess capacity are encouraged
/// to use the `alloc_excess` and `realloc_excess` methods instead, as
/// this method is constrained to report conservative values that
/// serve as valid bounds for *all possible* allocation method
/// calls.
///
/// However, for clients that do not wish to track the capacity
/// returned by `alloc_excess` locally, this method is likely to
/// produce useful results.
#[inline]
fn usable_size(&self, layout: &Layout) -> (usize, usize) {
(layout.size(), layout.size())
}
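// Illustration (ours, not upstream documentation): a size-class allocator
// that rounds every request up to the next power of two might override
// this as `(layout.size(), layout.size().next_power_of_two())`; the
// default above promises no usable slack beyond the requested size.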
// == METHODS FOR MEMORY REUSE ==
// realloc, alloc_excess, realloc_excess
/// Returns a pointer suitable for holding data described by
/// a new layout with `layout`'s alignment and a size given
/// by `new_size`. To accomplish this, the allocator may extend
/// or shrink the allocation referenced by `ptr` to fit the new
/// layout.
///
/// If this returns `Ok`, then ownership of the memory block
/// referenced by `ptr` has been transferred to this
/// allocator. The memory may or may not have been freed, and
/// should be considered unusable (unless of course it was
/// transferred back to the caller again via the return value of
/// this method).
///
/// If this method returns `Err`, then ownership of the memory
/// block has not been transferred to this allocator, and the
/// contents of the memory block are unaltered.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must be currently allocated via this allocator,
///
/// * `layout` must *fit* the `ptr` (see above). (The `new_size`
/// argument need not fit it.)
///
/// * `new_size` must be greater than zero.
///
/// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
/// must not overflow (i.e. the rounded value must be less than `usize::MAX`).
///
/// (Extension subtraits might provide more specific bounds on
/// behavior, e.g. guarantee a sentinel address or a null pointer
/// in response to a zero-size allocation request.)
///
/// # Errors
///
/// Returns `Err` only if the new layout
/// does not meet the allocator's size
/// and alignment constraints, or if reallocation
/// otherwise fails.
///
/// Implementations are encouraged to return `Err` on memory
/// exhaustion rather than panicking or aborting, but this is not
/// a strict requirement. (Specifically: it is *legal* to
/// implement this trait atop an underlying native allocation
/// library that aborts on memory exhaustion.)
///
/// Clients wishing to abort computation in response to a
/// reallocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn realloc(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<NonNull<u8>, AllocErr> {
let old_size = layout.size();
if new_size >= old_size {
if let Ok(()) = self.grow_in_place(ptr, layout, new_size) {
return Ok(ptr);
}
} else if new_size < old_size {
if let Ok(()) = self.shrink_in_place(ptr, layout, new_size) {
return Ok(ptr);
}
}
// otherwise, fall back on alloc + copy + dealloc.
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
let result = self.alloc(new_layout);
if let Ok(new_ptr) = result {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), cmp::min(old_size, new_size));
self.dealloc(ptr, layout);
}
result
}
/// Behaves like `alloc`, but also ensures that the contents
/// are set to zero before being returned.
///
/// # Safety
///
/// This function is unsafe for the same reasons that `alloc` is.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `layout` does not meet the allocator's size or alignment
/// constraints, just as in `alloc`.
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn alloc_zeroed(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
let size = layout.size();
let p = self.alloc(layout);
if let Ok(p) = p {
ptr::write_bytes(p.as_ptr(), 0, size);
}
p
}
/// Behaves like `alloc`, but also returns the whole size of
/// the returned block. For some `layout` inputs, like arrays, this
/// may include extra storage usable for additional data.
///
/// # Safety
///
/// This function is unsafe for the same reasons that `alloc` is.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `layout` does not meet the allocator's size or alignment
/// constraints, just as in `alloc`.
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn alloc_excess(&mut self, layout: Layout) -> Result<Excess, AllocErr> {
let usable_size = self.usable_size(&layout);
self.alloc(layout).map(|p| Excess(p, usable_size.1))
}
/// Behaves like `realloc`, but also returns the whole size of
/// the returned block. For some `layout` inputs, like arrays, this
/// may include extra storage usable for additional data.
///
/// # Safety
///
/// This function is unsafe for the same reasons that `realloc` is.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `layout` does not meet the allocator's size or alignment
/// constraints, just as in `realloc`.
///
/// Clients wishing to abort computation in response to a
/// reallocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn realloc_excess(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<Excess, AllocErr> {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
let usable_size = self.usable_size(&new_layout);
self.realloc(ptr, layout, new_size)
.map(|p| Excess(p, usable_size.1))
}
/// Attempts to extend the allocation referenced by `ptr` to fit `new_size`.
///
/// If this returns `Ok`, then the allocator has asserted that the
/// memory block referenced by `ptr` now fits `new_size`, and thus can
/// be used to carry data of a layout of that size and same alignment as
/// `layout`. (The allocator is allowed to
/// expend effort to accomplish this, such as extending the memory block to
/// include successor blocks, or virtual memory tricks.)
///
/// Regardless of what this method returns, ownership of the
/// memory block referenced by `ptr` has not been transferred, and
/// the contents of the memory block are unaltered.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must be currently allocated via this allocator,
///
/// * `layout` must *fit* the `ptr` (see above); note the
/// `new_size` argument need not fit it,
///
/// * `new_size` must not be less than `layout.size()`,
///
/// # Errors
///
/// Returns `Err(CannotReallocInPlace)` when the allocator is
/// unable to assert that the memory block referenced by `ptr`
/// could fit the requested `new_size`.
///
/// Note that one cannot pass `CannotReallocInPlace` to the `handle_alloc_error`
/// function; clients are expected either to be able to recover from
/// `grow_in_place` failures without aborting, or to fall back on
/// another reallocation method before resorting to an abort.
unsafe fn grow_in_place(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<(), CannotReallocInPlace> {
let _ = ptr; // this default implementation doesn't care about the actual address.
debug_assert!(new_size >= layout.size());
let (_l, u) = self.usable_size(&layout);
// _l <= layout.size() [guaranteed by usable_size()]
// layout.size() <= new_layout.size() [required by this method]
if new_size <= u {
Ok(())
} else {
Err(CannotReallocInPlace)
}
}
/// Attempts to shrink the allocation referenced by `ptr` to fit `new_size`.
///
/// If this returns `Ok`, then the allocator has asserted that the
/// memory block referenced by `ptr` now fits `new_size`, and
/// thus can only be used to carry data of that smaller
/// layout. (The allocator is allowed to take advantage of this,
/// carving off portions of the block for reuse elsewhere.) The
/// truncated contents of the block within the smaller layout are
/// unaltered, and ownership of block has not been transferred.
///
/// If this returns `Err`, then the memory block is considered to
/// still represent the original (larger) `layout`. None of the
/// block has been carved off for reuse elsewhere, ownership of
/// the memory block has not been transferred, and the contents of
/// the memory block are unaltered.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must be currently allocated via this allocator,
///
/// * `layout` must *fit* the `ptr` (see above); note the
/// `new_size` argument need not fit it,
///
/// * `new_size` must not be greater than `layout.size()`
/// (and must be greater than zero),
///
/// # Errors
///
/// Returns `Err(CannotReallocInPlace)` when the allocator is
/// unable to assert that the memory block referenced by `ptr`
/// could fit the requested `new_size`.
///
/// Note that one cannot pass `CannotReallocInPlace` to the `handle_alloc_error`
/// function; clients are expected either to be able to recover from
/// `shrink_in_place` failures without aborting, or to fall back
/// on another reallocation method before resorting to an abort.
unsafe fn shrink_in_place(
&mut self,
ptr: NonNull<u8>,
layout: Layout,
new_size: usize,
) -> Result<(), CannotReallocInPlace> {
let _ = ptr; // this default implementation doesn't care about the actual address.
debug_assert!(new_size <= layout.size());
let (l, _u) = self.usable_size(&layout);
// layout.size() <= _u [guaranteed by usable_size()]
// new_layout.size() <= layout.size() [required by this method]
if l <= new_size {
Ok(())
} else {
Err(CannotReallocInPlace)
}
}
// == COMMON USAGE PATTERNS ==
// alloc_one, dealloc_one, alloc_array, realloc_array, dealloc_array
/// Allocates a block suitable for holding an instance of `T`.
///
/// Captures a common usage pattern for allocators.
///
/// The returned block is suitable for passing to the
/// `alloc`/`realloc` methods of this allocator.
///
/// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
/// must be considered "currently allocated" and must be
/// acceptable input to methods such as `realloc` or `dealloc`,
/// *even if* `T` is a zero-sized type. In other words, if your
/// `Alloc` implementation overrides this method in a manner
/// that can return a zero-sized `ptr`, then all reallocation and
/// deallocation methods need to be similarly overridden to accept
/// such values as input.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `T` does not meet the allocator's size or alignment constraints.
///
/// For zero-sized `T`, may return either of `Ok` or `Err`, but
/// will *not* yield undefined behavior.
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
fn alloc_one<T>(&mut self) -> Result<NonNull<T>, AllocErr>
where
Self: Sized,
{
let k = Layout::new::<T>();
if k.size() > 0 {
unsafe { self.alloc(k).map(|p| p.cast()) }
} else {
Err(AllocErr)
}
}
/// Deallocates a block suitable for holding an instance of `T`.
///
/// The given block must have been produced by this allocator,
/// and must be suitable for storing a `T` (in terms of alignment
/// as well as minimum and maximum size); otherwise yields
/// undefined behavior.
///
/// Captures a common usage pattern for allocators.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure both:
///
/// * `ptr` must denote a block of memory currently allocated via this allocator
///
/// * the layout of `T` must *fit* that block of memory.
unsafe fn dealloc_one<T>(&mut self, ptr: NonNull<T>)
where
Self: Sized,
{
let k = Layout::new::<T>();
if k.size() > 0 {
self.dealloc(ptr.cast(), k);
}
}
/// Allocates a block suitable for holding `n` instances of `T`.
///
/// Captures a common usage pattern for allocators.
///
/// The returned block is suitable for passing to the
/// `alloc`/`realloc` methods of this allocator.
///
/// Note to implementors: If this returns `Ok(ptr)`, then `ptr`
/// must be considered "currently allocated" and must be
/// acceptable input to methods such as `realloc` or `dealloc`,
/// *even if* `T` is a zero-sized type. In other words, if your
/// `Alloc` implementation overrides this method in a manner
/// that can return a zero-sized `ptr`, then all reallocation and
/// deallocation methods need to be similarly overridden to accept
/// such values as input.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `[T; n]` does not meet the allocator's size or alignment
/// constraints.
///
/// For zero-sized `T` or `n == 0`, may return either of `Ok` or
/// `Err`, but will *not* yield undefined behavior.
///
/// Always returns `Err` on arithmetic overflow.
///
/// Clients wishing to abort computation in response to an
/// allocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
fn alloc_array<T>(&mut self, n: usize) -> Result<NonNull<T>, AllocErr>
where
Self: Sized,
{
match Layout::array::<T>(n) {
Ok(layout) if layout.size() > 0 => unsafe { self.alloc(layout).map(|p| p.cast()) },
_ => Err(AllocErr),
}
}
/// Reallocates a block previously suitable for holding `n_old`
/// instances of `T`, returning a block suitable for holding
/// `n_new` instances of `T`.
///
/// Captures a common usage pattern for allocators.
///
/// The returned block is suitable for passing to the
/// `alloc`/`realloc` methods of this allocator.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure all of the following:
///
/// * `ptr` must be currently allocated via this allocator,
///
/// * the layout of `[T; n_old]` must *fit* that block of memory.
///
/// # Errors
///
/// Returning `Err` indicates that either memory is exhausted or
/// `[T; n_new]` does not meet the allocator's size or alignment
/// constraints.
///
/// For zero-sized `T` or `n_new == 0`, may return either of `Ok` or
/// `Err`, but will *not* yield undefined behavior.
///
/// Always returns `Err` on arithmetic overflow.
///
/// Clients wishing to abort computation in response to a
/// reallocation error are encouraged to call the [`handle_alloc_error`] function,
/// rather than directly invoking `panic!` or similar.
///
/// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
unsafe fn realloc_array<T>(
&mut self,
ptr: NonNull<T>,
n_old: usize,
n_new: usize,
) -> Result<NonNull<T>, AllocErr>
where
Self: Sized,
{
match (Layout::array::<T>(n_old), Layout::array::<T>(n_new)) {
(Ok(ref k_old), Ok(ref k_new)) if k_old.size() > 0 && k_new.size() > 0 => {
debug_assert!(k_old.align() == k_new.align());
self.realloc(ptr.cast(), *k_old, k_new.size())
.map(NonNull::cast)
}
_ => Err(AllocErr),
}
}
/// Deallocates a block suitable for holding `n` instances of `T`.
///
/// Captures a common usage pattern for allocators.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure both:
///
/// * `ptr` must denote a block of memory currently allocated via this allocator
///
/// * the layout of `[T; n]` must *fit* that block of memory.
///
/// # Errors
///
/// Returning `Err` indicates that either `[T; n]` or the given
/// memory block does not meet the allocator's size or alignment
/// constraints.
///
/// Always returns `Err` on arithmetic overflow.
unsafe fn dealloc_array<T>(&mut self, ptr: NonNull<T>, n: usize) -> Result<(), AllocErr>
where
Self: Sized,
{
match Layout::array::<T>(n) {
Ok(k) if k.size() > 0 => {
self.dealloc(ptr.cast(), k);
Ok(())
}
_ => Err(AllocErr),
}
}
}
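// A minimal sketch (ours, not part of the vendored crate) of an `Alloc`
// implementor, illustrating the contract documented above: every pointer
// returned by `alloc` is later passed to `dealloc` with a layout that
// *fits* the block. `StdForwarder` is a hypothetical name, and the sketch
// assumes `std` is available, as it typically is under `cfg(test)`.
#[cfg(test)]
mod alloc_contract_sketch {
    use super::*;

    // Hypothetical allocator that forwards to the global allocator.
    struct StdForwarder;

    unsafe impl Alloc for StdForwarder {
        unsafe fn alloc(&mut self, layout: Layout) -> Result<NonNull<u8>, AllocErr> {
            // Per the trait's contract, `layout` must have non-zero size here.
            NonNull::new(std::alloc::alloc(layout)).ok_or(AllocErr)
        }
        unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
            std::alloc::dealloc(ptr.as_ptr(), layout)
        }
    }

    #[test]
    fn alloc_dealloc_roundtrip() {
        let mut a = StdForwarder;
        let layout = Layout::new::<u64>();
        unsafe {
            let p = a.alloc(layout).expect("allocation failed");
            // `layout` fits this block, so deallocating with it is legal.
            a.dealloc(p, layout);
        }
    }
}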

vendor/bumpalo/src/boxed.rs vendored Normal file

@@ -0,0 +1,699 @@
//! A pointer type for bump allocation.
//!
//! [`Box<'a, T>`] provides the simplest form of
//! bump allocation in `bumpalo`. Boxes provide ownership for this allocation, and
//! drop their contents when they go out of scope.
//!
//! # Examples
//!
//! Move a value from the stack to the heap by creating a [`Box`]:
//!
//! ```
//! use bumpalo::{Bump, boxed::Box};
//!
//! let b = Bump::new();
//!
//! let val: u8 = 5;
//! let boxed: Box<u8> = Box::new_in(val, &b);
//! ```
//!
//! Move a value from a [`Box`] back to the stack by [dereferencing]:
//!
//! ```
//! use bumpalo::{Bump, boxed::Box};
//!
//! let b = Bump::new();
//!
//! let boxed: Box<u8> = Box::new_in(5, &b);
//! let val: u8 = *boxed;
//! ```
//!
//! Running [`Drop`] implementations on bump-allocated values:
//!
//! ```
//! use bumpalo::{Bump, boxed::Box};
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static NUM_DROPPED: AtomicUsize = AtomicUsize::new(0);
//!
//! struct CountDrops;
//!
//! impl Drop for CountDrops {
//! fn drop(&mut self) {
//! NUM_DROPPED.fetch_add(1, Ordering::SeqCst);
//! }
//! }
//!
//! // Create a new bump arena.
//! let bump = Bump::new();
//!
//! // Create a `CountDrops` inside the bump arena.
//! let mut c = Box::new_in(CountDrops, &bump);
//!
//! // No `CountDrops` have been dropped yet.
//! assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 0);
//!
//! // Drop our `Box<CountDrops>`.
//! drop(c);
//!
//! // Its `Drop` implementation was run, and so `NUM_DROPS` has been incremented.
//! assert_eq!(NUM_DROPPED.load(Ordering::SeqCst), 1);
//! ```
//!
//! Creating a recursive data structure:
//!
//! ```
//! use bumpalo::{Bump, boxed::Box};
//!
//! let b = Bump::new();
//!
//! #[derive(Debug)]
//! enum List<'a, T> {
//! Cons(T, Box<'a, List<'a, T>>),
//! Nil,
//! }
//!
//! let list: List<i32> = List::Cons(1, Box::new_in(List::Cons(2, Box::new_in(List::Nil, &b)), &b));
//! println!("{:?}", list);
//! ```
//!
//! This will print `Cons(1, Cons(2, Nil))`.
//!
//! Recursive structures must be boxed, because if the definition of `Cons`
//! looked like this:
//!
//! ```compile_fail,E0072
//! # enum List<T> {
//! Cons(T, List<T>),
//! # }
//! ```
//!
//! It wouldn't work. This is because the size of a `List` depends on how many
//! elements are in the list, and so we don't know how much memory to allocate
//! for a `Cons`. By introducing a [`Box<'a, T>`], which has a defined size, we know how
//! big `Cons` needs to be.
//!
//! # Memory layout
//!
//! For non-zero-sized values, a [`Box`] will use the provided [`Bump`] allocator for
//! its allocation. It is valid to convert both ways between a [`Box`] and a
//! pointer allocated with the [`Bump`] allocator, given that the
//! [`Layout`] used with the allocator is correct for the type. More precisely,
//! a `value: *mut T` that has been allocated with the [`Bump`] allocator
//! with `Layout::for_value(&*value)` may be converted into a box using
//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
//! T` obtained from [`Box::<T>::into_raw`] will be deallocated by the
//! [`Bump`] allocator with [`Layout::for_value(&*value)`].
//!
//! Note that the round-trip `Box::from_raw(Box::into_raw(b))` loses the lifetime bound to the
//! [`Bump`] immutable borrow, which guarantees that the allocator will not be reset
//! and the memory will not be freed.
//!
//! [dereferencing]: https://doc.rust-lang.org/std/ops/trait.Deref.html
//! [`Box`]: struct.Box.html
//! [`Box<'a, T>`]: struct.Box.html
//! [`Box::<T>::from_raw(value)`]: struct.Box.html#method.from_raw
//! [`Box::<T>::into_raw`]: struct.Box.html#method.into_raw
//! [`Bump`]: ../struct.Bump.html
//! [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
//! [`Layout`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html
//! [`Layout::for_value(&*value)`]: https://doc.rust-lang.org/std/alloc/struct.Layout.html#method.for_value
use {
crate::Bump,
{
core::{
any::Any,
borrow,
cmp::Ordering,
convert::TryFrom,
future::Future,
hash::{Hash, Hasher},
iter::FusedIterator,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
pin::Pin,
task::{Context, Poll},
},
core_alloc::fmt,
},
};
/// An owned pointer to a bump-allocated `T` value that runs `Drop`
/// implementations.
///
/// See the [module-level documentation][crate::boxed] for more details.
#[repr(transparent)]
pub struct Box<'a, T: ?Sized>(&'a mut T);
impl<'a, T> Box<'a, T> {
/// Allocates memory on the heap and then places `x` into it.
///
/// This doesn't actually allocate if `T` is zero-sized.
///
/// # Examples
///
/// ```
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// let five = Box::new_in(5, &b);
/// ```
#[inline(always)]
pub fn new_in(x: T, a: &'a Bump) -> Box<'a, T> {
Box(a.alloc(x))
}
/// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
/// `x` will be pinned in memory and unable to be moved.
#[inline(always)]
pub fn pin_in(x: T, a: &'a Bump) -> Pin<Box<'a, T>> {
Box(a.alloc(x)).into()
}
/// Consumes the `Box`, returning the wrapped value.
///
/// # Examples
///
/// ```
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// let hello = Box::new_in("hello".to_owned(), &b);
/// assert_eq!(Box::into_inner(hello), "hello");
/// ```
pub fn into_inner(b: Box<'a, T>) -> T {
// `Box::into_raw` returns a pointer that is properly aligned and non-null.
// The underlying `Bump` only frees the memory, but won't call the destructor.
unsafe { core::ptr::read(Box::into_raw(b)) }
}
}
impl<'a, T: ?Sized> Box<'a, T> {
/// Constructs a box from a raw pointer.
///
/// After calling this function, the raw pointer is owned by the
/// resulting `Box`. Specifically, the `Box` destructor will call
/// the destructor of `T` and free the allocated memory. For this
/// to be safe, the memory must have been allocated in accordance
/// with the memory layout used by `Box`.
///
/// # Safety
///
/// This function is unsafe because improper use may lead to
/// memory problems. For example, a double-free may occur if the
/// function is called twice on the same raw pointer.
///
/// # Examples
///
/// Recreate a `Box` which was previously converted to a raw pointer
/// using [`Box::into_raw`]:
/// ```
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// let x = Box::new_in(5, &b);
/// let ptr = Box::into_raw(x);
/// let x = unsafe { Box::from_raw(ptr) }; // Note that new `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset.
/// ```
/// Manually create a `Box` from scratch by using the bump allocator:
/// ```
/// use std::alloc::{alloc, Layout};
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// unsafe {
/// let ptr = b.alloc_layout(Layout::new::<i32>()).as_ptr() as *mut i32;
/// *ptr = 5;
/// let x = Box::from_raw(ptr); // Note that `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset.
/// }
/// ```
#[inline]
pub unsafe fn from_raw(raw: *mut T) -> Self {
Box(&mut *raw)
}
/// Consumes the `Box`, returning a wrapped raw pointer.
///
/// The pointer will be properly aligned and non-null.
///
/// After calling this function, the caller is responsible for the
/// value previously managed by the `Box`. In particular, the
/// caller should properly destroy `T`. The easiest way to
/// do this is to convert the raw pointer back into a `Box` with the
/// [`Box::from_raw`] function, allowing the `Box` destructor to perform
/// the cleanup.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// # Examples
///
/// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
/// for automatic cleanup:
/// ```
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// let x = Box::new_in(String::from("Hello"), &b);
/// let ptr = Box::into_raw(x);
/// let x = unsafe { Box::from_raw(ptr) }; // Note that new `x`'s lifetime is unbound. It must be bound to the `b` immutable borrow before `b` is reset.
/// ```
/// Manual cleanup by explicitly running the destructor:
/// ```
/// use std::ptr;
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// let mut x = Box::new_in(String::from("Hello"), &b);
/// let p = Box::into_raw(x);
/// unsafe {
/// ptr::drop_in_place(p);
/// }
/// ```
#[inline]
pub fn into_raw(b: Box<'a, T>) -> *mut T {
let mut b = ManuallyDrop::new(b);
b.deref_mut().0 as *mut T
}
/// Consumes and leaks the `Box`, returning a mutable reference,
/// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
/// `'a`. If the type has only static references, or none at all, then this
/// may be chosen to be `'static`.
///
/// This function is mainly useful for data that lives for the remainder of
/// the program's life. Dropping the returned reference will cause a memory
/// leak. If this is not acceptable, the reference should first be wrapped
/// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
/// then be dropped which will properly destroy `T` and release the
/// allocated memory.
///
/// Note: this is an associated function, which means that you have
/// to call it as `Box::leak(b)` instead of `b.leak()`. This
/// is so that there is no conflict with a method on the inner type.
///
/// # Examples
///
/// Simple usage:
///
/// ```
/// use bumpalo::{Bump, boxed::Box};
///
/// let b = Bump::new();
///
/// let x = Box::new_in(41, &b);
/// let reference: &mut usize = Box::leak(x);
/// *reference += 1;
/// assert_eq!(*reference, 42);
/// ```
///
/// ```
/// # #[cfg(feature = "collections")]
/// # {
/// use bumpalo::{Bump, boxed::Box, vec};
///
/// let b = Bump::new();
///
/// let x = vec![in &b; 1, 2, 3].into_boxed_slice();
/// let reference = Box::leak(x);
/// reference[0] = 4;
/// assert_eq!(*reference, [4, 2, 3]);
/// # }
/// ```
#[inline]
pub fn leak(b: Box<'a, T>) -> &'a mut T {
unsafe { &mut *Box::into_raw(b) }
}
}
impl<'a, T: ?Sized> Drop for Box<'a, T> {
fn drop(&mut self) {
unsafe {
// `Box` owns the value of `T`, but not the memory behind it.
core::ptr::drop_in_place(self.0);
}
}
}
impl<'a, T> Default for Box<'a, [T]> {
fn default() -> Box<'a, [T]> {
// It should be OK to `drop_in_place` an empty slice of anything.
Box(&mut [])
}
}
impl<'a> Default for Box<'a, str> {
fn default() -> Box<'a, str> {
// An empty slice is a valid string.
// It should be OK to `drop_in_place` an empty `str`.
unsafe { Box::from_raw(Box::into_raw(Box::<[u8]>::default()) as *mut str) }
}
}
impl<'a, 'b, T: ?Sized + PartialEq> PartialEq<Box<'b, T>> for Box<'a, T> {
#[inline]
fn eq(&self, other: &Box<'b, T>) -> bool {
PartialEq::eq(&**self, &**other)
}
#[inline]
fn ne(&self, other: &Box<'b, T>) -> bool {
PartialEq::ne(&**self, &**other)
}
}
impl<'a, 'b, T: ?Sized + PartialOrd> PartialOrd<Box<'b, T>> for Box<'a, T> {
#[inline]
fn partial_cmp(&self, other: &Box<'b, T>) -> Option<Ordering> {
PartialOrd::partial_cmp(&**self, &**other)
}
#[inline]
fn lt(&self, other: &Box<'b, T>) -> bool {
PartialOrd::lt(&**self, &**other)
}
#[inline]
fn le(&self, other: &Box<'b, T>) -> bool {
PartialOrd::le(&**self, &**other)
}
#[inline]
fn ge(&self, other: &Box<'b, T>) -> bool {
PartialOrd::ge(&**self, &**other)
}
#[inline]
fn gt(&self, other: &Box<'b, T>) -> bool {
PartialOrd::gt(&**self, &**other)
}
}
impl<'a, T: ?Sized + Ord> Ord for Box<'a, T> {
#[inline]
fn cmp(&self, other: &Box<'a, T>) -> Ordering {
Ord::cmp(&**self, &**other)
}
}
impl<'a, T: ?Sized + Eq> Eq for Box<'a, T> {}
impl<'a, T: ?Sized + Hash> Hash for Box<'a, T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
impl<'a, T: ?Sized + Hasher> Hasher for Box<'a, T> {
fn finish(&self) -> u64 {
(**self).finish()
}
fn write(&mut self, bytes: &[u8]) {
(**self).write(bytes)
}
fn write_u8(&mut self, i: u8) {
(**self).write_u8(i)
}
fn write_u16(&mut self, i: u16) {
(**self).write_u16(i)
}
fn write_u32(&mut self, i: u32) {
(**self).write_u32(i)
}
fn write_u64(&mut self, i: u64) {
(**self).write_u64(i)
}
fn write_u128(&mut self, i: u128) {
(**self).write_u128(i)
}
fn write_usize(&mut self, i: usize) {
(**self).write_usize(i)
}
fn write_i8(&mut self, i: i8) {
(**self).write_i8(i)
}
fn write_i16(&mut self, i: i16) {
(**self).write_i16(i)
}
fn write_i32(&mut self, i: i32) {
(**self).write_i32(i)
}
fn write_i64(&mut self, i: i64) {
(**self).write_i64(i)
}
fn write_i128(&mut self, i: i128) {
(**self).write_i128(i)
}
fn write_isize(&mut self, i: isize) {
(**self).write_isize(i)
}
}
impl<'a, T: ?Sized> From<Box<'a, T>> for Pin<Box<'a, T>> {
/// Converts a `Box<T>` into a `Pin<Box<T>>`.
///
/// This conversion does not allocate on the heap and happens in place.
fn from(boxed: Box<'a, T>) -> Self {
// It's not possible to move or replace the insides of a `Pin<Box<T>>`
// when `T: !Unpin`, so it's safe to pin it directly without any
// additional requirements.
unsafe { Pin::new_unchecked(boxed) }
}
}
impl<'a> Box<'a, dyn Any> {
#[inline]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<dyn Any>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<'a, T>, Box<'a, dyn Any>> {
if self.is::<T>() {
unsafe {
let raw: *mut dyn Any = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
Err(self)
}
}
}
impl<'a> Box<'a, dyn Any + Send> {
#[inline]
/// Attempt to downcast the box to a concrete type.
///
/// # Examples
///
/// ```
/// use std::any::Any;
///
/// fn print_if_string(value: Box<dyn Any + Send>) {
/// if let Ok(string) = value.downcast::<String>() {
/// println!("String ({}): {}", string.len(), string);
/// }
/// }
///
/// let my_string = "Hello World".to_string();
/// print_if_string(Box::new(my_string));
/// print_if_string(Box::new(0i8));
/// ```
pub fn downcast<T: Any>(self) -> Result<Box<'a, T>, Box<'a, dyn Any + Send>> {
if self.is::<T>() {
unsafe {
let raw: *mut (dyn Any + Send) = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
Err(self)
}
}
}
impl<'a, T: fmt::Display + ?Sized> fmt::Display for Box<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<'a, T: fmt::Debug + ?Sized> fmt::Debug for Box<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, T: ?Sized> fmt::Pointer for Box<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// It's not possible to extract the inner reference directly from the
// `Box`; instead we cast it to a `*const`, which aliases it.
let ptr: *const T = &**self;
fmt::Pointer::fmt(&ptr, f)
}
}
impl<'a, T: ?Sized> Deref for Box<'a, T> {
type Target = T;
fn deref(&self) -> &T {
&*self.0
}
}
impl<'a, T: ?Sized> DerefMut for Box<'a, T> {
fn deref_mut(&mut self) -> &mut T {
self.0
}
}
impl<'a, I: Iterator + ?Sized> Iterator for Box<'a, I> {
type Item = I::Item;
fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
fn nth(&mut self, n: usize) -> Option<I::Item> {
(**self).nth(n)
}
fn last(self) -> Option<I::Item> {
#[inline]
fn some<T>(_: Option<T>, x: T) -> Option<T> {
Some(x)
}
self.fold(None, some)
}
}
impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for Box<'a, I> {
fn next_back(&mut self) -> Option<I::Item> {
(**self).next_back()
}
fn nth_back(&mut self, n: usize) -> Option<I::Item> {
(**self).nth_back(n)
}
}
impl<'a, I: ExactSizeIterator + ?Sized> ExactSizeIterator for Box<'a, I> {
fn len(&self) -> usize {
(**self).len()
}
}
impl<'a, I: FusedIterator + ?Sized> FusedIterator for Box<'a, I> {}
#[cfg(feature = "collections")]
impl<'a, A> Box<'a, [A]> {
/// Creates a value from an iterator.
/// This method is an adapted version of [`FromIterator::from_iter`][from_iter].
/// It cannot be an implementation of that trait, because the signature
/// differs: it takes an additional allocator argument.
///
/// [from_iter]: https://doc.rust-lang.org/std/iter/trait.FromIterator.html#tymethod.from_iter
///
/// # Examples
///
/// Basic usage:
/// ```
/// use bumpalo::{Bump, boxed::Box, vec};
///
/// let b = Bump::new();
///
/// let five_fives = std::iter::repeat(5).take(5);
/// let slice = Box::from_iter_in(five_fives, &b);
/// assert_eq!(vec![in &b; 5, 5, 5, 5, 5], &*slice);
/// ```
pub fn from_iter_in<T: IntoIterator<Item = A>>(iter: T, a: &'a Bump) -> Self {
use crate::collections::Vec;
let mut vec = Vec::new_in(a);
vec.extend(iter);
vec.into_boxed_slice()
}
}
impl<'a, T: ?Sized> borrow::Borrow<T> for Box<'a, T> {
fn borrow(&self) -> &T {
&**self
}
}
impl<'a, T: ?Sized> borrow::BorrowMut<T> for Box<'a, T> {
fn borrow_mut(&mut self) -> &mut T {
&mut **self
}
}
impl<'a, T: ?Sized> AsRef<T> for Box<'a, T> {
fn as_ref(&self) -> &T {
&**self
}
}
impl<'a, T: ?Sized> AsMut<T> for Box<'a, T> {
fn as_mut(&mut self) -> &mut T {
&mut **self
}
}
impl<'a, T: ?Sized> Unpin for Box<'a, T> {}
impl<'a, F: ?Sized + Future + Unpin> Future for Box<'a, F> {
type Output = F::Output;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
F::poll(Pin::new(&mut *self), cx)
}
}
/// This impl replaces unsize coercion.
impl<'a, T, const N: usize> From<Box<'a, [T; N]>> for Box<'a, [T]> {
fn from(arr: Box<'a, [T; N]>) -> Box<'a, [T]> {
let mut arr = ManuallyDrop::new(arr);
let ptr = core::ptr::slice_from_raw_parts_mut(arr.as_mut_ptr(), N);
unsafe { Box::from_raw(ptr) }
}
}
/// This impl replaces unsize coercion.
impl<'a, T, const N: usize> TryFrom<Box<'a, [T]>> for Box<'a, [T; N]> {
type Error = Box<'a, [T]>;
fn try_from(slice: Box<'a, [T]>) -> Result<Box<'a, [T; N]>, Box<'a, [T]>> {
if slice.len() == N {
let mut slice = ManuallyDrop::new(slice);
let ptr = slice.as_mut_ptr() as *mut [T; N];
Ok(unsafe { Box::from_raw(ptr) })
} else {
Err(slice)
}
}
}
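// Example (ours, not part of the vendored source) exercising the two impls
// above:
//
// let b = Bump::new();
// let arr: Box<[u8; 3]> = Box::new_in([1, 2, 3], &b);
// let slice: Box<[u8]> = arr.into(); // unsize via `From`
// let back = Box::<[u8; 3]>::try_from(slice).expect("length matches");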
#[cfg(feature = "serde")]
mod serialize {
use super::*;
use serde::{Serialize, Serializer};
impl<'a, T> Serialize for Box<'a, T>
where
T: Serialize,
{
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
T::serialize(self, serializer)
}
}
}

vendor/bumpalo/src/collections/collect_in.rs vendored Normal file

@@ -0,0 +1,152 @@
#[cfg(feature = "boxed")]
use crate::boxed::Box;
use crate::collections::{String, Vec};
use crate::Bump;
/// A trait for types that support being constructed from an iterator, parameterized by an allocator.
pub trait FromIteratorIn<A> {
/// The allocator type
type Alloc;
/// Similar to [`FromIterator::from_iter`][from_iter], but with a given allocator.
///
/// [from_iter]: https://doc.rust-lang.org/std/iter/trait.FromIterator.html#tymethod.from_iter
///
/// ```
/// # use bumpalo::collections::{FromIteratorIn, Vec};
/// # use bumpalo::Bump;
/// #
/// let five_fives = std::iter::repeat(5).take(5);
/// let bump = Bump::new();
///
/// let v = Vec::from_iter_in(five_fives, &bump);
///
/// assert_eq!(v, [5, 5, 5, 5, 5]);
/// ```
fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
where
I: IntoIterator<Item = A>;
}
#[cfg(feature = "boxed")]
impl<'bump, T> FromIteratorIn<T> for Box<'bump, [T]> {
type Alloc = &'bump Bump;
fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
where
I: IntoIterator<Item = T>,
{
Box::from_iter_in(iter, alloc)
}
}
impl<'bump, T> FromIteratorIn<T> for Vec<'bump, T> {
type Alloc = &'bump Bump;
fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
where
I: IntoIterator<Item = T>,
{
Vec::from_iter_in(iter, alloc)
}
}
impl<T, V: FromIteratorIn<T>> FromIteratorIn<Option<T>> for Option<V> {
type Alloc = V::Alloc;
fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
where
I: IntoIterator<Item = Option<T>>,
{
iter.into_iter()
.map(|x| x.ok_or(()))
.collect_in::<Result<_, _>>(alloc)
.ok()
}
}
impl<T, E, V: FromIteratorIn<T>> FromIteratorIn<Result<T, E>> for Result<V, E> {
type Alloc = V::Alloc;
/// Takes each element in the `Iterator`: if it is an `Err`, no further
/// elements are taken, and the `Err` is returned. Should no `Err` occur, a
/// container with the values of each `Result` is returned.
///
/// Here is an example which increments every integer in a vector,
/// checking for overflow:
///
/// ```
/// # use bumpalo::collections::{FromIteratorIn, CollectIn, Vec, String};
/// # use bumpalo::Bump;
/// #
/// let bump = Bump::new();
///
/// let v = vec![1, 2, u32::MAX];
/// let res: Result<Vec<u32>, &'static str> = v.iter().take(2).map(|x: &u32|
/// x.checked_add(1).ok_or("Overflow!")
/// ).collect_in(&bump);
/// assert_eq!(res, Ok(bumpalo::vec![in &bump; 2, 3]));
///
/// let res: Result<Vec<u32>, &'static str> = v.iter().map(|x: &u32|
/// x.checked_add(1).ok_or("Overflow!")
/// ).collect_in(&bump);
/// assert_eq!(res, Err("Overflow!"));
/// ```
fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
where
I: IntoIterator<Item = Result<T, E>>,
{
let mut iter = iter.into_iter();
let mut error = None;
let container = core::iter::from_fn(|| match iter.next() {
Some(Ok(x)) => Some(x),
Some(Err(e)) => {
error = Some(e);
None
}
None => None,
})
.collect_in(alloc);
match error {
Some(e) => Err(e),
None => Ok(container),
}
}
}
impl<'bump> FromIteratorIn<char> for String<'bump> {
type Alloc = &'bump Bump;
fn from_iter_in<I>(iter: I, alloc: Self::Alloc) -> Self
where
I: IntoIterator<Item = char>,
{
String::from_iter_in(iter, alloc)
}
}
/// Extension trait for iterators, in order to allow allocator-parameterized collections to be constructed more easily.
pub trait CollectIn: Iterator + Sized {
/// Collect all items from an iterator into a collection parameterized by an allocator.
/// Similar to [`Iterator::collect`][collect].
///
/// [collect]: https://doc.rust-lang.org/std/iter/trait.Iterator.html#method.collect
///
/// ```
/// # use bumpalo::collections::{FromIteratorIn, CollectIn, Vec, String};
/// # use bumpalo::Bump;
/// #
/// let bump = Bump::new();
///
/// let str = "hello, world!".to_owned();
/// let bump_str: String = str.chars().collect_in(&bump);
/// assert_eq!(&bump_str, &str);
///
/// let nums: Vec<i32> = (0..=3).collect_in::<Vec<_>>(&bump);
/// assert_eq!(&nums, &[0,1,2,3]);
/// ```
fn collect_in<C: FromIteratorIn<Self::Item>>(self, alloc: C::Alloc) -> C {
C::from_iter_in(self, alloc)
}
}
impl<I: Iterator> CollectIn for I {}

vendor/bumpalo/src/collections/mod.rs vendored Normal file

@@ -0,0 +1,93 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Collection types that allocate inside a [`Bump`] arena.
//!
//! [`Bump`]: ../struct.Bump.html
#![allow(deprecated)]
mod raw_vec;
pub mod vec;
pub use self::vec::Vec;
mod str;
pub mod string;
pub use self::string::String;
mod collect_in;
pub use collect_in::{CollectIn, FromIteratorIn};
// pub mod binary_heap;
// mod btree;
// pub mod linked_list;
// pub mod vec_deque;
// pub mod btree_map {
// //! A map based on a B-Tree.
// pub use super::btree::map::*;
// }
// pub mod btree_set {
// //! A set based on a B-Tree.
// pub use super::btree::set::*;
// }
// #[doc(no_inline)]
// pub use self::binary_heap::BinaryHeap;
// #[doc(no_inline)]
// pub use self::btree_map::BTreeMap;
// #[doc(no_inline)]
// pub use self::btree_set::BTreeSet;
// #[doc(no_inline)]
// pub use self::linked_list::LinkedList;
// #[doc(no_inline)]
// pub use self::vec_deque::VecDeque;
use crate::alloc::{AllocErr, LayoutErr};
/// Augments `AllocErr` with a `CapacityOverflow` variant.
#[derive(Clone, PartialEq, Eq, Debug)]
// #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
pub enum CollectionAllocErr {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// Error due to the allocator (see the documentation for the [`AllocErr`] type).
AllocErr,
}
// #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
impl From<AllocErr> for CollectionAllocErr {
#[inline]
fn from(AllocErr: AllocErr) -> Self {
CollectionAllocErr::AllocErr
}
}
// #[unstable(feature = "try_reserve", reason = "new API", issue="48043")]
impl From<LayoutErr> for CollectionAllocErr {
#[inline]
fn from(_: LayoutErr) -> Self {
CollectionAllocErr::CapacityOverflow
}
}
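// Example (ours, not part of the vendored source): the `From` impls above
// let fallible layout math use `?`:
//
// fn array_layout<T>(n: usize) -> Result<Layout, CollectionAllocErr> {
//     Ok(Layout::array::<T>(n)?) // a `LayoutErr` becomes `CapacityOverflow`
// }
//
// (assumes `crate::alloc::{Layout, UnstableLayoutMethods}` are in scope)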
// /// An intermediate trait for specialization of `Extend`.
// #[doc(hidden)]
// trait SpecExtend<I: IntoIterator> {
// /// Extends `self` with the contents of the given iterator.
// fn spec_extend(&mut self, iter: I);
// }

vendor/bumpalo/src/collections/raw_vec.rs vendored Normal file

@@ -0,0 +1,781 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(unstable_name_collisions)]
#![allow(dead_code)]
use crate::Bump;
use core::cmp;
use core::mem;
use core::ptr::{self, NonNull};
use crate::alloc::{handle_alloc_error, Alloc, Layout, UnstableLayoutMethods};
use crate::collections::CollectionAllocErr;
use crate::collections::CollectionAllocErr::*;
// use boxed::Box;
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `NonNull::dangling()` on zero-sized types
/// * Produces `NonNull::dangling()` on zero-length allocations
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics)
/// * Guards against 32-bit systems allocating more than isize::MAX bytes
/// * Guards against overflowing your length
/// * Aborts on OOM
/// * Avoids freeing `NonNull::dangling()`
/// * Contains a `NonNull` pointer and thus endows the user with all related benefits
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec
/// to handle the actual things *stored* inside of a RawVec.
///
/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types.
/// This enables you to use capacity-growing logic to catch the overflows in your length
/// that might occur with zero-sized types.
///
/// However this means that you need to be careful when round-tripping this type
/// with a `Box<[T]>`: `cap()` won't yield the len. However `with_capacity`,
/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity
/// field. This allows zero-sized types to not be special-cased by consumers of
/// this type.
#[allow(missing_debug_implementations)]
pub struct RawVec<'a, T> {
ptr: NonNull<T>,
cap: usize,
a: &'a Bump,
}
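// Sketch (ours, not part of the vendored source): typical construction,
// plus the zero-sized-type capacity rule described above.
//
// let bump = Bump::new();
// let buf: RawVec<u32> = RawVec::with_capacity_in(8, &bump);
// assert!(buf.cap() >= 8);
// let zst: RawVec<()> = RawVec::new_in(&bump);
// assert_eq!(zst.cap(), usize::MAX); // ZSTs always report `usize::MAX`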
impl<'a, T> RawVec<'a, T> {
/// Like `new` but parameterized over the choice of allocator for
/// the returned RawVec.
pub fn new_in(a: &'a Bump) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
RawVec {
ptr: NonNull::dangling(),
cap: 0,
a,
}
}
/// Like `with_capacity` but parameterized over the choice of
/// allocator for the returned RawVec.
#[inline]
pub fn with_capacity_in(cap: usize, a: &'a Bump) -> Self {
RawVec::allocate_in(cap, false, a)
}
/// Like `with_capacity_zeroed` but parameterized over the choice
/// of allocator for the returned RawVec.
#[inline]
pub fn with_capacity_zeroed_in(cap: usize, a: &'a Bump) -> Self {
RawVec::allocate_in(cap, true, a)
}
fn allocate_in(cap: usize, zeroed: bool, mut a: &'a Bump) -> Self {
unsafe {
let elem_size = mem::size_of::<T>();
let alloc_size = cap
.checked_mul(elem_size)
.unwrap_or_else(|| capacity_overflow());
alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
// handles ZSTs and `cap = 0` alike
let ptr = if alloc_size == 0 {
NonNull::<T>::dangling()
} else {
let align = mem::align_of::<T>();
let layout = Layout::from_size_align(alloc_size, align).unwrap();
let result = if zeroed {
a.alloc_zeroed(layout)
} else {
Alloc::alloc(&mut a, layout)
};
match result {
Ok(ptr) => ptr.cast(),
Err(_) => handle_alloc_error(layout),
}
};
RawVec { ptr, cap, a }
}
}
}
impl<'a, T> RawVec<'a, T> {
/// Reconstitutes a RawVec from a pointer, capacity, and allocator.
///
/// # Undefined Behavior
///
    /// The `ptr` must have been allocated via the given allocator `a`, with the given
    /// capacity. The capacity in bytes cannot exceed `isize::MAX` (only a concern on
    /// 32-bit systems). If the `ptr` and capacity come from a RawVec created via `a`,
    /// then this is guaranteed.
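    ///
    /// ```ignore
    /// // Compile-only sketch; `buf` and `bump` are illustrative bindings.
    /// let (ptr, cap) = (buf.ptr(), buf.cap());
    /// core::mem::forget(buf);
    /// let buf2 = unsafe { RawVec::from_raw_parts_in(ptr, cap, &bump) };
    /// ```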
pub unsafe fn from_raw_parts_in(ptr: *mut T, cap: usize, a: &'a Bump) -> Self {
RawVec {
ptr: NonNull::new_unchecked(ptr),
cap,
a,
}
}
}
impl<'a, T> RawVec<'a, T> {
    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `NonNull::dangling()` if `cap = 0` or T is zero-sized. In the former case, you
    /// must be careful.
pub fn ptr(&self) -> *mut T {
self.ptr.as_ptr()
}
/// Gets the capacity of the allocation.
///
/// This will always be `usize::MAX` if `T` is zero-sized.
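    ///
    /// ```ignore
    /// // Sketch: zero-sized element types never allocate, so capacity is maximal.
    /// let v: RawVec<()> = RawVec::new_in(&bump);
    /// assert_eq!(v.cap(), usize::MAX);
    /// ```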
#[inline(always)]
pub fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            // `!0` is `usize::MAX`; ZSTs never allocate, so report maximal capacity.
            !0
} else {
self.cap
}
}
/// Returns a shared reference to the allocator backing this RawVec.
pub fn bump(&self) -> &'a Bump {
self.a
}
fn current_layout(&self) -> Option<Layout> {
if self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
unsafe {
let align = mem::align_of::<T>();
let size = mem::size_of::<T>() * self.cap;
Some(Layout::from_size_align_unchecked(size, align))
}
}
}
    /// Doubles the size of the type's backing allocation. This operation is common
    /// enough to deserve a dedicated method, which can use slightly more efficient
    /// logic than the general `reserve` path.
///
    /// This function is ideal when pushing elements one at a time because
    /// you don't need to incur the cost of the more general computations
    /// reserve needs to do to guard against overflow. You do, however, need to
    /// manually check whether `len == cap`.
///
/// # Panics
///
/// * Panics if T is zero-sized on the assumption that you managed to exhaust
/// all `usize::MAX` slots in your imaginary buffer.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
///
/// # Examples
///
/// ```ignore
/// # #![feature(alloc, raw_vec_internals)]
/// # extern crate alloc;
/// # use std::ptr;
/// # use alloc::raw_vec::RawVec;
/// struct MyVec<T> {
/// buf: RawVec<T>,
/// len: usize,
/// }
///
/// impl<T> MyVec<T> {
/// pub fn push(&mut self, elem: T) {
/// if self.len == self.buf.cap() { self.buf.double(); }
/// // double would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// unsafe {
/// ptr::write(self.buf.ptr().add(self.len), elem);
/// }
/// self.len += 1;
/// }
/// }
/// # fn main() {
/// # let mut vec = MyVec { buf: RawVec::new(), len: 0 };
/// # vec.push(1);
/// # }
/// ```
#[inline(never)]
#[cold]
pub fn double(&mut self) {
unsafe {
let elem_size = mem::size_of::<T>();
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
let (new_cap, uniq) = match self.current_layout() {
Some(cur) => {
                    // Since we guarantee that we never allocate more than
                    // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as
                    // a precondition, so this can't overflow. Additionally the
                    // alignment will never be so large as to be unsatisfiable,
                    // so `Layout::from_size_align` would always succeed.
                    //
                    // tl;dr: we bypass runtime checks due to dynamic assertions
                    // in this module, allowing us to use
                    // `from_size_align_unchecked`.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
let ptr_res = self.a.realloc(self.ptr.cast(), cur, new_size);
match ptr_res {
Ok(ptr) => (new_cap, ptr.cast()),
Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
new_size,
cur.align(),
)),
}
}
None => {
// skip to 4 because tiny Vec's are dumb; but not if that
// would cause overflow
let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
match self.a.alloc_array::<T>(new_cap) {
Ok(ptr) => (new_cap, ptr),
Err(_) => handle_alloc_error(Layout::array::<T>(new_cap).unwrap()),
}
}
};
self.ptr = uniq;
self.cap = new_cap;
}
}
    /// Attempts to double the size of the type's backing allocation in place. This
    /// operation is common enough to deserve a dedicated method, which can use
    /// slightly more efficient logic than the general case.
///
/// Returns true if the reallocation attempt has succeeded, or false otherwise.
///
/// # Panics
///
/// * Panics if T is zero-sized on the assumption that you managed to exhaust
/// all `usize::MAX` slots in your imaginary buffer.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
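    ///
    /// # Examples
    ///
    /// ```ignore
    /// // Sketch: try to grow in place first; fall back to a moving `double`.
    /// if len == buf.cap() && !buf.double_in_place() {
    ///     buf.double();
    /// }
    /// ```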
#[inline(never)]
#[cold]
pub fn double_in_place(&mut self) -> bool {
unsafe {
let elem_size = mem::size_of::<T>();
let old_layout = match self.current_layout() {
Some(layout) => layout,
None => return false, // nothing to double
};
// since we set the capacity to usize::MAX when elem_size is
// 0, getting to here necessarily means the RawVec is overfull.
assert!(elem_size != 0, "capacity overflow");
// Since we guarantee that we never allocate more than isize::MAX
// bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
// this can't overflow.
//
// Similarly like with `double` above we can go straight to
// `Layout::from_size_align_unchecked` as we know this won't
// overflow and the alignment is sufficiently small.
let new_cap = 2 * self.cap;
let new_size = new_cap * elem_size;
alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
match self.a.grow_in_place(self.ptr.cast(), old_layout, new_size) {
Ok(_) => {
                    // `grow_in_place` succeeded; record the capacity computed above
                    // (we can't cheaply derive it by dividing the new size).
self.cap = new_cap;
true
}
Err(_) => false,
}
}
}
/// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
pub fn try_reserve_exact(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
) -> Result<(), CollectionAllocErr> {
self.fallible_reserve_internal(used_cap, needed_extra_cap, Exact)
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already,
/// will reallocate the minimum possible amount of memory necessary.
/// Generally this will be exactly the amount of memory necessary,
/// but in principle the allocator is free to give back more than
/// we asked for.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
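    ///
    /// # Examples
    ///
    /// ```ignore
    /// // Sketch: make room for exactly `n` more elements, with no amortized slack.
    /// buf.reserve_exact(len, n);
    /// debug_assert!(buf.cap() >= len + n);
    /// ```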
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
self.infallible_reserve_internal(used_cap, needed_extra_cap, Exact)
}
    /// Calculates the buffer's new size given that it'll hold `used_cap +
    /// needed_extra_cap` elements. This logic is used in amortized reserve methods.
    /// Returns the new capacity.
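    ///
    /// For example, with `cap = 4`, `used_cap = 4`, and `needed_extra_cap = 1`,
    /// this returns `max(2 * 4, 4 + 1) = 8`: growth is at least a doubling
    /// unless the exact requirement is larger.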
fn amortized_new_size(
&self,
used_cap: usize,
needed_extra_cap: usize,
) -> Result<usize, CollectionAllocErr> {
// Nothing we can really do about these checks :(
let required_cap = used_cap
.checked_add(needed_extra_cap)
.ok_or(CapacityOverflow)?;
// Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
let double_cap = self.cap * 2;
// `double_cap` guarantees exponential growth.
Ok(cmp::max(double_cap, required_cap))
}
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
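    ///
    /// ```ignore
    /// // Sketch: surface allocation failure to the caller instead of aborting.
    /// if let Err(e) = buf.try_reserve(len, extra) {
    ///     return Err(e);
    /// }
    /// ```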
pub fn try_reserve(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
) -> Result<(), CollectionAllocErr> {
self.fallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
}
/// Ensures that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate enough space plus comfortable slack
/// space to get amortized `O(1)` behavior. Will limit this behavior
/// if it would needlessly cause itself to panic.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
///
/// # Aborts
///
/// Aborts on OOM
///
/// # Examples
///
/// ```ignore
/// # #![feature(alloc, raw_vec_internals)]
/// # extern crate alloc;
/// # use std::ptr;
/// # use alloc::raw_vec::RawVec;
/// struct MyVec<T> {
/// buf: RawVec<T>,
/// len: usize,
/// }
///
/// impl<T: Clone> MyVec<T> {
/// pub fn push_all(&mut self, elems: &[T]) {
/// self.buf.reserve(self.len, elems.len());
/// // reserve would have aborted or panicked if the len exceeded
/// // `isize::MAX` so this is safe to do unchecked now.
/// for x in elems {
/// unsafe {
/// ptr::write(self.buf.ptr().add(self.len), x.clone());
/// }
/// self.len += 1;
/// }
/// }
/// }
/// # fn main() {
/// # let mut vector = MyVec { buf: RawVec::new(), len: 0 };
/// # vector.push_all(&[1, 3, 5, 7, 9]);
/// # }
/// ```
#[inline(always)]
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
self.infallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
}
/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
    /// space to get amortized `O(1)` behavior. Will limit this behavior
/// if it would needlessly cause itself to panic.
///
/// If `used_cap` exceeds `self.cap()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe
/// code *you* write that relies on the behavior of this function may break.
///
/// Returns true if the reallocation attempt has succeeded, or false otherwise.
///
/// # Panics
///
/// * Panics if the requested capacity exceeds `usize::MAX` bytes.
/// * Panics on 32-bit platforms if the requested capacity exceeds
/// `isize::MAX` bytes.
pub fn reserve_in_place(&mut self, used_cap: usize, needed_extra_cap: usize) -> bool {
unsafe {
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
            // If the current `cap` is 0, there is no allocation to grow in place.
            let old_layout = match self.current_layout() {
                Some(layout) => layout,
                None => return false,
            };
            // Don't actually need any more capacity; `wrapping_sub` in case the
            // caller passes a bad `used_cap`.
            if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
                return false;
            }
let new_cap = self
.amortized_new_size(used_cap, needed_extra_cap)
.unwrap_or_else(|_| capacity_overflow());
// Here, `cap < used_cap + needed_extra_cap <= new_cap`
// (regardless of whether `self.cap - used_cap` wrapped).
// Therefore we can safely call grow_in_place.
let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
// FIXME: may crash and burn on over-reserve
alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
match self
.a
.grow_in_place(self.ptr.cast(), old_layout, new_layout.size())
{
Ok(_) => {
self.cap = new_cap;
true
}
Err(_) => false,
}
}
}
/// Shrinks the allocation down to the specified amount. If the given amount
/// is 0, actually completely deallocates.
///
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
///
/// # Aborts
///
/// Aborts on OOM.
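    ///
    /// # Examples
    ///
    /// ```ignore
    /// // Sketch: release the slack left over after amortized growth.
    /// let mut buf: RawVec<u32> = RawVec::with_capacity_in(10, &bump);
    /// buf.shrink_to_fit(4);
    /// assert_eq!(buf.cap(), 4);
    /// ```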
pub fn shrink_to_fit(&mut self, amount: usize) {
let elem_size = mem::size_of::<T>();
// Set the `cap` because they might be about to promote to a `Box<[T]>`
if elem_size == 0 {
self.cap = amount;
return;
}
// This check is my waterloo; it's the only thing Vec wouldn't have to do.
assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
if amount == 0 {
            // We want to create a new zero-length vector within the
            // same allocator. We use `ptr::write` to overwrite `self`
            // without dropping the old value, which would otherwise
            // erroneously free the buffer a second time.
unsafe {
let a = self.a;
self.dealloc_buffer();
ptr::write(self, RawVec::new_in(a));
}
} else if self.cap != amount {
unsafe {
// We know here that our `amount` is greater than zero. This
// implies, via the assert above, that capacity is also greater
// than zero, which means that we've got a current layout that
// "fits"
//
// We also know that `self.cap` is greater than `amount`, and
// consequently we don't need runtime checks for creating either
// layout
let old_size = elem_size * self.cap;
let new_size = elem_size * amount;
let align = mem::align_of::<T>();
let old_layout = Layout::from_size_align_unchecked(old_size, align);
match self.a.realloc(self.ptr.cast(), old_layout, new_size) {
Ok(p) => self.ptr = p.cast(),
Err(_) => {
handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
}
}
}
self.cap = amount;
}
}
}
#[cfg(feature = "boxed")]
impl<'a, T> RawVec<'a, T> {
/// Converts the entire buffer into `Box<[T]>`.
///
/// Note that this will correctly reconstitute any `cap` changes
/// that may have been performed. (See description of type for details.)
///
/// # Undefined Behavior
///
/// All elements of `RawVec<T>` must be initialized. Notice that
/// the rules around uninitialized boxed values are not finalized yet,
/// but until they are, it is advisable to avoid them.
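    ///
    /// ```ignore
    /// // Sketch: after initializing every slot up to `cap`, promote the buffer.
    /// let boxed = unsafe { buf.into_box() };
    /// assert_eq!(boxed.len(), cap);
    /// ```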
pub unsafe fn into_box(self) -> crate::boxed::Box<'a, [T]> {
use crate::boxed::Box;
// NOTE: not calling `cap()` here; actually using the real `cap` field!
let slice = core::slice::from_raw_parts_mut(self.ptr(), self.cap);
let output: Box<'a, [T]> = Box::from_raw(slice);
mem::forget(self);
output
}
}
enum Fallibility {
Fallible,
Infallible,
}
use self::Fallibility::*;
enum ReserveStrategy {
Exact,
Amortized,
}
use self::ReserveStrategy::*;
impl<'a, T> RawVec<'a, T> {
#[inline(always)]
fn fallible_reserve_internal(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
strategy: ReserveStrategy,
) -> Result<(), CollectionAllocErr> {
// This portion of the method should always be inlined.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return Ok(());
}
// This portion of the method should never be inlined, and will only be called when
// the check above has confirmed that it is necessary.
self.reserve_internal_or_error(used_cap, needed_extra_cap, Fallible, strategy)
}
#[inline(always)]
fn infallible_reserve_internal(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
strategy: ReserveStrategy,
) {
// This portion of the method should always be inlined.
if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
return;
}
// This portion of the method should never be inlined, and will only be called when
// the check above has confirmed that it is necessary.
self.reserve_internal_or_panic(used_cap, needed_extra_cap, strategy)
}
#[inline(never)]
fn reserve_internal_or_panic(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
strategy: ReserveStrategy,
) {
// Delegates the call to `reserve_internal_or_error` and panics in the event of an error.
// This allows the method to have a return type of `()`, simplifying the assembly at the
// call site.
match self.reserve_internal(used_cap, needed_extra_cap, Infallible, strategy) {
Err(CapacityOverflow) => capacity_overflow(),
Err(AllocErr) => unreachable!(),
Ok(()) => { /* yay */ }
}
}
#[inline(never)]
fn reserve_internal_or_error(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
fallibility: Fallibility,
strategy: ReserveStrategy,
) -> Result<(), CollectionAllocErr> {
// Delegates the call to `reserve_internal`, which can be inlined.
self.reserve_internal(used_cap, needed_extra_cap, fallibility, strategy)
}
/// Helper method to reserve additional space, reallocating the backing memory.
/// The caller is responsible for confirming that there is not already enough space available.
fn reserve_internal(
&mut self,
used_cap: usize,
needed_extra_cap: usize,
fallibility: Fallibility,
strategy: ReserveStrategy,
) -> Result<(), CollectionAllocErr> {
unsafe {
use crate::AllocErr;
// NOTE: we don't early branch on ZSTs here because we want this
// to actually catch "asking for more than usize::MAX" in that case.
// If we make it past the first branch then we are guaranteed to
// panic.
// Nothing we can really do about these checks :(
let new_cap = match strategy {
Exact => used_cap
.checked_add(needed_extra_cap)
.ok_or(CapacityOverflow)?,
Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?,
};
let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
alloc_guard(new_layout.size())?;
let res = match self.current_layout() {
Some(layout) => {
debug_assert!(new_layout.align() == layout.align());
self.a.realloc(self.ptr.cast(), layout, new_layout.size())
}
None => Alloc::alloc(&mut self.a, new_layout),
};
if let (Err(AllocErr), Infallible) = (&res, fallibility) {
handle_alloc_error(new_layout);
}
self.ptr = res?.cast();
self.cap = new_cap;
Ok(())
}
}
}
impl<'a, T> RawVec<'a, T> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
pub unsafe fn dealloc_buffer(&mut self) {
let elem_size = mem::size_of::<T>();
if elem_size != 0 {
if let Some(layout) = self.current_layout() {
self.a.dealloc(self.ptr.cast(), layout);
}
}
}
}
impl<'a, T> Drop for RawVec<'a, T> {
/// Frees the memory owned by the RawVec *without* trying to Drop its contents.
fn drop(&mut self) {
unsafe {
self.dealloc_buffer();
}
}
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects
// * We don't overflow `usize::MAX` and actually allocate too little
//
// On 64-bit we just need to check for overflow since trying to allocate
// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
// an extra guard for this in case we're running on a platform which can use
// all 4GB in user-space. e.g. PAE or x32
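//
// For instance, with a 4-byte `usize`, `alloc_guard(0x8000_0000)` returns
// `Err(CapacityOverflow)` because the request exceeds `isize::MAX`
// (0x7FFF_FFFF), while on a 64-bit target the same call returns `Ok(())`.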
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> {
if mem::size_of::<usize>() < 8 && alloc_size > ::core::isize::MAX as usize {
Err(CapacityOverflow)
} else {
Ok(())
}
}
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
fn capacity_overflow() -> ! {
panic!("capacity overflow")
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn reserve_does_not_overallocate() {
let bump = Bump::new();
{
let mut v: RawVec<u32> = RawVec::new_in(&bump);
// First `reserve` allocates like `reserve_exact`
v.reserve(0, 9);
assert_eq!(9, v.cap());
}
{
let mut v: RawVec<u32> = RawVec::new_in(&bump);
v.reserve(0, 7);
assert_eq!(7, v.cap());
            // 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
v.reserve(7, 90);
assert_eq!(97, v.cap());
}
{
let mut v: RawVec<u32> = RawVec::new_in(&bump);
v.reserve(0, 12);
assert_eq!(12, v.cap());
v.reserve(12, 3);
            // 3 is less than half of 12, so `reserve` must grow
            // exponentially. At the time of writing this test the growth
            // factor is 2, so the new capacity is 24; however, a growth
            // factor of 1.5 is OK too. Hence the `>= 18` in the assert.
assert!(v.cap() >= 12 + 12 / 2);
}
}
}

209
vendor/bumpalo/src/collections/str/lossy.rs vendored Normal file

@@ -0,0 +1,209 @@
// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::collections::str as core_str;
use core::char;
use core::fmt;
use core::fmt::Write;
use core::str;
/// Lossy UTF-8 string.
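///
/// Invalid byte sequences are surfaced via `chunks()` and rendered as
/// `U+FFFD REPLACEMENT CHARACTER` by the `Display` impl. A short sketch:
///
/// ```ignore
/// let lossy = Utf8Lossy::from_bytes(b"abc\xFFdef");
/// assert_eq!(format!("{}", lossy), "abc\u{FFFD}def");
/// ```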
pub struct Utf8Lossy<'a> {
bytes: &'a [u8],
}
impl<'a> Utf8Lossy<'a> {
pub fn from_bytes(bytes: &'a [u8]) -> Utf8Lossy<'a> {
Utf8Lossy { bytes }
}
pub fn chunks(&self) -> Utf8LossyChunksIter<'a> {
Utf8LossyChunksIter {
source: &self.bytes,
}
}
}
/// Iterator over the chunks of a lossy UTF-8 string.
#[allow(missing_debug_implementations)]
pub struct Utf8LossyChunksIter<'a> {
source: &'a [u8],
}
#[derive(PartialEq, Eq, Debug)]
pub struct Utf8LossyChunk<'a> {
    /// Sequence of valid chars.
    /// Can be empty between broken UTF-8 chars.
    pub valid: &'a str,
    /// Bytes of a single broken char; empty if none.
    /// An empty `broken` marks the final chunk.
    pub broken: &'a [u8],
}
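// For example (sketch), `Utf8Lossy::from_bytes(b"ab\xF0\x9F").chunks()` yields a
// single `Utf8LossyChunk { valid: "ab", broken: &[0xF0, 0x9F] }`: the truncated
// four-byte sequence at the end is reported as the broken tail.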
impl<'a> Iterator for Utf8LossyChunksIter<'a> {
type Item = Utf8LossyChunk<'a>;
fn next(&mut self) -> Option<Utf8LossyChunk<'a>> {
if self.source.is_empty() {
return None;
}
const TAG_CONT_U8: u8 = 128;
fn unsafe_get(xs: &[u8], i: usize) -> u8 {
unsafe { *xs.get_unchecked(i) }
}
fn safe_get(xs: &[u8], i: usize) -> u8 {
if i >= xs.len() {
0
} else {
unsafe_get(xs, i)
}
}
let mut i = 0;
while i < self.source.len() {
let i_ = i;
let byte = unsafe_get(self.source, i);
i += 1;
            // ASCII bytes (< 128) need no validation; everything else does.
            if byte >= 128 {
let w = core_str::utf8_char_width(byte);
macro_rules! error {
() => {{
unsafe {
let r = Utf8LossyChunk {
valid: str::from_utf8_unchecked(&self.source[0..i_]),
broken: &self.source[i_..i],
};
self.source = &self.source[i..];
return Some(r);
}
}};
}
match w {
2 => {
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
error!();
}
i += 1;
}
3 => {
match (byte, safe_get(self.source, i)) {
(0xE0, 0xA0..=0xBF) => (),
(0xE1..=0xEC, 0x80..=0xBF) => (),
(0xED, 0x80..=0x9F) => (),
(0xEE..=0xEF, 0x80..=0xBF) => (),
_ => {
error!();
}
}
i += 1;
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
error!();
}
i += 1;
}
4 => {
match (byte, safe_get(self.source, i)) {
(0xF0, 0x90..=0xBF) => (),
(0xF1..=0xF3, 0x80..=0xBF) => (),
(0xF4, 0x80..=0x8F) => (),
_ => {
error!();
}
}
i += 1;
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
error!();
}
i += 1;
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
error!();
}
i += 1;
}
_ => {
error!();
}
}
}
}
let r = Utf8LossyChunk {
valid: unsafe { str::from_utf8_unchecked(self.source) },
broken: &[],
};
self.source = &[];
Some(r)
}
}
impl<'a> fmt::Display for Utf8Lossy<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// If we're the empty string then our iterator won't actually yield
// anything, so perform the formatting manually
if self.bytes.is_empty() {
return "".fmt(f);
}
for Utf8LossyChunk { valid, broken } in self.chunks() {
// If we successfully decoded the whole chunk as a valid string then
// we can return a direct formatting of the string which will also
// respect various formatting flags if possible.
if valid.len() == self.bytes.len() {
assert!(broken.is_empty());
return valid.fmt(f);
}
f.write_str(valid)?;
if !broken.is_empty() {
f.write_char(char::REPLACEMENT_CHARACTER)?;
}
}
Ok(())
}
}
impl<'a> fmt::Debug for Utf8Lossy<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_char('"')?;
for Utf8LossyChunk { valid, broken } in self.chunks() {
// Valid part.
// Here we partially parse UTF-8 again which is suboptimal.
{
let mut from = 0;
for (i, c) in valid.char_indices() {
let esc = c.escape_debug();
// If char needs escaping, flush backlog so far and write, else skip
if esc.len() != 1 {
f.write_str(&valid[from..i])?;
for c in esc {
f.write_char(c)?;
}
from = i + c.len_utf8();
}
}
f.write_str(&valid[from..])?;
}
// Broken parts of string as hex escape.
for &b in broken {
write!(f, "\\x{:02x}", b)?;
}
}
f.write_char('"')
}
}

43
vendor/bumpalo/src/collections/str/mod.rs vendored Normal file

@@ -0,0 +1,43 @@
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! String manipulation
//!
//! For more details, see std::str
#[allow(missing_docs)]
pub mod lossy;
// https://tools.ietf.org/html/rfc3629
#[rustfmt::skip]
static UTF8_CHAR_WIDTH: [u8; 256] = [
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
];
/// Given a first byte, determines how many bytes are in this UTF-8 character.
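/// Returns 0 for bytes that can never begin a character (continuation bytes
/// 0x80..=0xBF and the invalid leads 0xC0, 0xC1, 0xF5..=0xFF). For example,
/// `utf8_char_width(b'a') == 1`, `utf8_char_width(0xC3) == 2`,
/// `utf8_char_width(0xE2) == 3`, `utf8_char_width(0xF0) == 4`, and
/// `utf8_char_width(0x80) == 0`.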
#[inline]
pub fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}

2168
vendor/bumpalo/src/collections/string.rs vendored Normal file

File diff suppressed because it is too large

2969
vendor/bumpalo/src/collections/vec.rs vendored Normal file

File diff suppressed because it is too large

2639
vendor/bumpalo/src/lib.rs vendored Normal file

File diff suppressed because it is too large