Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

422
vendor/memmap2/src/advice.rs vendored Normal file

@@ -0,0 +1,422 @@
/// Values supported by [`Mmap::advise`][crate::Mmap::advise] and [`MmapMut::advise`][crate::MmapMut::advise] functions.
///
/// See the [madvise()](https://man7.org/linux/man-pages/man2/madvise.2.html) man page.
#[repr(i32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum Advice {
/// **MADV_NORMAL**
///
/// No special treatment. This is the default.
Normal = libc::MADV_NORMAL,
/// **MADV_RANDOM**
///
/// Expect page references in random order. (Hence, read
/// ahead may be less useful than normally.)
Random = libc::MADV_RANDOM,
/// **MADV_SEQUENTIAL**
///
/// Expect page references in sequential order. (Hence, pages
/// in the given range can be aggressively read ahead, and may
/// be freed soon after they are accessed.)
Sequential = libc::MADV_SEQUENTIAL,
/// **MADV_WILLNEED**
///
/// Expect access in the near future. (Hence, it might be a
/// good idea to read some pages ahead.)
WillNeed = libc::MADV_WILLNEED,
/// **MADV_DONTFORK** - Linux only (since Linux 2.6.16)
///
/// Do not make the pages in this range available to the child
/// after a fork(2). This is useful to prevent copy-on-write
/// semantics from changing the physical location of a page if
/// the parent writes to it after a fork(2). (Such page
/// relocations cause problems for hardware that DMAs into the
/// page.)
#[cfg(target_os = "linux")]
DontFork = libc::MADV_DONTFORK,
/// **MADV_DOFORK** - Linux only (since Linux 2.6.16)
///
/// Undo the effect of MADV_DONTFORK, restoring the default
/// behavior, whereby a mapping is inherited across fork(2).
#[cfg(target_os = "linux")]
DoFork = libc::MADV_DOFORK,
/// **MADV_MERGEABLE** - Linux only (since Linux 2.6.32)
///
/// Enable Kernel Samepage Merging (KSM) for the pages in the
/// range specified by addr and length. The kernel regularly
/// scans those areas of user memory that have been marked as
/// mergeable, looking for pages with identical content.
/// These are replaced by a single write-protected page (which
/// is automatically copied if a process later wants to update
/// the content of the page). KSM merges only private
/// anonymous pages (see mmap(2)).
///
/// The KSM feature is intended for applications that generate
/// many instances of the same data (e.g., virtualization
/// systems such as KVM). It can consume a lot of processing
/// power; use with care. See the Linux kernel source file
/// Documentation/admin-guide/mm/ksm.rst for more details.
///
/// The MADV_MERGEABLE and MADV_UNMERGEABLE operations are
/// available only if the kernel was configured with
/// CONFIG_KSM.
#[cfg(target_os = "linux")]
Mergeable = libc::MADV_MERGEABLE,
/// **MADV_UNMERGEABLE** - Linux only (since Linux 2.6.32)
///
/// Undo the effect of an earlier MADV_MERGEABLE operation on
/// the specified address range; KSM unmerges whatever pages
/// it had merged in the address range specified by addr and
/// length.
#[cfg(target_os = "linux")]
Unmergeable = libc::MADV_UNMERGEABLE,
/// **MADV_HUGEPAGE** - Linux only (since Linux 2.6.38)
///
/// Enable Transparent Huge Pages (THP) for pages in the range
/// specified by addr and length. Currently, Transparent Huge
/// Pages work only with private anonymous pages (see
/// mmap(2)). The kernel will regularly scan the areas marked
/// as huge page candidates to replace them with huge pages.
/// The kernel will also allocate huge pages directly when the
/// region is naturally aligned to the huge page size (see
/// posix_memalign(2)).
///
/// This feature is primarily aimed at applications that use
/// large mappings of data and access large regions of that
/// memory at a time (e.g., virtualization systems such as
/// QEMU). It can very easily waste memory (e.g., a 2 MB
/// mapping that only ever accesses 1 byte will result in 2 MB
/// of wired memory instead of one 4 KB page). See the Linux
/// kernel source file
/// Documentation/admin-guide/mm/transhuge.rst for more
/// details.
///
/// Most common kernel configurations provide MADV_HUGEPAGE-
/// style behavior by default, and thus MADV_HUGEPAGE is
/// normally not necessary. It is mostly intended for
/// embedded systems, where MADV_HUGEPAGE-style behavior may
/// not be enabled by default in the kernel. On such systems,
/// this flag can be used in order to selectively enable THP.
/// Whenever MADV_HUGEPAGE is used, it should always be in
/// regions of memory with an access pattern that the
/// developer knows in advance won't risk increasing the
/// memory footprint of the application when transparent
/// hugepages are enabled.
///
/// The MADV_HUGEPAGE and MADV_NOHUGEPAGE operations are
/// available only if the kernel was configured with
/// CONFIG_TRANSPARENT_HUGEPAGE.
#[cfg(target_os = "linux")]
HugePage = libc::MADV_HUGEPAGE,
/// **MADV_NOHUGEPAGE** - Linux only (since Linux 2.6.38)
///
/// Ensures that memory in the address range specified by addr
/// and length will not be backed by transparent hugepages.
#[cfg(target_os = "linux")]
NoHugePage = libc::MADV_NOHUGEPAGE,
/// **MADV_DONTDUMP** - Linux only (since Linux 3.4)
///
/// Exclude from a core dump those pages in the range
/// specified by addr and length. This is useful in
/// applications that have large areas of memory that are
/// known not to be useful in a core dump. The effect of
/// **MADV_DONTDUMP** takes precedence over the bit mask that is
/// set via the `/proc/[pid]/coredump_filter` file (see
/// core(5)).
#[cfg(target_os = "linux")]
DontDump = libc::MADV_DONTDUMP,
/// **MADV_DODUMP** - Linux only (since Linux 3.4)
///
/// Undo the effect of an earlier MADV_DONTDUMP.
#[cfg(target_os = "linux")]
DoDump = libc::MADV_DODUMP,
/// **MADV_HWPOISON** - Linux only (since Linux 2.6.32)
///
/// Poison the pages in the range specified by addr and length
/// and handle subsequent references to those pages like a
/// hardware memory corruption. This operation is available
/// only for privileged (CAP_SYS_ADMIN) processes. This
/// operation may result in the calling process receiving a
/// SIGBUS and the page being unmapped.
///
/// This feature is intended for testing of memory error-
/// handling code; it is available only if the kernel was
/// configured with CONFIG_MEMORY_FAILURE.
#[cfg(target_os = "linux")]
HwPoison = libc::MADV_HWPOISON,
/// **MADV_POPULATE_READ** - Linux only (since Linux 5.14)
///
/// Populate (prefault) page tables readable, faulting in all
/// pages in the range just as if manually reading from each
/// page; however, avoid the actual memory access that would have
/// been performed after handling the fault.
///
/// In contrast to MAP_POPULATE, MADV_POPULATE_READ does not hide
/// errors, can be applied to (parts of) existing mappings and
/// will always populate (prefault) page tables readable. One
/// example use case is prefaulting a file mapping, reading all
/// file content from disk; however, pages won't be dirtied and
/// consequently won't have to be written back to disk when
/// evicting the pages from memory.
///
/// Depending on the underlying mapping, map the shared zeropage,
/// preallocate memory or read the underlying file; files with
/// holes might or might not preallocate blocks. If populating
/// fails, a SIGBUS signal is not generated; instead, an error is
/// returned.
///
/// If MADV_POPULATE_READ succeeds, all page tables have been
/// populated (prefaulted) readable once. If MADV_POPULATE_READ
/// fails, some page tables might have been populated.
///
/// MADV_POPULATE_READ cannot be applied to mappings without read
/// permissions and special mappings, for example, mappings
/// marked with kernel-internal flags such as VM_PFNMAP or VM_IO,
/// or secret memory regions created using memfd_secret(2).
///
/// Note that with MADV_POPULATE_READ, the process can be killed
/// at any moment when the system runs out of memory.
#[cfg(target_os = "linux")]
PopulateRead = libc::MADV_POPULATE_READ,
/// **MADV_POPULATE_WRITE** - Linux only (since Linux 5.14)
///
/// Populate (prefault) page tables writable, faulting in all
/// pages in the range just as if manually writing to each
/// page; however, avoid the actual memory access that would have
/// been performed after handling the fault.
///
/// In contrast to MAP_POPULATE, MADV_POPULATE_WRITE does not
/// hide errors, can be applied to (parts of) existing mappings
/// and will always populate (prefault) page tables writable.
/// One example use case is preallocating memory, breaking any
/// CoW (Copy on Write).
///
/// Depending on the underlying mapping, preallocate memory or
/// read the underlying file; files with holes will preallocate
/// blocks. If populating fails, a SIGBUS signal is not
/// generated; instead, an error is returned.
///
/// If MADV_POPULATE_WRITE succeeds, all page tables have been
/// populated (prefaulted) writable once. If MADV_POPULATE_WRITE
/// fails, some page tables might have been populated.
///
/// MADV_POPULATE_WRITE cannot be applied to mappings without
/// write permissions and special mappings, for example, mappings
/// marked with kernel-internal flags such as VM_PFNMAP or VM_IO,
/// or secret memory regions created using memfd_secret(2).
///
/// Note that with MADV_POPULATE_WRITE, the process can be killed
/// at any moment when the system runs out of memory.
#[cfg(target_os = "linux")]
PopulateWrite = libc::MADV_POPULATE_WRITE,
/// **MADV_ZERO_WIRED_PAGES** - Darwin only
///
/// Indicates that the application would like the wired pages in this address range to be
/// zeroed out if the address range is deallocated without first unwiring the pages (i.e.
/// a munmap(2) without a preceding munlock(2) or the application quits). This is used
/// with `madvise()` system call.
#[cfg(any(target_os = "macos", target_os = "ios"))]
ZeroWiredPages = libc::MADV_ZERO_WIRED_PAGES,
}
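// Illustrative sketch (not part of the vendored source): how a user of this
// crate might pass one of these values to `Mmap::advise` before a linear
// scan, assuming the crate's public `Mmap::map`/`Mmap::advise` API:
//
//     use memmap2::{Advice, Mmap};
//     use std::fs::File;
//
//     fn checksum(path: &str) -> std::io::Result<u64> {
//         let file = File::open(path)?;
//         // SAFETY: the file must not be truncated while the map is alive.
//         let map = unsafe { Mmap::map(&file)? };
//         // Hint that we read front to back, enabling aggressive
//         // read-ahead (MADV_SEQUENTIAL).
//         map.advise(Advice::Sequential)?;
//         Ok(map.iter().map(|&b| u64::from(b)).sum())
//     }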
/// Values supported by [`Mmap::unchecked_advise`][crate::Mmap::unchecked_advise] and [`MmapMut::unchecked_advise`][crate::MmapMut::unchecked_advise] functions.
///
/// These flags can be passed to the [madvise(2)][man_page] system call
/// and have effects on the mapped pages that are conceptually writes,
/// i.e. they change the observable contents of these pages, which
/// implies undefined behaviour if the mapping is still borrowed.
///
/// Hence, these potentially unsafe flags must be used with the unsafe
/// methods, and the programmer has to guarantee that the code
/// does not keep any borrows of the mapping active while the mapped pages
/// are updated by the kernel's memory management subsystem.
///
/// [man_page]: https://man7.org/linux/man-pages/man2/madvise.2.html
#[repr(i32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum UncheckedAdvice {
/// **MADV_DONTNEED**
///
/// Do not expect access in the near future. (For the time
/// being, the application is finished with the given range,
/// so the kernel can free resources associated with it.)
///
/// After a successful MADV_DONTNEED operation, the semantics
/// of memory access in the specified region are changed:
/// subsequent accesses of pages in the range will succeed,
/// but will result in either repopulating the memory contents
/// from the up-to-date contents of the underlying mapped file
/// (for shared file mappings, shared anonymous mappings, and
/// shmem-based techniques such as System V shared memory
/// segments) or zero-fill-on-demand pages for anonymous
/// private mappings.
///
/// Note that, when applied to shared mappings, MADV_DONTNEED
/// might not lead to immediate freeing of the pages in the
/// range. The kernel is free to delay freeing the pages
/// until an appropriate moment. The resident set size (RSS)
/// of the calling process will be immediately reduced
/// however.
///
/// **MADV_DONTNEED** cannot be applied to locked pages, Huge TLB
/// pages, or VM_PFNMAP pages. (Pages marked with the kernel-
/// internal VM_PFNMAP flag are special memory areas that are
/// not managed by the virtual memory subsystem. Such pages
/// are typically created by device drivers that map the pages
/// into user space.)
///
/// # Safety
///
/// Passing this advice to `unchecked_advise` conceptually writes to
/// the mapped pages, i.e. borrowing the mapping while the pages
/// are freed results in undefined behaviour.
DontNeed = libc::MADV_DONTNEED,
//
// The rest are Linux-specific
//
/// **MADV_FREE** - Linux (since Linux 4.5) and Darwin
///
/// The application no longer requires the pages in the range
/// specified by addr and len. The kernel can thus free these
/// pages, but the freeing could be delayed until memory
/// pressure occurs. For each of the pages that has been
/// marked to be freed but has not yet been freed, the free
/// operation will be canceled if the caller writes into the
/// page. After a successful MADV_FREE operation, any stale
/// data (i.e., dirty, unwritten pages) will be lost when the
/// kernel frees the pages. However, subsequent writes to
/// pages in the range will succeed and the kernel cannot
/// free those dirtied pages, so the caller can always
/// see just-written data. If there is no subsequent write,
/// the kernel can free the pages at any time. Once pages in
/// the range have been freed, the caller will see zero-fill-
/// on-demand pages upon subsequent page references.
///
/// The MADV_FREE operation can be applied only to private
/// anonymous pages (see mmap(2)). In Linux before version
/// 4.12, when freeing pages on a swapless system, the pages
/// in the given range are freed instantly, regardless of
/// memory pressure.
///
/// # Safety
///
/// Passing this advice to `unchecked_advise` conceptually writes to
/// the mapped pages, i.e. borrowing the mapping while the pages
/// are still being freed results in undefined behaviour.
#[cfg(any(target_os = "linux", target_os = "macos", target_os = "ios"))]
Free = libc::MADV_FREE,
/// **MADV_REMOVE** - Linux only (since Linux 2.6.16)
///
/// Free up a given range of pages and its associated backing
/// store. This is equivalent to punching a hole in the
/// corresponding byte range of the backing store (see
/// fallocate(2)). Subsequent accesses in the specified
/// address range will see bytes containing zero.
///
/// The specified address range must be mapped shared and
/// writable. This flag cannot be applied to locked pages,
/// Huge TLB pages, or VM_PFNMAP pages.
///
/// In the initial implementation, only tmpfs(5) supported
/// **MADV_REMOVE**; but since Linux 3.5, any filesystem which
/// supports the fallocate(2) FALLOC_FL_PUNCH_HOLE mode also
/// supports MADV_REMOVE. Hugetlbfs fails with the error
/// EINVAL and other filesystems fail with the error
/// EOPNOTSUPP.
///
/// # Safety
///
/// Passing this advice to `unchecked_advise` conceptually writes to
/// the mapped pages, i.e. borrowing the mapping while the pages
/// are freed results in undefined behaviour.
#[cfg(target_os = "linux")]
Remove = libc::MADV_REMOVE,
/// **MADV_FREE_REUSABLE** - Darwin only
///
/// Behaves like **MADV_FREE**, but the freed pages are accounted for in the RSS of the process.
///
/// # Safety
///
/// Passing this advice to `unchecked_advise` conceptually writes to
/// the mapped pages, i.e. borrowing the mapping while the pages
/// are still being freed results in undefined behaviour.
#[cfg(any(target_os = "macos", target_os = "ios"))]
FreeReusable = libc::MADV_FREE_REUSABLE,
/// **MADV_FREE_REUSE** - Darwin only
///
/// Marks a memory region previously freed by **MADV_FREE_REUSABLE** as non-reusable, accounts
/// for the pages in the RSS of the process. Pages that have been freed will be replaced by
/// zero-filled pages on demand, other pages will be left as is.
///
/// # Safety
///
/// Passing this advice to `unchecked_advise` conceptually writes to
/// the mapped pages, i.e. borrowing the mapping while the pages
/// are still being freed results in undefined behaviour.
#[cfg(any(target_os = "macos", target_os = "ios"))]
FreeReuse = libc::MADV_FREE_REUSE,
}
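// Illustrative sketch (not part of the vendored source): `UncheckedAdvice`
// values go through the unsafe `unchecked_advise` method, and the caller
// must keep no borrow of the mapping alive across the call. A minimal
// sketch, assuming the crate's public `MmapMut::unchecked_advise` API:
//
//     use memmap2::{MmapMut, UncheckedAdvice};
//
//     fn release(map: MmapMut) -> std::io::Result<MmapMut> {
//         // SAFETY: no slice borrowed from `map` is alive here, so the
//         // kernel dropping the page contents cannot invalidate a borrow.
//         unsafe { map.unchecked_advise(UncheckedAdvice::DontNeed)? };
//         Ok(map)
//     }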
// Future expansion:
// MADV_SOFT_OFFLINE (since Linux 2.6.33)
// MADV_WIPEONFORK (since Linux 4.14)
// MADV_KEEPONFORK (since Linux 4.14)
// MADV_COLD (since Linux 5.4)
// MADV_PAGEOUT (since Linux 5.4)
#[cfg(target_os = "linux")]
impl Advice {
/// Performs a runtime check if this advice is supported by the kernel.
/// Only supported on Linux. See the [`madvise(2)`] man page.
///
/// [`madvise(2)`]: https://man7.org/linux/man-pages/man2/madvise.2.html#VERSIONS
pub fn is_supported(self) -> bool {
(unsafe { libc::madvise(std::ptr::null_mut(), 0, self as libc::c_int) }) == 0
}
}
#[cfg(target_os = "linux")]
impl UncheckedAdvice {
/// Performs a runtime check if this advice is supported by the kernel.
/// Only supported on Linux. See the [`madvise(2)`] man page.
///
/// [`madvise(2)`]: https://man7.org/linux/man-pages/man2/madvise.2.html#VERSIONS
pub fn is_supported(self) -> bool {
(unsafe { libc::madvise(std::ptr::null_mut(), 0, self as libc::c_int) }) == 0
}
}
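// Illustrative sketch (not part of the vendored source): because these
// probes issue a real zero-length `madvise` call, they can gate advice
// that only newer kernels accept. For example (Linux only):
//
//     #[cfg(target_os = "linux")]
//     fn prefault(map: &memmap2::Mmap) -> std::io::Result<()> {
//         use memmap2::Advice;
//         // MADV_POPULATE_READ needs Linux 5.14; fall back to WillNeed.
//         let advice = if Advice::PopulateRead.is_supported() {
//             Advice::PopulateRead
//         } else {
//             Advice::WillNeed
//         };
//         map.advise(advice)
//     }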
#[cfg(test)]
mod tests {
#[cfg(target_os = "linux")]
#[test]
fn test_is_supported() {
use super::*;
assert!(Advice::Normal.is_supported());
assert!(Advice::Random.is_supported());
assert!(Advice::Sequential.is_supported());
assert!(Advice::WillNeed.is_supported());
assert!(UncheckedAdvice::DontNeed.is_supported());
}
}

2307
vendor/memmap2/src/lib.rs vendored Normal file

File diff suppressed because it is too large.

87
vendor/memmap2/src/stub.rs vendored Normal file

@@ -0,0 +1,87 @@
use std::fs::File;
use std::io;
// A stable alternative to https://doc.rust-lang.org/stable/std/primitive.never.html
enum Never {}
pub struct MmapInner {
never: Never,
}
impl MmapInner {
fn new() -> io::Result<MmapInner> {
Err(io::Error::new(
io::ErrorKind::Other,
"platform not supported",
))
}
pub fn map(_: usize, _: &File, _: u64, _: bool, _: bool) -> io::Result<MmapInner> {
MmapInner::new()
}
pub fn map_exec(_: usize, _: &File, _: u64, _: bool, _: bool) -> io::Result<MmapInner> {
MmapInner::new()
}
pub fn map_mut(_: usize, _: &File, _: u64, _: bool, _: bool) -> io::Result<MmapInner> {
MmapInner::new()
}
pub fn map_copy(_: usize, _: &File, _: u64, _: bool, _: bool) -> io::Result<MmapInner> {
MmapInner::new()
}
pub fn map_copy_read_only(
_: usize,
_: &File,
_: u64,
_: bool,
_: bool,
) -> io::Result<MmapInner> {
MmapInner::new()
}
pub fn map_anon(_: usize, _: bool, _: bool, _: Option<u8>, _: bool) -> io::Result<MmapInner> {
MmapInner::new()
}
pub fn flush(&self, _: usize, _: usize) -> io::Result<()> {
match self.never {}
}
pub fn flush_async(&self, _: usize, _: usize) -> io::Result<()> {
match self.never {}
}
pub fn make_read_only(&mut self) -> io::Result<()> {
match self.never {}
}
pub fn make_exec(&mut self) -> io::Result<()> {
match self.never {}
}
pub fn make_mut(&mut self) -> io::Result<()> {
match self.never {}
}
#[inline]
pub fn ptr(&self) -> *const u8 {
match self.never {}
}
#[inline]
pub fn mut_ptr(&mut self) -> *mut u8 {
match self.never {}
}
#[inline]
pub fn len(&self) -> usize {
match self.never {}
}
}
pub fn file_len(file: &File) -> io::Result<u64> {
Ok(file.metadata()?.len())
}
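// Note on the pattern above (not part of the vendored source): `Never` is an
// empty enum, so `self.never` can never exist, and `match self.never {}` is a
// stable-Rust way to mark the method bodies unreachable while satisfying any
// return type. A minimal standalone illustration:
//
//     enum Never {}
//
//     fn unreachable_body(n: Never) -> usize {
//         // No arms are needed: a value of an uninhabited type cannot be
//         // constructed, so this match type-checks as `usize`.
//         match n {}
//     }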

512
vendor/memmap2/src/unix.rs vendored Normal file

@@ -0,0 +1,512 @@
use std::fs::File;
use std::mem::ManuallyDrop;
use std::os::unix::io::{FromRawFd, RawFd};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{io, ptr};
#[cfg(any(
all(target_os = "linux", not(target_arch = "mips")),
target_os = "freebsd",
target_os = "android"
))]
const MAP_STACK: libc::c_int = libc::MAP_STACK;
#[cfg(not(any(
all(target_os = "linux", not(target_arch = "mips")),
target_os = "freebsd",
target_os = "android"
)))]
const MAP_STACK: libc::c_int = 0;
#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_POPULATE: libc::c_int = libc::MAP_POPULATE;
#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_POPULATE: libc::c_int = 0;
#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_HUGETLB: libc::c_int = libc::MAP_HUGETLB;
#[cfg(target_os = "linux")]
const MAP_HUGE_MASK: libc::c_int = libc::MAP_HUGE_MASK;
#[cfg(any(target_os = "linux", target_os = "android"))]
const MAP_HUGE_SHIFT: libc::c_int = libc::MAP_HUGE_SHIFT;
#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_HUGETLB: libc::c_int = 0;
#[cfg(not(target_os = "linux"))]
const MAP_HUGE_MASK: libc::c_int = 0;
#[cfg(not(any(target_os = "linux", target_os = "android")))]
const MAP_HUGE_SHIFT: libc::c_int = 0;
#[cfg(any(
target_os = "linux",
target_os = "android",
target_os = "macos",
target_os = "netbsd",
target_os = "solaris",
target_os = "illumos",
))]
const MAP_NORESERVE: libc::c_int = libc::MAP_NORESERVE;
#[cfg(not(any(
target_os = "linux",
target_os = "android",
target_os = "macos",
target_os = "netbsd",
target_os = "solaris",
target_os = "illumos",
)))]
const MAP_NORESERVE: libc::c_int = 0;
#[cfg(any(
target_os = "android",
all(target_os = "linux", not(target_env = "musl"))
))]
use libc::{mmap64 as mmap, off64_t as off_t};
#[cfg(not(any(
target_os = "android",
all(target_os = "linux", not(target_env = "musl"))
)))]
use libc::{mmap, off_t};
pub struct MmapInner {
ptr: *mut libc::c_void,
len: usize,
}
impl MmapInner {
/// Creates a new `MmapInner`.
///
/// This is a thin wrapper around the `mmap` system call.
fn new(
len: usize,
prot: libc::c_int,
flags: libc::c_int,
file: RawFd,
offset: u64,
) -> io::Result<MmapInner> {
let alignment = offset % page_size() as u64;
let aligned_offset = offset - alignment;
let (map_len, map_offset) = Self::adjust_mmap_params(len, alignment as usize)?;
unsafe {
let ptr = mmap(
ptr::null_mut(),
map_len as libc::size_t,
prot,
flags,
file,
aligned_offset as off_t,
);
if ptr == libc::MAP_FAILED {
Err(io::Error::last_os_error())
} else {
Ok(Self::from_raw_parts(ptr, len, map_offset))
}
}
}
fn adjust_mmap_params(len: usize, alignment: usize) -> io::Result<(usize, usize)> {
// Rust's slice cannot be larger than isize::MAX.
// See https://doc.rust-lang.org/std/slice/fn.from_raw_parts.html
//
// This is not a problem on 64-bit targets, but on a 32-bit one,
// having a file or an anonymous mapping larger than 2GB is quite normal
// and we have to prevent it.
//
// The code below is essentially the same as in Rust's std:
// https://github.com/rust-lang/rust/blob/db78ab70a88a0a5e89031d7ee4eccec835dcdbde/library/alloc/src/raw_vec.rs#L495
if std::mem::size_of::<usize>() < 8 && len > isize::MAX as usize {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"memory map length overflows isize",
));
}
let map_len = len + alignment;
let map_offset = alignment;
// `libc::mmap` does not support zero-size mappings. POSIX defines:
//
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html
// > If `len` is zero, `mmap()` shall fail and no mapping shall be established.
//
// So if we were about to create such a mapping, create a one-byte mapping instead:
let map_len = map_len.max(1);
// Note that in that case `MmapInner::len` is still set to zero,
// and `Mmap` will still dereference to an empty slice.
//
// If this mapping is backed by an empty file, we create a mapping larger than the file.
// This is unusual but well-defined. On the same man page, POSIX further defines:
//
// > The `mmap()` function can be used to map a region of memory that is larger
// > than the current size of the object.
//
// (The object here is the file.)
//
// > Memory access within the mapping but beyond the current end of the underlying
// > objects may result in SIGBUS signals being sent to the process. The reason for this
// > is that the size of the object can be manipulated by other processes and can change
// > at any moment. The implementation should tell the application that a memory reference
// > is outside the object where this can be detected; otherwise, written data may be lost
// > and read data may not reflect actual data in the object.
//
// Because `MmapInner::len` is not incremented, this increment of `map_len`
// will not allow accesses past the end of the file and will not cause SIGBUS.
//
// (SIGBUS is still possible by mapping a non-empty file and then truncating it
// to a shorter size, but that is unrelated to this handling of empty files.)
Ok((map_len, map_offset))
}
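// Worked example (not part of the vendored source): with a 4096-byte page,
// mapping `len = 100` bytes at file offset 5000 proceeds as
//
//     let alignment = 5000 % 4096;            // = 904
//     let aligned_offset = 5000 - alignment;  // = 4096, page-aligned
//     let (map_len, map_offset) = (100 + alignment, alignment); // (1004, 904)
//
// so mmap maps 1004 bytes starting at offset 4096, and the public pointer
// is advanced by 904 bytes to land exactly on the requested offset.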
/// Get the current memory mapping as a `(ptr, map_len, offset)` tuple.
///
/// Note that `map_len` is the length of the memory mapping itself and
/// _not_ the one that would be passed to `from_raw_parts`.
fn as_mmap_params(&self) -> (*mut libc::c_void, usize, usize) {
let offset = self.ptr as usize % page_size();
let len = self.len + offset;
// There are two possible memory layouts we could have, depending on
// the length and offset passed when constructing this instance:
//
// 1. The "normal" memory layout looks like this:
//
// |<------------------>|<---------------------->|
// mmap ptr offset ptr public slice
//
// That is, we have
// - The start of the page-aligned memory mapping returned by mmap,
// followed by,
// - Some number of bytes that are memory mapped but ignored since
// they are before the byte offset requested by the user, followed
// by,
// - The actual memory mapped slice requested by the user.
//
// This maps cleanly to a (ptr, len, offset) tuple.
//
// 2. Then, we have the case where the user requested a zero-length
// memory mapping. mmap(2) does not support zero-length mappings so
// this crate works around that by actually making a mapping of
// length one. This means that we have
// - A length zero slice, followed by,
// - A single memory mapped byte
//
// Note that this only happens if the offset within the page is also
// zero. Otherwise, we have a memory map of offset bytes and not a
// zero-length memory map.
//
// This doesn't fit cleanly into a (ptr, len, offset) tuple. Instead,
// we fudge it slightly: a zero-length memory map turns into a
// mapping of length one and can't be told apart outside of this
// method without knowing the original length.
if len == 0 {
(self.ptr, 1, 0)
} else {
(unsafe { self.ptr.offset(-(offset as isize)) }, len, offset)
}
}
/// Construct this `MmapInner` from its raw components
///
/// # Safety
///
/// - `ptr` must point to the start of memory mapping that can be freed
/// using `munmap(2)` (i.e. returned by `mmap(2)` or `mremap(2)`)
/// - The memory mapping at `ptr` must have a length of `len + offset`.
/// - If `len + offset == 0` then the memory mapping must be of length 1.
/// - `offset` must be less than the current page size.
unsafe fn from_raw_parts(ptr: *mut libc::c_void, len: usize, offset: usize) -> Self {
debug_assert_eq!(ptr as usize % page_size(), 0, "ptr not page-aligned");
debug_assert!(offset < page_size(), "offset larger than page size");
Self {
ptr: ptr.add(offset),
len,
}
}
pub fn map(
len: usize,
file: RawFd,
offset: u64,
populate: bool,
no_reserve: bool,
) -> io::Result<MmapInner> {
let populate = if populate { MAP_POPULATE } else { 0 };
let no_reserve = if no_reserve { MAP_NORESERVE } else { 0 };
MmapInner::new(
len,
libc::PROT_READ,
libc::MAP_SHARED | populate | no_reserve,
file,
offset,
)
}
pub fn map_exec(
len: usize,
file: RawFd,
offset: u64,
populate: bool,
no_reserve: bool,
) -> io::Result<MmapInner> {
let populate = if populate { MAP_POPULATE } else { 0 };
let no_reserve = if no_reserve { MAP_NORESERVE } else { 0 };
MmapInner::new(
len,
libc::PROT_READ | libc::PROT_EXEC,
libc::MAP_SHARED | populate | no_reserve,
file,
offset,
)
}
pub fn map_mut(
len: usize,
file: RawFd,
offset: u64,
populate: bool,
no_reserve: bool,
) -> io::Result<MmapInner> {
let populate = if populate { MAP_POPULATE } else { 0 };
let no_reserve = if no_reserve { MAP_NORESERVE } else { 0 };
MmapInner::new(
len,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_SHARED | populate | no_reserve,
file,
offset,
)
}
pub fn map_copy(
len: usize,
file: RawFd,
offset: u64,
populate: bool,
no_reserve: bool,
) -> io::Result<MmapInner> {
let populate = if populate { MAP_POPULATE } else { 0 };
let no_reserve = if no_reserve { MAP_NORESERVE } else { 0 };
MmapInner::new(
len,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_PRIVATE | populate | no_reserve,
file,
offset,
)
}
pub fn map_copy_read_only(
len: usize,
file: RawFd,
offset: u64,
populate: bool,
no_reserve: bool,
) -> io::Result<MmapInner> {
let populate = if populate { MAP_POPULATE } else { 0 };
let no_reserve = if no_reserve { MAP_NORESERVE } else { 0 };
MmapInner::new(
len,
libc::PROT_READ,
libc::MAP_PRIVATE | populate | no_reserve,
file,
offset,
)
}
/// Open an anonymous memory map.
pub fn map_anon(
len: usize,
stack: bool,
populate: bool,
huge: Option<u8>,
no_reserve: bool,
) -> io::Result<MmapInner> {
let stack = if stack { MAP_STACK } else { 0 };
let populate = if populate { MAP_POPULATE } else { 0 };
let hugetlb = if huge.is_some() { MAP_HUGETLB } else { 0 };
let hugetlb_size = huge.map_or(0, |mask| {
(u64::from(mask) & (MAP_HUGE_MASK as u64)) << MAP_HUGE_SHIFT
}) as i32;
let no_reserve = if no_reserve { MAP_NORESERVE } else { 0 };
MmapInner::new(
len,
libc::PROT_READ | libc::PROT_WRITE,
libc::MAP_PRIVATE
| libc::MAP_ANON
| stack
| populate
| hugetlb
| hugetlb_size
| no_reserve,
-1,
0,
)
}
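// Worked example (not part of the vendored source): `huge: Option<u8>` holds
// log2 of the desired huge-page size. For 2 MiB pages, log2(2 MiB) = 21, so
//
//     let flags = (21u64 & (MAP_HUGE_MASK as u64)) << MAP_HUGE_SHIFT;
//     // equals libc::MAP_HUGE_2MB on Linux (21 << 26)
//
// and the result is OR-ed into the mmap flags next to MAP_HUGETLB.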
pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
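// msync(2) requires a page-aligned address: round the flush start down
// to the page boundary and grow the length by the same amount.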
let alignment = (self.ptr as usize + offset) % page_size();
let offset = offset as isize - alignment as isize;
let len = len + alignment;
let result =
unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_SYNC) };
if result == 0 {
Ok(())
} else {
Err(io::Error::last_os_error())
}
}
pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
let alignment = (self.ptr as usize + offset) % page_size();
let offset = offset as isize - alignment as isize;
let len = len + alignment;
let result =
unsafe { libc::msync(self.ptr.offset(offset), len as libc::size_t, libc::MS_ASYNC) };
if result == 0 {
Ok(())
} else {
Err(io::Error::last_os_error())
}
}
fn mprotect(&mut self, prot: libc::c_int) -> io::Result<()> {
unsafe {
let alignment = self.ptr as usize % page_size();
let ptr = self.ptr.offset(-(alignment as isize));
let len = self.len + alignment;
let len = len.max(1);
if libc::mprotect(ptr, len, prot) == 0 {
Ok(())
} else {
Err(io::Error::last_os_error())
}
}
}
pub fn make_read_only(&mut self) -> io::Result<()> {
self.mprotect(libc::PROT_READ)
}
pub fn make_exec(&mut self) -> io::Result<()> {
self.mprotect(libc::PROT_READ | libc::PROT_EXEC)
}
pub fn make_mut(&mut self) -> io::Result<()> {
self.mprotect(libc::PROT_READ | libc::PROT_WRITE)
}
#[inline]
pub fn ptr(&self) -> *const u8 {
self.ptr as *const u8
}
#[inline]
pub fn mut_ptr(&mut self) -> *mut u8 {
self.ptr.cast()
}
#[inline]
pub fn len(&self) -> usize {
self.len
}
pub fn advise(&self, advice: libc::c_int, offset: usize, len: usize) -> io::Result<()> {
let alignment = (self.ptr as usize + offset) % page_size();
let offset = offset as isize - alignment as isize;
let len = len + alignment;
unsafe {
if libc::madvise(self.ptr.offset(offset), len, advice) != 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
}
#[cfg(target_os = "linux")]
pub fn remap(&mut self, new_len: usize, options: crate::RemapOptions) -> io::Result<()> {
let (old_ptr, old_len, offset) = self.as_mmap_params();
let (map_len, offset) = Self::adjust_mmap_params(new_len, offset)?;
unsafe {
let new_ptr = libc::mremap(old_ptr, old_len, map_len, options.into_flags());
if new_ptr == libc::MAP_FAILED {
Err(io::Error::last_os_error())
} else {
// We explicitly don't drop self since the pointer within is no longer valid.
ptr::write(self, Self::from_raw_parts(new_ptr, new_len, offset));
Ok(())
}
}
}
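// Illustrative sketch (not part of the vendored source): on Linux this
// backs the crate's public `remap`, e.g. growing an anonymous map in place
// or letting the kernel move it. Assuming the public `MmapMut::remap` and
// `RemapOptions` API:
//
//     use memmap2::{MmapMut, RemapOptions};
//
//     fn grow(map: &mut MmapMut, new_len: usize) -> std::io::Result<()> {
//         // SAFETY: no outstanding slices into `map` exist at this point;
//         // with `may_move(true)` the mapping's address may change.
//         unsafe { map.remap(new_len, RemapOptions::new().may_move(true)) }
//     }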
pub fn lock(&self) -> io::Result<()> {
unsafe {
if libc::mlock(self.ptr, self.len) != 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
}
pub fn unlock(&self) -> io::Result<()> {
unsafe {
if libc::munlock(self.ptr, self.len) != 0 {
Err(io::Error::last_os_error())
} else {
Ok(())
}
}
}
}
impl Drop for MmapInner {
fn drop(&mut self) {
let (ptr, len, _) = self.as_mmap_params();
// Any errors during unmapping/closing are ignored as the only way
// to report them would be through panicking which is highly discouraged
// in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
unsafe { libc::munmap(ptr, len as libc::size_t) };
}
}
unsafe impl Sync for MmapInner {}
unsafe impl Send for MmapInner {}
fn page_size() -> usize {
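// Cache the value with a relaxed atomic: concurrent first calls may all
// invoke sysconf, but they all store the same value, so the race is benign.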
static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
match PAGE_SIZE.load(Ordering::Relaxed) {
0 => {
let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize };
PAGE_SIZE.store(page_size, Ordering::Relaxed);
page_size
}
page_size => page_size,
}
}
pub fn file_len(file: RawFd) -> io::Result<u64> {
// SAFETY: We must not close the passed-in fd by dropping the File we create;
// we ensure this by immediately wrapping it in a ManuallyDrop.
unsafe {
let file = ManuallyDrop::new(File::from_raw_fd(file));
Ok(file.metadata()?.len())
}
}

530
vendor/memmap2/src/windows.rs vendored Normal file

@@ -0,0 +1,530 @@
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use std::fs::File;
use std::mem::ManuallyDrop;
use std::os::raw::c_void;
use std::os::windows::io::{FromRawHandle, RawHandle};
use std::{io, mem, ptr};
type BOOL = i32;
type WORD = u16;
type DWORD = u32;
type WCHAR = u16;
type HANDLE = *mut c_void;
type LPHANDLE = *mut HANDLE;
type LPVOID = *mut c_void;
type LPCVOID = *const c_void;
type ULONG_PTR = usize;
type SIZE_T = ULONG_PTR;
type LPCWSTR = *const WCHAR;
type PDWORD = *mut DWORD;
type DWORD_PTR = ULONG_PTR;
type LPSECURITY_ATTRIBUTES = *mut SECURITY_ATTRIBUTES;
type LPSYSTEM_INFO = *mut SYSTEM_INFO;
const INVALID_HANDLE_VALUE: HANDLE = -1isize as HANDLE;
const DUPLICATE_SAME_ACCESS: DWORD = 0x00000002;
const STANDARD_RIGHTS_REQUIRED: DWORD = 0x000F0000;
const SECTION_QUERY: DWORD = 0x0001;
const SECTION_MAP_WRITE: DWORD = 0x0002;
const SECTION_MAP_READ: DWORD = 0x0004;
const SECTION_MAP_EXECUTE: DWORD = 0x0008;
const SECTION_EXTEND_SIZE: DWORD = 0x0010;
const SECTION_MAP_EXECUTE_EXPLICIT: DWORD = 0x0020;
const SECTION_ALL_ACCESS: DWORD = STANDARD_RIGHTS_REQUIRED
| SECTION_QUERY
| SECTION_MAP_WRITE
| SECTION_MAP_READ
| SECTION_MAP_EXECUTE
| SECTION_EXTEND_SIZE;
const PAGE_READONLY: DWORD = 0x02;
const PAGE_READWRITE: DWORD = 0x04;
const PAGE_WRITECOPY: DWORD = 0x08;
const PAGE_EXECUTE_READ: DWORD = 0x20;
const PAGE_EXECUTE_READWRITE: DWORD = 0x40;
const PAGE_EXECUTE_WRITECOPY: DWORD = 0x80;
const FILE_MAP_WRITE: DWORD = SECTION_MAP_WRITE;
const FILE_MAP_READ: DWORD = SECTION_MAP_READ;
const FILE_MAP_ALL_ACCESS: DWORD = SECTION_ALL_ACCESS;
const FILE_MAP_EXECUTE: DWORD = SECTION_MAP_EXECUTE_EXPLICIT;
const FILE_MAP_COPY: DWORD = 0x00000001;
#[repr(C)]
struct SECURITY_ATTRIBUTES {
nLength: DWORD,
lpSecurityDescriptor: LPVOID,
bInheritHandle: BOOL,
}
#[repr(C)]
struct SYSTEM_INFO {
wProcessorArchitecture: WORD,
wReserved: WORD,
dwPageSize: DWORD,
lpMinimumApplicationAddress: LPVOID,
lpMaximumApplicationAddress: LPVOID,
dwActiveProcessorMask: DWORD_PTR,
dwNumberOfProcessors: DWORD,
dwProcessorType: DWORD,
dwAllocationGranularity: DWORD,
wProcessorLevel: WORD,
wProcessorRevision: WORD,
}
#[allow(dead_code)]
#[repr(C)]
#[derive(Copy, Clone)]
pub struct FILETIME {
pub dwLowDateTime: DWORD,
pub dwHighDateTime: DWORD,
}
extern "system" {
fn GetCurrentProcess() -> HANDLE;
fn CloseHandle(hObject: HANDLE) -> BOOL;
fn DuplicateHandle(
hSourceProcessHandle: HANDLE,
hSourceHandle: HANDLE,
hTargetProcessHandle: HANDLE,
lpTargetHandle: LPHANDLE,
dwDesiredAccess: DWORD,
bInheritHandle: BOOL,
dwOptions: DWORD,
) -> BOOL;
fn CreateFileMappingW(
hFile: HANDLE,
lpFileMappingAttributes: LPSECURITY_ATTRIBUTES,
flProtect: DWORD,
dwMaximumSizeHigh: DWORD,
dwMaximumSizeLow: DWORD,
lpName: LPCWSTR,
) -> HANDLE;
fn FlushFileBuffers(hFile: HANDLE) -> BOOL;
fn FlushViewOfFile(lpBaseAddress: LPCVOID, dwNumberOfBytesToFlush: SIZE_T) -> BOOL;
fn UnmapViewOfFile(lpBaseAddress: LPCVOID) -> BOOL;
fn MapViewOfFile(
hFileMappingObject: HANDLE,
dwDesiredAccess: DWORD,
dwFileOffsetHigh: DWORD,
dwFileOffsetLow: DWORD,
dwNumberOfBytesToMap: SIZE_T,
) -> LPVOID;
fn VirtualProtect(
lpAddress: LPVOID,
dwSize: SIZE_T,
flNewProtect: DWORD,
lpflOldProtect: PDWORD,
) -> BOOL;
fn GetSystemInfo(lpSystemInfo: LPSYSTEM_INFO);
}
/// Returns a fixed aligned pointer that is valid for `slice::from_raw_parts::<u8>` with `len == 0`.
///
/// This aligns the pointer to `allocation_granularity()` or 1 if unknown.
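/// (Same idea as `std::ptr::NonNull::dangling`, but aligned to the
/// allocation granularity instead of a type's alignment.)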
fn empty_slice_ptr() -> *mut c_void {
allocation_granularity().max(1) as *mut c_void
}
pub struct MmapInner {
handle: Option<RawHandle>,
ptr: *mut c_void,
len: usize,
copy: bool,
}
impl MmapInner {
/// Creates a new `MmapInner`.
///
/// This is a thin wrapper around the `CreateFileMappingW` and `MapViewOfFile` system calls.
pub fn new(
handle: RawHandle,
protect: DWORD,
access: DWORD,
offset: u64,
len: usize,
copy: bool,
) -> io::Result<MmapInner> {
let alignment = offset % allocation_granularity() as u64;
let aligned_offset = offset - alignment as u64;
let aligned_len = len + alignment as usize;
if aligned_len == 0 {
// `CreateFileMappingW` documents:
//
// https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-createfilemappingw
// > An attempt to map a file with a length of 0 (zero) fails with an error code
// > of ERROR_FILE_INVALID. Applications should test for files with a length of 0
// > (zero) and reject those files.
//
// For such files, don't create a mapping at all and use a marker pointer instead.
return Ok(MmapInner {
handle: None,
ptr: empty_slice_ptr(),
len: 0,
copy,
});
}
unsafe {
let mapping = CreateFileMappingW(handle, ptr::null_mut(), protect, 0, 0, ptr::null());
if mapping.is_null() {
return Err(io::Error::last_os_error());
}
let ptr = MapViewOfFile(
mapping,
access,
(aligned_offset >> 16 >> 16) as DWORD,
(aligned_offset & 0xffffffff) as DWORD,
aligned_len as SIZE_T,
);
CloseHandle(mapping);
if ptr.is_null() {
return Err(io::Error::last_os_error());
}
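// Duplicate the caller's file handle so the mapping owns a handle with
// the same access rights and can still flush file buffers even if the
// caller closes theirs.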
let mut new_handle = 0 as RawHandle;
let cur_proc = GetCurrentProcess();
let ok = DuplicateHandle(
cur_proc,
handle,
cur_proc,
&mut new_handle,
0,
0,
DUPLICATE_SAME_ACCESS,
);
if ok == 0 {
UnmapViewOfFile(ptr);
return Err(io::Error::last_os_error());
}
Ok(MmapInner {
handle: Some(new_handle),
ptr: ptr.offset(alignment as isize),
len,
copy,
})
}
}
pub fn map(
len: usize,
handle: RawHandle,
offset: u64,
_populate: bool,
_no_reserve: bool,
) -> io::Result<MmapInner> {
let write = protection_supported(handle, PAGE_READWRITE);
let exec = protection_supported(handle, PAGE_EXECUTE_READ);
let mut access = FILE_MAP_READ;
let protection = match (write, exec) {
(true, true) => {
access |= FILE_MAP_WRITE | FILE_MAP_EXECUTE;
PAGE_EXECUTE_READWRITE
}
(true, false) => {
access |= FILE_MAP_WRITE;
PAGE_READWRITE
}
(false, true) => {
access |= FILE_MAP_EXECUTE;
PAGE_EXECUTE_READ
}
(false, false) => PAGE_READONLY,
};
let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
if write || exec {
inner.make_read_only()?;
}
Ok(inner)
}
pub fn map_exec(
len: usize,
handle: RawHandle,
offset: u64,
_populate: bool,
_no_reserve: bool,
) -> io::Result<MmapInner> {
let write = protection_supported(handle, PAGE_READWRITE);
let mut access = FILE_MAP_READ | FILE_MAP_EXECUTE;
let protection = if write {
access |= FILE_MAP_WRITE;
PAGE_EXECUTE_READWRITE
} else {
PAGE_EXECUTE_READ
};
let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
if write {
inner.make_exec()?;
}
Ok(inner)
}
pub fn map_mut(
len: usize,
handle: RawHandle,
offset: u64,
_populate: bool,
_no_reserve: bool,
) -> io::Result<MmapInner> {
let exec = protection_supported(handle, PAGE_EXECUTE_READ);
let mut access = FILE_MAP_READ | FILE_MAP_WRITE;
let protection = if exec {
access |= FILE_MAP_EXECUTE;
PAGE_EXECUTE_READWRITE
} else {
PAGE_READWRITE
};
let mut inner = MmapInner::new(handle, protection, access, offset, len, false)?;
if exec {
inner.make_mut()?;
}
Ok(inner)
}
pub fn map_copy(
len: usize,
handle: RawHandle,
offset: u64,
_populate: bool,
_no_reserve: bool,
) -> io::Result<MmapInner> {
let exec = protection_supported(handle, PAGE_EXECUTE_READWRITE);
let mut access = FILE_MAP_COPY;
let protection = if exec {
access |= FILE_MAP_EXECUTE;
PAGE_EXECUTE_WRITECOPY
} else {
PAGE_WRITECOPY
};
let mut inner = MmapInner::new(handle, protection, access, offset, len, true)?;
if exec {
inner.make_mut()?;
}
Ok(inner)
}
pub fn map_copy_read_only(
len: usize,
handle: RawHandle,
offset: u64,
_populate: bool,
_no_reserve: bool,
) -> io::Result<MmapInner> {
let write = protection_supported(handle, PAGE_READWRITE);
let exec = protection_supported(handle, PAGE_EXECUTE_READ);
let mut access = FILE_MAP_COPY;
let protection = if exec {
access |= FILE_MAP_EXECUTE;
PAGE_EXECUTE_WRITECOPY
} else {
PAGE_WRITECOPY
};
let mut inner = MmapInner::new(handle, protection, access, offset, len, true)?;
if write || exec {
inner.make_read_only()?;
}
Ok(inner)
}
pub fn map_anon(
len: usize,
_stack: bool,
_populate: bool,
_huge: Option<u8>,
_no_reserve: bool,
) -> io::Result<MmapInner> {
// Ensure a non-zero length for the underlying mapping
let mapped_len = len.max(1);
unsafe {
// Create a mapping and view with maximum access permissions, then use `VirtualProtect`
// to set the actual `Protection`. This way, we can set more permissive protection later
// on.
// Also see https://msdn.microsoft.com/en-us/library/windows/desktop/aa366537.aspx
let mapping = CreateFileMappingW(
INVALID_HANDLE_VALUE,
ptr::null_mut(),
PAGE_EXECUTE_READWRITE,
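// Split the usize length into high/low DWORDs. Shifting by 16 twice
// (rather than once by 32) stays in range on 32-bit targets, where a
// single `>> 32` on a 32-bit usize would overflow the shift width.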
(mapped_len >> 16 >> 16) as DWORD,
(mapped_len & 0xffffffff) as DWORD,
ptr::null(),
);
if mapping.is_null() {
return Err(io::Error::last_os_error());
}
let access = FILE_MAP_ALL_ACCESS | FILE_MAP_EXECUTE;
let ptr = MapViewOfFile(mapping, access, 0, 0, mapped_len as SIZE_T);
CloseHandle(mapping);
if ptr.is_null() {
return Err(io::Error::last_os_error());
}
let mut old = 0;
let result = VirtualProtect(ptr, mapped_len as SIZE_T, PAGE_READWRITE, &mut old);
if result != 0 {
Ok(MmapInner {
handle: None,
ptr,
len,
copy: false,
})
} else {
Err(io::Error::last_os_error())
}
}
}
pub fn flush(&self, offset: usize, len: usize) -> io::Result<()> {
self.flush_async(offset, len)?;
if let Some(handle) = self.handle {
let ok = unsafe { FlushFileBuffers(handle) };
if ok == 0 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
pub fn flush_async(&self, offset: usize, len: usize) -> io::Result<()> {
if self.ptr == empty_slice_ptr() {
return Ok(());
}
let result = unsafe { FlushViewOfFile(self.ptr.add(offset), len as SIZE_T) };
if result != 0 {
Ok(())
} else {
Err(io::Error::last_os_error())
}
}
fn virtual_protect(&mut self, protect: DWORD) -> io::Result<()> {
if self.ptr == empty_slice_ptr() {
return Ok(());
}
unsafe {
let alignment = self.ptr as usize % allocation_granularity();
let ptr = self.ptr.offset(-(alignment as isize));
let aligned_len = self.len as SIZE_T + alignment as SIZE_T;
let mut old = 0;
let result = VirtualProtect(ptr, aligned_len, protect, &mut old);
if result != 0 {
Ok(())
} else {
Err(io::Error::last_os_error())
}
}
}
pub fn make_read_only(&mut self) -> io::Result<()> {
self.virtual_protect(PAGE_READONLY)
}
pub fn make_exec(&mut self) -> io::Result<()> {
if self.copy {
self.virtual_protect(PAGE_EXECUTE_WRITECOPY)
} else {
self.virtual_protect(PAGE_EXECUTE_READ)
}
}
pub fn make_mut(&mut self) -> io::Result<()> {
if self.copy {
self.virtual_protect(PAGE_WRITECOPY)
} else {
self.virtual_protect(PAGE_READWRITE)
}
}
#[inline]
pub fn ptr(&self) -> *const u8 {
self.ptr as *const u8
}
#[inline]
pub fn mut_ptr(&mut self) -> *mut u8 {
self.ptr.cast()
}
#[inline]
pub fn len(&self) -> usize {
self.len
}
}
impl Drop for MmapInner {
fn drop(&mut self) {
if self.ptr == empty_slice_ptr() {
return;
}
let alignment = self.ptr as usize % allocation_granularity();
// Any errors during unmapping/closing are ignored as the only way
// to report them would be through panicking which is highly discouraged
// in Drop impls, c.f. https://github.com/rust-lang/lang-team/issues/97
unsafe {
let ptr = self.ptr.offset(-(alignment as isize));
UnmapViewOfFile(ptr);
if let Some(handle) = self.handle {
CloseHandle(handle);
}
}
}
}
unsafe impl Sync for MmapInner {}
unsafe impl Send for MmapInner {}
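/// Probes whether a mapping with the given protection can be created for
/// `handle`, by creating and immediately closing a throwaway section object.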
fn protection_supported(handle: RawHandle, protection: DWORD) -> bool {
unsafe {
let mapping = CreateFileMappingW(handle, ptr::null_mut(), protection, 0, 0, ptr::null());
if mapping.is_null() {
return false;
}
CloseHandle(mapping);
true
}
}
fn allocation_granularity() -> usize {
unsafe {
let mut info = mem::zeroed();
GetSystemInfo(&mut info);
info.dwAllocationGranularity as usize
}
}
pub fn file_len(handle: RawHandle) -> io::Result<u64> {
// SAFETY: We must not close the passed-in handle by dropping the File we create;
// we ensure this by immediately wrapping it in a ManuallyDrop.
unsafe {
let file = ManuallyDrop::new(File::from_raw_handle(handle));
Ok(file.metadata()?.len())
}
}