Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/bindgen-0.70.1/codegen/bitfield_unit.rs

@@ -0,0 +1,102 @@
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
#[inline]
pub const fn new(storage: Storage) -> Self {
Self { storage }
}
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
Storage: AsRef<[u8]> + AsMut<[u8]>,
{
#[inline]
pub fn get_bit(&self, index: usize) -> bool {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = self.storage.as_ref()[byte_index];
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
index % 8
};
let mask = 1 << bit_index;
byte & mask == mask
}
#[inline]
pub fn set_bit(&mut self, index: usize, val: bool) {
debug_assert!(index / 8 < self.storage.as_ref().len());
let byte_index = index / 8;
let byte = &mut self.storage.as_mut()[byte_index];
let bit_index = if cfg!(target_endian = "big") {
7 - (index % 8)
} else {
index % 8
};
let mask = 1 << bit_index;
if val {
*byte |= mask;
} else {
*byte &= !mask;
}
}
#[inline]
pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
debug_assert!(
(bit_offset + (bit_width as usize)) / 8 <=
self.storage.as_ref().len()
);
let mut val = 0;
for i in 0..(bit_width as usize) {
if self.get_bit(i + bit_offset) {
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
val |= 1 << index;
}
}
val
}
#[inline]
pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
debug_assert!(bit_width <= 64);
debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
debug_assert!(
(bit_offset + (bit_width as usize)) / 8 <=
self.storage.as_ref().len()
);
for i in 0..(bit_width as usize) {
let mask = 1 << i;
let val_bit_is_set = val & mask == mask;
let index = if cfg!(target_endian = "big") {
bit_width as usize - 1 - i
} else {
i
};
self.set_bit(index + bit_offset, val_bit_is_set);
}
}
}
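
A minimal usage sketch of the unit above (assuming `__BindgenBitfieldUnit` is in scope; bit offsets count from the least-significant bit of byte 0):

#[test]
fn bitfield_unit_usage_sketch() {
    let mut unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0; 2]);
    // Pack two hypothetical bit-fields: 5 bits at offset 0, 6 bits at offset 5.
    unit.set(0, 5, 0b10101);
    unit.set(5, 6, 0b110011);
    // Reads round-trip regardless of target endianness, because `get` and
    // `set` use the same bit-index mapping.
    assert_eq!(unit.get(0, 5), 0b10101);
    assert_eq!(unit.get(5, 6), 0b110011);
}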

vendor/bindgen-0.70.1/codegen/bitfield_unit_tests.rs

@@ -0,0 +1,260 @@
//! Tests for `__BindgenBitfieldUnit`.
//!
//! Note that bit-fields are allocated right to left (least to most significant
//! bits).
//!
//! From the x86 psABI (processor-specific ABI):
//!
//! ```c
//! struct {
//!     int j : 5;
//!     int k : 6;
//!     int m : 7;
//! };
//! ```
//!
//! ```ignore
//! +---------------+--------+-------+------+
//! |    padding    |   m    |   k   |  j   |
//! |31           18|17    11|10    5|4    0|
//! +---------------+--------+-------+------+
//! ```
use super::bitfield_unit::__BindgenBitfieldUnit;
#[test]
fn bitfield_unit_get_bit() {
let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]);
let mut bits = vec![];
for i in 0..16 {
bits.push(unit.get_bit(i));
}
println!();
println!("bits = {:?}", bits);
assert_eq!(
bits,
&[
// 0b10011101
true, false, true, true, true, false, false, true,
// 0b00011101
true, false, true, true, true, false, false, false
]
);
}
#[test]
fn bitfield_unit_set_bit() {
let mut unit =
__BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]);
for i in 0..16 {
if i % 3 == 0 {
unit.set_bit(i, true);
}
}
for i in 0..16 {
assert_eq!(unit.get_bit(i), i % 3 == 0);
}
let mut unit =
__BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]);
for i in 0..16 {
if i % 3 == 0 {
unit.set_bit(i, false);
}
}
for i in 0..16 {
assert_eq!(unit.get_bit(i), i % 3 != 0);
}
}
macro_rules! bitfield_unit_get {
(
$(
With $storage:expr , then get($start:expr, $len:expr) is $expected:expr;
)*
) => {
#[test]
fn bitfield_unit_get() {
$({
let expected = $expected;
let unit = __BindgenBitfieldUnit::<_>::new($storage);
let actual = unit.get($start, $len);
println!();
println!("expected = {:064b}", expected);
println!("actual = {:064b}", actual);
assert_eq!(expected, actual);
})*
}
}
}
bitfield_unit_get! {
// Let's just exhaustively test getting the bits from a single byte, since
// there are few enough combinations...
With [0b11100010], then get(0, 1) is 0;
With [0b11100010], then get(1, 1) is 1;
With [0b11100010], then get(2, 1) is 0;
With [0b11100010], then get(3, 1) is 0;
With [0b11100010], then get(4, 1) is 0;
With [0b11100010], then get(5, 1) is 1;
With [0b11100010], then get(6, 1) is 1;
With [0b11100010], then get(7, 1) is 1;
With [0b11100010], then get(0, 2) is 0b10;
With [0b11100010], then get(1, 2) is 0b01;
With [0b11100010], then get(2, 2) is 0b00;
With [0b11100010], then get(3, 2) is 0b00;
With [0b11100010], then get(4, 2) is 0b10;
With [0b11100010], then get(5, 2) is 0b11;
With [0b11100010], then get(6, 2) is 0b11;
With [0b11100010], then get(0, 3) is 0b010;
With [0b11100010], then get(1, 3) is 0b001;
With [0b11100010], then get(2, 3) is 0b000;
With [0b11100010], then get(3, 3) is 0b100;
With [0b11100010], then get(4, 3) is 0b110;
With [0b11100010], then get(5, 3) is 0b111;
With [0b11100010], then get(0, 4) is 0b0010;
With [0b11100010], then get(1, 4) is 0b0001;
With [0b11100010], then get(2, 4) is 0b1000;
With [0b11100010], then get(3, 4) is 0b1100;
With [0b11100010], then get(4, 4) is 0b1110;
With [0b11100010], then get(0, 5) is 0b00010;
With [0b11100010], then get(1, 5) is 0b10001;
With [0b11100010], then get(2, 5) is 0b11000;
With [0b11100010], then get(3, 5) is 0b11100;
With [0b11100010], then get(0, 6) is 0b100010;
With [0b11100010], then get(1, 6) is 0b110001;
With [0b11100010], then get(2, 6) is 0b111000;
With [0b11100010], then get(0, 7) is 0b1100010;
With [0b11100010], then get(1, 7) is 0b1110001;
With [0b11100010], then get(0, 8) is 0b11100010;
// OK. Now let's test getting bits from across byte boundaries.
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(0, 16) is 0b1111111101010101;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(1, 16) is 0b0111111110101010;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(2, 16) is 0b0011111111010101;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(3, 16) is 0b0001111111101010;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(4, 16) is 0b0000111111110101;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(5, 16) is 0b0000011111111010;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(6, 16) is 0b0000001111111101;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(7, 16) is 0b0000000111111110;
With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
then get(8, 16) is 0b0000000011111111;
}
macro_rules! bitfield_unit_set {
(
$(
set($start:expr, $len:expr, $val:expr) is $expected:expr;
)*
) => {
#[test]
fn bitfield_unit_set() {
$(
let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]);
unit.set($start, $len, $val);
let actual = unit.get(0, 32);
println!();
println!("set({}, {}, {:032b}", $start, $len, $val);
println!("expected = {:064b}", $expected);
println!("actual = {:064b}", actual);
assert_eq!($expected, actual);
)*
}
}
}
bitfield_unit_set! {
// Once again, let's exhaustively test single byte combinations.
set(0, 1, 0b11111111) is 0b00000001;
set(1, 1, 0b11111111) is 0b00000010;
set(2, 1, 0b11111111) is 0b00000100;
set(3, 1, 0b11111111) is 0b00001000;
set(4, 1, 0b11111111) is 0b00010000;
set(5, 1, 0b11111111) is 0b00100000;
set(6, 1, 0b11111111) is 0b01000000;
set(7, 1, 0b11111111) is 0b10000000;
set(0, 2, 0b11111111) is 0b00000011;
set(1, 2, 0b11111111) is 0b00000110;
set(2, 2, 0b11111111) is 0b00001100;
set(3, 2, 0b11111111) is 0b00011000;
set(4, 2, 0b11111111) is 0b00110000;
set(5, 2, 0b11111111) is 0b01100000;
set(6, 2, 0b11111111) is 0b11000000;
set(0, 3, 0b11111111) is 0b00000111;
set(1, 3, 0b11111111) is 0b00001110;
set(2, 3, 0b11111111) is 0b00011100;
set(3, 3, 0b11111111) is 0b00111000;
set(4, 3, 0b11111111) is 0b01110000;
set(5, 3, 0b11111111) is 0b11100000;
set(0, 4, 0b11111111) is 0b00001111;
set(1, 4, 0b11111111) is 0b00011110;
set(2, 4, 0b11111111) is 0b00111100;
set(3, 4, 0b11111111) is 0b01111000;
set(4, 4, 0b11111111) is 0b11110000;
set(0, 5, 0b11111111) is 0b00011111;
set(1, 5, 0b11111111) is 0b00111110;
set(2, 5, 0b11111111) is 0b01111100;
set(3, 5, 0b11111111) is 0b11111000;
set(0, 6, 0b11111111) is 0b00111111;
set(1, 6, 0b11111111) is 0b01111110;
set(2, 6, 0b11111111) is 0b11111100;
set(0, 7, 0b11111111) is 0b01111111;
set(1, 7, 0b11111111) is 0b11111110;
set(0, 8, 0b11111111) is 0b11111111;
// And, now let's cross byte boundaries.
set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111;
set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110;
set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100;
set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000;
set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000;
set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000;
set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000;
set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000;
set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000;
}
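
Tying these tests back to the psABI example quoted at the top of this file: `j`, `k`, and `m` pack into a single 4-byte unit at bit offsets 0, 5, and 11 with widths 5, 6, and 7. A sketch of the offsets the generated accessors would use:

#[test]
fn psabi_example_sketch() {
    let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0; 4]);
    unit.set(0, 5, 0b1_1111);     // j: bits 4..=0
    unit.set(5, 6, 0b11_1111);    // k: bits 10..=5
    unit.set(11, 7, 0b111_1111);  // m: bits 17..=11
    // All 18 payload bits are set; the padding bits stay zero.
    assert_eq!(unit.get(0, 18), 0b11_1111_1111_1111_1111);
}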

vendor/bindgen-0.70.1/codegen/dyngen.rs

@@ -0,0 +1,242 @@
use crate::codegen;
use crate::ir::context::BindgenContext;
use crate::ir::function::ClangAbi;
use proc_macro2::{Ident, TokenStream};
/// Used to build the output tokens for dynamic bindings.
#[derive(Default)]
pub(crate) struct DynamicItems {
/// Tracks the tokens that will appear inside the library struct -- e.g.:
/// ```ignore
/// struct Lib {
/// __library: ::libloading::Library,
/// pub x: Result<unsafe extern ..., ::libloading::Error>, // <- tracks these
/// ...
/// }
/// ```
struct_members: Vec<proc_macro2::TokenStream>,
/// Tracks the tokens that will appear inside the library struct's implementation, e.g.:
///
/// ```ignore
/// impl Lib {
/// ...
/// pub unsafe fn foo(&self, ...) { // <- tracks these
/// ...
/// }
/// }
/// ```
struct_implementation: Vec<proc_macro2::TokenStream>,
/// Tracks the initialization of the fields inside the `::new` constructor of the library
/// struct, e.g.:
/// ```ignore
/// impl Lib {
///
/// pub unsafe fn new<P>(path: P) -> Result<Self, ::libloading::Error>
/// where
/// P: AsRef<::std::ffi::OsStr>,
/// {
/// ...
/// let foo = __library.get(...) ...; // <- tracks these
/// ...
/// }
///
/// ...
/// }
/// ```
constructor_inits: Vec<proc_macro2::TokenStream>,
/// Tracks the information that is passed to the library struct at the end of the `::new`
/// constructor, e.g.:
/// ```ignore
/// impl LibFoo {
/// pub unsafe fn new<P>(path: P) -> Result<Self, ::libloading::Error>
/// where
/// P: AsRef<::std::ffi::OsStr>,
/// {
/// ...
/// Ok(LibFoo {
/// __library: __library,
/// foo,
/// bar, // <- tracks these
/// ...
/// })
/// }
/// }
/// ```
init_fields: Vec<proc_macro2::TokenStream>,
}
impl DynamicItems {
pub(crate) fn new() -> Self {
Self::default()
}
pub(crate) fn get_tokens(
&self,
lib_ident: Ident,
ctx: &BindgenContext,
) -> proc_macro2::TokenStream {
let struct_members = &self.struct_members;
let constructor_inits = &self.constructor_inits;
let init_fields = &self.init_fields;
let struct_implementation = &self.struct_implementation;
let from_library = if ctx.options().wrap_unsafe_ops {
quote!(unsafe { Self::from_library(library) })
} else {
quote!(Self::from_library(library))
};
quote! {
pub struct #lib_ident {
__library: ::libloading::Library,
#(#struct_members)*
}
impl #lib_ident {
pub unsafe fn new<P>(
path: P
) -> Result<Self, ::libloading::Error>
where P: AsRef<::std::ffi::OsStr> {
let library = ::libloading::Library::new(path)?;
#from_library
}
pub unsafe fn from_library<L>(
library: L
) -> Result<Self, ::libloading::Error>
where L: Into<::libloading::Library> {
let __library = library.into();
#( #constructor_inits )*
Ok(#lib_ident {
__library,
#( #init_fields ),*
})
}
#( #struct_implementation )*
}
}
}
#[allow(clippy::too_many_arguments)]
pub(crate) fn push_func(
&mut self,
ident: Ident,
abi: ClangAbi,
is_variadic: bool,
is_required: bool,
args: Vec<proc_macro2::TokenStream>,
args_identifiers: Vec<proc_macro2::TokenStream>,
ret: proc_macro2::TokenStream,
ret_ty: proc_macro2::TokenStream,
attributes: Vec<proc_macro2::TokenStream>,
ctx: &BindgenContext,
) {
if !is_variadic {
assert_eq!(args.len(), args_identifiers.len());
}
let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret };
let member = if is_required {
signature
} else {
quote! { Result<#signature, ::libloading::Error> }
};
self.struct_members.push(quote! {
pub #ident: #member,
});
// N.B: If the signature was required, it won't be wrapped in a Result<...>
// and we can simply call it directly.
let fn_ = if is_required {
quote! { self.#ident }
} else {
quote! { self.#ident.as_ref().expect("Expected function, got error.") }
};
let call_body = if ctx.options().wrap_unsafe_ops {
quote!(unsafe { (#fn_)(#( #args_identifiers ),*) })
} else {
quote!((#fn_)(#( #args_identifiers ),*) )
};
// We can't implement variadic functions from C easily, so we expose the
// function pointer so that the user can call it themselves.
if !is_variadic {
self.struct_implementation.push(quote! {
#(#attributes)*
pub unsafe fn #ident ( &self, #( #args ),* ) #ret_ty {
#call_body
}
});
}
// N.B: Unwrap the signature upon construction if it is required to be resolved.
let ident_str = codegen::helpers::ast_ty::cstr_expr(ident.to_string());
let library_get = if ctx.options().wrap_unsafe_ops {
quote!(unsafe { __library.get(#ident_str) })
} else {
quote!(__library.get(#ident_str))
};
self.constructor_inits.push(if is_required {
quote! {
let #ident = #library_get.map(|sym| *sym)?;
}
} else {
quote! {
let #ident = #library_get.map(|sym| *sym);
}
});
self.init_fields.push(quote! {
#ident
});
}
pub fn push_var(
&mut self,
ident: Ident,
ty: TokenStream,
is_required: bool,
) {
let member = if is_required {
quote! { *mut #ty }
} else {
quote! { Result<*mut #ty, ::libloading::Error> }
};
self.struct_members.push(quote! {
pub #ident: #member,
});
let deref = if is_required {
quote! { self.#ident }
} else {
quote! { *self.#ident.as_ref().expect("Expected variable, got error.") }
};
self.struct_implementation.push(quote! {
pub unsafe fn #ident (&self) -> *mut #ty {
#deref
}
});
let ident_str = codegen::helpers::ast_ty::cstr_expr(ident.to_string());
self.constructor_inits.push(if is_required {
quote! {
let #ident = __library.get::<*mut #ty>(#ident_str).map(|sym| *sym)?;
}
} else {
quote! {
let #ident = __library.get::<*mut #ty>(#ident_str).map(|sym| *sym);
}
});
self.init_fields.push(quote! {
#ident
});
}
}
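
For reference, a hedged sketch of how code generated from these tokens is used; `TestLib`, `foo`, `bar`, the signatures, and the library path are illustrative, not part of this file:

// Assuming bindgen generated `pub struct TestLib` with a required `foo`
// and a non-required `bar`:
let lib = unsafe { TestLib::new("libtest.so") }.expect("library should load");
let x = unsafe { lib.foo(1, 2) };  // required: stored unwrapped, called directly
let y = unsafe { lib.bar() };      // non-required: stored as Result, the
                                   // wrapper panics if the symbol was missing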

vendor/bindgen-0.70.1/codegen/error.rs

@@ -0,0 +1,53 @@
use std::error;
use std::fmt;
/// Errors that can occur during code generation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum Error {
/// Tried to generate an opaque blob for a type that did not have a layout.
NoLayoutForOpaqueBlob,
/// Tried to instantiate an opaque template definition, or a template
/// definition that is too difficult for us to understand (like a partial
/// template specialization).
InstantiationOfOpaqueType,
/// Function ABI is not supported.
UnsupportedAbi(&'static str),
/// The pointer type size does not match the target's pointer size.
InvalidPointerSize {
ty_name: String,
ty_size: usize,
ptr_size: usize,
},
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Error::NoLayoutForOpaqueBlob => {
"Tried to generate an opaque blob, but had no layout.".fmt(f)
}
Error::InstantiationOfOpaqueType => {
"Instantiation of opaque template type or partial template specialization."
.fmt(f)
}
Error::UnsupportedAbi(abi) => {
write!(
f,
"{} ABI is not supported by the configured Rust target.",
abi
)
}
Error::InvalidPointerSize { ty_name, ty_size, ptr_size } => {
write!(f, "The {} pointer type has size {} but the current target's pointer size is {}.", ty_name, ty_size, ptr_size)
}
}
}
}
impl error::Error for Error {}
/// A `Result` of `T` or an error of `bindgen::codegen::error::Error`.
pub(crate) type Result<T> = ::std::result::Result<T, Error>;
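
A short sketch (within the crate, since `Error` is `pub(crate)`) of how these errors render through the `Display` impl above:

#[test]
fn display_sketch() {
    let err = Error::UnsupportedAbi("vectorcall");
    assert_eq!(
        err.to_string(),
        "vectorcall ABI is not supported by the configured Rust target."
    );
}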

vendor/bindgen-0.70.1/codegen/helpers.rs

@@ -0,0 +1,383 @@
//! Helpers for code generation that don't need macro expansion.
use proc_macro2::{Ident, Span};
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
pub(crate) mod attributes {
use proc_macro2::{Ident, Span, TokenStream};
use std::{borrow::Cow, str::FromStr};
pub(crate) fn repr(which: &str) -> TokenStream {
let which = Ident::new(which, Span::call_site());
quote! {
#[repr( #which )]
}
}
pub(crate) fn repr_list(which_ones: &[&str]) -> TokenStream {
let which_ones = which_ones
.iter()
.cloned()
.map(|one| TokenStream::from_str(one).expect("repr to be valid"));
quote! {
#[repr( #( #which_ones ),* )]
}
}
pub(crate) fn derives(which_ones: &[&str]) -> TokenStream {
let which_ones = which_ones
.iter()
.cloned()
.map(|one| TokenStream::from_str(one).expect("derive to be valid"));
quote! {
#[derive( #( #which_ones ),* )]
}
}
pub(crate) fn inline() -> TokenStream {
quote! {
#[inline]
}
}
pub(crate) fn must_use() -> TokenStream {
quote! {
#[must_use]
}
}
pub(crate) fn non_exhaustive() -> TokenStream {
quote! {
#[non_exhaustive]
}
}
pub(crate) fn doc(comment: String) -> TokenStream {
if comment.is_empty() {
quote!()
} else {
quote!(#[doc = #comment])
}
}
pub(crate) fn link_name<const MANGLE: bool>(name: &str) -> TokenStream {
// LLVM mangles the name by default but it's already mangled.
// Prefixing the name with \u{1} should tell LLVM to not mangle it.
let name: Cow<'_, str> = if MANGLE {
name.into()
} else {
format!("\u{1}{}", name).into()
};
quote! {
#[link_name = #name]
}
}
}
/// Generates a proper type for a field or type with a given `Layout`, that is,
/// a type with the correct size and alignment restrictions.
pub(crate) fn blob(ctx: &BindgenContext, layout: Layout) -> syn::Type {
let opaque = layout.opaque();
// FIXME(emilio, #412): We fall back to byte alignment, but there are
// some things that legitimately are more than 8-byte aligned.
//
// Eventually we should be able to `unwrap` here, but...
let ty = match opaque.known_rust_type_for_array(ctx) {
Some(ty) => ty,
None => {
warn!("Found unknown alignment on code generation!");
syn::parse_quote! { u8 }
}
};
let data_len = opaque.array_size(ctx).unwrap_or(layout.size);
if data_len == 1 {
ty
} else {
syn::parse_quote! { [ #ty ; #data_len ] }
}
}
/// Integer type of the same size as the given `Layout`.
pub(crate) fn integer_type(
ctx: &BindgenContext,
layout: Layout,
) -> Option<syn::Type> {
Layout::known_type_for_size(ctx, layout.size)
}
pub(crate) const BITFIELD_UNIT: &str = "__BindgenBitfieldUnit";
/// Generates a bitfield allocation unit type for a type with the given `Layout`.
pub(crate) fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> syn::Type {
let size = layout.size;
let bitfield_unit_name = Ident::new(BITFIELD_UNIT, Span::call_site());
let ty = syn::parse_quote! { #bitfield_unit_name<[u8; #size]> };
if ctx.options().enable_cxx_namespaces {
return syn::parse_quote! { root::#ty };
}
ty
}
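
A standalone sketch of the sizing rule `blob` applies, assuming `known_rust_type_for_array` maps alignments 1/2/4/8 to `u8`/`u16`/`u32`/`u64` and that the fallback is a byte blob as above; `blob_sketch` is hypothetical and drops the `BindgenContext`:

fn blob_sketch(size: usize, align: usize) -> String {
    let (elem, elem_size) = match align {
        1 => ("u8", 1),
        2 => ("u16", 2),
        4 => ("u32", 4),
        8 => ("u64", 8),
        _ => ("u8", 1), // unknown alignment: fall back to a byte blob
    };
    let len = size / elem_size;
    if len == 1 {
        elem.to_string()
    } else {
        format!("[{}; {}]", elem, len)
    }
}

// blob_sketch(1, 1)  == "u8"
// blob_sketch(12, 4) == "[u32; 3]"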
pub(crate) mod ast_ty {
use crate::ir::context::BindgenContext;
use crate::ir::function::FunctionSig;
use crate::ir::layout::Layout;
use crate::ir::ty::{FloatKind, IntKind};
use crate::RustTarget;
use proc_macro2::TokenStream;
use std::str::FromStr;
pub(crate) fn c_void(ctx: &BindgenContext) -> syn::Type {
// ctypes_prefix takes precedence
match ctx.options().ctypes_prefix {
Some(ref prefix) => {
let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
syn::parse_quote! { #prefix::c_void }
}
None => {
if ctx.options().use_core &&
ctx.options().rust_features.core_ffi_c_void
{
syn::parse_quote! { ::core::ffi::c_void }
} else {
syn::parse_quote! { ::std::os::raw::c_void }
}
}
}
}
pub(crate) fn raw_type(ctx: &BindgenContext, name: &str) -> syn::Type {
let ident = ctx.rust_ident_raw(name);
match ctx.options().ctypes_prefix {
Some(ref prefix) => {
let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
syn::parse_quote! { #prefix::#ident }
}
None => {
if ctx.options().use_core &&
ctx.options().rust_features().core_ffi_c
{
syn::parse_quote! { ::core::ffi::#ident }
} else {
syn::parse_quote! { ::std::os::raw::#ident }
}
}
}
}
pub(crate) fn int_kind_rust_type(
ctx: &BindgenContext,
ik: IntKind,
layout: Option<Layout>,
) -> syn::Type {
match ik {
IntKind::Bool => syn::parse_quote! { bool },
IntKind::Char { .. } => raw_type(ctx, "c_char"),
IntKind::SChar => raw_type(ctx, "c_schar"),
IntKind::UChar => raw_type(ctx, "c_uchar"),
IntKind::Short => raw_type(ctx, "c_short"),
IntKind::UShort => raw_type(ctx, "c_ushort"),
IntKind::Int => raw_type(ctx, "c_int"),
IntKind::UInt => raw_type(ctx, "c_uint"),
IntKind::Long => raw_type(ctx, "c_long"),
IntKind::ULong => raw_type(ctx, "c_ulong"),
IntKind::LongLong => raw_type(ctx, "c_longlong"),
IntKind::ULongLong => raw_type(ctx, "c_ulonglong"),
IntKind::WChar => {
let layout =
layout.expect("Couldn't compute wchar_t's layout?");
Layout::known_type_for_size(ctx, layout.size)
.expect("Non-representable wchar_t?")
}
IntKind::I8 => syn::parse_quote! { i8 },
IntKind::U8 => syn::parse_quote! { u8 },
IntKind::I16 => syn::parse_quote! { i16 },
IntKind::U16 => syn::parse_quote! { u16 },
IntKind::I32 => syn::parse_quote! { i32 },
IntKind::U32 => syn::parse_quote! { u32 },
IntKind::I64 => syn::parse_quote! { i64 },
IntKind::U64 => syn::parse_quote! { u64 },
IntKind::Custom { name, .. } => {
syn::parse_str(name).expect("Invalid integer type.")
}
IntKind::U128 => {
if ctx.options().rust_features.i128_and_u128 {
syn::parse_quote! { u128 }
} else {
// Best effort thing, but wrong alignment
// unfortunately.
syn::parse_quote! { [u64; 2] }
}
}
IntKind::I128 => {
if ctx.options().rust_features.i128_and_u128 {
syn::parse_quote! { i128 }
} else {
syn::parse_quote! { [u64; 2] }
}
}
}
}
pub(crate) fn float_kind_rust_type(
ctx: &BindgenContext,
fk: FloatKind,
layout: Option<Layout>,
) -> syn::Type {
// TODO: we probably should take the type layout into account more
// often?
//
// Also, maybe this one shouldn't be the default?
match (fk, ctx.options().convert_floats) {
(FloatKind::Float16, _) => {
// TODO: do f16 when rust lands it
ctx.generated_bindgen_float16();
if ctx.options().enable_cxx_namespaces {
syn::parse_quote! { root::__BindgenFloat16 }
} else {
syn::parse_quote! { __BindgenFloat16 }
}
}
(FloatKind::Float, true) => syn::parse_quote! { f32 },
(FloatKind::Double, true) => syn::parse_quote! { f64 },
(FloatKind::Float, false) => raw_type(ctx, "c_float"),
(FloatKind::Double, false) => raw_type(ctx, "c_double"),
(FloatKind::LongDouble, _) => {
match layout {
Some(layout) => {
match layout.size {
4 => syn::parse_quote! { f32 },
8 => syn::parse_quote! { f64 },
// TODO(emilio): If rust ever gains f128 we should
// use it here and below.
_ => super::integer_type(ctx, layout)
.unwrap_or(syn::parse_quote! { f64 }),
}
}
None => {
debug_assert!(
false,
"How didn't we know the layout for a primitive type?"
);
syn::parse_quote! { f64 }
}
}
}
(FloatKind::Float128, _) => {
if ctx.options().rust_features.i128_and_u128 {
syn::parse_quote! { u128 }
} else {
syn::parse_quote! { [u64; 2] }
}
}
}
}
pub(crate) fn int_expr(val: i64) -> TokenStream {
// Don't use quote! { #val } because that adds the type suffix.
let val = proc_macro2::Literal::i64_unsuffixed(val);
quote!(#val)
}
pub(crate) fn uint_expr(val: u64) -> TokenStream {
// Don't use quote! { #val } because that adds the type suffix.
let val = proc_macro2::Literal::u64_unsuffixed(val);
quote!(#val)
}
pub(crate) fn cstr_expr(mut string: String) -> TokenStream {
string.push('\0');
let b = proc_macro2::Literal::byte_string(string.as_bytes());
quote! {
#b
}
}
pub(crate) fn float_expr(
ctx: &BindgenContext,
f: f64,
) -> Result<TokenStream, ()> {
if f.is_finite() {
let val = proc_macro2::Literal::f64_unsuffixed(f);
return Ok(quote!(#val));
}
let prefix = ctx.trait_prefix();
let rust_target = ctx.options().rust_target;
if f.is_nan() {
let tokens = if rust_target >= RustTarget::Stable_1_43 {
quote! {
f64::NAN
}
} else {
quote! {
::#prefix::f64::NAN
}
};
return Ok(tokens);
}
if f.is_infinite() {
let tokens = if f.is_sign_positive() {
if rust_target >= RustTarget::Stable_1_43 {
quote! {
f64::INFINITY
}
} else {
quote! {
::#prefix::f64::INFINITY
}
}
} else {
// Negative infinity
if rust_target >= RustTarget::Stable_1_43 {
quote! {
f64::NEG_INFINITY
}
} else {
quote! {
::#prefix::f64::NEG_INFINITY
}
}
};
return Ok(tokens);
}
warn!("Unknown non-finite float number: {:?}", f);
Err(())
}
pub(crate) fn arguments_from_signature(
signature: &FunctionSig,
ctx: &BindgenContext,
) -> Vec<TokenStream> {
let mut unnamed_arguments = 0;
signature
.argument_types()
.iter()
.map(|&(ref name, _ty)| match *name {
Some(ref name) => {
let name = ctx.rust_ident(name);
quote! { #name }
}
None => {
unnamed_arguments += 1;
let name =
ctx.rust_ident(format!("arg{}", unnamed_arguments));
quote! { #name }
}
})
.collect()
}
}

vendor/bindgen-0.70.1/codegen/impl_debug.rs

@@ -0,0 +1,245 @@
use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods};
use crate::ir::context::BindgenContext;
use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName};
use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
pub(crate) fn gen_debug_impl(
ctx: &BindgenContext,
fields: &[Field],
item: &Item,
kind: CompKind,
) -> proc_macro2::TokenStream {
let struct_name = item.canonical_name(ctx);
let mut format_string = format!("{} {{{{ ", struct_name);
let mut tokens = vec![];
if item.is_opaque(ctx, &()) {
format_string.push_str("opaque");
} else {
match kind {
CompKind::Union => {
format_string.push_str("union");
}
CompKind::Struct => {
let processed_fields = fields.iter().filter_map(|f| match f {
Field::DataMember(ref fd) => fd.impl_debug(ctx, ()),
Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()),
});
for (i, (fstring, toks)) in processed_fields.enumerate() {
if i > 0 {
format_string.push_str(", ");
}
tokens.extend(toks);
format_string.push_str(&fstring);
}
}
}
}
format_string.push_str(" }}");
tokens.insert(0, quote! { #format_string });
let prefix = ctx.trait_prefix();
quote! {
fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result {
write!(f, #( #tokens ),*)
}
}
}
/// A trait for the things which we can codegen tokens that contribute towards a
/// generated `impl Debug`.
pub(crate) trait ImplDebug<'a> {
/// Any extra parameter required by this a particular `ImplDebug` implementation.
type Extra;
/// Generate a format string snippet to be included in the larger `impl Debug`
/// format string, and the code to get the format string's interpolation values.
fn impl_debug(
&self,
ctx: &BindgenContext,
extra: Self::Extra,
) -> Option<(String, Vec<proc_macro2::TokenStream>)>;
}
impl<'a> ImplDebug<'a> for FieldData {
type Extra = ();
fn impl_debug(
&self,
ctx: &BindgenContext,
_: Self::Extra,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
if let Some(name) = self.name() {
ctx.resolve_item(self.ty()).impl_debug(ctx, name)
} else {
None
}
}
}
impl<'a> ImplDebug<'a> for BitfieldUnit {
type Extra = ();
fn impl_debug(
&self,
ctx: &BindgenContext,
_: Self::Extra,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
let mut format_string = String::new();
let mut tokens = vec![];
for (i, bitfield) in self.bitfields().iter().enumerate() {
if i > 0 {
format_string.push_str(", ");
}
if let Some(bitfield_name) = bitfield.name() {
format_string.push_str(&format!("{} : {{:?}}", bitfield_name));
let getter_name = bitfield.getter_name();
let name_ident = ctx.rust_ident_raw(getter_name);
tokens.push(quote! {
self.#name_ident ()
});
}
}
Some((format_string, tokens))
}
}
impl<'a> ImplDebug<'a> for Item {
type Extra = &'a str;
fn impl_debug(
&self,
ctx: &BindgenContext,
name: &str,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
let name_ident = ctx.rust_ident(name);
// We don't know if blocklisted items `impl Debug` or not, so we can't
// add them to the format string we're building up.
if !ctx.allowlisted_items().contains(&self.id()) {
return None;
}
let ty = match self.as_type() {
Some(ty) => ty,
None => {
return None;
}
};
fn debug_print(
name: &str,
name_ident: proc_macro2::TokenStream,
) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
Some((
format!("{}: {{:?}}", name),
vec![quote! {
self.#name_ident
}],
))
}
match *ty.kind() {
// Handle the simple cases.
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Int(..) |
TypeKind::Float(..) |
TypeKind::Complex(..) |
TypeKind::Function(..) |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::UnresolvedTypeRef(..) |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::Comp(..) |
TypeKind::ObjCSel => debug_print(name, quote! { #name_ident }),
TypeKind::TemplateInstantiation(ref inst) => {
if inst.is_opaque(ctx, self) {
Some((format!("{}: opaque", name), vec![]))
} else {
debug_print(name, quote! { #name_ident })
}
}
// The generic is not required to implement Debug, so we cannot debug-print that type
TypeKind::TypeParam => {
Some((format!("{}: Non-debuggable generic", name), vec![]))
}
TypeKind::Array(_, len) => {
// Generics are not required to implement Debug
if self.has_type_param_in_array(ctx) {
Some((
format!("{}: Array with length {}", name, len),
vec![],
))
} else if len < RUST_DERIVE_IN_ARRAY_LIMIT ||
ctx.options().rust_features().larger_arrays
{
// The simple case
debug_print(name, quote! { #name_ident })
} else if ctx.options().use_core {
// There is no String in core, so we can't collect the formatted
// elements; print a placeholder instead to keep no_std setups working.
Some((format!("{}: [...]", name), vec![]))
} else {
// Let's implement our own print function
Some((
format!("{}: [{{}}]", name),
vec![quote! {
self.#name_ident
.iter()
.enumerate()
.map(|(i, v)| format!("{}{:?}", if i > 0 { ", " } else { "" }, v))
.collect::<String>()
}],
))
}
}
TypeKind::Vector(_, len) => {
if ctx.options().use_core {
// There is no format! in core, so we can't build the element list;
// print a placeholder instead to keep no_std setups working.
Some((format!("{}(...)", name), vec![]))
} else {
let self_ids = 0..len;
Some((
format!("{}({{}})", name),
vec![quote! {
#(format!("{:?}", self.#self_ids)),*
}],
))
}
}
TypeKind::ResolvedTypeRef(t) |
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::BlockPointer(t) => {
// We follow the aliases
ctx.resolve_item(t).impl_debug(ctx, name)
}
TypeKind::Pointer(inner) => {
let inner_type = ctx.resolve_type(inner).canonical_type(ctx);
match *inner_type.kind() {
TypeKind::Function(ref sig)
if !sig.function_pointers_can_derive() =>
{
Some((format!("{}: FunctionPointer", name), vec![]))
}
_ => debug_print(name, quote! { #name_ident }),
}
}
TypeKind::Opaque => None,
}
}
}
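
Putting the pieces together: for a struct with a scalar field and an array longer than `RUST_DERIVE_IN_ARRAY_LIMIT` (without `use_core`), the generated impl has roughly this shape. A sketch; `Foo`, `x`, and `buf` are illustrative:

impl ::std::fmt::Debug for Foo {
    fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
        write!(
            f,
            "Foo {{ x: {:?}, buf: [{}] }}",
            self.x,
            self.buf
                .iter()
                .enumerate()
                .map(|(i, v)| format!("{}{:?}", if i > 0 { ", " } else { "" }, v))
                .collect::<String>(),
        )
    }
}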

vendor/bindgen-0.70.1/codegen/impl_partialeq.rs

@@ -0,0 +1,142 @@
use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods};
use crate::ir::context::BindgenContext;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
/// Generate a manual implementation of `PartialEq` trait for the
/// specified compound type.
pub(crate) fn gen_partialeq_impl(
ctx: &BindgenContext,
comp_info: &CompInfo,
item: &Item,
ty_for_impl: &proc_macro2::TokenStream,
) -> Option<proc_macro2::TokenStream> {
let mut tokens = vec![];
if item.is_opaque(ctx, &()) {
tokens.push(quote! {
&self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..]
});
} else if comp_info.kind() == CompKind::Union {
assert!(!ctx.options().untagged_union);
tokens.push(quote! {
&self.bindgen_union_field[..] == &other.bindgen_union_field[..]
});
} else {
for base in comp_info.base_members().iter() {
if !base.requires_storage(ctx) {
continue;
}
let ty_item = ctx.resolve_item(base.ty);
let field_name = &base.field_name;
if ty_item.is_opaque(ctx, &()) {
let field_name = ctx.rust_ident(field_name);
tokens.push(quote! {
&self. #field_name [..] == &other. #field_name [..]
});
} else {
tokens.push(gen_field(ctx, ty_item, field_name));
}
}
for field in comp_info.fields() {
match *field {
Field::DataMember(ref fd) => {
let ty_item = ctx.resolve_item(fd.ty());
let name = fd.name().unwrap();
tokens.push(gen_field(ctx, ty_item, name));
}
Field::Bitfields(ref bu) => {
for bitfield in bu.bitfields() {
if bitfield.name().is_some() {
let getter_name = bitfield.getter_name();
let name_ident = ctx.rust_ident_raw(getter_name);
tokens.push(quote! {
self.#name_ident () == other.#name_ident ()
});
}
}
}
}
}
}
Some(quote! {
fn eq(&self, other: & #ty_for_impl) -> bool {
#( #tokens )&&*
}
})
}
fn gen_field(
ctx: &BindgenContext,
ty_item: &Item,
name: &str,
) -> proc_macro2::TokenStream {
fn quote_equals(
name_ident: proc_macro2::Ident,
) -> proc_macro2::TokenStream {
quote! { self.#name_ident == other.#name_ident }
}
let name_ident = ctx.rust_ident(name);
let ty = ty_item.expect_type();
match *ty.kind() {
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Int(..) |
TypeKind::Complex(..) |
TypeKind::Float(..) |
TypeKind::Enum(..) |
TypeKind::TypeParam |
TypeKind::UnresolvedTypeRef(..) |
TypeKind::Reference(..) |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel |
TypeKind::Comp(..) |
TypeKind::Pointer(_) |
TypeKind::Function(..) |
TypeKind::Opaque => quote_equals(name_ident),
TypeKind::TemplateInstantiation(ref inst) => {
if inst.is_opaque(ctx, ty_item) {
quote! {
&self. #name_ident [..] == &other. #name_ident [..]
}
} else {
quote_equals(name_ident)
}
}
TypeKind::Array(_, len) => {
if len <= RUST_DERIVE_IN_ARRAY_LIMIT ||
ctx.options().rust_features().larger_arrays
{
quote_equals(name_ident)
} else {
quote! {
&self. #name_ident [..] == &other. #name_ident [..]
}
}
}
TypeKind::Vector(_, len) => {
let self_ids = 0..len;
let other_ids = 0..len;
quote! {
#(self.#self_ids == other.#other_ids &&)* true
}
}
TypeKind::ResolvedTypeRef(t) |
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::BlockPointer(t) => {
let inner_item = ctx.resolve_item(t);
gen_field(ctx, inner_item, name)
}
}
}
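
For a struct with one scalar field and one array past `RUST_DERIVE_IN_ARRAY_LIMIT`, the tokens above join with `&&` into roughly this shape (a sketch; `Foo`, `x`, and `big` are illustrative):

impl PartialEq for Foo {
    fn eq(&self, other: &Foo) -> bool {
        self.x == other.x &&
            &self.big[..] == &other.big[..]
    }
}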

vendor/bindgen-0.70.1/codegen/mod.rs

File diff suppressed because it is too large.

vendor/bindgen-0.70.1/codegen/postprocessing/merge_extern_blocks.rs

@@ -0,0 +1,72 @@
use syn::{
visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut},
File, Item, ItemForeignMod, ItemMod,
};
pub(super) fn merge_extern_blocks(file: &mut File) {
Visitor.visit_file_mut(file)
}
struct Visitor;
impl VisitMut for Visitor {
fn visit_file_mut(&mut self, file: &mut File) {
visit_items(&mut file.items);
visit_file_mut(self, file)
}
fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) {
if let Some((_, ref mut items)) = item_mod.content {
visit_items(items);
}
visit_item_mod_mut(self, item_mod)
}
}
fn visit_items(items: &mut Vec<Item>) {
// Keep all the extern blocks in a different `Vec` for faster search.
let mut extern_blocks = Vec::<ItemForeignMod>::new();
for item in std::mem::take(items) {
if let Item::ForeignMod(ItemForeignMod {
attrs,
abi,
brace_token,
unsafety,
items: extern_block_items,
}) = item
{
let mut exists = false;
for extern_block in &mut extern_blocks {
// Check if there is an extern block with the same ABI and
// attributes.
if extern_block.attrs == attrs && extern_block.abi == abi {
// Merge the items of the two blocks.
extern_block.items.extend_from_slice(&extern_block_items);
exists = true;
break;
}
}
// If no existing extern block had the same ABI and attributes, store
// it.
if !exists {
extern_blocks.push(ItemForeignMod {
attrs,
abi,
brace_token,
unsafety,
items: extern_block_items,
});
}
} else {
// If the item is not an extern block, we don't have to do anything and just
// push it back.
items.push(item);
}
}
// Move all the extern blocks alongside the rest of the items.
for extern_block in extern_blocks {
items.push(Item::ForeignMod(extern_block));
}
}
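
The effect of the pass, sketched on two blocks with identical ABI and attributes (note the merged block is appended after the non-extern items):

// Before:
extern "C" {
    pub fn a();
}
extern "C" {
    pub fn b();
}
// After:
extern "C" {
    pub fn a();
    pub fn b();
}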

vendor/bindgen-0.70.1/codegen/postprocessing/mod.rs

@@ -0,0 +1,57 @@
use proc_macro2::TokenStream;
use quote::ToTokens;
use syn::{parse2, File};
use crate::BindgenOptions;
mod merge_extern_blocks;
mod sort_semantically;
use merge_extern_blocks::merge_extern_blocks;
use sort_semantically::sort_semantically;
struct PostProcessingPass {
should_run: fn(&BindgenOptions) -> bool,
run: fn(&mut File),
}
// TODO: This can be a const fn when mutable references are allowed in const
// context.
macro_rules! pass {
($pass:ident) => {
PostProcessingPass {
should_run: |options| options.$pass,
run: |file| $pass(file),
}
};
}
const PASSES: &[PostProcessingPass] =
&[pass!(merge_extern_blocks), pass!(sort_semantically)];
pub(crate) fn postprocessing(
items: Vec<TokenStream>,
options: &BindgenOptions,
) -> TokenStream {
let items = items.into_iter().collect();
let require_syn = PASSES.iter().any(|pass| (pass.should_run)(options));
if !require_syn {
return items;
}
// This syn business is a hack, for now. This means that we are re-parsing already
// generated code using `syn` (as opposed to `quote`) because `syn` provides us more
// control over the elements.
// The `unwrap` here is deliberate because bindgen should generate valid rust items at all
// times.
let mut file = parse2::<File>(items).unwrap();
for pass in PASSES {
if (pass.should_run)(options) {
(pass.run)(&mut file);
}
}
file.into_token_stream()
}
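
For concreteness, `pass!(merge_extern_blocks)` expands to:

PostProcessingPass {
    should_run: |options| options.merge_extern_blocks,
    run: |file| merge_extern_blocks(file),
}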

vendor/bindgen-0.70.1/codegen/postprocessing/sort_semantically.rs

@@ -0,0 +1,46 @@
use syn::{
visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut},
File, Item, ItemMod,
};
pub(super) fn sort_semantically(file: &mut File) {
Visitor.visit_file_mut(file)
}
struct Visitor;
impl VisitMut for Visitor {
fn visit_file_mut(&mut self, file: &mut File) {
visit_items(&mut file.items);
visit_file_mut(self, file)
}
fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) {
if let Some((_, ref mut items)) = item_mod.content {
visit_items(items);
}
visit_item_mod_mut(self, item_mod)
}
}
fn visit_items(items: &mut [Item]) {
items.sort_by_key(|item| match item {
Item::Type(_) => 0,
Item::Struct(_) => 1,
Item::Const(_) => 2,
Item::Fn(_) => 3,
Item::Enum(_) => 4,
Item::Union(_) => 5,
Item::Static(_) => 6,
Item::Trait(_) => 7,
Item::TraitAlias(_) => 8,
Item::Impl(_) => 9,
Item::Mod(_) => 10,
Item::Use(_) => 11,
Item::Verbatim(_) => 12,
Item::ExternCrate(_) => 13,
Item::ForeignMod(_) => 14,
Item::Macro(_) => 15,
_ => 18,
});
}
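
A sketch of the ordering this produces (`sort_by_key` is stable, so items of the same kind keep their relative order):

// Before:
fn f() {}
struct S;
type T = u8;
// After (Type < Struct < Fn in the key above):
type T = u8;
struct S;
fn f() {}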

vendor/bindgen-0.70.1/codegen/serialize.rs

@@ -0,0 +1,444 @@
use std::io::Write;
use crate::callbacks::IntKind;
use crate::ir::comp::CompKind;
use crate::ir::context::{BindgenContext, TypeId};
use crate::ir::function::{Function, FunctionKind};
use crate::ir::item::Item;
use crate::ir::item::ItemCanonicalName;
use crate::ir::item_kind::ItemKind;
use crate::ir::ty::{FloatKind, Type, TypeKind};
use super::{CodegenError, WrapAsVariadic};
fn get_loc(item: &Item) -> String {
item.location()
.map(|x| x.to_string())
.unwrap_or_else(|| "unknown".to_owned())
}
pub(super) trait CSerialize<'a> {
type Extra;
fn serialize<W: Write>(
&self,
ctx: &BindgenContext,
extra: Self::Extra,
stack: &mut Vec<String>,
writer: &mut W,
) -> Result<(), CodegenError>;
}
impl<'a> CSerialize<'a> for Item {
type Extra = &'a Option<WrapAsVariadic>;
fn serialize<W: Write>(
&self,
ctx: &BindgenContext,
extra: Self::Extra,
stack: &mut Vec<String>,
writer: &mut W,
) -> Result<(), CodegenError> {
match self.kind() {
ItemKind::Function(func) => {
func.serialize(ctx, (self, extra), stack, writer)
}
kind => Err(CodegenError::Serialize {
msg: format!("Cannot serialize item kind {:?}", kind),
loc: get_loc(self),
}),
}
}
}
impl<'a> CSerialize<'a> for Function {
type Extra = (&'a Item, &'a Option<WrapAsVariadic>);
fn serialize<W: Write>(
&self,
ctx: &BindgenContext,
(item, wrap_as_variadic): Self::Extra,
stack: &mut Vec<String>,
writer: &mut W,
) -> Result<(), CodegenError> {
if self.kind() != FunctionKind::Function {
return Err(CodegenError::Serialize {
msg: format!(
"Cannot serialize function kind {:?}",
self.kind(),
),
loc: get_loc(item),
});
}
let signature = match ctx.resolve_type(self.signature()).kind() {
TypeKind::Function(signature) => signature,
_ => unreachable!(),
};
assert!(!signature.is_variadic());
let name = self.name();
// Function arguments stored as `(name, type_id)` tuples.
let args = {
let mut count = 0;
let idx_to_prune = wrap_as_variadic.as_ref().map(
|WrapAsVariadic {
idx_of_va_list_arg, ..
}| *idx_of_va_list_arg,
);
signature
.argument_types()
.iter()
.cloned()
.enumerate()
.filter_map(|(idx, (opt_name, type_id))| {
if Some(idx) == idx_to_prune {
None
} else {
Some((
opt_name.unwrap_or_else(|| {
let name = format!("arg_{}", count);
count += 1;
name
}),
type_id,
))
}
})
.collect::<Vec<_>>()
};
// The name used for the wrapper itself.
let wrap_name = format!("{}{}", name, ctx.wrap_static_fns_suffix());
// The function's return type
let (ret_item, ret_ty) = {
let type_id = signature.return_type();
let ret_item = ctx.resolve_item(type_id);
let ret_ty = ret_item.expect_type();
// Write `ret_ty`.
ret_ty.serialize(ctx, ret_item, stack, writer)?;
(ret_item, ret_ty)
};
const INDENT: &str = " ";
// Write `wrap_name(args`.
write!(writer, " {}(", wrap_name)?;
serialize_args(&args, ctx, writer)?;
if wrap_as_variadic.is_none() {
// Write `) { name(` if the function returns void and `) { return name(` if it does not.
if ret_ty.is_void() {
write!(writer, ") {{ {}(", name)?;
} else {
write!(writer, ") {{ return {}(", name)?;
}
} else {
// Write `, ...) {`
writeln!(writer, ", ...) {{")?;
// Declare the return type `RET_TY ret;` if there is a need to do so
if !ret_ty.is_void() {
write!(writer, "{INDENT}")?;
ret_ty.serialize(ctx, ret_item, stack, writer)?;
writeln!(writer, " ret;")?;
}
// Setup va_list
writeln!(writer, "{INDENT}va_list ap;\n")?;
writeln!(
writer,
"{INDENT}va_start(ap, {});",
args.last().unwrap().0
)?;
write!(writer, "{INDENT}")?;
// Write `ret = name(` or `name(` depending if the function returns something
if !ret_ty.is_void() {
write!(writer, "ret = ")?;
}
write!(writer, "{}(", name)?;
}
// Get the argument names and, if necessary, insert `ap` at the right place
let mut args: Vec<_> = args.into_iter().map(|(name, _)| name).collect();
if let Some(WrapAsVariadic {
idx_of_va_list_arg, ..
}) = wrap_as_variadic
{
args.insert(*idx_of_va_list_arg, "ap".to_owned());
}
// Write `arg_names);`.
serialize_sep(", ", args.iter(), ctx, writer, |name, _, buf| {
write!(buf, "{}", name).map_err(From::from)
})?;
#[rustfmt::skip]
write!(writer, ");{}", if wrap_as_variadic.is_none() { " " } else { "\n" })?;
if wrap_as_variadic.is_some() {
// End va_list and return the result if there is one
writeln!(writer, "{INDENT}va_end(ap);")?;
if !ret_ty.is_void() {
writeln!(writer, "{INDENT}return ret;")?;
}
}
writeln!(writer, "}}")?;
Ok(())
}
}
impl<'a> CSerialize<'a> for TypeId {
type Extra = ();
fn serialize<W: Write>(
&self,
ctx: &BindgenContext,
(): Self::Extra,
stack: &mut Vec<String>,
writer: &mut W,
) -> Result<(), CodegenError> {
let item = ctx.resolve_item(*self);
item.expect_type().serialize(ctx, item, stack, writer)
}
}
impl<'a> CSerialize<'a> for Type {
type Extra = &'a Item;
fn serialize<W: Write>(
&self,
ctx: &BindgenContext,
item: Self::Extra,
stack: &mut Vec<String>,
writer: &mut W,
) -> Result<(), CodegenError> {
match self.kind() {
TypeKind::Void => {
if self.is_const() {
write!(writer, "const ")?;
}
write!(writer, "void")?
}
TypeKind::NullPtr => {
if self.is_const() {
write!(writer, "const ")?;
}
write!(writer, "nullptr_t")?
}
TypeKind::Int(int_kind) => {
if self.is_const() {
write!(writer, "const ")?;
}
match int_kind {
IntKind::Bool => write!(writer, "bool")?,
IntKind::SChar => write!(writer, "signed char")?,
IntKind::UChar => write!(writer, "unsigned char")?,
IntKind::WChar => write!(writer, "wchar_t")?,
IntKind::Short => write!(writer, "short")?,
IntKind::UShort => write!(writer, "unsigned short")?,
IntKind::Int => write!(writer, "int")?,
IntKind::UInt => write!(writer, "unsigned int")?,
IntKind::Long => write!(writer, "long")?,
IntKind::ULong => write!(writer, "unsigned long")?,
IntKind::LongLong => write!(writer, "long long")?,
IntKind::ULongLong => write!(writer, "unsigned long long")?,
IntKind::Char { .. } => write!(writer, "char")?,
int_kind => {
return Err(CodegenError::Serialize {
msg: format!(
"Cannot serialize integer kind {:?}",
int_kind
),
loc: get_loc(item),
})
}
}
}
TypeKind::Float(float_kind) => {
if self.is_const() {
write!(writer, "const ")?;
}
match float_kind {
FloatKind::Float16 => write!(writer, "_Float16")?,
FloatKind::Float => write!(writer, "float")?,
FloatKind::Double => write!(writer, "double")?,
FloatKind::LongDouble => write!(writer, "long double")?,
FloatKind::Float128 => write!(writer, "__float128")?,
}
}
TypeKind::Complex(float_kind) => {
if self.is_const() {
write!(writer, "const ")?;
}
match float_kind {
FloatKind::Float16 => write!(writer, "_Float16 complex")?,
FloatKind::Float => write!(writer, "float complex")?,
FloatKind::Double => write!(writer, "double complex")?,
FloatKind::LongDouble => {
write!(writer, "long double complex")?
}
FloatKind::Float128 => write!(writer, "__complex128")?,
}
}
TypeKind::Alias(type_id) => {
if let Some(name) = self.name() {
if self.is_const() {
write!(writer, "const {}", name)?;
} else {
write!(writer, "{}", name)?;
}
} else {
type_id.serialize(ctx, (), stack, writer)?;
}
}
TypeKind::Array(type_id, length) => {
type_id.serialize(ctx, (), stack, writer)?;
write!(writer, " [{}]", length)?
}
TypeKind::Function(signature) => {
if self.is_const() {
stack.push("const ".to_string());
}
signature.return_type().serialize(
ctx,
(),
&mut vec![],
writer,
)?;
write!(writer, " (")?;
while let Some(item) = stack.pop() {
write!(writer, "{}", item)?;
}
write!(writer, ")")?;
let args = signature.argument_types();
if args.is_empty() {
write!(writer, " (void)")?;
} else {
write!(writer, " (")?;
serialize_sep(
", ",
args.iter(),
ctx,
writer,
|(name, type_id), ctx, buf| {
let mut stack = vec![];
if let Some(name) = name {
stack.push(name.clone());
}
type_id.serialize(ctx, (), &mut stack, buf)
},
)?;
write!(writer, ")")?
}
}
TypeKind::ResolvedTypeRef(type_id) => {
if self.is_const() {
write!(writer, "const ")?;
}
type_id.serialize(ctx, (), stack, writer)?
}
TypeKind::Pointer(type_id) => {
if self.is_const() {
stack.push("*const ".to_owned());
} else {
stack.push("*".to_owned());
}
type_id.serialize(ctx, (), stack, writer)?
}
TypeKind::Comp(comp_info) => {
if self.is_const() {
write!(writer, "const ")?;
}
let name = item.canonical_name(ctx);
match comp_info.kind() {
CompKind::Struct => write!(writer, "struct {}", name)?,
CompKind::Union => write!(writer, "union {}", name)?,
};
}
TypeKind::Enum(_enum_ty) => {
if self.is_const() {
write!(writer, "const ")?;
}
let name = item.canonical_name(ctx);
write!(writer, "enum {}", name)?;
}
ty => {
return Err(CodegenError::Serialize {
msg: format!("Cannot serialize type kind {:?}", ty),
loc: get_loc(item),
})
}
};
if !stack.is_empty() {
write!(writer, " ")?;
while let Some(item) = stack.pop() {
write!(writer, "{}", item)?;
}
}
Ok(())
}
}
fn serialize_args<W: Write>(
args: &[(String, TypeId)],
ctx: &BindgenContext,
writer: &mut W,
) -> Result<(), CodegenError> {
if args.is_empty() {
write!(writer, "void")?;
} else {
serialize_sep(
", ",
args.iter(),
ctx,
writer,
|(name, type_id), ctx, buf| {
type_id.serialize(ctx, (), &mut vec![name.clone()], buf)
},
)?;
}
Ok(())
}
fn serialize_sep<
W: Write,
F: FnMut(I::Item, &BindgenContext, &mut W) -> Result<(), CodegenError>,
I: Iterator,
>(
sep: &str,
mut iter: I,
ctx: &BindgenContext,
buf: &mut W,
mut f: F,
) -> Result<(), CodegenError> {
if let Some(item) = iter.next() {
f(item, ctx, buf)?;
let sep = sep.as_bytes();
for item in iter {
buf.write_all(sep)?;
f(item, ctx, buf)?;
}
}
Ok(())
}
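
`serialize_sep` writes items joined by a separator, with no trailing one. A standalone sketch of the same pattern with the `BindgenContext` parameter dropped; `join_sketch` is hypothetical:

use std::io::Write;

fn join_sketch<W: Write>(
    sep: &str,
    mut iter: impl Iterator<Item = &'static str>,
    buf: &mut W,
) -> std::io::Result<()> {
    if let Some(first) = iter.next() {
        write!(buf, "{}", first)?;
        for item in iter {
            buf.write_all(sep.as_bytes())?;
            write!(buf, "{}", item)?;
        }
    }
    Ok(())
}

#[test]
fn join_sketch_usage() {
    let mut out = Vec::new();
    join_sketch(", ", ["int a", "char b"].into_iter(), &mut out).unwrap();
    assert_eq!(out, b"int a, char b");
}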

vendor/bindgen-0.70.1/codegen/struct_layout.rs

@@ -0,0 +1,462 @@
//! Helpers for code generation that need struct layout
use super::helpers;
use crate::ir::comp::CompInfo;
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
use crate::ir::ty::{Type, TypeKind};
use crate::FieldVisibilityKind;
use proc_macro2::{Ident, Span};
use std::cmp;
const MAX_GUARANTEED_ALIGN: usize = 8;
/// Tracks the layout of a struct.
#[derive(Debug)]
pub(crate) struct StructLayoutTracker<'a> {
name: &'a str,
ctx: &'a BindgenContext,
comp: &'a CompInfo,
is_packed: bool,
known_type_layout: Option<Layout>,
is_rust_union: bool,
can_copy_union_fields: bool,
latest_offset: usize,
padding_count: usize,
latest_field_layout: Option<Layout>,
max_field_align: usize,
last_field_was_bitfield: bool,
visibility: FieldVisibilityKind,
last_field_was_flexible_array: bool,
}
/// Returns a size aligned to a given value.
pub(crate) fn align_to(size: usize, align: usize) -> usize {
if align == 0 {
return size;
}
let rem = size % align;
if rem == 0 {
return size;
}
size + align - rem
}
/// Returns the smallest power-of-two byte count that can hold `n` bits.
pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize {
if n == 0 {
return 0;
}
if n <= 8 {
return 1;
}
if !n.is_power_of_two() {
n = n.next_power_of_two();
}
n / 8
}
#[test]
fn test_align_to() {
assert_eq!(align_to(1, 1), 1);
assert_eq!(align_to(1, 2), 2);
assert_eq!(align_to(1, 4), 4);
assert_eq!(align_to(5, 1), 5);
assert_eq!(align_to(17, 4), 20);
}
#[test]
fn test_bytes_from_bits_pow2() {
assert_eq!(bytes_from_bits_pow2(0), 0);
for i in 1..9 {
assert_eq!(bytes_from_bits_pow2(i), 1);
}
for i in 9..17 {
assert_eq!(bytes_from_bits_pow2(i), 2);
}
for i in 17..33 {
assert_eq!(bytes_from_bits_pow2(i), 4);
}
}
impl<'a> StructLayoutTracker<'a> {
pub(crate) fn new(
ctx: &'a BindgenContext,
comp: &'a CompInfo,
ty: &'a Type,
name: &'a str,
visibility: FieldVisibilityKind,
is_packed: bool,
) -> Self {
let known_type_layout = ty.layout(ctx);
let (is_rust_union, can_copy_union_fields) =
comp.is_rust_union(ctx, known_type_layout.as_ref(), name);
StructLayoutTracker {
name,
ctx,
comp,
visibility,
is_packed,
known_type_layout,
is_rust_union,
can_copy_union_fields,
latest_offset: 0,
padding_count: 0,
latest_field_layout: None,
max_field_align: 0,
last_field_was_bitfield: false,
last_field_was_flexible_array: false,
}
}
pub(crate) fn can_copy_union_fields(&self) -> bool {
self.can_copy_union_fields
}
pub(crate) fn is_rust_union(&self) -> bool {
self.is_rust_union
}
pub(crate) fn saw_flexible_array(&mut self) {
self.last_field_was_flexible_array = true;
}
pub(crate) fn saw_vtable(&mut self) {
debug!("saw vtable for {}", self.name);
let ptr_size = self.ctx.target_pointer_size();
self.latest_offset += ptr_size;
self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
self.max_field_align = ptr_size;
}
pub(crate) fn saw_base(&mut self, base_ty: &Type) {
debug!("saw base for {}", self.name);
if let Some(layout) = base_ty.layout(self.ctx) {
self.align_to_latest_field(layout);
self.latest_offset += self.padding_bytes(layout) + layout.size;
self.latest_field_layout = Some(layout);
self.max_field_align = cmp::max(self.max_field_align, layout.align);
}
}
pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) {
debug!("saw bitfield unit for {}: {:?}", self.name, layout);
self.align_to_latest_field(layout);
self.latest_offset += layout.size;
debug!(
"Offset: <bitfield>: {} -> {}",
self.latest_offset - layout.size,
self.latest_offset
);
self.latest_field_layout = Some(layout);
self.last_field_was_bitfield = true;
self.max_field_align = cmp::max(self.max_field_align, layout.align);
}
/// Returns a padding field if necessary for a given new field _before_
/// adding that field.
pub(crate) fn saw_field(
&mut self,
field_name: &str,
field_ty: &Type,
field_offset: Option<usize>,
) -> Option<proc_macro2::TokenStream> {
let mut field_layout = field_ty.layout(self.ctx)?;
if let TypeKind::Array(inner, len) =
*field_ty.canonical_type(self.ctx).kind()
{
// FIXME(emilio): As an _ultra_ hack, we correct the layout returned
// by arrays of structs that have a bigger alignment than what we
// can support.
//
// This means that the structs in the array are super-unsafe to
// access, since they won't be properly aligned, but there's not too
// much we can do about it.
if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
{
if layout.align > MAX_GUARANTEED_ALIGN {
field_layout.size =
align_to(layout.size, layout.align) * len;
field_layout.align = MAX_GUARANTEED_ALIGN;
}
}
}
self.saw_field_with_layout(field_name, field_layout, field_offset)
}
pub(crate) fn saw_field_with_layout(
&mut self,
field_name: &str,
field_layout: Layout,
field_offset: Option<usize>,
) -> Option<proc_macro2::TokenStream> {
let will_merge_with_bitfield = self.align_to_latest_field(field_layout);
let is_union = self.comp.is_union();
let padding_bytes = match field_offset {
Some(offset) if offset / 8 > self.latest_offset => {
offset / 8 - self.latest_offset
}
_ => {
if will_merge_with_bitfield ||
field_layout.align == 0 ||
is_union
{
0
} else if !self.is_packed {
self.padding_bytes(field_layout)
} else if let Some(mut l) = self.known_type_layout {
if field_layout.align < l.align {
l.align = field_layout.align;
}
self.padding_bytes(l)
} else {
0
}
}
};
self.latest_offset += padding_bytes;
let padding_layout = if self.is_packed || is_union {
None
} else {
let force_padding = self.ctx.options().force_explicit_padding;
// Otherwise the padding is useless.
let need_padding = force_padding ||
padding_bytes >= field_layout.align ||
field_layout.align > MAX_GUARANTEED_ALIGN;
debug!(
"Offset: <padding>: {} -> {}",
self.latest_offset - padding_bytes,
self.latest_offset
);
debug!(
"align field {} to {}/{} with {} padding bytes {:?}",
field_name,
self.latest_offset,
field_offset.unwrap_or(0) / 8,
padding_bytes,
field_layout
);
let padding_align = if force_padding {
1
} else {
cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN)
};
if need_padding && padding_bytes != 0 {
Some(Layout::new(padding_bytes, padding_align))
} else {
None
}
};
self.latest_offset += field_layout.size;
self.latest_field_layout = Some(field_layout);
self.max_field_align =
cmp::max(self.max_field_align, field_layout.align);
self.last_field_was_bitfield = false;
debug!(
"Offset: {}: {} -> {}",
field_name,
self.latest_offset - field_layout.size,
self.latest_offset
);
padding_layout.map(|layout| self.padding_field(layout))
}
pub(crate) fn add_tail_padding(
&mut self,
comp_name: &str,
comp_layout: Layout,
) -> Option<proc_macro2::TokenStream> {
// Only emit a padding field at the end of a struct if the
// user configures explicit padding.
if !self.ctx.options().force_explicit_padding {
return None;
}
// Padding doesn't make sense for rust unions.
if self.is_rust_union {
return None;
}
// It also doesn't make sense for structs with flexible array members.
if self.last_field_was_flexible_array {
return None;
}
if self.latest_offset == comp_layout.size {
// This struct does not contain tail padding.
return None;
}
trace!(
"need a tail padding field for {}: offset {} -> size {}",
comp_name,
self.latest_offset,
comp_layout.size
);
let size = comp_layout.size - self.latest_offset;
Some(self.padding_field(Layout::new(size, 0)))
}
pub(crate) fn pad_struct(
&mut self,
layout: Layout,
) -> Option<proc_macro2::TokenStream> {
debug!(
"pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
self, layout
);
if layout.size < self.latest_offset {
warn!(
"Calculated wrong layout for {}, too more {} bytes",
self.name,
self.latest_offset - layout.size
);
return None;
}
let padding_bytes = layout.size - self.latest_offset;
if padding_bytes == 0 {
return None;
}
let repr_align = self.ctx.options().rust_features().repr_align;
// We always pad to get to the correct size if the struct is one of
// those we can't align properly.
//
// Note that if the last field we saw was a bitfield, we may need to pad
// regardless, because bitfields don't respect alignment as strictly as
// other fields.
if padding_bytes >= layout.align ||
(self.last_field_was_bitfield &&
padding_bytes >= self.latest_field_layout.unwrap().align) ||
(!repr_align && layout.align > MAX_GUARANTEED_ALIGN)
{
let layout = if self.is_packed {
Layout::new(padding_bytes, 1)
} else if self.last_field_was_bitfield ||
layout.align > MAX_GUARANTEED_ALIGN
{
// We've already given up on alignment here.
Layout::for_size(self.ctx, padding_bytes)
} else {
Layout::new(padding_bytes, layout.align)
};
debug!("pad bytes to struct {}, {:?}", self.name, layout);
Some(self.padding_field(layout))
} else {
None
}
}
pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool {
let repr_align = self.ctx.options().rust_features().repr_align;
// Always force explicit repr(align) for stuff more than 16-byte aligned
// to work-around https://github.com/rust-lang/rust/issues/54341.
//
// Worst-case this just generates redundant alignment attributes.
if repr_align && self.max_field_align >= 16 {
return true;
}
if self.max_field_align >= layout.align {
return false;
}
// We can only generate up to 8 bytes of alignment unless we support
// repr(align).
repr_align || layout.align <= MAX_GUARANTEED_ALIGN
}
fn padding_bytes(&self, layout: Layout) -> usize {
align_to(self.latest_offset, layout.align) - self.latest_offset
}
fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
let ty = helpers::blob(self.ctx, layout);
let padding_count = self.padding_count;
self.padding_count += 1;
let padding_field_name = Ident::new(
&format!("__bindgen_padding_{}", padding_count),
Span::call_site(),
);
self.max_field_align = cmp::max(self.max_field_align, layout.align);
let vis = super::access_specifier(self.visibility);
quote! {
#vis #padding_field_name : #ty ,
}
}
/// Returns whether the new field is known to merge with a bitfield.
///
/// This is just to avoid doing the same check also in pad_field.
fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
if self.is_packed {
// Skip aligning fields when the struct is packed.
return false;
}
let layout = match self.latest_field_layout {
Some(l) => l,
None => return false,
};
// If the last field was a bitfield, we may or may not need to align,
// depending on the current field's alignment and the bitfield's size
// and alignment.
debug!(
"align_to_bitfield? {}: {:?} {:?}",
self.last_field_was_bitfield, layout, new_field_layout
);
// Avoid divide-by-zero errors if align is 0.
let align = cmp::max(1, layout.align);
if self.last_field_was_bitfield &&
new_field_layout.align <= layout.size % align &&
new_field_layout.size <= layout.size % align
{
// The new field will be coalesced into some of the remaining bits.
//
// FIXME(emilio): I think this may not catch everything?
debug!("Will merge with bitfield");
return true;
}
// Else, just align the obvious way.
self.latest_offset += self.padding_bytes(layout);
false
}
}
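
A worked example of the padding arithmetic above, for a hypothetical `struct { u8 a; u32 b; }` being laid out:

#[test]
fn padding_arithmetic_sketch() {
    // After `a`, latest_offset is 1; aligning to b's layout (size 4,
    // align 4) costs align_to(1, 4) - 1 = 3 padding bytes before `b`.
    assert_eq!(align_to(1, 4), 4);
    // Bitfield storage sizing: a 12-bit unit rounds up to the next
    // power-of-two byte count, i.e. 2 bytes.
    assert_eq!(bytes_from_bits_pow2(12), 2);
}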