Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/bindgen/ir/analysis/derive.rs vendored Normal file

@@ -0,0 +1,726 @@
//! Determining for which types we cannot emit `#[derive(Trait)]`.
use std::fmt;
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::analysis::has_vtable::HasVtable;
use crate::ir::comp::CompKind;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::derive::CanDerive;
use crate::ir::function::FunctionSig;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::layout::Layout;
use crate::ir::template::TemplateParameters;
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use crate::ir::ty::{Type, TypeKind};
use crate::{Entry, HashMap, HashSet};
/// Which trait to consider when doing the `CannotDerive` analysis.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum DeriveTrait {
/// The `Copy` trait.
Copy,
/// The `Debug` trait.
Debug,
/// The `Default` trait.
Default,
/// The `Hash` trait.
Hash,
/// The `PartialEq` and `PartialOrd` traits.
PartialEqOrPartialOrd,
}
/// An analysis that finds for each IR item whether a trait cannot be derived.
///
/// We use the monotone constraint function `cannot_derive`, defined as follows
/// for type T:
///
/// * If T is Opaque and the layout of the type is known, get this layout as an
/// opaque type and check whether it can derive using trivial checks.
///
/// * If T is Array, a trait cannot be derived if the array is incomplete,
/// if the length of the array is larger than the limit (unless the trait
/// allows it), or the trait cannot be derived for the type of data the array
/// contains.
///
/// * If T is Vector, a trait cannot be derived if the trait cannot be derived
/// for the type of data the vector contains.
///
/// * If T is a type alias, a templated alias or an indirection to another type,
/// the trait cannot be derived if the trait cannot be derived for the type T
/// refers to.
///
/// * If T is a compound type, the trait cannot be derived if the trait cannot
/// be derived for any of its base members or fields.
///
/// * If T is an instantiation of an abstract template definition, the trait
/// cannot be derived if any of the template arguments or template definition
/// cannot derive the trait.
///
/// * For all other (simple) types, compiler and standard library limitations
/// dictate whether the trait is implemented.
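///
/// As a rough illustration (the outcomes depend on `RUST_DERIVE_IN_ARRAY_LIMIT`
/// and on which Rust features are enabled), the analysis classifies inputs
/// roughly like this:
///
/// ```ignore
/// // Scalar fields derive everything: the item stays `CanDerive::Yes`.
/// struct Simple { x: u32 }
///
/// // An array longer than `RUST_DERIVE_IN_ARRAY_LIMIT` cannot use
/// // `#[derive]` for traits like `Debug` on older Rust targets, but a manual
/// // impl is still possible, so the analysis records `CanDerive::Manually`.
/// struct BigArray { buf: [u8; 1024] }
/// ```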
#[derive(Debug, Clone)]
pub(crate) struct CannotDerive<'ctx> {
ctx: &'ctx BindgenContext,
derive_trait: DeriveTrait,
// The incremental result of this analysis's computation.
// Contains information about whether a particular item can derive `derive_trait`.
can_derive: HashMap<ItemId, CanDerive>,
// Dependencies saying that if a key ItemId has been inserted into the
// `can_derive` map, then each of the ids
// in Vec<ItemId> need to be considered again.
//
// This is a subset of the natural IR graph with reversed edges, where we
// only include the edges from the IR graph that can affect whether a type
// can derive `derive_trait`.
dependencies: HashMap<ItemId, Vec<ItemId>>,
}
type EdgePredicate = fn(EdgeKind) -> bool;
fn consider_edge_default(kind: EdgeKind) -> bool {
match kind {
// These are the only edges that can affect whether a type can derive a trait.
EdgeKind::BaseMember |
EdgeKind::Field |
EdgeKind::TypeReference |
EdgeKind::VarType |
EdgeKind::TemplateArgument |
EdgeKind::TemplateDeclaration |
EdgeKind::TemplateParameterDefinition => true,
EdgeKind::Constructor |
EdgeKind::Destructor |
EdgeKind::FunctionReturn |
EdgeKind::FunctionParameter |
EdgeKind::InnerType |
EdgeKind::InnerVar |
EdgeKind::Method |
EdgeKind::Generic => false,
}
}
impl CannotDerive<'_> {
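/// Record the computed `CanDerive` for `id`: values only ever move "up" the
/// `Yes < Manually < No` ordering on `CanDerive`, and `Yes` entries are never
/// stored, since a missing entry already means "can derive".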
fn insert<Id: Into<ItemId>>(
&mut self,
id: Id,
can_derive: CanDerive,
) -> ConstrainResult {
let id = id.into();
trace!(
"inserting {id:?} can_derive<{}>={can_derive:?}",
self.derive_trait,
);
if let CanDerive::Yes = can_derive {
return ConstrainResult::Same;
}
match self.can_derive.entry(id) {
Entry::Occupied(mut entry) => {
if *entry.get() < can_derive {
entry.insert(can_derive);
ConstrainResult::Changed
} else {
ConstrainResult::Same
}
}
Entry::Vacant(entry) => {
entry.insert(can_derive);
ConstrainResult::Changed
}
}
}
fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive {
if !self.ctx.allowlisted_items().contains(&item.id()) {
let can_derive = self
.ctx
.blocklisted_type_implements_trait(item, self.derive_trait);
match can_derive {
CanDerive::Yes => trace!(
" blocklisted type explicitly implements {}",
self.derive_trait
),
CanDerive::Manually => trace!(
" blocklisted type requires manual implementation of {}",
self.derive_trait
),
CanDerive::No => trace!(
" cannot derive {} for blocklisted type",
self.derive_trait
),
}
return can_derive;
}
if self.derive_trait.not_by_name(self.ctx, item) {
trace!(
" cannot derive {} for explicitly excluded type",
self.derive_trait
);
return CanDerive::No;
}
trace!("ty: {ty:?}");
if item.is_opaque(self.ctx, &()) {
if !self.derive_trait.can_derive_union() &&
ty.is_union() &&
self.ctx.options().untagged_union
{
trace!(
" cannot derive {} for Rust unions",
self.derive_trait
);
return CanDerive::No;
}
let layout_can_derive =
ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
l.opaque().array_size_within_derive_limit()
});
match layout_can_derive {
CanDerive::Yes => {
trace!(
" we can trivially derive {} for the layout",
self.derive_trait
);
}
_ => {
trace!(
" we cannot derive {} for the layout",
self.derive_trait
);
}
}
return layout_can_derive;
}
match *ty.kind() {
// Handle the simple cases. These can derive traits without further
// information.
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Int(..) |
TypeKind::Complex(..) |
TypeKind::Float(..) |
TypeKind::Enum(..) |
TypeKind::TypeParam |
TypeKind::UnresolvedTypeRef(..) |
TypeKind::Reference(..) |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel => self.derive_trait.can_derive_simple(ty.kind()),
TypeKind::Pointer(inner) => {
let inner_type =
self.ctx.resolve_type(inner).canonical_type(self.ctx);
if let TypeKind::Function(ref sig) = *inner_type.kind() {
self.derive_trait.can_derive_fnptr(sig)
} else {
self.derive_trait.can_derive_pointer()
}
}
TypeKind::Function(ref sig) => {
self.derive_trait.can_derive_fnptr(sig)
}
// Complex cases need more information
TypeKind::Array(t, len) => {
let inner_type =
self.can_derive.get(&t.into()).copied().unwrap_or_default();
if inner_type != CanDerive::Yes {
trace!(
" arrays of T for which we cannot derive {} \
also cannot derive {}",
self.derive_trait,
self.derive_trait
);
return CanDerive::No;
}
if len == 0 && !self.derive_trait.can_derive_incomplete_array()
{
trace!(
" cannot derive {} for incomplete arrays",
self.derive_trait
);
return CanDerive::No;
}
if self.derive_trait.can_derive_large_array(self.ctx) {
trace!(" array can derive {}", self.derive_trait);
return CanDerive::Yes;
}
if len > RUST_DERIVE_IN_ARRAY_LIMIT {
trace!(
" array is too large to derive {}, but it may be implemented", self.derive_trait
);
return CanDerive::Manually;
}
trace!(
" array is small enough to derive {}",
self.derive_trait
);
CanDerive::Yes
}
TypeKind::Vector(t, len) => {
let inner_type =
self.can_derive.get(&t.into()).copied().unwrap_or_default();
if inner_type != CanDerive::Yes {
trace!(
" vectors of T for which we cannot derive {} \
also cannot derive {}",
self.derive_trait,
self.derive_trait
);
return CanDerive::No;
}
assert_ne!(len, 0, "vectors cannot have zero length");
self.derive_trait.can_derive_vector()
}
TypeKind::Comp(ref info) => {
assert!(
!info.has_non_type_template_params(),
"The early ty.is_opaque check should have handled this case"
);
if !self.derive_trait.can_derive_compound_forward_decl() &&
info.is_forward_declaration()
{
trace!(
" cannot derive {} for forward decls",
self.derive_trait
);
return CanDerive::No;
}
// NOTE: Take into account that while unions in C and C++ are copied by
// default, they may have an explicit destructor in C++, so we can't
// defer this check just for the union case.
if !self.derive_trait.can_derive_compound_with_destructor() &&
self.ctx.lookup_has_destructor(
item.id().expect_type_id(self.ctx),
)
{
trace!(
" comp has destructor which cannot derive {}",
self.derive_trait
);
return CanDerive::No;
}
if info.kind() == CompKind::Union {
if self.derive_trait.can_derive_union() {
if self.ctx.options().untagged_union &&
// https://github.com/rust-lang/rust/issues/36640
(!info.self_template_params(self.ctx).is_empty() ||
!item.all_template_params(self.ctx).is_empty())
{
trace!(
" cannot derive {} for Rust union because issue 36640", self.derive_trait
);
return CanDerive::No;
}
// fall through to the same handling as non-unions
} else {
if self.ctx.options().untagged_union {
trace!(
" cannot derive {} for Rust unions",
self.derive_trait
);
return CanDerive::No;
}
let layout_can_derive =
ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
l.opaque().array_size_within_derive_limit()
});
match layout_can_derive {
CanDerive::Yes => {
trace!(
" union layout can trivially derive {}",
self.derive_trait
);
}
_ => {
trace!(
" union layout cannot derive {}",
self.derive_trait
);
}
}
return layout_can_derive;
}
}
if !self.derive_trait.can_derive_compound_with_vtable() &&
item.has_vtable(self.ctx)
{
trace!(
" cannot derive {} for comp with vtable",
self.derive_trait
);
return CanDerive::No;
}
// Bitfield units are always represented as arrays of u8, but
// they're not traced as arrays, so we need to check here
// instead.
if !self.derive_trait.can_derive_large_array(self.ctx) &&
info.has_too_large_bitfield_unit() &&
!item.is_opaque(self.ctx, &())
{
trace!(
" cannot derive {} for comp with too large bitfield unit",
self.derive_trait
);
return CanDerive::No;
}
let pred = self.derive_trait.consider_edge_comp();
self.constrain_join(item, pred)
}
TypeKind::ResolvedTypeRef(..) |
TypeKind::TemplateAlias(..) |
TypeKind::Alias(..) |
TypeKind::BlockPointer(..) => {
let pred = self.derive_trait.consider_edge_typeref();
self.constrain_join(item, pred)
}
TypeKind::TemplateInstantiation(..) => {
let pred = self.derive_trait.consider_edge_tmpl_inst();
self.constrain_join(item, pred)
}
TypeKind::Opaque => unreachable!(
"The early ty.is_opaque check should have handled this case"
),
}
}
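/// Compute the join of the current `CanDerive` answers of every member of
/// `item` reachable over an edge accepted by `consider_edge`. The join starts
/// at `CanDerive::Yes` and can only move towards `CanDerive::No` as members
/// are visited; an item with no relevant members therefore stays
/// `CanDerive::Yes`.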
fn constrain_join(
&mut self,
item: &Item,
consider_edge: EdgePredicate,
) -> CanDerive {
let mut candidate = None;
item.trace(
self.ctx,
&mut |sub_id, edge_kind| {
// Ignore ourselves, since union with ourself is a
// no-op. Ignore edges that aren't relevant to the
// analysis.
if sub_id == item.id() || !consider_edge(edge_kind) {
return;
}
let can_derive = self.can_derive
.get(&sub_id)
.copied()
.unwrap_or_default();
match can_derive {
CanDerive::Yes => trace!(" member {sub_id:?} can derive {}", self.derive_trait),
CanDerive::Manually => trace!(" member {sub_id:?} cannot derive {}, but it may be implemented", self.derive_trait),
CanDerive::No => trace!(" member {sub_id:?} cannot derive {}", self.derive_trait),
}
*candidate.get_or_insert(CanDerive::Yes) |= can_derive;
},
&(),
);
if candidate.is_none() {
trace!(
" can derive {} because there are no members",
self.derive_trait
);
}
candidate.unwrap_or_default()
}
}
impl DeriveTrait {
fn not_by_name(self, ctx: &BindgenContext, item: &Item) -> bool {
match self {
DeriveTrait::Copy => ctx.no_copy_by_name(item),
DeriveTrait::Debug => ctx.no_debug_by_name(item),
DeriveTrait::Default => ctx.no_default_by_name(item),
DeriveTrait::Hash => ctx.no_hash_by_name(item),
DeriveTrait::PartialEqOrPartialOrd => {
ctx.no_partialeq_by_name(item)
}
}
}
fn consider_edge_comp(self) -> EdgePredicate {
match self {
DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
_ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field),
}
}
fn consider_edge_typeref(self) -> EdgePredicate {
match self {
DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
_ => |kind| kind == EdgeKind::TypeReference,
}
}
fn consider_edge_tmpl_inst(self) -> EdgePredicate {
match self {
DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
_ => |kind| {
matches!(
kind,
EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration
)
},
}
}
fn can_derive_large_array(self, ctx: &BindgenContext) -> bool {
if ctx.options().rust_features().larger_arrays {
!matches!(self, DeriveTrait::Default)
} else {
matches!(self, DeriveTrait::Copy)
}
}
fn can_derive_union(self) -> bool {
matches!(self, DeriveTrait::Copy)
}
fn can_derive_compound_with_destructor(self) -> bool {
!matches!(self, DeriveTrait::Copy)
}
fn can_derive_compound_with_vtable(self) -> bool {
!matches!(self, DeriveTrait::Default)
}
fn can_derive_compound_forward_decl(self) -> bool {
matches!(self, DeriveTrait::Copy | DeriveTrait::Debug)
}
fn can_derive_incomplete_array(self) -> bool {
!matches!(
self,
DeriveTrait::Copy |
DeriveTrait::Hash |
DeriveTrait::PartialEqOrPartialOrd
)
}
fn can_derive_fnptr(self, f: &FunctionSig) -> CanDerive {
match (self, f.function_pointers_can_derive()) {
(DeriveTrait::Copy | DeriveTrait::Default, _) | (_, true) => {
trace!(" function pointer can derive {self}");
CanDerive::Yes
}
(DeriveTrait::Debug, false) => {
trace!(" function pointer cannot derive {self}, but it may be implemented");
CanDerive::Manually
}
(_, false) => {
trace!(" function pointer cannot derive {self}");
CanDerive::No
}
}
}
fn can_derive_vector(self) -> CanDerive {
if self == DeriveTrait::PartialEqOrPartialOrd {
// FIXME: vectors always can derive PartialEq, but they should
// not derive PartialOrd:
// https://github.com/rust-lang-nursery/packed_simd/issues/48
trace!(" vectors cannot derive PartialOrd");
CanDerive::No
} else {
trace!(" vector can derive {self}");
CanDerive::Yes
}
}
fn can_derive_pointer(self) -> CanDerive {
if self == DeriveTrait::Default {
trace!(" pointer cannot derive Default");
CanDerive::No
} else {
trace!(" pointer can derive {self}");
CanDerive::Yes
}
}
fn can_derive_simple(self, kind: &TypeKind) -> CanDerive {
match (self, kind) {
// === Default ===
(
DeriveTrait::Default,
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::TypeParam |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel,
) => {
trace!(" types that always cannot derive Default");
CanDerive::No
}
(DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => {
unreachable!(
"Type with unresolved type ref can't reach derive default"
)
}
// === Hash ===
(
DeriveTrait::Hash,
TypeKind::Float(..) | TypeKind::Complex(..),
) => {
trace!(" float cannot derive Hash");
CanDerive::No
}
// === others ===
_ => {
trace!(" simple type that can always derive {self}");
CanDerive::Yes
}
}
}
}
impl fmt::Display for DeriveTrait {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let s = match self {
DeriveTrait::Copy => "Copy",
DeriveTrait::Debug => "Debug",
DeriveTrait::Default => "Default",
DeriveTrait::Hash => "Hash",
DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd",
};
s.fmt(f)
}
}
impl<'ctx> MonotoneFramework for CannotDerive<'ctx> {
type Node = ItemId;
type Extra = (&'ctx BindgenContext, DeriveTrait);
type Output = HashMap<ItemId, CanDerive>;
fn new(
(ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait),
) -> CannotDerive<'ctx> {
let can_derive = HashMap::default();
let dependencies = generate_dependencies(ctx, consider_edge_default);
CannotDerive {
ctx,
derive_trait,
can_derive,
dependencies,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
// The transitive closure of all allowlisted items, including explicitly
// blocklisted items.
self.ctx
.allowlisted_items()
.iter()
.copied()
.flat_map(|i| {
let mut reachable = vec![i];
i.trace(
self.ctx,
&mut |s, _| {
reachable.push(s);
},
&(),
);
reachable
})
.collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
trace!("constrain: {id:?}");
if let Some(CanDerive::No) = self.can_derive.get(&id) {
trace!(" already know it cannot derive {}", self.derive_trait);
return ConstrainResult::Same;
}
let item = self.ctx.resolve_item(id);
let can_derive = match item.as_type() {
Some(ty) => {
let mut can_derive = self.constrain_type(item, ty);
if let CanDerive::Yes = can_derive {
let is_reached_limit =
|l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT;
if !self.derive_trait.can_derive_large_array(self.ctx) &&
ty.layout(self.ctx).is_some_and(is_reached_limit)
{
// We have to be conservative: the struct *could* have enough
// padding that we emit an array that is longer than
// `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations
// into the IR and computed them before this analysis, then we could
// be precise rather than conservative here.
can_derive = CanDerive::Manually;
}
}
can_derive
}
None => self.constrain_join(item, consider_edge_default),
};
self.insert(id, can_derive)
}
fn each_depending_on<F>(&self, id: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&id) {
for item in edges {
trace!("enqueue {item:?} into worklist");
f(*item);
}
}
}
}
impl<'ctx> From<CannotDerive<'ctx>> for HashMap<ItemId, CanDerive> {
fn from(analysis: CannotDerive<'ctx>) -> Self {
extra_assert!(analysis
.can_derive
.values()
.all(|v| *v != CanDerive::Yes));
analysis.can_derive
}
}
/// Convert a `HashMap<ItemId, CanDerive>` into a `HashSet<ItemId>`.
///
/// Elements that are not `CanDerive::Yes` are kept in the set, so that it
/// represents all items that cannot derive.
pub(crate) fn as_cannot_derive_set(
can_derive: HashMap<ItemId, CanDerive>,
) -> HashSet<ItemId> {
can_derive
.into_iter()
.filter_map(|(k, v)| if v == CanDerive::Yes { None } else { Some(k) })
.collect()
}

vendor/bindgen/ir/analysis/has_destructor.rs vendored Normal file

@@ -0,0 +1,175 @@
//! Determining which types have destructors
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::{CompKind, Field, FieldMethods};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has a destructor or not
///
/// We use the monotone function `has destructor`, defined as follows:
///
/// * If T is a type alias, a templated alias, or an indirection to another type,
/// T has a destructor if the type T refers to has a destructor.
/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it;
/// if it is a struct, it also has a destructor if any of its base members or
/// any of its fields has a destructor.
/// * If T is an instantiation of an abstract template definition, T has
/// a destructor if its template definition has a destructor,
/// or if any of the template arguments has a destructor.
/// * If T is the type of a field, that field has a destructor if it's not a bitfield,
/// and if T has a destructor.
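///
/// For example, a C++ struct with a `std::string` field has a destructor even
/// if it does not declare one itself, because the field's type has one.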
#[derive(Debug, Clone)]
pub(crate) struct HasDestructorAnalysis<'ctx> {
ctx: &'ctx BindgenContext,
// The incremental result of this analysis's computation. Everything in this
// set definitely has a destructor.
have_destructor: HashSet<ItemId>,
// Dependencies saying that if a key ItemId has been inserted into the
// `have_destructor` set, then each of the ids in Vec<ItemId> need to be
// considered again.
//
// This is a subset of the natural IR graph with reversed edges, where we
// only include the edges from the IR graph that can affect whether a type
// has a destructor or not.
dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasDestructorAnalysis<'_> {
fn consider_edge(kind: EdgeKind) -> bool {
// These are the only edges that can affect whether a type has a
// destructor or not.
matches!(
kind,
EdgeKind::TypeReference |
EdgeKind::BaseMember |
EdgeKind::Field |
EdgeKind::TemplateArgument |
EdgeKind::TemplateDeclaration
)
}
fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
let id = id.into();
let was_not_already_in_set = self.have_destructor.insert(id);
assert!(
was_not_already_in_set,
"We shouldn't try and insert {id:?} twice because if it was \
already in the set, `constrain` should have exited early."
);
ConstrainResult::Changed
}
}
impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> Self {
let have_destructor = HashSet::default();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasDestructorAnalysis {
ctx,
have_destructor,
dependencies,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
self.ctx.allowlisted_items().iter().copied().collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
if self.have_destructor.contains(&id) {
// We've already computed that this type has a destructor and that can't
// change.
return ConstrainResult::Same;
}
let item = self.ctx.resolve_item(id);
let ty = match item.as_type() {
None => return ConstrainResult::Same,
Some(ty) => ty,
};
match *ty.kind() {
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::ResolvedTypeRef(t) => {
if self.have_destructor.contains(&t.into()) {
self.insert(id)
} else {
ConstrainResult::Same
}
}
TypeKind::Comp(ref info) => {
if info.has_own_destructor() {
return self.insert(id);
}
match info.kind() {
CompKind::Union => ConstrainResult::Same,
CompKind::Struct => {
let base_or_field_destructor =
info.base_members().iter().any(|base| {
self.have_destructor.contains(&base.ty.into())
}) || info.fields().iter().any(
|field| match *field {
Field::DataMember(ref data) => self
.have_destructor
.contains(&data.ty().into()),
Field::Bitfields(_) => false,
},
);
if base_or_field_destructor {
self.insert(id)
} else {
ConstrainResult::Same
}
}
}
}
TypeKind::TemplateInstantiation(ref inst) => {
let definition_or_arg_destructor = self
.have_destructor
.contains(&inst.template_definition().into()) ||
inst.template_arguments().iter().any(|arg| {
self.have_destructor.contains(&arg.into())
});
if definition_or_arg_destructor {
self.insert(id)
} else {
ConstrainResult::Same
}
}
_ => ConstrainResult::Same,
}
}
fn each_depending_on<F>(&self, id: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&id) {
for item in edges {
trace!("enqueue {item:?} into worklist");
f(*item);
}
}
}
}
impl<'ctx> From<HasDestructorAnalysis<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self {
analysis.have_destructor
}
}

vendor/bindgen/ir/analysis/has_float.rs vendored Normal file

@@ -0,0 +1,248 @@
//! Determining which types have float.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has float or not.
///
/// We use the monotone constraint function `has_float`,
/// defined as follows:
///
/// * If T is float or complex float, T trivially has float.
/// * If T is a type alias, a templated alias or an indirection to another type,
/// it has float if the type T refers to has float.
/// * If T is a compound type, it has float if any of its base members or fields
/// has float.
/// * If T is an instantiation of an abstract template definition, T has float
/// if any of the template arguments or the template definition has float.
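///
/// For example, `struct S { float f; };` has float, and so does any compound
/// type that embeds `S`, an array of `S`, or an alias that refers to `S`.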
#[derive(Debug, Clone)]
pub(crate) struct HasFloat<'ctx> {
ctx: &'ctx BindgenContext,
// The incremental result of this analysis's computation. Everything in this
// set has float.
has_float: HashSet<ItemId>,
// Dependencies saying that if a key ItemId has been inserted into the
// `has_float` set, then each of the ids in Vec<ItemId> need to be
// considered again.
//
// This is a subset of the natural IR graph with reversed edges, where we
// only include the edges from the IR graph that can affect whether a type
// has float or not.
dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasFloat<'_> {
fn consider_edge(kind: EdgeKind) -> bool {
match kind {
EdgeKind::BaseMember |
EdgeKind::Field |
EdgeKind::TypeReference |
EdgeKind::VarType |
EdgeKind::TemplateArgument |
EdgeKind::TemplateDeclaration |
EdgeKind::TemplateParameterDefinition => true,
EdgeKind::Constructor |
EdgeKind::Destructor |
EdgeKind::FunctionReturn |
EdgeKind::FunctionParameter |
EdgeKind::InnerType |
EdgeKind::InnerVar |
EdgeKind::Method |
EdgeKind::Generic => false,
}
}
fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
let id = id.into();
trace!("inserting {id:?} into the has_float set");
let was_not_already_in_set = self.has_float.insert(id);
assert!(
was_not_already_in_set,
"We shouldn't try and insert {id:?} twice because if it was \
already in the set, `constrain` should have exited early."
);
ConstrainResult::Changed
}
}
impl<'ctx> MonotoneFramework for HasFloat<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> {
let has_float = HashSet::default();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasFloat {
ctx,
has_float,
dependencies,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
self.ctx.allowlisted_items().iter().copied().collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
trace!("constrain: {id:?}");
if self.has_float.contains(&id) {
trace!(" already know it do not have float");
return ConstrainResult::Same;
}
let item = self.ctx.resolve_item(id);
let Some(ty) = item.as_type() else {
trace!(" not a type; ignoring");
return ConstrainResult::Same;
};
match *ty.kind() {
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Int(..) |
TypeKind::Function(..) |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::TypeParam |
TypeKind::Opaque |
TypeKind::Pointer(..) |
TypeKind::UnresolvedTypeRef(..) |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel => {
trace!(" simple type that do not have float");
ConstrainResult::Same
}
TypeKind::Float(..) | TypeKind::Complex(..) => {
trace!(" float type has float");
self.insert(id)
}
TypeKind::Array(t, _) => {
if self.has_float.contains(&t.into()) {
trace!(
" Array with type T that has float also has float"
);
return self.insert(id);
}
trace!(" Array with type T that do not have float also do not have float");
ConstrainResult::Same
}
TypeKind::Vector(t, _) => {
if self.has_float.contains(&t.into()) {
trace!(
" Vector with type T that has float also has float"
);
return self.insert(id);
}
trace!(" Vector with type T that do not have float also do not have float");
ConstrainResult::Same
}
TypeKind::ResolvedTypeRef(t) |
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::BlockPointer(t) => {
if self.has_float.contains(&t.into()) {
trace!(
" aliases and type refs to T which have float \
also have float"
);
self.insert(id)
} else {
trace!(" aliases and type refs to T which do not have float \
also do not have float");
ConstrainResult::Same
}
}
TypeKind::Comp(ref info) => {
let bases_have = info
.base_members()
.iter()
.any(|base| self.has_float.contains(&base.ty.into()));
if bases_have {
trace!(" bases have float, so we also have");
return self.insert(id);
}
let fields_have = info.fields().iter().any(|f| match *f {
Field::DataMember(ref data) => {
self.has_float.contains(&data.ty().into())
}
Field::Bitfields(ref bfu) => bfu
.bitfields()
.iter()
.any(|b| self.has_float.contains(&b.ty().into())),
});
if fields_have {
trace!(" fields have float, so we also have");
return self.insert(id);
}
trace!(" comp doesn't have float");
ConstrainResult::Same
}
TypeKind::TemplateInstantiation(ref template) => {
let args_have = template
.template_arguments()
.iter()
.any(|arg| self.has_float.contains(&arg.into()));
if args_have {
trace!(
" template args have float, so \
instantiation also has float"
);
return self.insert(id);
}
let def_has = self
.has_float
.contains(&template.template_definition().into());
if def_has {
trace!(
" template definition has float, so \
instantiation also has"
);
return self.insert(id);
}
trace!(" template instantiation do not have float");
ConstrainResult::Same
}
}
}
fn each_depending_on<F>(&self, id: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&id) {
for item in edges {
trace!("enqueue {item:?} into worklist");
f(*item);
}
}
}
}
impl<'ctx> From<HasFloat<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasFloat<'ctx>) -> Self {
analysis.has_float
}
}

vendor/bindgen/ir/analysis/has_type_param_in_array.rs vendored Normal file

@@ -0,0 +1,242 @@
//! Determining which types have type parameters in arrays.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has a type parameter in an array or not.
///
/// We use the monotone constraint function `has_type_parameter_in_array`,
/// defined as follows:
///
/// * If T is an array type whose element type is a type parameter, T trivially has one.
/// * If T is a type alias, a templated alias or an indirection to another type,
/// it has a type parameter in an array if the type T refers to has one.
/// * If T is a compound type, it has a type parameter in an array if any of its
/// base members or fields has one.
/// * If T is an instantiation of an abstract template definition, T has a type
/// parameter in an array if any of the template arguments or the template
/// definition has one.
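///
/// For example, a templated C++ member such as `T buffer[N]` (an array whose
/// element type is a template type parameter) marks the enclosing type, and
/// anything that embeds it, as having a type parameter in an array.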
#[derive(Debug, Clone)]
pub(crate) struct HasTypeParameterInArray<'ctx> {
ctx: &'ctx BindgenContext,
// The incremental result of this analysis's computation. Everything in this
// set has array.
has_type_parameter_in_array: HashSet<ItemId>,
// Dependencies saying that if a key ItemId has been inserted into the
// `has_type_parameter_in_array` set, then each of the ids in Vec<ItemId> need to be
// considered again.
//
// This is a subset of the natural IR graph with reversed edges, where we
// only include the edges from the IR graph that can affect whether a type
// has array or not.
dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasTypeParameterInArray<'_> {
fn consider_edge(kind: EdgeKind) -> bool {
match kind {
// These are the only edges that can affect whether a type has type parameter
// in array or not.
EdgeKind::BaseMember |
EdgeKind::Field |
EdgeKind::TypeReference |
EdgeKind::VarType |
EdgeKind::TemplateArgument |
EdgeKind::TemplateDeclaration |
EdgeKind::TemplateParameterDefinition => true,
EdgeKind::Constructor |
EdgeKind::Destructor |
EdgeKind::FunctionReturn |
EdgeKind::FunctionParameter |
EdgeKind::InnerType |
EdgeKind::InnerVar |
EdgeKind::Method |
EdgeKind::Generic => false,
}
}
fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
let id = id.into();
trace!("inserting {id:?} into the has_type_parameter_in_array set");
let was_not_already_in_set =
self.has_type_parameter_in_array.insert(id);
assert!(
was_not_already_in_set,
"We shouldn't try and insert {id:?} twice because if it was \
already in the set, `constrain` should have exited early."
);
ConstrainResult::Changed
}
}
impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashSet<ItemId>;
fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> {
let has_type_parameter_in_array = HashSet::default();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasTypeParameterInArray {
ctx,
has_type_parameter_in_array,
dependencies,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
self.ctx.allowlisted_items().iter().copied().collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
trace!("constrain: {id:?}");
if self.has_type_parameter_in_array.contains(&id) {
trace!(" already know it do not have array");
return ConstrainResult::Same;
}
let item = self.ctx.resolve_item(id);
let Some(ty) = item.as_type() else {
trace!(" not a type; ignoring");
return ConstrainResult::Same;
};
match *ty.kind() {
// Handle the simple cases. These cannot have array in type parameter
// without further information.
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Int(..) |
TypeKind::Float(..) |
TypeKind::Vector(..) |
TypeKind::Complex(..) |
TypeKind::Function(..) |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::TypeParam |
TypeKind::Opaque |
TypeKind::Pointer(..) |
TypeKind::UnresolvedTypeRef(..) |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel => {
trace!(" simple type that do not have array");
ConstrainResult::Same
}
TypeKind::Array(t, _) => {
let inner_ty =
self.ctx.resolve_type(t).canonical_type(self.ctx);
if let TypeKind::TypeParam = *inner_ty.kind() {
trace!(" Array with Named type has type parameter");
self.insert(id)
} else {
trace!(
" Array without Named type does have type parameter"
);
ConstrainResult::Same
}
}
TypeKind::ResolvedTypeRef(t) |
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::BlockPointer(t) => {
if self.has_type_parameter_in_array.contains(&t.into()) {
trace!(
" aliases and type refs to T which have array \
also have array"
);
self.insert(id)
} else {
trace!(
" aliases and type refs to T which do not have array \
also do not have array"
);
ConstrainResult::Same
}
}
TypeKind::Comp(ref info) => {
let bases_have = info.base_members().iter().any(|base| {
self.has_type_parameter_in_array.contains(&base.ty.into())
});
if bases_have {
trace!(" bases have array, so we also have");
return self.insert(id);
}
let fields_have = info.fields().iter().any(|f| match *f {
Field::DataMember(ref data) => self
.has_type_parameter_in_array
.contains(&data.ty().into()),
Field::Bitfields(..) => false,
});
if fields_have {
trace!(" fields have array, so we also have");
return self.insert(id);
}
trace!(" comp doesn't have array");
ConstrainResult::Same
}
TypeKind::TemplateInstantiation(ref template) => {
let args_have =
template.template_arguments().iter().any(|arg| {
self.has_type_parameter_in_array.contains(&arg.into())
});
if args_have {
trace!(
" template args have array, so \
instantiation also has array"
);
return self.insert(id);
}
let def_has = self
.has_type_parameter_in_array
.contains(&template.template_definition().into());
if def_has {
trace!(
" template definition has array, so \
instantiation also has"
);
return self.insert(id);
}
trace!(" template instantiation do not have array");
ConstrainResult::Same
}
}
}
fn each_depending_on<F>(&self, id: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&id) {
for item in edges {
trace!("enqueue {item:?} into worklist");
f(*item);
}
}
}
}
impl<'ctx> From<HasTypeParameterInArray<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self {
analysis.has_type_parameter_in_array
}
}

vendor/bindgen/ir/analysis/has_vtable.rs vendored Normal file

@@ -0,0 +1,235 @@
//! Determining which types have a vtable.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::cmp;
use std::ops;
/// The result of the `HasVtableAnalysis` for an individual item.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub(crate) enum HasVtableResult {
/// The item does not have a vtable pointer.
#[default]
No,
/// The item has a vtable and the actual vtable pointer is within this item.
SelfHasVtable,
/// The item has a vtable, but the actual vtable pointer is in a base
/// member.
BaseHasVtable,
}
impl HasVtableResult {
/// Take the least upper bound of `self` and `rhs`.
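///
/// A small illustration of the ordering, which follows the variant order
/// (`No < SelfHasVtable < BaseHasVtable`):
///
/// ```ignore
/// assert_eq!(
///     HasVtableResult::No.join(HasVtableResult::SelfHasVtable),
///     HasVtableResult::SelfHasVtable,
/// );
/// ```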
pub(crate) fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for HasVtableResult {
type Output = Self;
fn bitor(self, rhs: HasVtableResult) -> Self::Output {
self.join(rhs)
}
}
impl ops::BitOrAssign for HasVtableResult {
fn bitor_assign(&mut self, rhs: HasVtableResult) {
*self = self.join(rhs);
}
}
/// An analysis that finds for each IR item whether it has a vtable or not.
///
/// We use the monotone function `has vtable`, defined as follows:
///
/// * If T is a type alias, a templated alias, an indirection to another type,
/// or a reference to a type, T has a vtable if the type T refers to has one.
/// * If T is a compound type, T has a vtable if we saw a virtual function when
/// parsing it or if any of its base members has a vtable.
/// * If T is an instantiation of an abstract template definition, T has a
/// vtable if its template definition has one.
#[derive(Debug, Clone)]
pub(crate) struct HasVtableAnalysis<'ctx> {
ctx: &'ctx BindgenContext,
// The incremental result of this analysis's computation. Everything in this
// set definitely has a vtable.
have_vtable: HashMap<ItemId, HasVtableResult>,
// Dependencies saying that if a key ItemId has been inserted into the
// `have_vtable` set, then each of the ids in Vec<ItemId> need to be
// considered again.
//
// This is a subset of the natural IR graph with reversed edges, where we
// only include the edges from the IR graph that can affect whether a type
// has a vtable or not.
dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasVtableAnalysis<'_> {
fn consider_edge(kind: EdgeKind) -> bool {
// These are the only edges that can affect whether a type has a
// vtable or not.
matches!(
kind,
EdgeKind::TypeReference |
EdgeKind::BaseMember |
EdgeKind::TemplateDeclaration
)
}
fn insert<Id: Into<ItemId>>(
&mut self,
id: Id,
result: HasVtableResult,
) -> ConstrainResult {
if let HasVtableResult::No = result {
return ConstrainResult::Same;
}
let id = id.into();
match self.have_vtable.entry(id) {
Entry::Occupied(mut entry) => {
if *entry.get() < result {
entry.insert(result);
ConstrainResult::Changed
} else {
ConstrainResult::Same
}
}
Entry::Vacant(entry) => {
entry.insert(result);
ConstrainResult::Changed
}
}
}
fn forward<Id1, Id2>(&mut self, from: Id1, to: Id2) -> ConstrainResult
where
Id1: Into<ItemId>,
Id2: Into<ItemId>,
{
let from = from.into();
let to = to.into();
match self.have_vtable.get(&from) {
None => ConstrainResult::Same,
Some(r) => self.insert(to, *r),
}
}
}
impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashMap<ItemId, HasVtableResult>;
fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> {
let have_vtable = HashMap::default();
let dependencies = generate_dependencies(ctx, Self::consider_edge);
HasVtableAnalysis {
ctx,
have_vtable,
dependencies,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
self.ctx.allowlisted_items().iter().copied().collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
trace!("constrain {id:?}");
let item = self.ctx.resolve_item(id);
let ty = match item.as_type() {
None => return ConstrainResult::Same,
Some(ty) => ty,
};
// TODO #851: figure out a way to handle deriving from template type parameters.
match *ty.kind() {
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::ResolvedTypeRef(t) |
TypeKind::Reference(t) => {
trace!(
" aliases and references forward to their inner type"
);
self.forward(t, id)
}
TypeKind::Comp(ref info) => {
trace!(" comp considers its own methods and bases");
let mut result = HasVtableResult::No;
if info.has_own_virtual_method() {
trace!(" comp has its own virtual method");
result |= HasVtableResult::SelfHasVtable;
}
let bases_has_vtable = info.base_members().iter().any(|base| {
trace!(" comp has a base with a vtable: {base:?}");
self.have_vtable.contains_key(&base.ty.into())
});
if bases_has_vtable {
result |= HasVtableResult::BaseHasVtable;
}
self.insert(id, result)
}
TypeKind::TemplateInstantiation(ref inst) => {
self.forward(inst.template_definition(), id)
}
_ => ConstrainResult::Same,
}
}
fn each_depending_on<F>(&self, id: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&id) {
for item in edges {
trace!("enqueue {item:?} into worklist");
f(*item);
}
}
}
}
impl<'ctx> From<HasVtableAnalysis<'ctx>> for HashMap<ItemId, HasVtableResult> {
fn from(analysis: HasVtableAnalysis<'ctx>) -> Self {
// We let the lack of an entry mean "No" to save space.
extra_assert!(analysis
.have_vtable
.values()
.all(|v| { *v != HasVtableResult::No }));
analysis.have_vtable
}
}
/// A convenience trait for the things for which we might wonder if they have a
/// vtable during codegen.
///
/// This is not for _computing_ whether the thing has a vtable, it is for
/// looking up the results of the `HasVtableAnalysis`'s computations for a
/// specific thing.
pub(crate) trait HasVtable {
/// Return `true` if this thing has a vtable, `false` otherwise.
fn has_vtable(&self, ctx: &BindgenContext) -> bool;
/// Return `true` if this thing has an actual vtable pointer in itself, as
/// opposed to transitively in a base member.
fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool;
}

vendor/bindgen/ir/analysis/mod.rs vendored Normal file

@@ -0,0 +1,395 @@
//! Fix-point analyses on the IR using the "monotone framework".
//!
//! A lattice is a set with a partial ordering between elements, where there is
//! a single least upper bound and a single greatest lower bound for every
//! subset. We are dealing with finite lattices, which means that they have a
//! finite number of elements, and it follows that there exist a single top and
//! a single bottom member of the lattice. For example, the power set of a
//! finite set forms a finite lattice where partial ordering is defined by set
//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite
//! lattice constructed from the set {0,1,2}:
//!
//! ```text
//!        Top = {0,1,2}
//!         /    |    \
//!     {0,1}  {0,2}  {1,2}
//!       | \ /     \ / |
//!       |  X       X  |
//!       | / \     / \ |
//!      {0}    {1}    {2}
//!         \    |    /
//!         Bottom = {}
//! ```
//!
//! A monotone function `f` is a function where if `x <= y`, then it holds that
//! `f(x) <= f(y)`. It should be clear that running a monotone function to a
//! fix-point on a finite lattice will always terminate: `f` can only "move"
//! along the lattice in a single direction, and therefore can only either find
//! a fix-point in the middle of the lattice or continue to the top or bottom
//! depending on whether it is ascending or descending the lattice, respectively.
//!
//! For a deeper introduction to the general form of this kind of analysis, see
//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa].
//!
//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf
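//!
//! As a minimal illustration on the lattice above, the function
//! `f(S) = S ∪ {0}` is monotone: whenever `a` is a subset of `b`, `f(a)` is a
//! subset of `f(b)`, so repeated application can only climb towards the top
//! before reaching a fix-point.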
// Re-export individual analyses.
mod template_params;
pub(crate) use self::template_params::UsedTemplateParameters;
mod derive;
pub use self::derive::DeriveTrait;
pub(crate) use self::derive::{as_cannot_derive_set, CannotDerive};
mod has_vtable;
pub(crate) use self::has_vtable::{
HasVtable, HasVtableAnalysis, HasVtableResult,
};
mod has_destructor;
pub(crate) use self::has_destructor::HasDestructorAnalysis;
mod has_type_param_in_array;
pub(crate) use self::has_type_param_in_array::HasTypeParameterInArray;
mod has_float;
pub(crate) use self::has_float::HasFloat;
mod sizedness;
pub(crate) use self::sizedness::{
Sizedness, SizednessAnalysis, SizednessResult,
};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::HashMap;
use std::fmt;
use std::ops;
/// An analysis in the monotone framework.
///
/// Implementors of this trait must maintain the following two invariants:
///
/// 1. The concrete data must be a member of a finite-height lattice.
/// 2. The concrete `constrain` method must be monotone: that is,
/// if `x <= y`, then `constrain(x) <= constrain(y)`.
///
/// If these invariants do not hold, iteration to a fix-point might never
/// complete.
///
/// For a simple example analysis, see the `ReachableFrom` type in the `tests`
/// module below.
pub(crate) trait MonotoneFramework: Sized + fmt::Debug {
/// The type of node in our dependency graph.
///
/// This is just generic (and not `ItemId`) so that we can easily unit test
/// without constructing real `Item`s and their `ItemId`s.
type Node: Copy;
/// Any extra data that is needed during computation.
///
/// Again, this is just generic (and not `&BindgenContext`) so that we can
/// easily unit test without constructing real `BindgenContext`s full of
/// real `Item`s and real `ItemId`s.
type Extra: Sized;
/// The final output of this analysis. Once we have reached a fix-point, we
/// convert `self` into this type, and return it as the final result of the
/// analysis.
type Output: From<Self> + fmt::Debug;
/// Construct a new instance of this analysis.
fn new(extra: Self::Extra) -> Self;
/// Get the initial set of nodes from which to start the analysis. Unless
/// you are sure of some domain-specific knowledge, this should be the
/// complete set of nodes.
fn initial_worklist(&self) -> Vec<Self::Node>;
/// Update the analysis for the given node.
///
/// If this results in changing our internal state (ie, we discovered that
/// we have not reached a fix-point and iteration should continue), return
/// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`.
/// When `constrain` returns `ConstrainResult::Same` for all nodes in the
/// set, we have reached a fix-point and the analysis is complete.
fn constrain(&mut self, node: Self::Node) -> ConstrainResult;
/// For each node `d` that depends on the given `node`'s current answer when
/// running `constrain(d)`, call `f(d)`. This informs us which new nodes to
/// queue up in the worklist when `constrain(node)` reports updated
/// information.
fn each_depending_on<F>(&self, node: Self::Node, f: F)
where
F: FnMut(Self::Node);
}
/// Whether an analysis's `constrain` function modified the incremental results
/// or not.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub(crate) enum ConstrainResult {
/// The incremental results were updated, and the fix-point computation
/// should continue.
Changed,
/// The incremental results were not updated.
#[default]
Same,
}
impl ops::BitOr for ConstrainResult {
type Output = Self;
fn bitor(self, rhs: ConstrainResult) -> Self::Output {
if self == ConstrainResult::Changed || rhs == ConstrainResult::Changed {
ConstrainResult::Changed
} else {
ConstrainResult::Same
}
}
}
impl ops::BitOrAssign for ConstrainResult {
fn bitor_assign(&mut self, rhs: ConstrainResult) {
*self = *self | rhs;
}
}
/// Run an analysis in the monotone framework.
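///
/// A hedged usage sketch (the real call sites live elsewhere in bindgen and
/// may be wired slightly differently):
///
/// ```ignore
/// // Compute which items cannot derive `Debug`.
/// let cannot_debug: HashMap<ItemId, CanDerive> =
///     analyze::<CannotDerive>((ctx, DeriveTrait::Debug));
/// ```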
pub(crate) fn analyze<Analysis>(extra: Analysis::Extra) -> Analysis::Output
where
Analysis: MonotoneFramework,
{
let mut analysis = Analysis::new(extra);
let mut worklist = analysis.initial_worklist();
while let Some(node) = worklist.pop() {
if let ConstrainResult::Changed = analysis.constrain(node) {
analysis.each_depending_on(node, |needs_work| {
worklist.push(needs_work);
});
}
}
analysis.into()
}
/// Generate the dependency map for analysis
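///
/// The map is keyed by the referent: if an allowlisted item `A` has an edge
/// accepted by `consider_edge` to an item `B`, then the entry for `B` contains
/// `A`, so that updating the answer for `B` re-enqueues `A` in the worklist.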
pub(crate) fn generate_dependencies<F>(
ctx: &BindgenContext,
consider_edge: F,
) -> HashMap<ItemId, Vec<ItemId>>
where
F: Fn(EdgeKind) -> bool,
{
let mut dependencies = HashMap::default();
for &item in ctx.allowlisted_items() {
dependencies.entry(item).or_insert_with(Vec::new);
{
// We reverse our natural IR graph edges to find dependencies
// between nodes.
item.trace(
ctx,
&mut |sub_item: ItemId, edge_kind| {
if ctx.allowlisted_items().contains(&sub_item) &&
consider_edge(edge_kind)
{
dependencies
.entry(sub_item)
.or_insert_with(Vec::new)
.push(item);
}
},
&(),
);
}
}
dependencies
}
#[cfg(test)]
mod tests {
use super::*;
use crate::HashSet;
// Here we find the set of nodes that are reachable from any given
// node. This is a lattice mapping nodes to subsets of all nodes. Our join
// function is set union.
//
// This is our test graph:
//
//     1 --> 3          5 --> 6, 7
//     2 --> 2          6 --> 8
//     3 --> 4, 5       7 --> 3
//     4 --> 7          8 --> (no outgoing edges)
//
// And here is the mapping from a node to the set of nodes that are
// reachable from it within the test graph:
//
// 1: {3,4,5,6,7,8}
// 2: {2}
// 3: {3,4,5,6,7,8}
// 4: {3,4,5,6,7,8}
// 5: {3,4,5,6,7,8}
// 6: {8}
// 7: {3,4,5,6,7,8}
// 8: {}
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
struct Node(usize);
#[derive(Clone, Debug, Default, PartialEq, Eq)]
struct Graph(HashMap<Node, Vec<Node>>);
impl Graph {
fn make_test_graph() -> Graph {
let mut g = Graph::default();
g.0.insert(Node(1), vec![Node(3)]);
g.0.insert(Node(2), vec![Node(2)]);
g.0.insert(Node(3), vec![Node(4), Node(5)]);
g.0.insert(Node(4), vec![Node(7)]);
g.0.insert(Node(5), vec![Node(6), Node(7)]);
g.0.insert(Node(6), vec![Node(8)]);
g.0.insert(Node(7), vec![Node(3)]);
g.0.insert(Node(8), vec![]);
g
}
fn reverse(&self) -> Graph {
let mut reversed = Graph::default();
for (node, edges) in &self.0 {
reversed.0.entry(*node).or_insert_with(Vec::new);
for referent in edges {
reversed
.0
.entry(*referent)
.or_insert_with(Vec::new)
.push(*node);
}
}
reversed
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct ReachableFrom<'a> {
reachable: HashMap<Node, HashSet<Node>>,
graph: &'a Graph,
reversed: Graph,
}
impl<'a> MonotoneFramework for ReachableFrom<'a> {
type Node = Node;
type Extra = &'a Graph;
type Output = HashMap<Node, HashSet<Node>>;
fn new(graph: &'a Graph) -> Self {
let reversed = graph.reverse();
ReachableFrom {
reachable: Default::default(),
graph,
reversed,
}
}
fn initial_worklist(&self) -> Vec<Node> {
self.graph.0.keys().copied().collect()
}
fn constrain(&mut self, node: Node) -> ConstrainResult {
// The set of nodes reachable from a node `x` is
//
// reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ...
//
// where there exist edges from `x` to each of `s_0, s_1, ...`.
//
// Yes, what follows is a **terribly** inefficient set union
// implementation. Don't copy this code outside of this test!
let original_size = self.reachable.entry(node).or_default().len();
for sub_node in &self.graph.0[&node] {
self.reachable.get_mut(&node).unwrap().insert(*sub_node);
let sub_reachable =
self.reachable.entry(*sub_node).or_default().clone();
for transitive in sub_reachable {
self.reachable.get_mut(&node).unwrap().insert(transitive);
}
}
let new_size = self.reachable[&node].len();
if original_size == new_size {
ConstrainResult::Same
} else {
ConstrainResult::Changed
}
}
fn each_depending_on<F>(&self, node: Node, mut f: F)
where
F: FnMut(Node),
{
for dep in &self.reversed.0[&node] {
f(*dep);
}
}
}
impl<'a> From<ReachableFrom<'a>> for HashMap<Node, HashSet<Node>> {
fn from(reachable: ReachableFrom<'a>) -> Self {
reachable.reachable
}
}
#[test]
fn monotone() {
let g = Graph::make_test_graph();
let reachable = analyze::<ReachableFrom>(&g);
println!("reachable = {reachable:#?}");
fn nodes<A>(nodes: A) -> HashSet<Node>
where
A: AsRef<[usize]>,
{
nodes.as_ref().iter().copied().map(Node).collect()
}
let mut expected = HashMap::default();
expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8]));
expected.insert(Node(2), nodes([2]));
expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8]));
expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8]));
expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8]));
expected.insert(Node(6), nodes([8]));
expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8]));
expected.insert(Node(8), nodes([]));
println!("expected = {expected:#?}");
assert_eq!(reachable, expected);
}
}

vendor/bindgen/ir/analysis/sizedness.rs vendored Normal file

@@ -0,0 +1,353 @@
//! Determining the sizedness of types (as base classes and otherwise).
use super::{
generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework,
};
use crate::ir::context::{BindgenContext, TypeId};
use crate::ir::item::IsOpaque;
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::{cmp, ops};
/// The result of the `Sizedness` analysis for an individual item.
///
/// This is a chain lattice of the form:
///
/// ```ignore
/// NonZeroSized
/// |
/// DependsOnTypeParam
/// |
/// ZeroSized
/// ```
///
/// We initially assume that all types are `ZeroSized` and then update our
/// understanding as we learn more about each type.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub(crate) enum SizednessResult {
/// The type is zero-sized.
///
/// This means that if it is a C++ type, and is not being used as a base
/// member, then we must add an `_address` byte to enforce the
/// unique-address-per-distinct-object-instance rule.
#[default]
ZeroSized,
/// Whether this type is zero-sized or not depends on whether a type
/// parameter is zero-sized or not.
///
/// For example, given these definitions:
///
/// ```c++
/// template<class T>
/// class Flongo : public T {};
///
/// class Empty {};
///
/// class NonEmpty { int x; };
/// ```
///
/// Then `Flongo<Empty>` is zero-sized, and needs an `_address` byte
/// inserted, while `Flongo<NonEmpty>` is *not* zero-sized, and should *not*
/// have an `_address` byte inserted.
///
/// We don't handle this situation correctly right now:
/// <https://github.com/rust-lang/rust-bindgen/issues/586>
DependsOnTypeParam,
/// Has some size that is known to be greater than zero. That doesn't mean
/// it has a static size, but it is not zero sized for sure. In other words,
/// it might contain an incomplete array or some other dynamically sized
/// type.
NonZeroSized,
}
impl SizednessResult {
/// Take the least upper bound of `self` and `rhs`.
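///
/// A small illustration of the ordering, which follows the variant order
/// (`ZeroSized < DependsOnTypeParam < NonZeroSized`):
///
/// ```ignore
/// assert_eq!(
///     SizednessResult::ZeroSized.join(SizednessResult::NonZeroSized),
///     SizednessResult::NonZeroSized,
/// );
/// ```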
pub(crate) fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for SizednessResult {
type Output = Self;
fn bitor(self, rhs: SizednessResult) -> Self::Output {
self.join(rhs)
}
}
impl ops::BitOrAssign for SizednessResult {
fn bitor_assign(&mut self, rhs: SizednessResult) {
*self = self.join(rhs);
}
}
/// An analysis that computes the sizedness of all types.
///
/// * For types with known sizes -- for example pointers, scalars, etc... --
/// they are assigned `NonZeroSized`.
///
/// * For compound structure types with one or more fields, they are assigned
/// `NonZeroSized`.
///
/// * For compound structure types without any fields, the results of the bases
/// are `join`ed.
///
/// * For type parameters, `DependsOnTypeParam` is assigned.
#[derive(Debug)]
pub(crate) struct SizednessAnalysis<'ctx> {
ctx: &'ctx BindgenContext,
dependencies: HashMap<TypeId, Vec<TypeId>>,
// Incremental results of the analysis. Missing entries are implicitly
// considered `ZeroSized`.
sized: HashMap<TypeId, SizednessResult>,
}
impl SizednessAnalysis<'_> {
fn consider_edge(kind: EdgeKind) -> bool {
// These are the only edges that can affect whether a type is
// zero-sized or not.
matches!(
kind,
EdgeKind::TemplateArgument |
EdgeKind::TemplateParameterDefinition |
EdgeKind::TemplateDeclaration |
EdgeKind::TypeReference |
EdgeKind::BaseMember |
EdgeKind::Field
)
}
/// Insert an incremental result, and return whether this updated our
/// knowledge of types and we should continue the analysis.
fn insert(
&mut self,
id: TypeId,
result: SizednessResult,
) -> ConstrainResult {
trace!("inserting {result:?} for {id:?}");
if let SizednessResult::ZeroSized = result {
return ConstrainResult::Same;
}
match self.sized.entry(id) {
Entry::Occupied(mut entry) => {
if *entry.get() < result {
entry.insert(result);
ConstrainResult::Changed
} else {
ConstrainResult::Same
}
}
Entry::Vacant(entry) => {
entry.insert(result);
ConstrainResult::Changed
}
}
}
fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult {
match self.sized.get(&from) {
None => ConstrainResult::Same,
Some(r) => self.insert(to, *r),
}
}
}
impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> {
type Node = TypeId;
type Extra = &'ctx BindgenContext;
type Output = HashMap<TypeId, SizednessResult>;
fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> {
let dependencies = generate_dependencies(ctx, Self::consider_edge)
.into_iter()
.filter_map(|(id, sub_ids)| {
id.as_type_id(ctx).map(|id| {
(
id,
sub_ids
.into_iter()
.filter_map(|s| s.as_type_id(ctx))
.collect::<Vec<_>>(),
)
})
})
.collect();
let sized = HashMap::default();
SizednessAnalysis {
ctx,
dependencies,
sized,
}
}
fn initial_worklist(&self) -> Vec<TypeId> {
self.ctx
.allowlisted_items()
.iter()
.filter_map(|id| id.as_type_id(self.ctx))
.collect()
}
fn constrain(&mut self, id: TypeId) -> ConstrainResult {
trace!("constrain {id:?}");
if let Some(SizednessResult::NonZeroSized) = self.sized.get(&id) {
trace!(" already know it is not zero-sized");
return ConstrainResult::Same;
}
if id.has_vtable_ptr(self.ctx) {
trace!(" has an explicit vtable pointer, therefore is not zero-sized");
return self.insert(id, SizednessResult::NonZeroSized);
}
let ty = self.ctx.resolve_type(id);
if id.is_opaque(self.ctx, &()) {
trace!(" type is opaque; checking layout...");
let result =
ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| {
if l.size == 0 {
trace!(" ...layout has size == 0");
SizednessResult::ZeroSized
} else {
trace!(" ...layout has size > 0");
SizednessResult::NonZeroSized
}
});
return self.insert(id, result);
}
match *ty.kind() {
TypeKind::Void => {
trace!(" void is zero-sized");
self.insert(id, SizednessResult::ZeroSized)
}
TypeKind::TypeParam => {
trace!(
" type params sizedness depends on what they're \
instantiated as"
);
self.insert(id, SizednessResult::DependsOnTypeParam)
}
TypeKind::Int(..) |
TypeKind::Float(..) |
TypeKind::Complex(..) |
TypeKind::Function(..) |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::NullPtr |
TypeKind::ObjCId |
TypeKind::ObjCSel |
TypeKind::Pointer(..) => {
trace!(" {:?} is known not to be zero-sized", ty.kind());
self.insert(id, SizednessResult::NonZeroSized)
}
TypeKind::ObjCInterface(..) => {
trace!(" obj-c interfaces always have at least the `isa` pointer");
self.insert(id, SizednessResult::NonZeroSized)
}
TypeKind::TemplateAlias(t, _) |
TypeKind::Alias(t) |
TypeKind::BlockPointer(t) |
TypeKind::ResolvedTypeRef(t) => {
trace!(" aliases and type refs forward to their inner type");
self.forward(t, id)
}
TypeKind::TemplateInstantiation(ref inst) => {
trace!(
" template instantiations are zero-sized if their \
definition is zero-sized"
);
self.forward(inst.template_definition(), id)
}
TypeKind::Array(_, 0) => {
trace!(" arrays of zero elements are zero-sized");
self.insert(id, SizednessResult::ZeroSized)
}
TypeKind::Array(..) => {
trace!(" arrays of > 0 elements are not zero-sized");
self.insert(id, SizednessResult::NonZeroSized)
}
TypeKind::Vector(..) => {
trace!(" vectors are not zero-sized");
self.insert(id, SizednessResult::NonZeroSized)
}
TypeKind::Comp(ref info) => {
trace!(" comp considers its own fields and bases");
if !info.fields().is_empty() {
return self.insert(id, SizednessResult::NonZeroSized);
}
let result = info
.base_members()
.iter()
.filter_map(|base| self.sized.get(&base.ty))
.fold(SizednessResult::ZeroSized, |a, b| a.join(*b));
self.insert(id, result)
}
TypeKind::Opaque => {
unreachable!("covered by the .is_opaque() check above")
}
TypeKind::UnresolvedTypeRef(..) => {
unreachable!("Should have been resolved after parsing!");
}
}
}
fn each_depending_on<F>(&self, id: TypeId, mut f: F)
where
F: FnMut(TypeId),
{
if let Some(edges) = self.dependencies.get(&id) {
for ty in edges {
trace!("enqueue {ty:?} into worklist");
f(*ty);
}
}
}
}
impl<'ctx> From<SizednessAnalysis<'ctx>> for HashMap<TypeId, SizednessResult> {
fn from(analysis: SizednessAnalysis<'ctx>) -> Self {
// We let the lack of an entry mean "ZeroSized" to save space.
extra_assert!(analysis
.sized
.values()
.all(|v| { *v != SizednessResult::ZeroSized }));
analysis.sized
}
}
/// A convenience trait for querying whether some type or ID is sized.
///
/// This is not for _computing_ whether the thing is sized; it is for looking up
/// the results of the `Sizedness` analysis's computations for a specific thing.
pub(crate) trait Sizedness {
/// Get the sizedness of this type.
fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult;
/// Is the sizedness for this type `SizednessResult::ZeroSized`?
fn is_zero_sized(&self, ctx: &BindgenContext) -> bool {
self.sizedness(ctx) == SizednessResult::ZeroSized
}
}


@@ -0,0 +1,601 @@
//! Discover which template type parameters are actually used.
//!
//! ### Why do we care?
//!
//! C++ allows ignoring template parameters, while Rust does not. Usually we can
//! blindly stick a `PhantomData<T>` inside a generic Rust struct to make up for
//! this. That doesn't work for templated type aliases, however:
//!
//! ```C++
//! template <typename T>
//! using Fml = int;
//! ```
//!
//! If we generate the naive Rust code for this alias, we get:
//!
//! ```ignore
//! pub(crate) type Fml<T> = ::std::os::raw::c_int;
//! ```
//!
//! And this is rejected by `rustc` due to the unused type parameter.
//!
//! (Aside: in these simple cases, `libclang` will often just give us the
//! aliased type directly, and we will never even know we were dealing with
//! aliases, let alone templated aliases. It's the more convoluted scenarios
//! where we get to have some fun...)
//!
//! For such problematic template aliases, we could generate a tuple whose
//! second member is a `PhantomData<T>`. Or, if we wanted to go the extra mile,
//! we could even generate some smarter wrapper that implements `Deref`,
//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased
//! type. However, this is still lackluster:
//!
//! 1. Even with a billion conversion-trait implementations, using the generated
//! bindings is rather un-ergonomic.
//! 2. With either of these solutions, we need to keep track of which aliases
//! we've transformed like this in order to generate correct uses of the
//! wrapped type.
//!
//! Given that we have to properly track which template parameters ended up used
//! for (2), we might as well leverage that information to make ergonomic
//! bindings that don't contain any unused type parameters at all, and
//! completely avoid the pain of (1).
//!
//! ### How do we determine which template parameters are used?
//!
//! Determining which template parameters are actually used is a trickier
//! problem than it might seem at a glance. On the one hand, trivial uses are
//! easy to detect:
//!
//! ```C++
//! template <typename T>
//! class Foo {
//! T trivial_use_of_t;
//! };
//! ```
//!
//! It gets harder when determining if one template parameter is used depends on
//! determining if another template parameter is used. In this example, whether
//! `U` is used depends on whether `T` is used.
//!
//! ```C++
//! template <typename T>
//! class DoesntUseT {
//! int x;
//! };
//!
//! template <typename U>
//! class Fml {
//! DoesntUseT<U> lololol;
//! };
//! ```
//!
//! We can express the set of used template parameters as a constraint solving
//! problem (where the set of template parameters used by a given IR item is the
//! union of its sub-item's used template parameters) and iterate to a
//! fixed-point.
//!
//! We use the `ir::analysis::MonotoneFramework` infrastructure for this
//! fix-point analysis, where our lattice is the mapping from each IR item to
//! the powerset of the template parameters that appear in the input C++ header,
/// and our join function is set union. The set of template parameters appearing in
//! the program is finite, as is the number of IR items. We start at our
//! lattice's bottom element: every item mapping to an empty set of template
//! parameters. Our analysis only adds members to each item's set of used
//! template parameters, never removes them, so it is monotone. Because our
//! lattice is finite and our constraint function is monotone, iteration to a
//! fix-point will terminate.
//!
//! See `src/ir/analysis.rs` for more.
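// Minimal sketch (illustrative only, not part of the vendored file) of the
// fixed-point idea described above, on a toy dependency graph. Each node's
// "used set" is the union of its successors' sets, and we iterate until
// nothing changes. The node numbering and the `HashMap`-based graph are
// assumptions made up for this example.
#[cfg(test)]
mod fix_point_sketch {
    use std::collections::{BTreeSet, HashMap};

    #[test]
    fn sets_grow_until_nothing_changes() {
        // Node 0 "uses" itself (think: a named template parameter); nodes 1
        // and 2 only use whatever their successors use.
        let successors: HashMap<u32, Vec<u32>> =
            [(0, vec![]), (1, vec![0]), (2, vec![1])].into_iter().collect();
        let mut used: HashMap<u32, BTreeSet<u32>> = [
            (0, BTreeSet::from([0])),
            (1, BTreeSet::new()),
            (2, BTreeSet::new()),
        ]
        .into_iter()
        .collect();

        let mut changed = true;
        while changed {
            changed = false;
            for (node, succs) in &successors {
                let mut new_set = used[node].clone();
                for succ in succs {
                    new_set.extend(used[succ].iter().copied());
                }
                // Monotone: the sets only ever grow, so iteration terminates.
                if new_set.len() > used[node].len() {
                    used.insert(*node, new_set);
                    changed = true;
                }
            }
        }

        assert_eq!(used[&2], BTreeSet::from([0]));
    }
}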
use super::{ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::item::{Item, ItemSet};
use crate::ir::template::{TemplateInstantiation, TemplateParameters};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item its set of template parameters that
/// it uses.
///
/// We use the monotone constraint function `template_param_usage`, defined as
/// follows:
///
/// * If `T` is a named template type parameter, it trivially uses itself:
///
/// ```ignore
/// template_param_usage(T) = { T }
/// ```
///
/// * If `inst` is a template instantiation, `inst.args` are the template
/// instantiation's template arguments, `inst.def` is the template definition
/// being instantiated, and `inst.def.params` is the template definition's
/// template parameters, then the instantiation's usage is the union of each
/// of its arguments' usages *if* the corresponding template parameter is in
/// turn used by the template definition:
///
/// ```ignore
/// template_param_usage(inst) = union(
/// template_param_usage(inst.args[i])
/// for i in 0..length(inst.args)
/// if inst.def.params[i] in template_param_usage(inst.def)
/// )
/// ```
///
/// * Finally, for all other IR item kinds, we use our lattice's `join`
/// operation: set union with each successor of the given item's template
/// parameter usage:
///
/// ```ignore
/// template_param_usage(v) =
/// union(template_param_usage(w) for w in successors(v))
/// ```
///
/// Note that we ignore certain edges in the graph, such as edges from a
/// template declaration to its template parameters' definitions for this
/// analysis. If we didn't, then we would mistakenly determine that every
/// template parameter is always used.
///
/// The final wrinkle is handling of blocklisted types. Normally, we say that
/// the set of allowlisted items is the transitive closure of items explicitly
/// called out for allowlisting, *without* any items explicitly called out as
/// blocklisted. However, for the purposes of this analysis's correctness, we
/// simplify and run the analysis on the full transitive closure of
/// allowlisted items. We do, however, treat instantiations of blocklisted items
/// specially; see `constrain_instantiation_of_blocklisted_template` and its
/// documentation for details.
#[derive(Debug, Clone)]
pub(crate) struct UsedTemplateParameters<'ctx> {
ctx: &'ctx BindgenContext,
// The Option is only there for temporary moves out of the hash map. See the
// comments in `UsedTemplateParameters::constrain` below.
used: HashMap<ItemId, Option<ItemSet>>,
dependencies: HashMap<ItemId, Vec<ItemId>>,
// The set of allowlisted items, without any blocklisted items reachable
// from the allowlisted items which would otherwise be considered
// allowlisted as well.
allowlisted_items: HashSet<ItemId>,
}
impl UsedTemplateParameters<'_> {
fn consider_edge(kind: EdgeKind) -> bool {
match kind {
// For each of these kinds of edges, if the referent uses a template
// parameter, then it should be considered that the origin of the
// edge also uses the template parameter.
EdgeKind::TemplateArgument |
EdgeKind::BaseMember |
EdgeKind::Field |
EdgeKind::Constructor |
EdgeKind::Destructor |
EdgeKind::VarType |
EdgeKind::FunctionReturn |
EdgeKind::FunctionParameter |
EdgeKind::TypeReference => true,
// An inner var or type using a template parameter is orthogonal
// to whether we use it. See template-param-usage-{6,11}.hpp.
EdgeKind::InnerVar | EdgeKind::InnerType => false,
// We can't emit machine code for new monomorphizations of class
// templates' methods (and don't detect explicit instantiations) so
// we must ignore template parameters that are only used by
// methods. This doesn't apply to a function type's return or
// parameter types, however, because of type aliases of function
// pointers that use template parameters, eg
// tests/headers/struct_with_typedef_template_arg.hpp
EdgeKind::Method => false,
// If we considered these edges, we would end up mistakenly claiming
// that every template parameter is always used.
EdgeKind::TemplateDeclaration |
EdgeKind::TemplateParameterDefinition => false,
// Since we have to be careful about which edges we consider for
// this analysis to be correct, we ignore generic edges. We also
// avoid a `_` wild card to force authors of new edge kinds to
// determine whether they need to be considered by this analysis.
EdgeKind::Generic => false,
}
}
fn take_this_id_usage_set<Id: Into<ItemId>>(
&mut self,
this_id: Id,
) -> ItemSet {
let this_id = this_id.into();
self.used
.get_mut(&this_id)
.expect(
"Should have a set of used template params for every item \
id",
)
.take()
.expect(
"Should maintain the invariant that all used template param \
sets are `Some` upon entry of `constrain`",
)
}
/// We say that blocklisted items use all of their template parameters. The
/// blocklisted type is most likely implemented explicitly by the user,
/// since it won't be in the generated bindings, and we don't know exactly
/// what they'll do with template parameters, but we can push the issue down
/// the line to them.
fn constrain_instantiation_of_blocklisted_template(
&self,
this_id: ItemId,
used_by_this_id: &mut ItemSet,
instantiation: &TemplateInstantiation,
) {
trace!(
" instantiation of blocklisted template, uses all template \
arguments"
);
let args = instantiation
.template_arguments()
.iter()
.map(|a| {
a.into_resolver()
.through_type_refs()
.through_type_aliases()
.resolve(self.ctx)
.id()
})
.filter(|a| *a != this_id)
.flat_map(|a| {
self.used
.get(&a)
.expect("Should have a used entry for the template arg")
.as_ref()
.expect(
"Because a != this_id, and all used template \
param sets other than this_id's are `Some`, \
a's used template param set should be `Some`",
)
.iter()
});
used_by_this_id.extend(args);
}
/// A template instantiation's concrete template argument is only used if
/// the template definition uses the corresponding template parameter.
fn constrain_instantiation(
&self,
this_id: ItemId,
used_by_this_id: &mut ItemSet,
instantiation: &TemplateInstantiation,
) {
trace!(" template instantiation");
let decl = self.ctx.resolve_type(instantiation.template_definition());
let args = instantiation.template_arguments();
let params = decl.self_template_params(self.ctx);
debug_assert!(this_id != instantiation.template_definition());
let used_by_def = self.used
.get(&instantiation.template_definition().into())
.expect("Should have a used entry for instantiation's template definition")
.as_ref()
.expect("And it should be Some because only this_id's set is None, and an \
instantiation's template definition should never be the \
instantiation itself");
for (arg, param) in args.iter().zip(params.iter()) {
trace!(
" instantiation's argument {arg:?} is used if definition's \
parameter {param:?} is used",
);
if used_by_def.contains(&param.into()) {
trace!(" param is used by template definition");
let arg = arg
.into_resolver()
.through_type_refs()
.through_type_aliases()
.resolve(self.ctx)
.id();
if arg == this_id {
continue;
}
let used_by_arg = self
.used
.get(&arg)
.expect("Should have a used entry for the template arg")
.as_ref()
.expect(
"Because arg != this_id, and all used template \
param sets other than this_id's are `Some`, \
arg's used template param set should be \
`Some`",
)
.iter();
used_by_this_id.extend(used_by_arg);
}
}
}
/// The join operation on our lattice: the set union of all of this ID's
/// successors.
fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) {
trace!(" other item: join with successors' usage");
item.trace(
self.ctx,
&mut |sub_id, edge_kind| {
// Ignore ourselves, since union with ourself is a
// no-op. Ignore edges that aren't relevant to the
// analysis.
if sub_id == item.id() || !Self::consider_edge(edge_kind) {
return;
}
let used_by_sub_id = self
.used
.get(&sub_id)
.expect("Should have a used set for the sub_id successor")
.as_ref()
.expect(
"Because sub_id != id, and all used template \
param sets other than id's are `Some`, \
sub_id's used template param set should be \
`Some`",
)
.iter();
trace!(
" union with {sub_id:?}'s usage: {:?}",
used_by_sub_id.clone().collect::<Vec<_>>()
);
used_by_this_id.extend(used_by_sub_id);
},
&(),
);
}
}
impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashMap<ItemId, ItemSet>;
fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> {
let mut used = HashMap::default();
let mut dependencies = HashMap::default();
let allowlisted_items: HashSet<_> =
ctx.allowlisted_items().iter().copied().collect();
let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items
.iter()
.copied()
.flat_map(|i| {
let mut reachable = vec![i];
i.trace(
ctx,
&mut |s, _| {
reachable.push(s);
},
&(),
);
reachable
})
.collect();
for item in allowlisted_and_blocklisted_items {
dependencies.entry(item).or_insert_with(Vec::new);
used.entry(item).or_insert_with(|| Some(ItemSet::new()));
{
// We reverse our natural IR graph edges to find dependencies
// between nodes.
item.trace(
ctx,
&mut |sub_item: ItemId, _| {
used.entry(sub_item)
.or_insert_with(|| Some(ItemSet::new()));
dependencies
.entry(sub_item)
.or_insert_with(Vec::new)
.push(item);
},
&(),
);
}
// Additionally, whether a template instantiation's template
// arguments are used depends on whether the template declaration's
// generic template parameters are used.
let item_kind =
ctx.resolve_item(item).as_type().map(|ty| ty.kind());
if let Some(TypeKind::TemplateInstantiation(inst)) = item_kind {
let decl = ctx.resolve_type(inst.template_definition());
let args = inst.template_arguments();
// Although template definitions should always have
// template parameters, there is a single exception:
// opaque templates, which may have none; in that case the `zip` below simply yields nothing.
let params = decl.self_template_params(ctx);
for (arg, param) in args.iter().zip(params.iter()) {
let arg = arg
.into_resolver()
.through_type_aliases()
.through_type_refs()
.resolve(ctx)
.id();
let param = param
.into_resolver()
.through_type_aliases()
.through_type_refs()
.resolve(ctx)
.id();
used.entry(arg).or_insert_with(|| Some(ItemSet::new()));
used.entry(param).or_insert_with(|| Some(ItemSet::new()));
dependencies
.entry(arg)
.or_insert_with(Vec::new)
.push(param);
}
}
}
if cfg!(feature = "__testing_only_extra_assertions") {
// Invariant: The `used` map has an entry for every allowlisted
// item, as well as all explicitly blocklisted items that are
// reachable from allowlisted items.
//
// Invariant: the `dependencies` map has an entry for every
// allowlisted item.
//
// (This is so that every item we call `constrain` on is guaranteed
// to have a set of template parameters, and we can allow
// blocklisted templates to use all of their parameters).
for item in &allowlisted_items {
extra_assert!(used.contains_key(item));
extra_assert!(dependencies.contains_key(item));
item.trace(
ctx,
&mut |sub_item, _| {
extra_assert!(used.contains_key(&sub_item));
extra_assert!(dependencies.contains_key(&sub_item));
},
&(),
);
}
}
UsedTemplateParameters {
ctx,
used,
dependencies,
allowlisted_items,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
// The transitive closure of all allowlisted items, including explicitly
// blocklisted items.
self.ctx
.allowlisted_items()
.iter()
.copied()
.flat_map(|i| {
let mut reachable = vec![i];
i.trace(
self.ctx,
&mut |s, _| {
reachable.push(s);
},
&(),
);
reachable
})
.collect()
}
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
// Invariant: all hash map entries' values are `Some` upon entering and
// exiting this method.
extra_assert!(self.used.values().all(|v| v.is_some()));
// Take the set for this ID out of the hash map while we mutate it based
// on other hash map entries. We *must* put it back into the hash map at
// the end of this method. This allows us to side-step HashMap's lack of
// an analog to slice::split_at_mut.
let mut used_by_this_id = self.take_this_id_usage_set(id);
trace!("constrain {id:?}");
trace!(" initially, used set is {used_by_this_id:?}");
let original_len = used_by_this_id.len();
let item = self.ctx.resolve_item(id);
let ty_kind = item.as_type().map(|ty| ty.kind());
match ty_kind {
// Named template type parameters trivially use themselves.
Some(&TypeKind::TypeParam) => {
trace!(" named type, trivially uses itself");
used_by_this_id.insert(id);
}
// Template instantiations only use their template arguments if the
// template definition uses the corresponding template parameter.
Some(TypeKind::TemplateInstantiation(inst)) => {
if self
.allowlisted_items
.contains(&inst.template_definition().into())
{
self.constrain_instantiation(
id,
&mut used_by_this_id,
inst,
);
} else {
self.constrain_instantiation_of_blocklisted_template(
id,
&mut used_by_this_id,
inst,
);
}
}
// Otherwise, add the union of each of its referent item's template
// parameter usage.
_ => self.constrain_join(&mut used_by_this_id, item),
}
trace!(" finally, used set is {used_by_this_id:?}");
let new_len = used_by_this_id.len();
assert!(
new_len >= original_len,
"This is the property that ensures this function is monotone -- \
if it doesn't hold, the analysis might never terminate!"
);
// Put the set back in the hash map and restore our invariant.
debug_assert!(self.used[&id].is_none());
self.used.insert(id, Some(used_by_this_id));
extra_assert!(self.used.values().all(|v| v.is_some()));
if new_len == original_len {
ConstrainResult::Same
} else {
ConstrainResult::Changed
}
}
fn each_depending_on<F>(&self, item: ItemId, mut f: F)
where
F: FnMut(ItemId),
{
if let Some(edges) = self.dependencies.get(&item) {
for item in edges {
trace!("enqueue {item:?} into worklist");
f(*item);
}
}
}
}
impl<'ctx> From<UsedTemplateParameters<'ctx>> for HashMap<ItemId, ItemSet> {
fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self {
used_templ_params
.used
.into_iter()
.map(|(k, v)| (k, v.unwrap()))
.collect()
}
}

259
vendor/bindgen/ir/annotations.rs vendored Normal file

@@ -0,0 +1,259 @@
//! Types and functions related to bindgen annotation comments.
//!
//! Users can add annotations in doc comments to types that they would like to
//! replace other types with, mark as opaque, etc. This module deals with all of
//! that stuff.
use std::str::FromStr;
use crate::clang;
/// What kind of visibility modifier should be used for a struct or field?
#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Default)]
pub enum FieldVisibilityKind {
/// Fields are marked as private, i.e., struct Foo {bar: bool}
Private,
/// Fields are marked as crate public, i.e., struct Foo {pub(crate) bar: bool}
PublicCrate,
/// Fields are marked as public, i.e., struct Foo {pub bar: bool}
#[default]
Public,
}
impl FromStr for FieldVisibilityKind {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"private" => Ok(Self::Private),
"crate" => Ok(Self::PublicCrate),
"public" => Ok(Self::Public),
_ => Err(format!("Invalid visibility kind: `{s}`")),
}
}
}
impl std::fmt::Display for FieldVisibilityKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match self {
FieldVisibilityKind::Private => "private",
FieldVisibilityKind::PublicCrate => "crate",
FieldVisibilityKind::Public => "public",
};
s.fmt(f)
}
}
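// Small illustrative round-trip check (not part of the vendored file): the
// `FromStr` and `Display` impls above are inverses for the accepted strings.
#[cfg(test)]
mod visibility_round_trip_sketch {
    use super::FieldVisibilityKind;

    #[test]
    fn parse_and_display_round_trip() {
        for s in ["private", "crate", "public"] {
            let kind: FieldVisibilityKind = s.parse().expect("known visibility kind");
            assert_eq!(kind.to_string(), s);
        }
        assert!("friend".parse::<FieldVisibilityKind>().is_err());
    }
}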
/// What kind of accessor should we provide for a field?
#[derive(Copy, PartialEq, Eq, Clone, Debug)]
pub(crate) enum FieldAccessorKind {
/// No accessor.
None,
/// Plain accessor.
Regular,
/// Unsafe accessor.
Unsafe,
/// Immutable accessor.
Immutable,
}
/// Annotations for a given item, or a field.
///
/// You can see the kind of comments that are accepted in the [Doxygen documentation](https://www.doxygen.nl/manual/docblocks.html).
#[derive(Default, Clone, PartialEq, Eq, Debug)]
pub(crate) struct Annotations {
/// Whether this item is marked as opaque. Only applies to types.
opaque: bool,
/// Whether this item should be hidden from the output. Only applies to
/// types, or enum variants.
hide: bool,
/// Whether this type should be replaced by another. The name is a
/// namespace-aware path.
use_instead_of: Option<Vec<String>>,
/// Manually disable deriving copy/clone on this type. Only applies to
/// struct or union types.
disallow_copy: bool,
/// Manually disable deriving debug on this type.
disallow_debug: bool,
/// Manually disable deriving/implement default on this type.
disallow_default: bool,
/// Whether to add a `#[must_use]` annotation to this type.
must_use_type: bool,
/// Visibility of struct fields. You can set this on
/// structs (it will apply to all the fields), or individual fields.
visibility_kind: Option<FieldVisibilityKind>,
/// The kind of accessor this field will have. Also can be applied to
/// structs so all the fields inside share it by default.
accessor_kind: Option<FieldAccessorKind>,
/// Whether this enum variant should be constified.
///
/// This is controlled by the `constant` attribute, this way:
///
/// ```cpp
/// enum Foo {
/// Bar = 0, /**< <div rustbindgen constant></div> */
/// Baz = 0,
/// };
/// ```
///
/// In that case, bindgen will generate a constant for `Bar` instead of
/// `Baz`.
constify_enum_variant: bool,
/// List of explicit derives for this type.
derives: Vec<String>,
/// List of explicit attributes for this type.
attributes: Vec<String>,
}
fn parse_accessor(s: &str) -> FieldAccessorKind {
match s {
"false" => FieldAccessorKind::None,
"unsafe" => FieldAccessorKind::Unsafe,
"immutable" => FieldAccessorKind::Immutable,
_ => FieldAccessorKind::Regular,
}
}
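// Tiny illustrative check (not part of the vendored file): how the `accessor`
// attribute values handled above map to `FieldAccessorKind`.
#[cfg(test)]
mod parse_accessor_sketch {
    use super::{parse_accessor, FieldAccessorKind};

    #[test]
    fn maps_known_values_and_defaults_to_regular() {
        assert_eq!(parse_accessor("false"), FieldAccessorKind::None);
        assert_eq!(parse_accessor("unsafe"), FieldAccessorKind::Unsafe);
        assert_eq!(parse_accessor("immutable"), FieldAccessorKind::Immutable);
        // Anything else (including "true") falls back to a plain accessor.
        assert_eq!(parse_accessor("true"), FieldAccessorKind::Regular);
    }
}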
impl Annotations {
/// Construct new annotations for the given cursor and its bindgen comments
/// (if any).
pub(crate) fn new(cursor: &clang::Cursor) -> Option<Annotations> {
let mut anno = Annotations::default();
let mut matched_one = false;
anno.parse(&cursor.comment(), &mut matched_one);
if matched_one {
Some(anno)
} else {
None
}
}
/// Should this type be hidden?
pub(crate) fn hide(&self) -> bool {
self.hide
}
/// Should this type be opaque?
pub(crate) fn opaque(&self) -> bool {
self.opaque
}
/// For a given type, indicates the type it should replace.
///
/// For example, in the following code:
///
/// ```cpp
///
/// /** <div rustbindgen replaces="Bar"></div> */
/// struct Foo { int x; };
///
/// struct Bar { char foo; };
/// ```
///
/// the generated code would look something like:
///
/// ```
/// /** <div rustbindgen replaces="Bar"></div> */
/// struct Bar {
/// x: ::std::os::raw::c_int,
/// };
/// ```
///
/// That is, code for `Foo` is used to generate `Bar`.
pub(crate) fn use_instead_of(&self) -> Option<&[String]> {
self.use_instead_of.as_deref()
}
/// The list of derives that have been specified in this annotation.
pub(crate) fn derives(&self) -> &[String] {
&self.derives
}
/// The list of attributes that have been specified in this annotation.
pub(crate) fn attributes(&self) -> &[String] {
&self.attributes
}
/// Should we avoid implementing the `Copy` trait?
pub(crate) fn disallow_copy(&self) -> bool {
self.disallow_copy
}
/// Should we avoid implementing the `Debug` trait?
pub(crate) fn disallow_debug(&self) -> bool {
self.disallow_debug
}
/// Should we avoid implementing the `Default` trait?
pub(crate) fn disallow_default(&self) -> bool {
self.disallow_default
}
/// Should this type get a `#[must_use]` annotation?
pub(crate) fn must_use_type(&self) -> bool {
self.must_use_type
}
/// What visibility modifier should this type's fields have?
pub(crate) fn visibility_kind(&self) -> Option<FieldVisibilityKind> {
self.visibility_kind
}
/// What kind of accessors should we provide for this type's fields?
pub(crate) fn accessor_kind(&self) -> Option<FieldAccessorKind> {
self.accessor_kind
}
fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) {
use clang_sys::CXComment_HTMLStartTag;
if comment.kind() == CXComment_HTMLStartTag &&
comment.get_tag_name() == "div" &&
comment
.get_tag_attrs()
.next()
.is_some_and(|attr| attr.name == "rustbindgen")
{
*matched = true;
for attr in comment.get_tag_attrs() {
match attr.name.as_str() {
"opaque" => self.opaque = true,
"hide" => self.hide = true,
"nocopy" => self.disallow_copy = true,
"nodebug" => self.disallow_debug = true,
"nodefault" => self.disallow_default = true,
"mustusetype" => self.must_use_type = true,
"replaces" => {
self.use_instead_of = Some(
attr.value.split("::").map(Into::into).collect(),
);
}
"derive" => self.derives.push(attr.value),
"attribute" => self.attributes.push(attr.value),
"private" => {
self.visibility_kind = if attr.value == "false" {
Some(FieldVisibilityKind::Public)
} else {
Some(FieldVisibilityKind::Private)
};
}
"accessor" => {
self.accessor_kind = Some(parse_accessor(&attr.value));
}
"constant" => self.constify_enum_variant = true,
_ => {}
}
}
}
for child in comment.get_children() {
self.parse(&child, matched);
}
}
/// Returns whether we've parsed a "constant" attribute.
pub(crate) fn constify_enum_variant(&self) -> bool {
self.constify_enum_variant
}
}

100
vendor/bindgen/ir/comment.rs vendored Normal file

@@ -0,0 +1,100 @@
//! Utilities for manipulating C/C++ comments.
/// The type of a comment.
#[derive(Debug, PartialEq, Eq)]
enum Kind {
/// A `///` comment, or something of the like.
/// All lines in a comment should start with the same symbol.
SingleLines,
/// A `/**` comment, where each subsequent line can start with `*` and the
/// entire block ends with `*/`.
MultiLine,
}
/// Preprocesses a C/C++ comment so that it is a valid Rust comment.
pub(crate) fn preprocess(comment: &str) -> String {
match kind(comment) {
Some(Kind::SingleLines) => preprocess_single_lines(comment),
Some(Kind::MultiLine) => preprocess_multi_line(comment),
None => comment.to_owned(),
}
}
/// Gets the kind of the doc comment, if it is one.
fn kind(comment: &str) -> Option<Kind> {
if comment.starts_with("/*") {
Some(Kind::MultiLine)
} else if comment.starts_with("//") {
Some(Kind::SingleLines)
} else {
None
}
}
/// Preprocesses multiple single line comments.
///
/// Handles lines starting with both `//` and `///`.
fn preprocess_single_lines(comment: &str) -> String {
debug_assert!(comment.starts_with("//"), "comment is not single line");
let lines: Vec<_> = comment
.lines()
.map(|l| l.trim().trim_start_matches('/'))
.collect();
lines.join("\n")
}
fn preprocess_multi_line(comment: &str) -> String {
let comment = comment
.trim_start_matches('/')
.trim_end_matches('/')
.trim_end_matches('*');
// Strip any potential `*` characters preceding each line.
let mut lines: Vec<_> = comment
.lines()
.map(|line| line.trim().trim_start_matches('*').trim_start_matches('!'))
.skip_while(|line| line.trim().is_empty()) // Skip the first empty lines.
.collect();
// Remove the trailing line corresponding to the `*/`.
if lines.last().is_some_and(|l| l.trim().is_empty()) {
lines.pop();
}
lines.join("\n")
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn picks_up_single_and_multi_line_doc_comments() {
assert_eq!(kind("/// hello"), Some(Kind::SingleLines));
assert_eq!(kind("/** world */"), Some(Kind::MultiLine));
}
#[test]
fn processes_single_lines_correctly() {
assert_eq!(preprocess("///"), "");
assert_eq!(preprocess("/// hello"), " hello");
assert_eq!(preprocess("// hello"), " hello");
assert_eq!(preprocess("// hello"), " hello");
}
#[test]
fn processes_multi_lines_correctly() {
assert_eq!(preprocess("/**/"), "");
assert_eq!(
preprocess("/** hello \n * world \n * foo \n */"),
" hello\n world\n foo"
);
assert_eq!(
preprocess("/**\nhello\n*world\n*foo\n*/"),
"hello\nworld\nfoo"
);
}
}

1921
vendor/bindgen/ir/comp.rs vendored Normal file

File diff suppressed because it is too large

3107
vendor/bindgen/ir/context.rs vendored Normal file

File diff suppressed because it is too large

130
vendor/bindgen/ir/derive.rs vendored Normal file

@@ -0,0 +1,130 @@
//! Traits for determining whether we can derive traits for a thing or not.
//!
//! These traits tend to come in pairs:
//!
//! 1. A "trivial" version, whose implementations aren't allowed to recursively
//! look at other types or the results of fix point analyses.
//!
//! 2. A "normal" version, whose implementations simply query the results of a
//! fix point analysis.
//!
//! The former is used by the analyses when creating the results queried by the
//! second.
use super::context::BindgenContext;
use std::cmp;
use std::ops;
/// A trait that encapsulates the logic for whether or not we can derive `Debug`
/// for a given thing.
pub(crate) trait CanDeriveDebug {
/// Return `true` if `Debug` can be derived for this thing, `false`
/// otherwise.
fn can_derive_debug(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Copy`
/// for a given thing.
pub(crate) trait CanDeriveCopy {
/// Return `true` if `Copy` can be derived for this thing, `false`
/// otherwise.
fn can_derive_copy(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive
/// `Default` for a given thing.
pub(crate) trait CanDeriveDefault {
/// Return `true` if `Default` can be derived for this thing, `false`
/// otherwise.
fn can_derive_default(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Hash`
/// for a given thing.
pub(crate) trait CanDeriveHash {
/// Return `true` if `Hash` can be derived for this thing, `false`
/// otherwise.
fn can_derive_hash(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive
/// `PartialEq` for a given thing.
pub(crate) trait CanDerivePartialEq {
/// Return `true` if `PartialEq` can be derived for this thing, `false`
/// otherwise.
fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive
/// `PartialOrd` for a given thing.
pub(crate) trait CanDerivePartialOrd {
/// Return `true` if `PartialOrd` can be derived for this thing, `false`
/// otherwise.
fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Eq`
/// for a given thing.
pub(crate) trait CanDeriveEq {
/// Return `true` if `Eq` can be derived for this thing, `false` otherwise.
fn can_derive_eq(&self, ctx: &BindgenContext) -> bool;
}
/// A trait that encapsulates the logic for whether or not we can derive `Ord`
/// for a given thing.
pub(crate) trait CanDeriveOrd {
/// Return `true` if `Ord` can be derived for this thing, `false` otherwise.
fn can_derive_ord(&self, ctx: &BindgenContext) -> bool;
}
/// Whether or not it is possible to automatically derive a trait for an item.
///
/// ```ignore
/// No
/// ^
/// |
/// Manually
/// ^
/// |
/// Yes
/// ```
///
/// Initially we assume that we can derive the trait for all types and then
/// update our understanding as we learn more about each type.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum CanDerive {
/// Yes, we can derive automatically.
#[default]
Yes,
/// The only thing that stops us from automatically deriving is that an
/// array with more than the maximum number of elements is used.
///
/// This means we probably can "manually" implement such a trait.
Manually,
/// No, we cannot.
No,
}
impl CanDerive {
/// Take the least upper bound of `self` and `rhs`.
pub(crate) fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for CanDerive {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
self.join(rhs)
}
}
impl ops::BitOrAssign for CanDerive {
fn bitor_assign(&mut self, rhs: Self) {
*self = self.join(rhs);
}
}
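// Illustrative sketch (not part of the vendored file): `join` keeps the most
// restrictive answer, since the ordering is `Yes < Manually < No`.
#[cfg(test)]
mod can_derive_join_sketch {
    use super::CanDerive;

    #[test]
    fn join_keeps_the_most_restrictive_answer() {
        assert_eq!(CanDerive::Yes.join(CanDerive::Manually), CanDerive::Manually);
        assert_eq!(CanDerive::Manually | CanDerive::No, CanDerive::No);

        // Accumulating over several fields, e.g. while walking a struct.
        let mut acc = CanDerive::Yes;
        acc |= CanDerive::Yes;
        acc |= CanDerive::Manually;
        assert_eq!(acc, CanDerive::Manually);
    }
}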

85
vendor/bindgen/ir/dot.rs vendored Normal file

@@ -0,0 +1,85 @@
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
pub(crate) trait DotAttributes {
/// Write this thing's attributes to the given output. Each attribute must
/// be its own `<tr>...</tr>`.
fn dot_attributes<W>(
&self,
ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: Write;
}
/// Write a graphviz dot file containing our IR.
pub(crate) fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
P: AsRef<Path>,
{
let file = File::create(path)?;
let mut dot_file = io::BufWriter::new(file);
writeln!(&mut dot_file, "digraph {{")?;
let mut err: Option<io::Result<_>> = None;
for (id, item) in ctx.items() {
let is_allowlisted = ctx.allowlisted_items().contains(&id);
writeln!(
&mut dot_file,
r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
id.as_usize(),
if is_allowlisted { "black" } else { "gray" }
)?;
item.dot_attributes(ctx, &mut dot_file)?;
writeln!(&mut dot_file, "</table> >];")?;
item.trace(
ctx,
&mut |sub_id: ItemId, edge_kind| {
if err.is_some() {
return;
}
match writeln!(
&mut dot_file,
"{} -> {} [label={edge_kind:?}, color={}];",
id.as_usize(),
sub_id.as_usize(),
if is_allowlisted { "black" } else { "gray" }
) {
Ok(_) => {}
Err(e) => err = Some(Err(e)),
}
},
&(),
);
if let Some(err) = err {
return err;
}
if let Some(module) = item.as_module() {
for child in module.children() {
writeln!(
&mut dot_file,
"{} -> {} [style=dotted, color=gray]",
item.id().as_usize(),
child.as_usize()
)?;
}
}
}
writeln!(&mut dot_file, "}}")?;
Ok(())
}

321
vendor/bindgen/ir/enum_ty.rs vendored Normal file

@@ -0,0 +1,321 @@
//! Intermediate representation for C/C++ enumerations.
use super::super::codegen::EnumVariation;
use super::context::{BindgenContext, TypeId};
use super::item::Item;
use super::ty::{Type, TypeKind};
use crate::clang;
use crate::ir::annotations::Annotations;
use crate::parse::ParseError;
use crate::regex_set::RegexSet;
/// An enum representing custom handling that can be given to a variant.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum EnumVariantCustomBehavior {
/// This variant will be a module containing constants.
ModuleConstify,
/// This variant will be constified, that is, forced to generate a constant.
Constify,
/// This variant will be hidden entirely from the resulting enum.
Hide,
}
/// A C/C++ enumeration.
#[derive(Debug)]
pub(crate) struct Enum {
/// The representation used for this enum; it should be an `IntKind` type or
/// an alias to one.
///
/// It's `None` if the enum is a forward declaration and isn't defined
/// anywhere else, see `tests/headers/func_ptr_in_struct.h`.
repr: Option<TypeId>,
/// The different variants, with explicit values.
variants: Vec<EnumVariant>,
}
impl Enum {
/// Construct a new `Enum` with the given representation and variants.
pub(crate) fn new(
repr: Option<TypeId>,
variants: Vec<EnumVariant>,
) -> Self {
Enum { repr, variants }
}
/// Get this enumeration's representation.
pub(crate) fn repr(&self) -> Option<TypeId> {
self.repr
}
/// Get this enumeration's variants.
pub(crate) fn variants(&self) -> &[EnumVariant] {
&self.variants
}
/// Construct an enumeration from the given Clang type.
pub(crate) fn from_ty(
ty: &clang::Type,
ctx: &mut BindgenContext,
) -> Result<Self, ParseError> {
use clang_sys::*;
debug!("Enum::from_ty {ty:?}");
if ty.kind() != CXType_Enum {
return Err(ParseError::Continue);
}
let declaration = ty.declaration().canonical();
let repr = declaration
.enum_type()
.and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok());
let mut variants = vec![];
let variant_ty =
repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx));
let is_bool = variant_ty.is_some_and(Type::is_bool);
// Assume signedness since the default type by the C standard is an int.
let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() {
TypeKind::Int(ref int_kind) => int_kind.is_signed(),
ref other => {
panic!("Since when enums can be non-integers? {other:?}")
}
});
let type_name = ty.spelling();
let type_name = if type_name.is_empty() {
None
} else {
Some(type_name)
};
let type_name = type_name.as_deref();
let definition = declaration.definition().unwrap_or(declaration);
definition.visit(|cursor| {
if cursor.kind() == CXCursor_EnumConstantDecl {
let value = if is_bool {
cursor.enum_val_boolean().map(EnumVariantValue::Boolean)
} else if is_signed {
cursor.enum_val_signed().map(EnumVariantValue::Signed)
} else {
cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned)
};
if let Some(val) = value {
let name = cursor.spelling();
let annotations = Annotations::new(&cursor);
let custom_behavior = ctx
.options()
.last_callback(|callbacks| {
callbacks
.enum_variant_behavior(type_name, &name, val)
})
.or_else(|| {
let annotations = annotations.as_ref()?;
if annotations.hide() {
Some(EnumVariantCustomBehavior::Hide)
} else if annotations.constify_enum_variant() {
Some(EnumVariantCustomBehavior::Constify)
} else {
None
}
});
let new_name = ctx
.options()
.last_callback(|callbacks| {
callbacks.enum_variant_name(type_name, &name, val)
})
.or_else(|| {
annotations
.as_ref()?
.use_instead_of()?
.last()
.cloned()
})
.unwrap_or_else(|| name.clone());
let comment = cursor.raw_comment();
variants.push(EnumVariant::new(
new_name,
name,
comment,
val,
custom_behavior,
));
}
}
CXChildVisit_Continue
});
Ok(Enum::new(repr, variants))
}
fn is_matching_enum(
&self,
ctx: &BindgenContext,
enums: &RegexSet,
item: &Item,
) -> bool {
let path = item.path_for_allowlisting(ctx);
let enum_ty = item.expect_type();
if enums.matches(path[1..].join("::")) {
return true;
}
// Test the variants if the enum is anonymous.
if enum_ty.name().is_some() {
return false;
}
self.variants().iter().any(|v| enums.matches(v.name()))
}
/// Returns the final representation of the enum.
pub(crate) fn computed_enum_variation(
&self,
ctx: &BindgenContext,
item: &Item,
) -> EnumVariation {
// ModuleConsts has higher precedence than Rust in order to avoid
// problems with overlapping match patterns.
if self.is_matching_enum(
ctx,
&ctx.options().constified_enum_modules,
item,
) {
EnumVariation::ModuleConsts
} else if self.is_matching_enum(
ctx,
&ctx.options().bitfield_enums,
item,
) {
EnumVariation::NewType {
is_bitfield: true,
is_global: false,
}
} else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item)
{
EnumVariation::NewType {
is_bitfield: false,
is_global: false,
}
} else if self.is_matching_enum(
ctx,
&ctx.options().newtype_global_enums,
item,
) {
EnumVariation::NewType {
is_bitfield: false,
is_global: true,
}
} else if self.is_matching_enum(
ctx,
&ctx.options().rustified_enums,
item,
) {
EnumVariation::Rust {
non_exhaustive: false,
}
} else if self.is_matching_enum(
ctx,
&ctx.options().rustified_non_exhaustive_enums,
item,
) {
EnumVariation::Rust {
non_exhaustive: true,
}
} else if self.is_matching_enum(
ctx,
&ctx.options().constified_enums,
item,
) {
EnumVariation::Consts
} else {
ctx.options().default_enum_style
}
}
}
/// A single enum variant, to be contained only in an enum.
#[derive(Debug)]
pub(crate) struct EnumVariant {
/// The name of the variant.
name: String,
/// The original name of the variant (without user mangling)
name_for_allowlisting: String,
/// An optional doc comment.
comment: Option<String>,
/// The integer value of the variant.
val: EnumVariantValue,
/// The custom behavior this variant may have, if any.
custom_behavior: Option<EnumVariantCustomBehavior>,
}
/// A constant value assigned to an enumeration variant.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum EnumVariantValue {
/// A boolean constant.
Boolean(bool),
/// A signed constant.
Signed(i64),
/// An unsigned constant.
Unsigned(u64),
}
impl EnumVariant {
/// Construct a new enumeration variant from the given parts.
pub(crate) fn new(
name: String,
name_for_allowlisting: String,
comment: Option<String>,
val: EnumVariantValue,
custom_behavior: Option<EnumVariantCustomBehavior>,
) -> Self {
EnumVariant {
name,
name_for_allowlisting,
comment,
val,
custom_behavior,
}
}
/// Get this variant's name.
pub(crate) fn name(&self) -> &str {
&self.name
}
/// Get this variant's original name, as used for allowlisting.
pub(crate) fn name_for_allowlisting(&self) -> &str {
&self.name_for_allowlisting
}
/// Get this variant's value.
pub(crate) fn val(&self) -> EnumVariantValue {
self.val
}
/// Get this variant's documentation.
pub(crate) fn comment(&self) -> Option<&str> {
self.comment.as_deref()
}
/// Returns whether this variant should be enforced to be a constant by code
/// generation.
pub(crate) fn force_constification(&self) -> bool {
self.custom_behavior == Some(EnumVariantCustomBehavior::Constify)
}
/// Returns whether the current variant should be hidden completely from the
/// resulting rust enum.
pub(crate) fn hidden(&self) -> bool {
self.custom_behavior == Some(EnumVariantCustomBehavior::Hide)
}
}

838
vendor/bindgen/ir/function.rs vendored Normal file

@@ -0,0 +1,838 @@
//! Intermediate representation for C/C++ functions and methods.
use super::comp::MethodKind;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::item::Item;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::TypeKind;
use crate::callbacks::{ItemInfo, ItemKind};
use crate::clang::{self, ABIKind, Attribute};
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use clang_sys::CXCallingConv;
use quote::TokenStreamExt;
use std::io;
use std::str::FromStr;
const RUST_DERIVE_FUNPTR_LIMIT: usize = 12;
/// What kind of function are we looking at?
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum FunctionKind {
/// A plain, free function.
Function,
/// A method of some kind.
Method(MethodKind),
}
impl FunctionKind {
/// Given a clang cursor, return the kind of function it represents, or
/// `None` otherwise.
pub(crate) fn from_cursor(cursor: &clang::Cursor) -> Option<FunctionKind> {
// FIXME(emilio): Deduplicate logic with `ir::comp`.
Some(match cursor.kind() {
clang_sys::CXCursor_FunctionDecl => FunctionKind::Function,
clang_sys::CXCursor_Constructor => {
FunctionKind::Method(MethodKind::Constructor)
}
clang_sys::CXCursor_Destructor => {
FunctionKind::Method(if cursor.method_is_virtual() {
MethodKind::VirtualDestructor {
pure_virtual: cursor.method_is_pure_virtual(),
}
} else {
MethodKind::Destructor
})
}
clang_sys::CXCursor_CXXMethod => {
if cursor.method_is_virtual() {
FunctionKind::Method(MethodKind::Virtual {
pure_virtual: cursor.method_is_pure_virtual(),
})
} else if cursor.method_is_static() {
FunctionKind::Method(MethodKind::Static)
} else {
FunctionKind::Method(MethodKind::Normal)
}
}
_ => return None,
})
}
}
/// The style of linkage
#[derive(Debug, Clone, Copy)]
pub(crate) enum Linkage {
/// Externally visible and can be linked against
External,
/// Not exposed externally. 'static inline' functions will have this kind of linkage.
Internal,
}
/// A function declaration, with a signature, arguments, and argument names.
///
/// The argument names vector must be the same length as the ones in the
/// signature.
#[derive(Debug)]
pub(crate) struct Function {
/// The name of this function.
name: String,
/// The mangled name, that is, the symbol.
mangled_name: Option<String>,
/// The link name. If specified, overwrite `mangled_name`.
link_name: Option<String>,
/// The ID pointing to the current function signature.
signature: TypeId,
/// The kind of function this is.
kind: FunctionKind,
/// The linkage of the function.
linkage: Linkage,
}
impl Function {
/// Construct a new function.
pub(crate) fn new(
name: String,
mangled_name: Option<String>,
link_name: Option<String>,
signature: TypeId,
kind: FunctionKind,
linkage: Linkage,
) -> Self {
Function {
name,
mangled_name,
link_name,
signature,
kind,
linkage,
}
}
/// Get this function's name.
pub(crate) fn name(&self) -> &str {
&self.name
}
/// Get this function's mangled name (i.e. its symbol), if any.
pub(crate) fn mangled_name(&self) -> Option<&str> {
self.mangled_name.as_deref()
}
/// Get this function's link name.
pub fn link_name(&self) -> Option<&str> {
self.link_name.as_deref()
}
/// Get this function's signature type.
pub(crate) fn signature(&self) -> TypeId {
self.signature
}
/// Get this function's kind.
pub(crate) fn kind(&self) -> FunctionKind {
self.kind
}
/// Get this function's linkage.
pub(crate) fn linkage(&self) -> Linkage {
self.linkage
}
}
impl DotAttributes for Function {
fn dot_attributes<W>(
&self,
_ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write,
{
if let Some(ref mangled) = self.mangled_name {
let mangled: String =
mangled.chars().flat_map(|c| c.escape_default()).collect();
writeln!(out, "<tr><td>mangled name</td><td>{mangled}</td></tr>")?;
}
Ok(())
}
}
/// A valid rust ABI.
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum Abi {
/// The default C ABI.
C,
/// The "stdcall" ABI.
Stdcall,
/// The "efiapi" ABI.
EfiApi,
/// The "fastcall" ABI.
Fastcall,
/// The "thiscall" ABI.
ThisCall,
/// The "vectorcall" ABI.
Vectorcall,
/// The "aapcs" ABI.
Aapcs,
/// The "win64" ABI.
Win64,
/// The "C-unwind" ABI.
CUnwind,
/// The "system" ABI.
System,
}
impl FromStr for Abi {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"C" => Ok(Self::C),
"stdcall" => Ok(Self::Stdcall),
"efiapi" => Ok(Self::EfiApi),
"fastcall" => Ok(Self::Fastcall),
"thiscall" => Ok(Self::ThisCall),
"vectorcall" => Ok(Self::Vectorcall),
"aapcs" => Ok(Self::Aapcs),
"win64" => Ok(Self::Win64),
"C-unwind" => Ok(Self::CUnwind),
"system" => Ok(Self::System),
_ => Err(format!("Invalid or unknown ABI {s:?}")),
}
}
}
impl std::fmt::Display for Abi {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match *self {
Self::C => "C",
Self::Stdcall => "stdcall",
Self::EfiApi => "efiapi",
Self::Fastcall => "fastcall",
Self::ThisCall => "thiscall",
Self::Vectorcall => "vectorcall",
Self::Aapcs => "aapcs",
Self::Win64 => "win64",
Self::CUnwind => "C-unwind",
Abi::System => "system",
};
s.fmt(f)
}
}
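// Small illustrative round-trip check (not part of the vendored file): the
// `FromStr` and `Display` impls above are inverses for every accepted ABI string.
#[cfg(test)]
mod abi_round_trip_sketch {
    use super::Abi;

    #[test]
    fn parse_and_display_round_trip() {
        for s in [
            "C", "stdcall", "efiapi", "fastcall", "thiscall", "vectorcall",
            "aapcs", "win64", "C-unwind", "system",
        ] {
            let abi: Abi = s.parse().expect("known ABI string");
            assert_eq!(abi.to_string(), s);
        }
        assert!("pascal".parse::<Abi>().is_err());
    }
}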
impl quote::ToTokens for Abi {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
let abi = self.to_string();
tokens.append_all(quote! { #abi });
}
}
/// An ABI extracted from a clang cursor.
#[derive(Debug, Copy, Clone)]
pub(crate) enum ClangAbi {
/// An ABI known by Rust.
Known(Abi),
/// An unknown or invalid ABI.
Unknown(CXCallingConv),
}
impl ClangAbi {
/// Returns whether this ABI is unknown.
fn is_unknown(self) -> bool {
matches!(self, ClangAbi::Unknown(..))
}
}
impl quote::ToTokens for ClangAbi {
fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
match *self {
Self::Known(abi) => abi.to_tokens(tokens),
Self::Unknown(cc) => panic!(
"Cannot turn unknown calling convention to tokens: {cc:?}"
),
}
}
}
/// A function signature.
#[derive(Debug)]
pub(crate) struct FunctionSig {
/// The name of this function signature.
name: String,
/// The return type of the function.
return_type: TypeId,
/// The type of the arguments, optionally with the name of the argument when
/// declared.
argument_types: Vec<(Option<String>, TypeId)>,
/// Whether this function is variadic.
is_variadic: bool,
is_divergent: bool,
/// Whether this function's return value must be used.
must_use: bool,
/// The ABI of this function.
abi: ClangAbi,
}
fn get_abi(cc: CXCallingConv) -> ClangAbi {
use clang_sys::*;
match cc {
CXCallingConv_Default | CXCallingConv_C => ClangAbi::Known(Abi::C),
CXCallingConv_X86StdCall => ClangAbi::Known(Abi::Stdcall),
CXCallingConv_X86FastCall => ClangAbi::Known(Abi::Fastcall),
CXCallingConv_X86ThisCall => ClangAbi::Known(Abi::ThisCall),
CXCallingConv_X86VectorCall | CXCallingConv_AArch64VectorCall => {
ClangAbi::Known(Abi::Vectorcall)
}
CXCallingConv_AAPCS => ClangAbi::Known(Abi::Aapcs),
CXCallingConv_X86_64Win64 => ClangAbi::Known(Abi::Win64),
other => ClangAbi::Unknown(other),
}
}
/// Get the mangled name for the cursor's referent.
pub(crate) fn cursor_mangling(
ctx: &BindgenContext,
cursor: &clang::Cursor,
) -> Option<String> {
if !ctx.options().enable_mangling {
return None;
}
// We return early here because libclang may crash in some cases
// if we pass in a variable inside a partial specialized template.
// See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462.
if cursor.is_in_non_fully_specialized_template() {
return None;
}
let is_itanium_abi = ctx.abi_kind() == ABIKind::GenericItanium;
let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor;
if let Ok(mut manglings) = cursor.cxx_manglings() {
while let Some(m) = manglings.pop() {
// Only generate the destructor group 1, see below.
if is_itanium_abi && is_destructor && !m.ends_with("D1Ev") {
continue;
}
return Some(m);
}
}
let mut mangling = cursor.mangling();
if mangling.is_empty() {
return None;
}
if is_itanium_abi && is_destructor {
// With old (3.8-) libclang versions, and the Itanium ABI, clang returns
// the "destructor group 0" symbol, which means that it'll try to free
// memory, which definitely isn't what we want.
//
// Explicitly force the destructor group 1 symbol.
//
// See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special
// for the reference, and http://stackoverflow.com/a/6614369/1091587 for
// a more friendly explanation.
//
// We don't need to do this for constructors since clang seems to always
// have returned the C1 constructor.
//
// FIXME(emilio): Can a legit symbol in other ABIs end with this string?
// I don't think so, but if it can this would become a linker error
// anyway, not an invalid free at runtime.
//
// TODO(emilio, #611): Use cpp_demangle if this becomes nastier with
// time.
if mangling.ends_with("D0Ev") {
let new_len = mangling.len() - 4;
mangling.truncate(new_len);
mangling.push_str("D1Ev");
}
}
Some(mangling)
}
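// Sketch of just the destructor-symbol rewrite above (illustrative only, not
// part of the vendored file; the mangled name is a made-up example for a class
// `Foo`): swap the trailing "D0Ev" (deleting destructor) for "D1Ev" (complete
// object destructor).
#[cfg(test)]
mod destructor_mangling_sketch {
    #[test]
    fn d0_becomes_d1() {
        let mut mangling = String::from("_ZN3FooD0Ev");
        if mangling.ends_with("D0Ev") {
            let new_len = mangling.len() - 4;
            mangling.truncate(new_len);
            mangling.push_str("D1Ev");
        }
        assert_eq!(mangling, "_ZN3FooD1Ev");
    }
}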
fn args_from_ty_and_cursor(
ty: &clang::Type,
cursor: &clang::Cursor,
ctx: &mut BindgenContext,
) -> Vec<(Option<String>, TypeId)> {
let cursor_args = cursor.args().unwrap_or_default().into_iter();
let type_args = ty.args().unwrap_or_default().into_iter();
// Argument types can be found in either the cursor or the type, but argument names may only be
// found on the cursor. We often have access to both a type and a cursor for each argument, but
// in some cases we may only have one.
//
// Prefer using the type as the source of truth for the argument's type, but fall back to
// inspecting the cursor (this happens for Objective C interfaces).
//
// Prefer using the cursor for the argument's name, but fall back to using the parent's cursor
// (this happens for function pointer return types).
cursor_args
.map(Some)
.chain(std::iter::repeat(None))
.zip(type_args.map(Some).chain(std::iter::repeat(None)))
.take_while(|(cur, ty)| cur.is_some() || ty.is_some())
.map(|(arg_cur, arg_ty)| {
let name = arg_cur.map(|a| a.spelling()).and_then(|name| {
if name.is_empty() {
None
} else {
Some(name)
}
});
let cursor = arg_cur.unwrap_or(*cursor);
let ty = arg_ty.unwrap_or_else(|| cursor.cur_type());
(name, Item::from_ty_or_ref(ty, cursor, None, ctx))
})
.collect()
}
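// Sketch of the padding-zip pattern used above (illustrative only, not part of
// the vendored file): pair up two lists of possibly different lengths and stop
// only once both are exhausted. The toy names and numbers are made up.
#[cfg(test)]
mod padded_zip_sketch {
    #[test]
    fn zips_until_both_run_out() {
        let names = vec!["a", "b"];
        let types = vec![1, 2, 3];
        let pairs: Vec<(Option<&str>, Option<i32>)> = names
            .into_iter()
            .map(Some)
            .chain(std::iter::repeat(None))
            .zip(types.into_iter().map(Some).chain(std::iter::repeat(None)))
            .take_while(|(n, t)| n.is_some() || t.is_some())
            .collect();
        assert_eq!(
            pairs,
            vec![(Some("a"), Some(1)), (Some("b"), Some(2)), (None, Some(3))]
        );
    }
}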
impl FunctionSig {
/// Get the function name.
pub(crate) fn name(&self) -> &str {
&self.name
}
/// Construct a new function signature from the given Clang type.
pub(crate) fn from_ty(
ty: &clang::Type,
cursor: &clang::Cursor,
ctx: &mut BindgenContext,
) -> Result<Self, ParseError> {
use clang_sys::*;
debug!("FunctionSig::from_ty {ty:?} {cursor:?}");
// Skip function templates
let kind = cursor.kind();
if kind == CXCursor_FunctionTemplate {
return Err(ParseError::Continue);
}
let spelling = cursor.spelling();
// Don't parse operatorxx functions in C++
let is_operator = |spelling: &str| {
spelling.starts_with("operator") &&
!clang::is_valid_identifier(spelling)
};
if is_operator(&spelling) && !ctx.options().represent_cxx_operators {
return Err(ParseError::Continue);
}
// Constructors of non-type template parameter classes for some reason
// include the template parameter in their name. Just skip them, since
// we don't handle non-type template parameters well anyway.
if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) &&
spelling.contains('<')
{
return Err(ParseError::Continue);
}
let cursor = if cursor.is_valid() {
*cursor
} else {
ty.declaration()
};
let mut args = match kind {
CXCursor_FunctionDecl |
CXCursor_Constructor |
CXCursor_CXXMethod |
CXCursor_ObjCInstanceMethodDecl |
CXCursor_ObjCClassMethodDecl => {
args_from_ty_and_cursor(ty, &cursor, ctx)
}
_ => {
// For non-CXCursor_FunctionDecl, visiting the cursor's children
// is the only reliable way to get parameter names.
let mut args = vec![];
cursor.visit(|c| {
if c.kind() == CXCursor_ParmDecl {
let ty =
Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
let name = c.spelling();
let name =
if name.is_empty() { None } else { Some(name) };
args.push((name, ty));
}
CXChildVisit_Continue
});
if args.is_empty() {
// FIXME(emilio): Sometimes libclang doesn't expose the
// right AST for functions tagged as stdcall and such...
//
// https://bugs.llvm.org/show_bug.cgi?id=45919
args_from_ty_and_cursor(ty, &cursor, ctx)
} else {
args
}
}
};
let (must_use, mut is_divergent) =
if ctx.options().enable_function_attribute_detection {
let [must_use, no_return, no_return_cpp] = cursor.has_attrs(&[
Attribute::MUST_USE,
Attribute::NO_RETURN,
Attribute::NO_RETURN_CPP,
]);
(must_use, no_return || no_return_cpp)
} else {
Default::default()
};
// Check if the type contains __attribute__((noreturn)) outside of parentheses. This is
// somewhat fragile, but it seems to be the only way to get at this information as of
// libclang 9.
let ty_spelling = ty.spelling();
let has_attribute_noreturn = ty_spelling
.match_indices("__attribute__((noreturn))")
.any(|(i, _)| {
let depth = ty_spelling[..i]
.bytes()
.filter_map(|ch| match ch {
b'(' => Some(1),
b')' => Some(-1),
_ => None,
})
.sum::<isize>();
depth == 0
});
is_divergent = is_divergent || has_attribute_noreturn;
let is_method = kind == CXCursor_CXXMethod;
let is_constructor = kind == CXCursor_Constructor;
let is_destructor = kind == CXCursor_Destructor;
if (is_constructor || is_destructor || is_method) &&
cursor.lexical_parent() != cursor.semantic_parent()
{
// Only parse methods, constructors, and destructors where they are declared
// (that is, where the lexical and semantic parents match), so each is parsed
// only once.
return Err(ParseError::Continue);
}
if is_method || is_constructor || is_destructor {
let is_const = is_method && cursor.method_is_const();
let is_virtual = is_method && cursor.method_is_virtual();
let is_static = is_method && cursor.method_is_static();
if !is_static &&
(!is_virtual ||
ctx.options().use_specific_virtual_function_receiver)
{
let parent = cursor.semantic_parent();
let class = Item::parse(parent, None, ctx)
.expect("Expected to parse the class");
// The `class` most likely is not finished parsing yet, so use
// the unchecked variant.
let class = class.as_type_id_unchecked();
let class = if is_const {
let const_class_id = ctx.next_item_id();
ctx.build_const_wrapper(
const_class_id,
class,
None,
&parent.cur_type(),
)
} else {
class
};
let ptr =
Item::builtin_type(TypeKind::Pointer(class), false, ctx);
args.insert(0, (Some("this".into()), ptr));
} else if is_virtual {
let void = Item::builtin_type(TypeKind::Void, false, ctx);
let ptr =
Item::builtin_type(TypeKind::Pointer(void), false, ctx);
args.insert(0, (Some("this".into()), ptr));
}
}
let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl ||
kind == CXCursor_ObjCClassMethodDecl
{
ty.ret_type()
.or_else(|| cursor.ret_type())
.ok_or(ParseError::Continue)?
} else {
ty.ret_type().ok_or(ParseError::Continue)?
};
let ret = if is_constructor && ctx.is_target_wasm32() {
// Constructors in Clang wasm32 target return a pointer to the object
// being constructed.
let void = Item::builtin_type(TypeKind::Void, false, ctx);
Item::builtin_type(TypeKind::Pointer(void), false, ctx)
} else {
Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx)
};
// Clang plays with us at "find the calling convention", see #549 and
// co. This seems to be a better fix than that commit.
let mut call_conv = ty.call_conv();
if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() {
let cursor_call_conv = ty.call_conv();
if cursor_call_conv != CXCallingConv_Invalid {
call_conv = cursor_call_conv;
}
}
let abi = get_abi(call_conv);
if abi.is_unknown() {
warn!("Unknown calling convention: {call_conv:?}");
}
Ok(Self {
name: spelling,
return_type: ret,
argument_types: args,
is_variadic: ty.is_variadic(),
is_divergent,
must_use,
abi,
})
}
/// Get this function signature's return type.
pub(crate) fn return_type(&self) -> TypeId {
self.return_type
}
/// Get this function signature's argument (name, type) pairs.
pub(crate) fn argument_types(&self) -> &[(Option<String>, TypeId)] {
&self.argument_types
}
/// Get this function signature's ABI.
pub(crate) fn abi(
&self,
ctx: &BindgenContext,
name: Option<&str>,
) -> crate::codegen::error::Result<ClangAbi> {
// FIXME (pvdrz): Try to do this check lazily instead. Maybe store the ABI inside `ctx`
// instead?
let abi = if let Some(name) = name {
if let Some((abi, _)) = ctx
.options()
.abi_overrides
.iter()
.find(|(_, regex_set)| regex_set.matches(name))
{
ClangAbi::Known(*abi)
} else {
self.abi
}
} else if let Some((abi, _)) = ctx
.options()
.abi_overrides
.iter()
.find(|(_, regex_set)| regex_set.matches(&self.name))
{
ClangAbi::Known(*abi)
} else {
self.abi
};
match abi {
ClangAbi::Known(Abi::ThisCall)
if !ctx.options().rust_features().thiscall_abi =>
{
Err(crate::codegen::error::Error::UnsupportedAbi("thiscall"))
}
ClangAbi::Known(Abi::Vectorcall)
if !ctx.options().rust_features().vectorcall_abi =>
{
Err(crate::codegen::error::Error::UnsupportedAbi("vectorcall"))
}
ClangAbi::Known(Abi::CUnwind)
if !ctx.options().rust_features().c_unwind_abi =>
{
Err(crate::codegen::error::Error::UnsupportedAbi("C-unwind"))
}
ClangAbi::Known(Abi::EfiApi)
if !ctx.options().rust_features().abi_efiapi =>
{
Err(crate::codegen::error::Error::UnsupportedAbi("efiapi"))
}
ClangAbi::Known(Abi::Win64) if self.is_variadic() => {
Err(crate::codegen::error::Error::UnsupportedAbi("Win64"))
}
abi => Ok(abi),
}
}
/// Is this function signature variadic?
pub(crate) fn is_variadic(&self) -> bool {
// Clang reports some functions as variadic when they *might* be
// variadic. We also require at least one fixed argument because Rust does
// not support variadic function signatures without one.
self.is_variadic && !self.argument_types.is_empty()
}
/// Must this function's return value be used?
pub(crate) fn must_use(&self) -> bool {
self.must_use
}
/// Are function pointers with this signature able to derive Rust traits?
/// Rust only supports deriving traits for function pointers with a limited
/// number of parameters and a couple of ABIs.
///
/// For more details, see:
///
/// * <https://github.com/rust-lang/rust-bindgen/issues/547>,
/// * <https://github.com/rust-lang/rust/issues/38848>,
/// * and <https://github.com/rust-lang/rust/issues/40158>
pub(crate) fn function_pointers_can_derive(&self) -> bool {
if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT {
return false;
}
matches!(self.abi, ClangAbi::Known(Abi::C) | ClangAbi::Unknown(..))
}
/// Whether this function has attributes marking it as divergent.
pub(crate) fn is_divergent(&self) -> bool {
self.is_divergent
}
}
impl ClangSubItemParser for Function {
fn parse(
cursor: clang::Cursor,
context: &mut BindgenContext,
) -> Result<ParseResult<Self>, ParseError> {
use clang_sys::*;
let kind = match FunctionKind::from_cursor(&cursor) {
None => return Err(ParseError::Continue),
Some(k) => k,
};
debug!("Function::parse({cursor:?}, {:?})", cursor.cur_type());
let visibility = cursor.visibility();
if visibility != CXVisibility_Default {
return Err(ParseError::Continue);
}
if cursor.access_specifier() == CX_CXXPrivate &&
!context.options().generate_private_functions
{
return Err(ParseError::Continue);
}
let linkage = cursor.linkage();
let linkage = match linkage {
CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External,
CXLinkage_Internal => Linkage::Internal,
_ => return Err(ParseError::Continue),
};
if cursor.is_inlined_function() ||
cursor.definition().is_some_and(|x| x.is_inlined_function())
{
if !context.options().generate_inline_functions &&
!context.options().wrap_static_fns
{
return Err(ParseError::Continue);
}
if cursor.is_deleted_function() &&
!context.options().generate_deleted_functions
{
return Err(ParseError::Continue);
}
// We cannot handle `inline` functions that are not `static`.
if context.options().wrap_static_fns &&
cursor.is_inlined_function() &&
matches!(linkage, Linkage::External)
{
return Err(ParseError::Continue);
}
}
// Grab the signature using Item::from_ty.
let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?;
let mut name = cursor.spelling();
assert!(!name.is_empty(), "Empty function name?");
if cursor.kind() == CXCursor_Destructor {
// Remove the leading `~`. The alternative to this is special-casing
// code-generation for destructor functions, which seems less than
// ideal.
if name.starts_with('~') {
name.remove(0);
}
// Add a suffix to avoid colliding with constructors. This would be
// technically fine (since we handle duplicated functions/methods),
// but it seems easy enough to handle here.
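// For example, `~Foo` ends up named `Foo_destructor`.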
name.push_str("_destructor");
}
if let Some(nm) = context.options().last_callback(|callbacks| {
callbacks.generated_name_override(ItemInfo {
name: name.as_str(),
kind: ItemKind::Function,
})
}) {
name = nm;
}
assert!(!name.is_empty(), "Empty function name.");
let mangled_name = cursor_mangling(context, &cursor);
let link_name = context.options().last_callback(|callbacks| {
callbacks.generated_link_name_override(ItemInfo {
name: name.as_str(),
kind: ItemKind::Function,
})
});
let function = Self::new(
name.clone(),
mangled_name,
link_name,
sig,
kind,
linkage,
);
Ok(ParseResult::New(function, Some(cursor)))
}
}
impl Trace for FunctionSig {
type Extra = ();
fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
where
T: Tracer,
{
tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn);
for &(_, ty) in self.argument_types() {
tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter);
}
}
}

128
vendor/bindgen/ir/int.rs vendored Normal file

@@ -0,0 +1,128 @@
//! Intermediate representation for integral types.
/// Which integral type are we dealing with?
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum IntKind {
/// A `bool`.
Bool,
/// A `signed char`.
SChar,
/// An `unsigned char`.
UChar,
/// A `wchar_t`.
WChar,
/// A platform-dependent `char` type, with its signedness specified explicitly.
Char {
/// Whether the char is signed for the target platform.
is_signed: bool,
},
/// A `short`.
Short,
/// An `unsigned short`.
UShort,
/// An `int`.
Int,
/// An `unsigned int`.
UInt,
/// A `long`.
Long,
/// An `unsigned long`.
ULong,
/// A `long long`.
LongLong,
/// An `unsigned long long`.
ULongLong,
/// An 8-bit signed integer.
I8,
/// An 8-bit unsigned integer.
U8,
/// A 16-bit signed integer.
I16,
/// A 16-bit unsigned integer, used only for enum size representation.
U16,
/// The C++ type `char16_t`, which is its own type (unlike in C).
Char16,
/// A 32-bit signed integer.
I32,
/// A 32-bit unsigned integer.
U32,
/// A 64-bit signed integer.
I64,
/// A 64-bit unsigned integer.
U64,
/// An `int128_t`.
I128,
/// A `uint128_t`.
U128,
/// A custom integer type, used to allow custom macro types depending on
/// range.
Custom {
/// The name of the type, which would be used without modification.
name: &'static str,
/// Whether the type is signed or not.
is_signed: bool,
},
}
impl IntKind {
/// Is this integral type signed?
pub(crate) fn is_signed(&self) -> bool {
use self::IntKind::*;
match *self {
// TODO(emilio): wchar_t can in theory be signed, but we have no way
// to know whether it is or not right now (unlike char, there's no
// WChar_S / WChar_U).
Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 |
Char16 | WChar | U32 | U64 | U128 => false,
SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 |
I128 => true,
Char { is_signed } | Custom { is_signed, .. } => is_signed,
}
}
/// If this type has a known size, return it (in bytes). This is to
/// alleviate libclang sometimes not giving us a layout (like in the case
/// when an enum is defined inside a class with template parameters).
pub(crate) fn known_size(&self) -> Option<usize> {
use self::IntKind::*;
Some(match *self {
Bool | UChar | SChar | U8 | I8 | Char { .. } => 1,
U16 | I16 | Char16 => 2,
U32 | I32 => 4,
U64 | I64 => 8,
I128 | U128 => 16,
_ => return None,
})
}
/// Whether this type's signedness matches the value.
pub(crate) fn signedness_matches(&self, val: i64) -> bool {
val >= 0 || self.is_signed()
}
}
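// A small illustrative test (not part of upstream bindgen) exercising the
// helpers above; the expected values follow directly from the match arms in
// `known_size` and `is_signed`.
#[test]
fn int_kind_size_and_signedness_examples() {
    // Fixed-width kinds report their size; `Int` is target-dependent, so `None`.
    assert_eq!(IntKind::Bool.known_size(), Some(1));
    assert_eq!(IntKind::Char16.known_size(), Some(2));
    assert_eq!(IntKind::Int.known_size(), None);
    // Negative values only match signed kinds.
    assert!(IntKind::I8.signedness_matches(-1));
    assert!(!IntKind::UInt.signedness_matches(-1));
    assert!(IntKind::UInt.signedness_matches(0));
}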

1994
vendor/bindgen/ir/item.rs vendored Normal file

File diff suppressed because it is too large

135
vendor/bindgen/ir/item_kind.rs vendored Normal file

@@ -0,0 +1,135 @@
//! Different variants of an `Item` in our intermediate representation.
use super::context::BindgenContext;
use super::dot::DotAttributes;
use super::function::Function;
use super::module::Module;
use super::ty::Type;
use super::var::Var;
use std::io;
/// An item we parse and translate.
#[derive(Debug)]
pub(crate) enum ItemKind {
/// A module, created implicitly once (the root module), or via C++
/// namespaces.
Module(Module),
/// A type declared in any of the multiple ways it can be declared.
Type(Type),
/// A function or method declaration.
Function(Function),
/// A variable declaration, most likely a static.
Var(Var),
}
impl ItemKind {
/// Get a reference to this `ItemKind`'s underlying `Module`, or `None` if it
/// is some other kind.
pub(crate) fn as_module(&self) -> Option<&Module> {
match *self {
ItemKind::Module(ref module) => Some(module),
_ => None,
}
}
/// Transform our `ItemKind` into a string.
pub(crate) fn kind_name(&self) -> &'static str {
match *self {
ItemKind::Module(..) => "Module",
ItemKind::Type(..) => "Type",
ItemKind::Function(..) => "Function",
ItemKind::Var(..) => "Var",
}
}
/// Is this a module?
pub(crate) fn is_module(&self) -> bool {
self.as_module().is_some()
}
/// Get a reference to this `ItemKind`'s underlying `Function`, or `None` if
/// it is some other kind.
pub(crate) fn as_function(&self) -> Option<&Function> {
match *self {
ItemKind::Function(ref func) => Some(func),
_ => None,
}
}
/// Is this a function?
pub(crate) fn is_function(&self) -> bool {
self.as_function().is_some()
}
/// Get a reference to this `ItemKind`'s underlying `Function`, or panic if
/// it is some other kind.
pub(crate) fn expect_function(&self) -> &Function {
self.as_function().expect("Not a function")
}
/// Get a reference to this `ItemKind`'s underlying `Type`, or `None` if
/// it is some other kind.
pub(crate) fn as_type(&self) -> Option<&Type> {
match *self {
ItemKind::Type(ref ty) => Some(ty),
_ => None,
}
}
/// Get a mutable reference to this `ItemKind`'s underlying `Type`, or `None`
/// if it is some other kind.
pub(crate) fn as_type_mut(&mut self) -> Option<&mut Type> {
match *self {
ItemKind::Type(ref mut ty) => Some(ty),
_ => None,
}
}
/// Is this a type?
pub(crate) fn is_type(&self) -> bool {
self.as_type().is_some()
}
/// Get a reference to this `ItemKind`'s underlying `Type`, or panic if it is
/// some other kind.
pub(crate) fn expect_type(&self) -> &Type {
self.as_type().expect("Not a type")
}
/// Get a reference to this `ItemKind`'s underlying `Var`, or `None` if it is
/// some other kind.
pub(crate) fn as_var(&self) -> Option<&Var> {
match *self {
ItemKind::Var(ref v) => Some(v),
_ => None,
}
}
/// Is this a variable?
pub(crate) fn is_var(&self) -> bool {
self.as_var().is_some()
}
}
impl DotAttributes for ItemKind {
fn dot_attributes<W>(
&self,
ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write,
{
writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", self.kind_name())?;
match *self {
ItemKind::Module(ref module) => module.dot_attributes(ctx, out),
ItemKind::Type(ref ty) => ty.dot_attributes(ctx, out),
ItemKind::Function(ref func) => func.dot_attributes(ctx, out),
ItemKind::Var(ref var) => var.dot_attributes(ctx, out),
}
}
}

126
vendor/bindgen/ir/layout.rs vendored Normal file

@@ -0,0 +1,126 @@
//! Intermediate representation for the physical layout of some type.
use super::derive::CanDerive;
use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
use crate::clang;
use crate::ir::context::BindgenContext;
use std::cmp;
/// A type that represents the struct layout of a type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct Layout {
/// The size (in bytes) of this layout.
pub(crate) size: usize,
/// The alignment (in bytes) of this layout.
pub(crate) align: usize,
/// Whether this layout's members are packed or not.
pub(crate) packed: bool,
}
#[test]
fn test_layout_for_size() {
use std::mem::size_of;
let ptr_size = size_of::<*mut ()>();
assert_eq!(
Layout::for_size_internal(ptr_size, ptr_size),
Layout::new(ptr_size, ptr_size)
);
assert_eq!(
Layout::for_size_internal(ptr_size, 3 * ptr_size),
Layout::new(3 * ptr_size, ptr_size)
);
}
impl Layout {
/// Gets the unsigned integer type (as a `syn::Type`) for a given known size in bytes, if any.
pub(crate) fn known_type_for_size(size: usize) -> Option<syn::Type> {
Some(match size {
16 => syn::parse_quote! { u128 },
8 => syn::parse_quote! { u64 },
4 => syn::parse_quote! { u32 },
2 => syn::parse_quote! { u16 },
1 => syn::parse_quote! { u8 },
_ => return None,
})
}
/// Construct a new `Layout` with the given `size` and `align`. It is not
/// packed.
pub(crate) fn new(size: usize, align: usize) -> Self {
Layout {
size,
align,
packed: false,
}
}
fn for_size_internal(ptr_size: usize, size: usize) -> Self {
let mut next_align = 2;
while size % next_align == 0 && next_align <= ptr_size {
next_align *= 2;
}
Layout {
size,
align: next_align / 2,
packed: false,
}
}
/// Creates a non-packed layout for a given size, trying to use the maximum
/// alignment possible.
pub(crate) fn for_size(ctx: &BindgenContext, size: usize) -> Self {
Self::for_size_internal(ctx.target_pointer_size(), size)
}
/// Get this layout as an opaque type.
pub(crate) fn opaque(&self) -> Opaque {
Opaque(*self)
}
}
/// When we are treating a type as opaque, it is just a blob with a `Layout`.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct Opaque(pub(crate) Layout);
impl Opaque {
/// Construct a new opaque type from the given clang type.
pub(crate) fn from_clang_ty(
ty: &clang::Type,
ctx: &BindgenContext,
) -> Type {
let layout = Layout::new(ty.size(ctx), ty.align(ctx));
let ty_kind = TypeKind::Opaque;
let is_const = ty.is_const();
Type::new(None, Some(layout), ty_kind, is_const)
}
/// Return the known rust type we should use to create a correctly-aligned
/// field with this layout.
pub(crate) fn known_rust_type_for_array(&self) -> Option<syn::Type> {
Layout::known_type_for_size(self.0.align)
}
/// Return the array size that an opaque type for this layout should have if
/// we know the correct type for it, or `None` otherwise.
pub(crate) fn array_size(&self) -> Option<usize> {
if self.known_rust_type_for_array().is_some() {
Some(self.0.size / cmp::max(self.0.align, 1))
} else {
None
}
}
/// Return `CanDerive::Yes` if this opaque layout's array size fits within the
/// maximum number of array elements for which Rust allows deriving traits.
/// Return `CanDerive::Manually` otherwise.
pub(crate) fn array_size_within_derive_limit(&self) -> CanDerive {
if self
.array_size()
.is_some_and(|size| size <= RUST_DERIVE_IN_ARRAY_LIMIT)
{
CanDerive::Yes
} else {
CanDerive::Manually
}
}
}
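// An illustrative test (not from upstream bindgen) of the opaque-blob helpers
// above: a 16-byte blob aligned to 4 bytes becomes an array of four `u32`s.
#[test]
fn opaque_array_size_example() {
    let opaque = Opaque(Layout::new(16, 4));
    // `known_type_for_size(4)` resolves to `u32`, so an array type is known...
    assert!(opaque.known_rust_type_for_array().is_some());
    // ...and the blob is `size / align` = 4 elements long.
    assert_eq!(opaque.array_size(), Some(4));
}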

25
vendor/bindgen/ir/mod.rs vendored Normal file

@@ -0,0 +1,25 @@
//! The ir module defines bindgen's intermediate representation.
//!
//! Parsing C/C++ generates the IR, while code generation outputs Rust code from
//! the IR.
#![deny(clippy::missing_docs_in_private_items)]
pub(crate) mod analysis;
pub(crate) mod annotations;
pub(crate) mod comment;
pub(crate) mod comp;
pub(crate) mod context;
pub(crate) mod derive;
pub(crate) mod dot;
pub(crate) mod enum_ty;
pub(crate) mod function;
pub(crate) mod int;
pub(crate) mod item;
pub(crate) mod item_kind;
pub(crate) mod layout;
pub(crate) mod module;
pub(crate) mod objc;
pub(crate) mod template;
pub(crate) mod traversal;
pub(crate) mod ty;
pub(crate) mod var;

96
vendor/bindgen/ir/module.rs vendored Normal file

@@ -0,0 +1,96 @@
//! Intermediate representation for modules (AKA C++ namespaces).
use super::context::BindgenContext;
use super::dot::DotAttributes;
use super::item::ItemSet;
use crate::clang;
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use crate::parse_one;
use std::io;
/// Whether this module is inline or not.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum ModuleKind {
/// This module is not inline.
Normal,
/// This module is inline, as in `inline namespace foo {}`.
Inline,
}
/// A module, as in, a C++ namespace.
#[derive(Clone, Debug)]
pub(crate) struct Module {
/// The name of the module, or none if it's anonymous.
name: Option<String>,
/// The kind of module this is.
kind: ModuleKind,
/// The children of this module, just here for convenience.
children: ItemSet,
}
impl Module {
/// Construct a new `Module`.
pub(crate) fn new(name: Option<String>, kind: ModuleKind) -> Self {
Module {
name,
kind,
children: ItemSet::new(),
}
}
/// Get this module's name.
pub(crate) fn name(&self) -> Option<&str> {
self.name.as_deref()
}
/// Get a mutable reference to this module's children.
pub(crate) fn children_mut(&mut self) -> &mut ItemSet {
&mut self.children
}
/// Get this module's children.
pub(crate) fn children(&self) -> &ItemSet {
&self.children
}
/// Whether this namespace is inline.
pub(crate) fn is_inline(&self) -> bool {
self.kind == ModuleKind::Inline
}
}
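// An illustrative test (not part of upstream bindgen) of the accessors above:
// an `inline namespace ns { ... }` is modeled as an inline module named "ns".
#[test]
fn module_accessor_examples() {
    let module = Module::new(Some("ns".to_string()), ModuleKind::Inline);
    assert_eq!(module.name(), Some("ns"));
    assert!(module.is_inline());
    // The root module and anonymous namespaces have no name.
    assert!(Module::new(None, ModuleKind::Normal).name().is_none());
}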
impl DotAttributes for Module {
fn dot_attributes<W>(
&self,
_ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write,
{
writeln!(out, "<tr><td>ModuleKind</td><td>{:?}</td></tr>", self.kind)
}
}
impl ClangSubItemParser for Module {
fn parse(
cursor: clang::Cursor,
ctx: &mut BindgenContext,
) -> Result<ParseResult<Self>, ParseError> {
use clang_sys::*;
match cursor.kind() {
CXCursor_Namespace => {
let module_id = ctx.module(cursor);
ctx.with_module(module_id, |ctx| {
cursor.visit_sorted(ctx, |ctx, child| {
parse_one(ctx, child, Some(module_id.into()));
});
});
Ok(ParseResult::AlreadyResolved(module_id.into()))
}
_ => Err(ParseError::Continue),
}
}
}

343
vendor/bindgen/ir/objc.rs vendored Normal file

@@ -0,0 +1,343 @@
//! Objective C types
use super::context::{BindgenContext, ItemId};
use super::function::FunctionSig;
use super::item::Item;
use super::traversal::{Trace, Tracer};
use super::ty::TypeKind;
use crate::clang;
use clang_sys::CXChildVisit_Continue;
use clang_sys::CXCursor_ObjCCategoryDecl;
use clang_sys::CXCursor_ObjCClassMethodDecl;
use clang_sys::CXCursor_ObjCClassRef;
use clang_sys::CXCursor_ObjCInstanceMethodDecl;
use clang_sys::CXCursor_ObjCProtocolDecl;
use clang_sys::CXCursor_ObjCProtocolRef;
use clang_sys::CXCursor_ObjCSuperClassRef;
use clang_sys::CXCursor_TemplateTypeParameter;
use proc_macro2::{Ident, Span, TokenStream};
/// Objective-C interface as used in `TypeKind`
///
/// Also, protocols and categories are parsed as this type
#[derive(Debug)]
pub(crate) struct ObjCInterface {
/// The name
/// like, `NSObject`
name: String,
category: Option<String>,
is_protocol: bool,
/// The list of template names, almost always `ObjectType` or `KeyType`.
pub(crate) template_names: Vec<String>,
/// The list of protocols that this interface conforms to.
pub(crate) conforms_to: Vec<ItemId>,
/// The direct parent for this interface.
pub(crate) parent_class: Option<ItemId>,
/// List of the methods defined in this interface
methods: Vec<ObjCMethod>,
class_methods: Vec<ObjCMethod>,
}
/// The objective c methods
#[derive(Debug)]
pub(crate) struct ObjCMethod {
/// The original method selector name
/// like, dataWithBytes:length:
name: String,
/// Method name as converted to rust
/// like, `dataWithBytes_length_`
rust_name: String,
signature: FunctionSig,
/// Is class method?
is_class_method: bool,
}
impl ObjCInterface {
fn new(name: &str) -> ObjCInterface {
ObjCInterface {
name: name.to_owned(),
category: None,
is_protocol: false,
template_names: Vec::new(),
parent_class: None,
conforms_to: Vec::new(),
methods: Vec::new(),
class_methods: Vec::new(),
}
}
/// The name
/// like, `NSObject`
pub(crate) fn name(&self) -> &str {
self.name.as_ref()
}
/// Formats the name for Rust.
/// Can be like `NSObject`, but with categories it might be like `NSObject_NSCoderMethods`,
/// and protocols are like `PNSObject`.
pub(crate) fn rust_name(&self) -> String {
if let Some(ref cat) = self.category {
format!("{}_{cat}", self.name())
} else if self.is_protocol {
format!("P{}", self.name())
} else {
format!("I{}", self.name().to_owned())
}
}
/// Is this a template interface?
pub(crate) fn is_template(&self) -> bool {
!self.template_names.is_empty()
}
/// List of the methods defined in this interface
pub(crate) fn methods(&self) -> &Vec<ObjCMethod> {
&self.methods
}
/// Is this a protocol?
pub(crate) fn is_protocol(&self) -> bool {
self.is_protocol
}
/// Is this a category?
pub(crate) fn is_category(&self) -> bool {
self.category.is_some()
}
/// List of the class methods defined in this interface
pub(crate) fn class_methods(&self) -> &Vec<ObjCMethod> {
&self.class_methods
}
/// Parses the Objective C interface from the cursor
pub(crate) fn from_ty(
cursor: &clang::Cursor,
ctx: &mut BindgenContext,
) -> Option<Self> {
let name = cursor.spelling();
let mut interface = Self::new(&name);
if cursor.kind() == CXCursor_ObjCProtocolDecl {
interface.is_protocol = true;
}
cursor.visit(|c| {
match c.kind() {
CXCursor_ObjCClassRef => {
if cursor.kind() == CXCursor_ObjCCategoryDecl {
// We are actually a category extension, and we found the reference
// to the original interface, so name this interface appropriately
interface.name = c.spelling();
interface.category = Some(cursor.spelling());
}
}
CXCursor_ObjCProtocolRef => {
// Gather protocols this interface conforms to
let needle = format!("P{}", c.spelling());
let items_map = ctx.items();
debug!(
"Interface {} conforms to {needle}, find the item",
interface.name,
);
for (id, item) in items_map {
if let Some(ty) = item.as_type() {
if let TypeKind::ObjCInterface(ref protocol) =
*ty.kind()
{
if protocol.is_protocol {
debug!(
"Checking protocol {}, ty.name {:?}",
protocol.name,
ty.name()
);
if Some(needle.as_ref()) == ty.name() {
debug!("Found conforming protocol {item:?}");
interface.conforms_to.push(id);
break;
}
}
}
}
}
}
CXCursor_ObjCInstanceMethodDecl |
CXCursor_ObjCClassMethodDecl => {
let name = c.spelling();
let signature =
FunctionSig::from_ty(&c.cur_type(), &c, ctx)
.expect("Invalid function sig");
let is_class_method =
c.kind() == CXCursor_ObjCClassMethodDecl;
let method =
ObjCMethod::new(&name, signature, is_class_method);
interface.add_method(method);
}
CXCursor_TemplateTypeParameter => {
let name = c.spelling();
interface.template_names.push(name);
}
CXCursor_ObjCSuperClassRef => {
let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
interface.parent_class = Some(item.into());
}
_ => {}
}
CXChildVisit_Continue
});
Some(interface)
}
fn add_method(&mut self, method: ObjCMethod) {
if method.is_class_method {
self.class_methods.push(method);
} else {
self.methods.push(method);
}
}
}
impl ObjCMethod {
fn new(
name: &str,
signature: FunctionSig,
is_class_method: bool,
) -> ObjCMethod {
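// Objective-C selectors separate their pieces with `:`; replacing each `:`
// with `_` yields the Rust method name, e.g. `dataWithBytes:length:` becomes
// `dataWithBytes_length_`.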
let split_name: Vec<&str> = name.split(':').collect();
let rust_name = split_name.join("_");
ObjCMethod {
name: name.to_owned(),
rust_name,
signature,
is_class_method,
}
}
/// Method name as converted to rust
/// like, `dataWithBytes_length_`
pub(crate) fn rust_name(&self) -> &str {
self.rust_name.as_ref()
}
/// Returns the methods signature as `FunctionSig`
pub(crate) fn signature(&self) -> &FunctionSig {
&self.signature
}
/// Is this a class method?
pub(crate) fn is_class_method(&self) -> bool {
self.is_class_method
}
/// Formats the method call
pub(crate) fn format_method_call(
&self,
args: &[TokenStream],
) -> TokenStream {
let split_name: Vec<Option<Ident>> = self
.name
.split(':')
.enumerate()
.map(|(idx, name)| {
if name.is_empty() {
None
} else if idx == 0 {
// Try to parse the method name as an identifier. Having a keyword is ok
// unless it is `crate`, `self`, `super` or `Self`, so we try to add the `_`
// suffix to it and parse it.
if ["crate", "self", "super", "Self"].contains(&name) {
Some(Ident::new(&format!("{name}_"), Span::call_site()))
} else {
Some(Ident::new(name, Span::call_site()))
}
} else {
// Try to parse the current joining name as an identifier. This might fail if the name
// is a keyword, so we try to prepend "r#" to it and parse again. This could also fail
// if the name is `crate`, `self`, `super` or `Self`, so we try to add the `_`
// suffix to it and parse again. If this also fails, we panic with the first
// error.
Some(
syn::parse_str::<Ident>(name)
.or_else(|err| {
syn::parse_str::<Ident>(&format!("r#{name}"))
.map_err(|_| err)
})
.or_else(|err| {
syn::parse_str::<Ident>(&format!("{name}_"))
.map_err(|_| err)
})
.expect("Invalid identifier"),
)
}
})
.collect();
// No arguments
if args.is_empty() && split_name.len() == 1 {
let name = &split_name[0];
return quote! {
#name
};
}
// Check right amount of arguments
assert_eq!(args.len(), split_name.len() - 1, "Incorrect method name or arguments for objc method, {args:?} vs {split_name:?}");
// Get arguments without type signatures to pass to `msg_send!`
let mut args_without_types = vec![];
for arg in args {
let arg = arg.to_string();
let name_and_sig: Vec<&str> = arg.split(' ').collect();
let name = name_and_sig[0];
args_without_types.push(Ident::new(name, Span::call_site()));
}
let args = split_name.into_iter().zip(args_without_types).map(
|(arg, arg_val)| {
if let Some(arg) = arg {
quote! { #arg: #arg_val }
} else {
quote! { #arg_val: #arg_val }
}
},
);
quote! {
#( #args )*
}
}
}
impl Trace for ObjCInterface {
type Extra = ();
fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, _: &())
where
T: Tracer,
{
for method in &self.methods {
method.signature.trace(context, tracer, &());
}
for class_method in &self.class_methods {
class_method.signature.trace(context, tracer, &());
}
for protocol in &self.conforms_to {
tracer.visit(*protocol);
}
}
}

335
vendor/bindgen/ir/template.rs vendored Normal file

@@ -0,0 +1,335 @@
//! Template declaration and instantiation related things.
//!
//! The nomenclature surrounding templates is often confusing, so here are a few
//! brief definitions:
//!
//! * "Template definition": a class/struct/alias/function definition that takes
//! generic template parameters. For example:
//!
//! ```c++
//! template<typename T>
//! class List<T> {
//! // ...
//! };
//! ```
//!
//! * "Template instantiation": an instantiation is a use of a template with
//! concrete template arguments. For example, `List<int>`.
//!
//! * "Template specialization": an alternative template definition providing a
//! custom definition for instantiations with the matching template
//! arguments. This C++ feature is unsupported by bindgen. For example:
//!
//! ```c++
//! template<>
//! class List<int> {
//! // Special layout for int lists...
//! };
//! ```
use super::context::{BindgenContext, ItemId, TypeId};
use super::item::{IsOpaque, Item, ItemAncestors};
use super::traversal::{EdgeKind, Trace, Tracer};
use crate::clang;
/// Template declaration (and such declaration's template parameters) related
/// methods.
///
/// This trait's methods distinguish between `None` and `Some([])` for
/// declarations that are not templates and template declarations with zero
/// parameters, in general.
///
/// Consider this example:
///
/// ```c++
/// template <typename T, typename U>
/// class Foo {
/// T use_of_t;
/// U use_of_u;
///
/// template <typename V>
/// using Bar = V*;
///
/// class Inner {
/// T x;
/// U y;
/// Bar<int> z;
/// };
///
/// template <typename W>
/// class Lol {
/// // No use of W, but here's a use of T.
/// T t;
/// };
///
/// template <typename X>
/// class Wtf {
/// // X is not used because W is not used.
/// Lol<X> lololol;
/// };
/// };
///
/// class Qux {
/// int y;
/// };
/// ```
///
/// The following table depicts the results of each trait method when invoked on
/// each of the declarations above:
///
/// |Decl. | self_template_params | num_self_template_params | all_template_parameters |
/// |------|----------------------|--------------------------|-------------------------|
/// |Foo | T, U | 2 | T, U |
/// |Bar | V | 1 | T, U, V |
/// |Inner | | 0 | T, U |
/// |Lol | W | 1 | T, U, W |
/// |Wtf | X | 1 | T, U, X |
/// |Qux | | 0 | |
///
/// | Decl. | used_template_params |
/// |-------|----------------------|
/// | Foo | T, U |
/// | Bar | V |
/// | Inner | |
/// | Lol | T |
/// | Wtf | T |
/// | Qux | |
pub(crate) trait TemplateParameters: Sized {
/// Get the set of `ItemId`s that make up this template declaration's free
/// template parameters.
///
/// Note that these might *not* all be named types: C++ allows
/// constant-value template parameters as well as template-template
/// parameters. Of course, Rust does not allow generic parameters to be
/// anything but types, so we must treat them as opaque, and avoid
/// instantiating them.
fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>;
/// Get the number of free template parameters this template declaration
/// has.
fn num_self_template_params(&self, ctx: &BindgenContext) -> usize {
self.self_template_params(ctx).len()
}
/// Get the complete set of template parameters that can affect this
/// declaration.
///
/// Note that this item doesn't need to be a template declaration itself for
/// `Some` to be returned here (in contrast to `self_template_params`). If
/// this item is a member of a template declaration, then the parent's
/// template parameters are included here.
///
/// In the example above, `Inner` depends on both of the `T` and `U` type
/// parameters, even though it is not itself a template declaration and
/// therefore has no type parameters itself. Perhaps it helps to think about
/// how we would fully reference such a member type in C++:
/// `Foo<int,char>::Inner`. `Foo` *must* be instantiated with template
/// arguments before we can gain access to the `Inner` member type.
fn all_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>
where
Self: ItemAncestors,
{
let mut ancestors: Vec<_> = self.ancestors(ctx).collect();
ancestors.reverse();
ancestors
.into_iter()
.flat_map(|id| id.self_template_params(ctx).into_iter())
.collect()
}
/// Get only the set of template parameters that this item uses. This is a
/// subset of `all_template_params` and does not necessarily contain any of
/// `self_template_params`.
fn used_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>
where
Self: AsRef<ItemId>,
{
assert!(
ctx.in_codegen_phase(),
"template parameter usage is not computed until codegen"
);
let id = *self.as_ref();
ctx.resolve_item(id)
.all_template_params(ctx)
.into_iter()
.filter(|p| ctx.uses_template_parameter(id, *p))
.collect()
}
}
/// A trait for things which may or may not be a named template type parameter.
pub(crate) trait AsTemplateParam {
/// Any extra information the implementor might need to make this decision.
type Extra;
/// Convert this thing to the item ID of a named template type parameter.
fn as_template_param(
&self,
ctx: &BindgenContext,
extra: &Self::Extra,
) -> Option<TypeId>;
/// Is this a named template type parameter?
fn is_template_param(
&self,
ctx: &BindgenContext,
extra: &Self::Extra,
) -> bool {
self.as_template_param(ctx, extra).is_some()
}
}
/// A concrete instantiation of a generic template.
#[derive(Clone, Debug)]
pub(crate) struct TemplateInstantiation {
/// The template definition which this is instantiating.
definition: TypeId,
/// The concrete template arguments, which will be substituted in the
/// definition for the generic template parameters.
args: Vec<TypeId>,
}
impl TemplateInstantiation {
/// Construct a new template instantiation from the given parts.
pub(crate) fn new<I>(definition: TypeId, args: I) -> TemplateInstantiation
where
I: IntoIterator<Item = TypeId>,
{
TemplateInstantiation {
definition,
args: args.into_iter().collect(),
}
}
/// Get the template definition for this instantiation.
pub(crate) fn template_definition(&self) -> TypeId {
self.definition
}
/// Get the concrete template arguments used in this instantiation.
pub(crate) fn template_arguments(&self) -> &[TypeId] {
&self.args[..]
}
/// Parse a `TemplateInstantiation` from a clang `Type`.
pub(crate) fn from_ty(
ty: &clang::Type,
ctx: &mut BindgenContext,
) -> Option<TemplateInstantiation> {
use clang_sys::*;
let template_args = ty.template_args().map_or(vec![], |args| match ty
.canonical_type()
.template_args()
{
Some(canonical_args) => {
let arg_count = args.len();
args.chain(canonical_args.skip(arg_count))
.filter(|t| t.kind() != CXType_Invalid)
.map(|t| {
Item::from_ty_or_ref(t, t.declaration(), None, ctx)
})
.collect()
}
None => args
.filter(|t| t.kind() != CXType_Invalid)
.map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx))
.collect(),
});
let declaration = ty.declaration();
let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl
{
Some(declaration)
} else {
declaration.specialized().or_else(|| {
let mut template_ref = None;
ty.declaration().visit(|child| {
if child.kind() == CXCursor_TemplateRef {
template_ref = Some(child);
return CXVisit_Break;
}
// Instantiations of template aliases might have the
// TemplateRef to the template alias definition arbitrarily
// deep, so we need to recurse here and not only visit
// direct children.
CXChildVisit_Recurse
});
template_ref.and_then(|cur| cur.referenced())
})
};
let Some(definition) = definition else {
if !ty.declaration().is_builtin() {
warn!(
"Could not find template definition for template \
instantiation"
);
}
return None;
};
let template_definition =
Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx);
Some(TemplateInstantiation::new(
template_definition,
template_args,
))
}
}
impl IsOpaque for TemplateInstantiation {
type Extra = Item;
/// Is this an opaque template instantiation?
fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool {
if self.template_definition().is_opaque(ctx, &()) {
return true;
}
// TODO(#774): This doesn't properly handle opaque instantiations where
// an argument is itself an instantiation because `canonical_name` does
// not insert the template arguments into the name, i.e. for nested
// template arguments it creates "Foo" instead of "Foo<int>". The fully
// correct fix is to make `canonical_{name,path}` include template
// arguments properly.
let mut path = item.path_for_allowlisting(ctx).clone();
let args: Vec<_> = self
.template_arguments()
.iter()
.map(|arg| {
let arg_path =
ctx.resolve_item(*arg).path_for_allowlisting(ctx);
arg_path[1..].join("::")
})
.collect();
{
let last = path.last_mut().unwrap();
last.push('<');
last.push_str(&args.join(", "));
last.push('>');
}
ctx.opaque_by_name(&path)
}
}
impl Trace for TemplateInstantiation {
type Extra = ();
fn trace<T>(&self, _ctx: &BindgenContext, tracer: &mut T, _: &())
where
T: Tracer,
{
tracer
.visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration);
for arg in self.template_arguments() {
tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument);
}
}
}

478
vendor/bindgen/ir/traversal.rs vendored Normal file

@@ -0,0 +1,478 @@
//! Traversal of the graph of IR items and types.
use super::context::{BindgenContext, ItemId};
use super::item::ItemSet;
use std::collections::{BTreeMap, VecDeque};
/// An outgoing edge in the IR graph is a reference from some item to another
/// item:
///
/// from --> to
///
/// The `from` is left implicit: it is the concrete `Trace` implementer which
/// yielded this outgoing edge.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct Edge {
to: ItemId,
kind: EdgeKind,
}
impl Edge {
/// Construct a new edge whose referent is `to` and is of the given `kind`.
pub(crate) fn new(to: ItemId, kind: EdgeKind) -> Edge {
Edge { to, kind }
}
}
impl From<Edge> for ItemId {
fn from(val: Edge) -> Self {
val.to
}
}
/// The kind of edge reference. This is useful when we wish to only consider
/// certain kinds of edges for a particular traversal or analysis.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) enum EdgeKind {
/// A generic, catch-all edge.
Generic,
/// An edge from a template declaration, to the definition of a named type
/// parameter. For example, the edge from `Foo<T>` to `T` in the following
/// snippet:
///
/// ```C++
/// template<typename T>
/// class Foo { };
/// ```
TemplateParameterDefinition,
/// An edge from a template instantiation to the template declaration that
/// is being instantiated. For example, the edge from `Foo<int>` to
/// `Foo<T>`:
///
/// ```C++
/// template<typename T>
/// class Foo { };
///
/// using Bar = Foo<int>;
/// ```
TemplateDeclaration,
/// An edge from a template instantiation to its template argument. For
/// example, `Foo<Bar>` to `Bar`:
///
/// ```C++
/// template<typename T>
/// class Foo { };
///
/// class Bar { };
///
/// using FooBar = Foo<Bar>;
/// ```
TemplateArgument,
/// An edge from a compound type to one of its base member types. For
/// example, the edge from `Bar` to `Foo`:
///
/// ```C++
/// class Foo { };
///
/// class Bar : public Foo { };
/// ```
BaseMember,
/// An edge from a compound type to the types of one of its fields. For
/// example, the edge from `Foo` to `int`:
///
/// ```C++
/// class Foo {
/// int x;
/// };
/// ```
Field,
/// An edge from a class or struct type to an inner type member. For
/// example, the edge from `Foo` to `Foo::Bar` here:
///
/// ```C++
/// class Foo {
/// struct Bar { };
/// };
/// ```
InnerType,
/// An edge from a class or struct type to an inner static variable. For
/// example, the edge from `Foo` to `Foo::BAR` here:
///
/// ```C++
/// class Foo {
/// static const char* BAR;
/// };
/// ```
InnerVar,
/// An edge from a class or struct type to one of its method functions. For
/// example, the edge from `Foo` to `Foo::bar`:
///
/// ```C++
/// class Foo {
/// bool bar(int x, int y);
/// };
/// ```
Method,
/// An edge from a class or struct type to one of its constructor
/// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`:
///
/// ```C++
/// class Foo {
/// int my_x;
/// int my_y;
///
/// public:
/// Foo(int x, int y);
/// };
/// ```
Constructor,
/// An edge from a class or struct type to its destructor function. For
/// example, the edge from `Doggo` to `Doggo::~Doggo()`:
///
/// ```C++
/// struct Doggo {
/// char* wow;
///
/// public:
/// ~Doggo();
/// };
/// ```
Destructor,
/// An edge from a function declaration to its return type. For example, the
/// edge from `foo` to `int`:
///
/// ```C++
/// int foo(char* string);
/// ```
FunctionReturn,
/// An edge from a function declaration to one of its parameter types. For
/// example, the edge from `foo` to `char*`:
///
/// ```C++
/// int foo(char* string);
/// ```
FunctionParameter,
/// An edge from a static variable to its type. For example, the edge from
/// `FOO` to `const char*`:
///
/// ```C++
/// static const char* FOO;
/// ```
VarType,
/// An edge from a non-templated alias or typedef to the referenced type.
TypeReference,
}
/// A predicate to allow visiting only sub-sets of the whole IR graph by
/// excluding certain edges from being followed by the traversal.
///
/// The predicate must return true if the traversal should follow this edge
/// and visit everything that is reachable through it.
pub(crate) type TraversalPredicate =
for<'a> fn(&'a BindgenContext, Edge) -> bool;
/// A `TraversalPredicate` implementation that follows all edges, and therefore
/// traversals using this predicate will see the whole IR graph reachable from
/// the traversal's roots.
pub(crate) fn all_edges(_: &BindgenContext, _: Edge) -> bool {
true
}
/// A `TraversalPredicate` implementation that only follows
/// `EdgeKind::InnerType` edges, and therefore traversals using this predicate
/// will only visit the traversal's roots and their inner types. This is used
/// in no-recursive-allowlist mode, where inner types such as anonymous
/// structs/unions still need to be processed.
pub(crate) fn only_inner_type_edges(_: &BindgenContext, edge: Edge) -> bool {
edge.kind == EdgeKind::InnerType
}
/// A `TraversalPredicate` implementation that only follows edges to items that
/// are enabled for code generation. This lets us skip considering items
/// which are not reachable from code generation.
pub(crate) fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool {
let cc = &ctx.options().codegen_config;
match edge.kind {
EdgeKind::Generic => {
ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx)
}
// We statically know the kind of item that non-generic edges can point
// to, so we don't need to actually resolve the item and check
// `Item::is_enabled_for_codegen`.
EdgeKind::TemplateParameterDefinition |
EdgeKind::TemplateArgument |
EdgeKind::TemplateDeclaration |
EdgeKind::BaseMember |
EdgeKind::Field |
EdgeKind::InnerType |
EdgeKind::FunctionReturn |
EdgeKind::FunctionParameter |
EdgeKind::VarType |
EdgeKind::TypeReference => cc.types(),
EdgeKind::InnerVar => cc.vars(),
EdgeKind::Method => cc.methods(),
EdgeKind::Constructor => cc.constructors(),
EdgeKind::Destructor => cc.destructors(),
}
}
/// The storage for the set of items that have been seen (although their
/// outgoing edges might not have been fully traversed yet) in an active
/// traversal.
pub(crate) trait TraversalStorage<'ctx> {
/// Construct a new instance of this `TraversalStorage`, for a new traversal.
fn new(ctx: &'ctx BindgenContext) -> Self;
/// Add the given item to the storage. If the item has never been seen
/// before, return `true`. Otherwise, return `false`.
///
/// The `from` item is the item from which we discovered this item, or is
/// `None` if this item is a root.
fn add(&mut self, from: Option<ItemId>, item: ItemId) -> bool;
}
impl<'ctx> TraversalStorage<'ctx> for ItemSet {
fn new(_: &'ctx BindgenContext) -> Self {
ItemSet::new()
}
fn add(&mut self, _: Option<ItemId>, item: ItemId) -> bool {
self.insert(item)
}
}
/// A `TraversalStorage` implementation that keeps track of how we first reached
/// each item. This is useful for providing debug assertions with meaningful
/// diagnostic messages about dangling items.
#[derive(Debug)]
pub(crate) struct Paths<'ctx>(BTreeMap<ItemId, ItemId>, &'ctx BindgenContext);
impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> {
fn new(ctx: &'ctx BindgenContext) -> Self {
Paths(BTreeMap::new(), ctx)
}
fn add(&mut self, from: Option<ItemId>, item: ItemId) -> bool {
let newly_discovered =
self.0.insert(item, from.unwrap_or(item)).is_none();
if self.1.resolve_item_fallible(item).is_none() {
let mut path = vec![];
let mut current = item;
loop {
let predecessor = *self.0.get(&current).expect(
"We know we found this item id, so it must have a \
predecessor",
);
if predecessor == current {
break;
}
path.push(predecessor);
current = predecessor;
}
path.reverse();
panic!(
"Found reference to dangling id = {item:?}\nvia path = {path:?}"
);
}
newly_discovered
}
}
/// The queue of seen-but-not-yet-traversed items.
///
/// Using a FIFO queue with a traversal will yield a breadth-first traversal,
/// while using a LIFO queue will result in a depth-first traversal of the IR
/// graph.
pub(crate) trait TraversalQueue: Default {
/// Add a newly discovered item to the queue.
fn push(&mut self, item: ItemId);
/// Pop the next item to traverse, if any.
fn next(&mut self) -> Option<ItemId>;
}
impl TraversalQueue for Vec<ItemId> {
fn push(&mut self, item: ItemId) {
self.push(item);
}
fn next(&mut self) -> Option<ItemId> {
self.pop()
}
}
impl TraversalQueue for VecDeque<ItemId> {
fn push(&mut self, item: ItemId) {
self.push_back(item);
}
fn next(&mut self) -> Option<ItemId> {
self.pop_front()
}
}
/// Something that can receive edges from a `Trace` implementation.
pub(crate) trait Tracer {
/// Note an edge between items. Called from within a `Trace` implementation.
fn visit_kind(&mut self, item: ItemId, kind: EdgeKind);
/// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`.
fn visit(&mut self, item: ItemId) {
self.visit_kind(item, EdgeKind::Generic);
}
}
impl<F> Tracer for F
where
F: FnMut(ItemId, EdgeKind),
{
fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
(*self)(item, kind);
}
}
/// Trace all of the outgoing edges to other items. Implementations should call
/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)`
/// for each of their outgoing edges.
pub(crate) trait Trace {
/// If a particular type needs extra information beyond what it has in
/// `self` and `context` to find its referenced items, its implementation
/// can define this associated type, forcing callers to pass the needed
/// information through.
type Extra;
/// Trace all of this item's outgoing edges to other items.
fn trace<T>(
&self,
context: &BindgenContext,
tracer: &mut T,
extra: &Self::Extra,
) where
T: Tracer;
}
/// A graph traversal of the transitive closure of references between items.
///
/// See `BindgenContext::allowlisted_items` for more information.
pub(crate) struct ItemTraversal<'ctx, Storage, Queue>
where
Storage: TraversalStorage<'ctx>,
Queue: TraversalQueue,
{
ctx: &'ctx BindgenContext,
/// The set of items we have seen thus far in this traversal.
seen: Storage,
/// The set of items that we have seen, but have yet to traverse.
queue: Queue,
/// The predicate that determines which edges this traversal will follow.
predicate: TraversalPredicate,
/// The item we are currently traversing.
currently_traversing: Option<ItemId>,
}
impl<'ctx, Storage, Queue> ItemTraversal<'ctx, Storage, Queue>
where
Storage: TraversalStorage<'ctx>,
Queue: TraversalQueue,
{
/// Begin a new traversal, starting from the given roots.
pub(crate) fn new<R>(
ctx: &'ctx BindgenContext,
roots: R,
predicate: TraversalPredicate,
) -> ItemTraversal<'ctx, Storage, Queue>
where
R: IntoIterator<Item = ItemId>,
{
let mut seen = Storage::new(ctx);
let mut queue = Queue::default();
for id in roots {
seen.add(None, id);
queue.push(id);
}
ItemTraversal {
ctx,
seen,
queue,
predicate,
currently_traversing: None,
}
}
}
impl<'ctx, Storage, Queue> Tracer for ItemTraversal<'ctx, Storage, Queue>
where
Storage: TraversalStorage<'ctx>,
Queue: TraversalQueue,
{
fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
let edge = Edge::new(item, kind);
if !(self.predicate)(self.ctx, edge) {
return;
}
let is_newly_discovered =
self.seen.add(self.currently_traversing, item);
if is_newly_discovered {
self.queue.push(item);
}
}
}
impl<'ctx, Storage, Queue> Iterator for ItemTraversal<'ctx, Storage, Queue>
where
Storage: TraversalStorage<'ctx>,
Queue: TraversalQueue,
{
type Item = ItemId;
fn next(&mut self) -> Option<Self::Item> {
let id = self.queue.next()?;
let newly_discovered = self.seen.add(None, id);
debug_assert!(
!newly_discovered,
"should have already seen anything we get out of our queue"
);
debug_assert!(
self.ctx.resolve_item_fallible(id).is_some(),
"should only get IDs of actual items in our context during traversal"
);
self.currently_traversing = Some(id);
id.trace(self.ctx, self, &());
self.currently_traversing = None;
Some(id)
}
}
/// An iterator to find any dangling items.
///
/// See `BindgenContext::assert_no_dangling_item_traversal` for more
/// information.
pub(crate) type AssertNoDanglingItemsTraversal<'ctx> =
ItemTraversal<'ctx, Paths<'ctx>, VecDeque<ItemId>>;

1256
vendor/bindgen/ir/ty.rs vendored Normal file

File diff suppressed because it is too large

523
vendor/bindgen/ir/var.rs vendored Normal file

@@ -0,0 +1,523 @@
//! Intermediate representation of variables.
use super::super::codegen::MacroTypeVariation;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::function::cursor_mangling;
use super::int::IntKind;
use super::item::Item;
use super::ty::{FloatKind, TypeKind};
use crate::callbacks::{ItemInfo, ItemKind, MacroParsingBehavior};
use crate::clang;
use crate::clang::ClangToken;
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use std::io;
use std::num::Wrapping;
/// The type for a constant variable.
#[derive(Debug)]
pub(crate) enum VarType {
/// A boolean.
Bool(bool),
/// An integer.
Int(i64),
/// A floating point number.
Float(f64),
/// A character.
Char(u8),
/// A string, not necessarily well-formed utf-8.
String(Vec<u8>),
}
/// A `Var` is our intermediate representation of a variable.
#[derive(Debug)]
pub(crate) struct Var {
/// The name of the variable.
name: String,
/// The mangled name of the variable.
mangled_name: Option<String>,
/// The link name of the variable.
link_name: Option<String>,
/// The type of the variable.
ty: TypeId,
/// The value of the variable, which needs to be suitable for `ty`.
val: Option<VarType>,
/// Whether this variable is const.
is_const: bool,
}
impl Var {
/// Construct a new `Var`.
pub(crate) fn new(
name: String,
mangled_name: Option<String>,
link_name: Option<String>,
ty: TypeId,
val: Option<VarType>,
is_const: bool,
) -> Var {
assert!(!name.is_empty());
Var {
name,
mangled_name,
link_name,
ty,
val,
is_const,
}
}
/// Is this variable `const` qualified?
pub(crate) fn is_const(&self) -> bool {
self.is_const
}
/// The value of this constant variable, if any.
pub(crate) fn val(&self) -> Option<&VarType> {
self.val.as_ref()
}
/// Get this variable's type.
pub(crate) fn ty(&self) -> TypeId {
self.ty
}
/// Get this variable's name.
pub(crate) fn name(&self) -> &str {
&self.name
}
/// Get this variable's mangled name.
pub(crate) fn mangled_name(&self) -> Option<&str> {
self.mangled_name.as_deref()
}
/// Get this variable's link name.
pub fn link_name(&self) -> Option<&str> {
self.link_name.as_deref()
}
}
impl DotAttributes for Var {
fn dot_attributes<W>(
&self,
_ctx: &BindgenContext,
out: &mut W,
) -> io::Result<()>
where
W: io::Write,
{
if self.is_const {
writeln!(out, "<tr><td>const</td><td>true</td></tr>")?;
}
if let Some(ref mangled) = self.mangled_name {
writeln!(out, "<tr><td>mangled name</td><td>{mangled}</td></tr>")?;
}
Ok(())
}
}
fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind {
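// For example, with `MacroTypeVariation::Unsigned` and `fit_macro_constants`
// disabled: `-1` maps to `IntKind::I32`, `255` to `IntKind::U32`, and
// `5_000_000_000` to `IntKind::U64`.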
if value < 0 ||
ctx.options().default_macro_constant_type ==
MacroTypeVariation::Signed
{
if value < i64::from(i32::MIN) || value > i64::from(i32::MAX) {
IntKind::I64
} else if !ctx.options().fit_macro_constants ||
value < i64::from(i16::MIN) ||
value > i64::from(i16::MAX)
{
IntKind::I32
} else if value < i64::from(i8::MIN) || value > i64::from(i8::MAX) {
IntKind::I16
} else {
IntKind::I8
}
} else if value > i64::from(u32::MAX) {
IntKind::U64
} else if !ctx.options().fit_macro_constants || value > i64::from(u16::MAX)
{
IntKind::U32
} else if value > i64::from(u8::MAX) {
IntKind::U16
} else {
IntKind::U8
}
}
/// Parses tokens from a `CXCursor_MacroDefinition` pointing into a function-like
/// macro, and calls the `func_macro` callback.
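/// For example, for `#define MIN(a, b) ((a) < (b) ? (a) : (b))` the `func_macro`
/// callback receives the concatenated token spellings `MIN(a,b)` as its left-hand
/// side and the remaining body tokens as its right-hand side.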
fn handle_function_macro(
cursor: &clang::Cursor,
callbacks: &dyn crate::callbacks::ParseCallbacks,
) {
let is_closing_paren = |t: &ClangToken| {
// Test cheap token kind before comparing exact spellings.
t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")"
};
let tokens: Vec<_> = cursor.tokens().iter().collect();
if let Some(boundary) = tokens.iter().position(is_closing_paren) {
let mut spelled = tokens.iter().map(ClangToken::spelling);
// Add 1, to convert index to length.
let left = spelled.by_ref().take(boundary + 1);
let left = left.collect::<Vec<_>>().concat();
if let Ok(left) = String::from_utf8(left) {
let right: Vec<_> = spelled.collect();
callbacks.func_macro(&left, &right);
}
}
}
impl ClangSubItemParser for Var {
fn parse(
cursor: clang::Cursor,
ctx: &mut BindgenContext,
) -> Result<ParseResult<Self>, ParseError> {
use cexpr::expr::EvalResult;
use cexpr::literal::CChar;
use clang_sys::*;
match cursor.kind() {
CXCursor_MacroDefinition => {
for callbacks in &ctx.options().parse_callbacks {
match callbacks.will_parse_macro(&cursor.spelling()) {
MacroParsingBehavior::Ignore => {
return Err(ParseError::Continue);
}
MacroParsingBehavior::Default => {}
}
if cursor.is_macro_function_like() {
handle_function_macro(&cursor, callbacks.as_ref());
// We handled the macro, skip macro processing below.
return Err(ParseError::Continue);
}
}
let value = parse_macro(ctx, &cursor);
let Some((id, value)) = value else {
return Err(ParseError::Continue);
};
assert!(!id.is_empty(), "Empty macro name?");
let previously_defined = ctx.parsed_macro(&id);
// NB: It's important to "note" the macro even if the result is
// not an integer, otherwise we might lose other kinds of
// derived macros.
ctx.note_parsed_macro(id.clone(), value.clone());
if previously_defined {
let name = String::from_utf8(id).unwrap();
duplicated_macro_diagnostic(&name, cursor.location(), ctx);
return Err(ParseError::Continue);
}
// NOTE: Unwrapping, here and above, is safe, because the
// identifier of a token comes straight from clang, and we
// enforce utf8 there, so we should have already panicked at
// this point.
let name = String::from_utf8(id).unwrap();
let (type_kind, val) = match value {
EvalResult::Invalid => return Err(ParseError::Continue),
EvalResult::Float(f) => {
(TypeKind::Float(FloatKind::Double), VarType::Float(f))
}
EvalResult::Char(c) => {
let c = match c {
CChar::Char(c) => {
assert_eq!(c.len_utf8(), 1);
c as u8
}
CChar::Raw(c) => u8::try_from(c).unwrap(),
};
(TypeKind::Int(IntKind::U8), VarType::Char(c))
}
EvalResult::Str(val) => {
let char_ty = Item::builtin_type(
TypeKind::Int(IntKind::U8),
true,
ctx,
);
for callbacks in &ctx.options().parse_callbacks {
callbacks.str_macro(&name, &val);
}
(TypeKind::Pointer(char_ty), VarType::String(val))
}
EvalResult::Int(Wrapping(value)) => {
let kind = ctx
.options()
.last_callback(|c| c.int_macro(&name, value))
.unwrap_or_else(|| {
default_macro_constant_type(ctx, value)
});
(TypeKind::Int(kind), VarType::Int(value))
}
};
let ty = Item::builtin_type(type_kind, true, ctx);
Ok(ParseResult::New(
Var::new(name, None, None, ty, Some(val), true),
Some(cursor),
))
}
CXCursor_VarDecl => {
let mut name = cursor.spelling();
if cursor.linkage() == CXLinkage_External {
if let Some(nm) = ctx.options().last_callback(|callbacks| {
callbacks.generated_name_override(ItemInfo {
name: name.as_str(),
kind: ItemKind::Var,
})
}) {
name = nm;
}
}
// No more changes to name
let name = name;
if name.is_empty() {
warn!("Empty constant name?");
return Err(ParseError::Continue);
}
let link_name = ctx.options().last_callback(|callbacks| {
callbacks.generated_link_name_override(ItemInfo {
name: name.as_str(),
kind: ItemKind::Var,
})
});
let ty = cursor.cur_type();
// TODO(emilio): do we have to special-case constant arrays in
// some other places?
let is_const = ty.is_const() ||
([CXType_ConstantArray, CXType_IncompleteArray]
.contains(&ty.kind()) &&
ty.elem_type()
.is_some_and(|element| element.is_const()));
let ty = match Item::from_ty(&ty, cursor, None, ctx) {
Ok(ty) => ty,
Err(e) => {
assert!(
matches!(ty.kind(), CXType_Auto | CXType_Unexposed),
"Couldn't resolve constant type, and it \
                             wasn't a non-deducible auto type or unexposed \
type: {ty:?}"
);
return Err(e);
}
};
// Note: Ty might not be totally resolved yet, see
// tests/headers/inner_const.hpp
//
// That's fine because in that case we know it's not a literal.
let canonical_ty = ctx
.safe_resolve_type(ty)
.and_then(|t| t.safe_canonical_type(ctx));
let is_integer = canonical_ty.is_some_and(|t| t.is_integer());
let is_float = canonical_ty.is_some_and(|t| t.is_float());
// TODO: We could handle `char` more gracefully.
                // TODO: Strings, though the lookup is a bit harder (we need
                // to look at the canonical type of the pointee too, and check
                // whether it is char, u8, or i8, I guess).
let value = if is_integer {
let TypeKind::Int(kind) = *canonical_ty.unwrap().kind()
else {
unreachable!()
};
let mut val = cursor.evaluate().and_then(|v| v.as_int());
if val.is_none() || !kind.signedness_matches(val.unwrap()) {
val = get_integer_literal_from_cursor(&cursor);
}
val.map(|val| {
if kind == IntKind::Bool {
VarType::Bool(val != 0)
} else {
VarType::Int(val)
}
})
} else if is_float {
cursor
.evaluate()
.and_then(|v| v.as_double())
.map(VarType::Float)
} else {
cursor
.evaluate()
.and_then(|v| v.as_literal_string())
.map(VarType::String)
};
let mangling = cursor_mangling(ctx, &cursor);
let var =
Var::new(name, mangling, link_name, ty, value, is_const);
Ok(ParseResult::New(var, Some(cursor)))
}
_ => {
/* TODO */
Err(ParseError::Continue)
}
}
}
}
/// This function uses a [`FallbackTranslationUnit`][clang::FallbackTranslationUnit] to parse each
/// macro that cannot be parsed by the normal bindgen process for `#define`s.
///
/// To construct the [`FallbackTranslationUnit`][clang::FallbackTranslationUnit], first precompiled
/// headers are generated for all input headers. An empty temporary `.c` file is generated to pass
/// to the translation unit. When each macro is evaluated, a [`String`] with the new contents of
/// the empty file is generated and passed in for reparsing. The precompiled headers and the
/// preservation of the [`FallbackTranslationUnit`][clang::FallbackTranslationUnit] across macro
/// evaluations are both optimizations that significantly improve performance.
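///
/// As an illustrative sketch (the macro name `FOO` is hypothetical), evaluating
/// `#define FOO (1 << 3)` reparses the temporary file with contents along the lines of:
///
/// ```c
/// int main() { FOO; }
/// ```
///
/// The precompiled headers make `FOO` expand during the reparse, and the value is recovered by
/// evaluating the resulting `ParenExpr` inside `main`'s `CompoundStmt`.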
fn parse_macro_clang_fallback(
ctx: &mut BindgenContext,
cursor: &clang::Cursor,
) -> Option<(Vec<u8>, cexpr::expr::EvalResult)> {
if !ctx.options().clang_macro_fallback {
return None;
}
let ftu = ctx.try_ensure_fallback_translation_unit()?;
let contents = format!("int main() {{ {}; }}", cursor.spelling());
ftu.reparse(&contents).ok()?;
// Children of root node of AST
let root_children = ftu.translation_unit().cursor().collect_children();
// Last child in root is function declaration
// Should be FunctionDecl
let main_func = root_children.last()?;
// Children should all be statements in function declaration
let all_stmts = main_func.collect_children();
// First child in all_stmts should be the statement containing the macro to evaluate
// Should be CompoundStmt
let macro_stmt = all_stmts.first()?;
// Children should all be expressions from the compound statement
let paren_exprs = macro_stmt.collect_children();
    // First child in paren_exprs is the expression using the given macro to be evaluated
// Should be ParenExpr
let paren = paren_exprs.first()?;
Some((
cursor.spelling().into_bytes(),
cexpr::expr::EvalResult::Int(Wrapping(paren.evaluate()?.as_int()?)),
))
}
/// Try and parse a macro using all the macros parsed until now.
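///
/// For instance (illustrative only), `#define N 4` is expected to yield roughly
/// `(b"N".to_vec(), EvalResult::Int(Wrapping(4)))` from the `cexpr` parser.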
fn parse_macro(
ctx: &mut BindgenContext,
cursor: &clang::Cursor,
) -> Option<(Vec<u8>, cexpr::expr::EvalResult)> {
use cexpr::expr;
let mut cexpr_tokens = cursor.cexpr_tokens();
for callbacks in &ctx.options().parse_callbacks {
callbacks.modify_macro(&cursor.spelling(), &mut cexpr_tokens);
}
let parser = expr::IdentifierParser::new(ctx.parsed_macros());
match parser.macro_definition(&cexpr_tokens) {
Ok((_, (id, val))) => Some((id.into(), val)),
_ => parse_macro_clang_fallback(ctx, cursor),
}
}
fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option<i64> {
use cexpr::expr;
use cexpr::expr::EvalResult;
let cexpr_tokens = cursor.cexpr_tokens();
// TODO(emilio): We can try to parse other kinds of literals.
match expr::expr(&cexpr_tokens) {
Ok((_, EvalResult::Int(Wrapping(val)))) => Some(val),
_ => None,
}
}
fn get_integer_literal_from_cursor(cursor: &clang::Cursor) -> Option<i64> {
use clang_sys::*;
let mut value = None;
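    // Negated literals such as `-5` appear as a `UnaryOperator` wrapping an
    // `IntegerLiteral`, and implicit casts are only surfaced as
    // `UnexposedExpr`, hence the special handling of those node kinds below.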
cursor.visit(|c| {
match c.kind() {
CXCursor_IntegerLiteral | CXCursor_UnaryOperator => {
value = parse_int_literal_tokens(&c);
}
CXCursor_UnexposedExpr => {
value = get_integer_literal_from_cursor(&c);
}
_ => (),
}
if value.is_some() {
CXChildVisit_Break
} else {
CXChildVisit_Continue
}
});
value
}
fn duplicated_macro_diagnostic(
macro_name: &str,
_location: clang::SourceLocation,
_ctx: &BindgenContext,
) {
warn!("Duplicated macro definition: {macro_name}");
#[cfg(feature = "experimental")]
// FIXME (pvdrz & amanjeev): This diagnostic message shows way too often to be actually
// useful. We have to change the logic where this function is called to be able to emit this
// message only when the duplication is an actual issue.
//
// If I understood correctly, `bindgen` ignores all `#undef` directives. Meaning that this:
// ```c
// #define FOO 1
// #undef FOO
// #define FOO 2
// ```
//
// Will trigger this message even though there's nothing wrong with it.
#[allow(clippy::overly_complex_bool_expr)]
if false && _ctx.options().emit_diagnostics {
use crate::diagnostics::{get_line, Diagnostic, Level, Slice};
use std::borrow::Cow;
let mut slice = Slice::default();
let mut source = Cow::from(macro_name);
let (file, line, col, _) = _location.location();
if let Some(filename) = file.name() {
if let Ok(Some(code)) = get_line(&filename, line) {
source = code.into();
}
slice.with_location(filename, line, col);
}
slice.with_source(source);
Diagnostic::default()
.with_title("Duplicated macro definition.", Level::Warning)
.add_slice(slice)
.add_annotation("This macro had a duplicate.", Level::Note)
.display();
}
}