Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

726
vendor/bindgen/ir/analysis/derive.rs vendored Normal file
View File

@@ -0,0 +1,726 @@
//! Determining the types for which we cannot emit `#[derive(Trait)]`.
use std::fmt;
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::analysis::has_vtable::HasVtable;
use crate::ir::comp::CompKind;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::derive::CanDerive;
use crate::ir::function::FunctionSig;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::layout::Layout;
use crate::ir::template::TemplateParameters;
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use crate::ir::ty::{Type, TypeKind};
use crate::{Entry, HashMap, HashSet};
/// Which trait to consider when doing the `CannotDerive` analysis.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum DeriveTrait {
    /// The `Copy` trait.
    Copy,
    /// The `Debug` trait.
    Debug,
    /// The `Default` trait.
    Default,
    /// The `PartialEq` and `PartialOrd` traits, analyzed as a single unit.
    /// The `Hash` trait.
    Hash,
    /// The `PartialEq` and `PartialOrd` traits.
    PartialEqOrPartialOrd,
}
/// An analysis that finds for each IR item whether a trait cannot be derived.
///
/// We use the monotone constraint function `cannot_derive`, defined as follows
/// for type T:
///
/// * If T is Opaque and the layout of the type is known, get this layout as an
///   opaquetype and check whether it can derive using trivial checks.
///
/// * If T is Array, a trait cannot be derived if the array is incomplete,
///   if the length of the array is larger than the limit (unless the trait
///   allows it), or the trait cannot be derived for the type of data the array
///   contains.
///
/// * If T is Vector, a trait cannot be derived if the trait cannot be derived
///   for the type of data the vector contains.
///
/// * If T is a type alias, a templated alias or an indirection to another type,
///   the trait cannot be derived if the trait cannot be derived for type T
///   refers to.
///
/// * If T is a compound type, the trait cannot be derived if the trait cannot
///   be derived for any of its base members or fields.
///
/// * If T is an instantiation of an abstract template definition, the trait
///   cannot be derived if any of the template arguments or template definition
///   cannot derive the trait.
///
/// * For all other (simple) types, compiler and standard library limitations
///   dictate whether the trait is implemented.
#[derive(Debug, Clone)]
pub(crate) struct CannotDerive<'ctx> {
    // The context this analysis runs over; resolves ids and builder options.
    ctx: &'ctx BindgenContext,
    // The single trait this instance of the analysis is computing results for.
    derive_trait: DeriveTrait,
    // The incremental result of this analysis's computation.
    // Contains information whether particular item can derive `derive_trait`.
    // Absence of an id means `CanDerive::Yes` (the default/bottom value).
    can_derive: HashMap<ItemId, CanDerive>,
    // Dependencies saying that if a key ItemId's entry in the `can_derive`
    // map has changed, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // can derive `derive_trait`.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
/// Predicate deciding whether a traversal edge is relevant to an analysis.
type EdgePredicate = fn(EdgeKind) -> bool;
/// Default edge filter shared by the derive analyses.
///
/// Keeps exactly the edge kinds that can influence whether a type can derive
/// a trait; all other traversal edges are ignored.
fn consider_edge_default(kind: EdgeKind) -> bool {
    // These are the only edges that can affect whether a type can derive.
    matches!(
        kind,
        EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::TypeReference |
            EdgeKind::VarType |
            EdgeKind::TemplateArgument |
            EdgeKind::TemplateDeclaration |
            EdgeKind::TemplateParameterDefinition
    )
}
impl CannotDerive<'_> {
    /// Record `can_derive` as the analysis result for `id`.
    ///
    /// `CanDerive::Yes` is the implicit default for ids absent from the map,
    /// so it is never stored. Returns `ConstrainResult::Changed` only when
    /// the stored value actually moved upward in the `CanDerive` ordering,
    /// which keeps the fixpoint iteration monotone.
    fn insert<Id: Into<ItemId>>(
        &mut self,
        id: Id,
        can_derive: CanDerive,
    ) -> ConstrainResult {
        let id = id.into();
        trace!(
            "inserting {id:?} can_derive<{}>={can_derive:?}",
            self.derive_trait,
        );
        if let CanDerive::Yes = can_derive {
            // `Yes` is the default for missing entries; storing it would be
            // redundant and can never change the fixpoint.
            return ConstrainResult::Same;
        }
        match self.can_derive.entry(id) {
            Entry::Occupied(mut entry) => {
                // Only monotone (upward) movement counts as a change.
                if *entry.get() < can_derive {
                    entry.insert(can_derive);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
            Entry::Vacant(entry) => {
                entry.insert(can_derive);
                ConstrainResult::Changed
            }
        }
    }

    /// Apply the constraint function to the type `ty` of `item`, returning
    /// whether `self.derive_trait` can be derived for it. Results for inner
    /// types are looked up from `self.can_derive`, as computed on previous
    /// iterations of the monotone framework.
    fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive {
        if !self.ctx.allowlisted_items().contains(&item.id()) {
            // Blocklisted types may still explicitly claim to implement the
            // trait via builder options; defer to the context.
            let can_derive = self
                .ctx
                .blocklisted_type_implements_trait(item, self.derive_trait);
            match can_derive {
                CanDerive::Yes => trace!(
                    " blocklisted type explicitly implements {}",
                    self.derive_trait
                ),
                CanDerive::Manually => trace!(
                    " blocklisted type requires manual implementation of {}",
                    self.derive_trait
                ),
                CanDerive::No => trace!(
                    " cannot derive {} for blocklisted type",
                    self.derive_trait
                ),
            }
            return can_derive;
        }
        // The user explicitly opted this type out of this derive by name.
        if self.derive_trait.not_by_name(self.ctx, item) {
            trace!(
                " cannot derive {} for explicitly excluded type",
                self.derive_trait
            );
            return CanDerive::No;
        }
        trace!("ty: {ty:?}");
        if item.is_opaque(self.ctx, &()) {
            if !self.derive_trait.can_derive_union() &&
                ty.is_union() &&
                self.ctx.options().untagged_union
            {
                trace!(
                    " cannot derive {} for Rust unions",
                    self.derive_trait
                );
                return CanDerive::No;
            }
            // Opaque types are emitted as byte arrays, so derivability reduces
            // to whether an array of that layout stays within the derive limit.
            let layout_can_derive =
                ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
                    l.opaque().array_size_within_derive_limit()
                });
            match layout_can_derive {
                CanDerive::Yes => {
                    trace!(
                        " we can trivially derive {} for the layout",
                        self.derive_trait
                    );
                }
                _ => {
                    trace!(
                        " we cannot derive {} for the layout",
                        self.derive_trait
                    );
                }
            }
            return layout_can_derive;
        }
        match *ty.kind() {
            // Handle the simple cases. These can derive traits without further
            // information.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Complex(..) |
            TypeKind::Float(..) |
            TypeKind::Enum(..) |
            TypeKind::TypeParam |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::Reference(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => self.derive_trait.can_derive_simple(ty.kind()),
            TypeKind::Pointer(inner) => {
                // Function pointers have their own derive rules, so look
                // through the pointee's canonical type.
                let inner_type =
                    self.ctx.resolve_type(inner).canonical_type(self.ctx);
                if let TypeKind::Function(ref sig) = *inner_type.kind() {
                    self.derive_trait.can_derive_fnptr(sig)
                } else {
                    self.derive_trait.can_derive_pointer()
                }
            }
            TypeKind::Function(ref sig) => {
                self.derive_trait.can_derive_fnptr(sig)
            }
            // Complex cases need more information
            TypeKind::Array(t, len) => {
                // Result for the element type, from a previous iteration.
                let inner_type =
                    self.can_derive.get(&t.into()).copied().unwrap_or_default();
                if inner_type != CanDerive::Yes {
                    trace!(
                        " arrays of T for which we cannot derive {} \
                        also cannot derive {}",
                        self.derive_trait,
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // `len == 0` marks an incomplete (flexible) array member.
                if len == 0 && !self.derive_trait.can_derive_incomplete_array()
                {
                    trace!(
                        " cannot derive {} for incomplete arrays",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                if self.derive_trait.can_derive_large_array(self.ctx) {
                    trace!(" array can derive {}", self.derive_trait);
                    return CanDerive::Yes;
                }
                if len > RUST_DERIVE_IN_ARRAY_LIMIT {
                    // Over-limit arrays can still be implemented by hand,
                    // hence `Manually` rather than `No`.
                    trace!(
                        " array is too large to derive {}, but it may be implemented", self.derive_trait
                    );
                    return CanDerive::Manually;
                }
                trace!(
                    " array is small enough to derive {}",
                    self.derive_trait
                );
                CanDerive::Yes
            }
            TypeKind::Vector(t, len) => {
                let inner_type =
                    self.can_derive.get(&t.into()).copied().unwrap_or_default();
                if inner_type != CanDerive::Yes {
                    trace!(
                        " vectors of T for which we cannot derive {} \
                        also cannot derive {}",
                        self.derive_trait,
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                assert_ne!(len, 0, "vectors cannot have zero length");
                self.derive_trait.can_derive_vector()
            }
            TypeKind::Comp(ref info) => {
                assert!(
                    !info.has_non_type_template_params(),
                    "The early ty.is_opaque check should have handled this case"
                );
                if !self.derive_trait.can_derive_compound_forward_decl() &&
                    info.is_forward_declaration()
                {
                    trace!(
                        " cannot derive {} for forward decls",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // NOTE: Take into account that while unions in C and C++ are copied by
                // default, they may have an explicit destructor in C++, so we can't
                // defer this check just for the union case.
                if !self.derive_trait.can_derive_compound_with_destructor() &&
                    self.ctx.lookup_has_destructor(
                        item.id().expect_type_id(self.ctx),
                    )
                {
                    trace!(
                        " comp has destructor which cannot derive {}",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                if info.kind() == CompKind::Union {
                    if self.derive_trait.can_derive_union() {
                        if self.ctx.options().untagged_union &&
                            // https://github.com/rust-lang/rust/issues/36640
                            (!info.self_template_params(self.ctx).is_empty() ||
                                !item.all_template_params(self.ctx).is_empty())
                        {
                            trace!(
                                " cannot derive {} for Rust union because issue 36640", self.derive_trait
                            );
                            return CanDerive::No;
                        }
                        // fall through to be same as non-union handling
                    } else {
                        if self.ctx.options().untagged_union {
                            trace!(
                                " cannot derive {} for Rust unions",
                                self.derive_trait
                            );
                            return CanDerive::No;
                        }
                        // Emulated (non-untagged) unions become opaque byte
                        // blobs, so check the layout's derive limit instead.
                        let layout_can_derive =
                            ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
                                l.opaque().array_size_within_derive_limit()
                            });
                        match layout_can_derive {
                            CanDerive::Yes => {
                                trace!(
                                    " union layout can trivially derive {}",
                                    self.derive_trait
                                );
                            }
                            _ => {
                                trace!(
                                    " union layout cannot derive {}",
                                    self.derive_trait
                                );
                            }
                        }
                        return layout_can_derive;
                    }
                }
                if !self.derive_trait.can_derive_compound_with_vtable() &&
                    item.has_vtable(self.ctx)
                {
                    trace!(
                        " cannot derive {} for comp with vtable",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // Bitfield units are always represented as arrays of u8, but
                // they're not traced as arrays, so we need to check here
                // instead.
                if !self.derive_trait.can_derive_large_array(self.ctx) &&
                    info.has_too_large_bitfield_unit() &&
                    !item.is_opaque(self.ctx, &())
                {
                    trace!(
                        " cannot derive {} for comp with too large bitfield unit",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // Otherwise the answer is the join over base members/fields.
                let pred = self.derive_trait.consider_edge_comp();
                self.constrain_join(item, pred)
            }
            TypeKind::ResolvedTypeRef(..) |
            TypeKind::TemplateAlias(..) |
            TypeKind::Alias(..) |
            TypeKind::BlockPointer(..) => {
                let pred = self.derive_trait.consider_edge_typeref();
                self.constrain_join(item, pred)
            }
            TypeKind::TemplateInstantiation(..) => {
                let pred = self.derive_trait.consider_edge_tmpl_inst();
                self.constrain_join(item, pred)
            }
            TypeKind::Opaque => unreachable!(
                "The early ty.is_opaque check should have handled this case"
            ),
        }
    }

    /// Join (`|=`) the previously computed `CanDerive` results of every
    /// member of `item` reachable through an edge accepted by
    /// `consider_edge`. An item with no relevant members trivially derives.
    fn constrain_join(
        &mut self,
        item: &Item,
        consider_edge: EdgePredicate,
    ) -> CanDerive {
        // `None` means "no relevant member seen yet".
        let mut candidate = None;
        item.trace(
            self.ctx,
            &mut |sub_id, edge_kind| {
                // Ignore ourselves, since union with ourself is a
                // no-op. Ignore edges that aren't relevant to the
                // analysis.
                if sub_id == item.id() || !consider_edge(edge_kind) {
                    return;
                }
                let can_derive = self.can_derive
                    .get(&sub_id)
                    .copied()
                    .unwrap_or_default();
                match can_derive {
                    CanDerive::Yes => trace!(" member {sub_id:?} can derive {}", self.derive_trait),
                    CanDerive::Manually => trace!(" member {sub_id:?} cannot derive {}, but it may be implemented", self.derive_trait),
                    CanDerive::No => trace!(" member {sub_id:?} cannot derive {}", self.derive_trait),
                }
                // Accumulate the worst result seen over all members.
                *candidate.get_or_insert(CanDerive::Yes) |= can_derive;
            },
            &(),
        );
        if candidate.is_none() {
            trace!(
                " can derive {} because there are no members",
                self.derive_trait
            );
        }
        candidate.unwrap_or_default()
    }
}
impl DeriveTrait {
fn not_by_name(self, ctx: &BindgenContext, item: &Item) -> bool {
match self {
DeriveTrait::Copy => ctx.no_copy_by_name(item),
DeriveTrait::Debug => ctx.no_debug_by_name(item),
DeriveTrait::Default => ctx.no_default_by_name(item),
DeriveTrait::Hash => ctx.no_hash_by_name(item),
DeriveTrait::PartialEqOrPartialOrd => {
ctx.no_partialeq_by_name(item)
}
}
}
fn consider_edge_comp(self) -> EdgePredicate {
match self {
DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
_ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field),
}
}
fn consider_edge_typeref(self) -> EdgePredicate {
match self {
DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
_ => |kind| kind == EdgeKind::TypeReference,
}
}
fn consider_edge_tmpl_inst(self) -> EdgePredicate {
match self {
DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
_ => |kind| {
matches!(
kind,
EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration
)
},
}
}
fn can_derive_large_array(self, ctx: &BindgenContext) -> bool {
if ctx.options().rust_features().larger_arrays {
!matches!(self, DeriveTrait::Default)
} else {
matches!(self, DeriveTrait::Copy)
}
}
fn can_derive_union(self) -> bool {
matches!(self, DeriveTrait::Copy)
}
fn can_derive_compound_with_destructor(self) -> bool {
!matches!(self, DeriveTrait::Copy)
}
fn can_derive_compound_with_vtable(self) -> bool {
!matches!(self, DeriveTrait::Default)
}
fn can_derive_compound_forward_decl(self) -> bool {
matches!(self, DeriveTrait::Copy | DeriveTrait::Debug)
}
fn can_derive_incomplete_array(self) -> bool {
!matches!(
self,
DeriveTrait::Copy |
DeriveTrait::Hash |
DeriveTrait::PartialEqOrPartialOrd
)
}
fn can_derive_fnptr(self, f: &FunctionSig) -> CanDerive {
match (self, f.function_pointers_can_derive()) {
(DeriveTrait::Copy | DeriveTrait::Default, _) | (_, true) => {
trace!(" function pointer can derive {self}");
CanDerive::Yes
}
(DeriveTrait::Debug, false) => {
trace!(" function pointer cannot derive {self}, but it may be implemented");
CanDerive::Manually
}
(_, false) => {
trace!(" function pointer cannot derive {self}");
CanDerive::No
}
}
}
fn can_derive_vector(self) -> CanDerive {
if self == DeriveTrait::PartialEqOrPartialOrd {
// FIXME: vectors always can derive PartialEq, but they should
// not derive PartialOrd:
// https://github.com/rust-lang-nursery/packed_simd/issues/48
trace!(" vectors cannot derive PartialOrd");
CanDerive::No
} else {
trace!(" vector can derive {self}");
CanDerive::Yes
}
}
fn can_derive_pointer(self) -> CanDerive {
if self == DeriveTrait::Default {
trace!(" pointer cannot derive Default");
CanDerive::No
} else {
trace!(" pointer can derive {self}");
CanDerive::Yes
}
}
fn can_derive_simple(self, kind: &TypeKind) -> CanDerive {
match (self, kind) {
// === Default ===
(
DeriveTrait::Default,
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::TypeParam |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel,
) => {
trace!(" types that always cannot derive Default");
CanDerive::No
}
(DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => {
unreachable!(
"Type with unresolved type ref can't reach derive default"
)
}
// === Hash ===
(
DeriveTrait::Hash,
TypeKind::Float(..) | TypeKind::Complex(..),
) => {
trace!(" float cannot derive Hash");
CanDerive::No
}
// === others ===
_ => {
trace!(" simple type that can always derive {self}");
CanDerive::Yes
}
}
}
}
impl fmt::Display for DeriveTrait {
    /// Render the trait name for log messages; `f.pad` honors any
    /// width/alignment flags, exactly as `str`'s own `Display` impl does.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.pad(match self {
            DeriveTrait::Copy => "Copy",
            DeriveTrait::Debug => "Debug",
            DeriveTrait::Default => "Default",
            DeriveTrait::Hash => "Hash",
            DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd",
        })
    }
}
impl<'ctx> MonotoneFramework for CannotDerive<'ctx> {
    type Node = ItemId;
    type Extra = (&'ctx BindgenContext, DeriveTrait);
    type Output = HashMap<ItemId, CanDerive>;

    /// Build the analysis: an empty result map plus the reversed dependency
    /// edges used to requeue items whose answer may change.
    fn new(
        (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait),
    ) -> CannotDerive<'ctx> {
        let can_derive = HashMap::default();
        let dependencies = generate_dependencies(ctx, consider_edge_default);
        CannotDerive {
            ctx,
            derive_trait,
            can_derive,
            dependencies,
        }
    }

    /// Seed the worklist with every allowlisted item plus everything
    /// transitively reachable from one.
    fn initial_worklist(&self) -> Vec<ItemId> {
        // The transitive closure of all allowlisted items, including explicitly
        // blocklisted items.
        self.ctx
            .allowlisted_items()
            .iter()
            .copied()
            .flat_map(|i| {
                let mut reachable = vec![i];
                i.trace(
                    self.ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect()
    }

    /// One constraint step for `id`; returns whether its stored result moved.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {id:?}");
        if let Some(CanDerive::No) = self.can_derive.get(&id) {
            // `No` is the top of the ordering: the answer cannot get worse,
            // so there is nothing left to do.
            trace!(" already know it cannot derive {}", self.derive_trait);
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let can_derive = match item.as_type() {
            Some(ty) => {
                let mut can_derive = self.constrain_type(item, ty);
                if let CanDerive::Yes = can_derive {
                    let is_reached_limit =
                        |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT;
                    if !self.derive_trait.can_derive_large_array(self.ctx) &&
                        ty.layout(self.ctx).is_some_and(is_reached_limit)
                    {
                        // We have to be conservative: the struct *could* have enough
                        // padding that we emit an array that is longer than
                        // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations
                        // into the IR and computed them before this analysis, then we could
                        // be precise rather than conservative here.
                        can_derive = CanDerive::Manually;
                    }
                }
                can_derive
            }
            // Non-type items: join over all default-relevant edges.
            None => self.constrain_join(item, consider_edge_default),
        };
        self.insert(id, can_derive)
    }

    /// Requeue every item whose answer may depend on `id`.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<CannotDerive<'ctx>> for HashMap<ItemId, CanDerive> {
    /// Extract the finished result map. `insert` never stores
    /// `CanDerive::Yes`; verify that invariant before handing the map out.
    fn from(analysis: CannotDerive<'ctx>) -> Self {
        extra_assert!(!analysis
            .can_derive
            .values()
            .any(|&v| v == CanDerive::Yes));
        analysis.can_derive
    }
}
/// Convert a `HashMap<ItemId, CanDerive>` into a `HashSet<ItemId>`.
///
/// Elements that are not `CanDerive::Yes` are kept in the set, so that it
/// represents all items that cannot derive.
pub(crate) fn as_cannot_derive_set(
can_derive: HashMap<ItemId, CanDerive>,
) -> HashSet<ItemId> {
can_derive
.into_iter()
.filter_map(|(k, v)| if v == CanDerive::Yes { None } else { Some(k) })
.collect()
}

View File

@@ -0,0 +1,175 @@
//! Determining which types have destructors
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::{CompKind, Field, FieldMethods};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has a destructor or not
///
/// We use the monotone function `has destructor`, defined as follows:
///
/// * If T is a type alias, a templated alias, or an indirection to another type,
///   T has a destructor if the type T refers to has a destructor.
/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it,
///   or if it's a struct, T has a destructor if any of its base members has a destructor,
///   or if any of its fields have a destructor.
/// * If T is an instantiation of an abstract template definition, T has
///   a destructor if its template definition has a destructor,
///   or if any of the template arguments has a destructor.
/// * If T is the type of a field, that field has a destructor if it's not a bitfield,
///   and if T has a destructor.
#[derive(Debug, Clone)]
pub(crate) struct HasDestructorAnalysis<'ctx> {
    // The context this analysis runs over; resolves ids and item info.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set definitely has a destructor.
    have_destructor: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `have_destructor` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has a destructor or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasDestructorAnalysis<'_> {
    /// Edge filter: only these edge kinds can propagate "has a destructor".
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type has a
        // destructor or not.
        match kind {
            EdgeKind::TypeReference |
            EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::TemplateArgument |
            EdgeKind::TemplateDeclaration => true,
            _ => false,
        }
    }

    /// Mark `id` as having a destructor; panics if inserted twice, since
    /// `constrain` bails out early for already-known ids.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        let newly_added = self.have_destructor.insert(id);
        assert!(
            newly_added,
            "We shouldn't try and insert {id:?} twice because if it was \
             already in the set, `constrain` should have exited early."
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;

    /// Build the analysis: an empty result set plus the reversed dependency
    /// edges used to requeue items whose answer may change.
    fn new(ctx: &'ctx BindgenContext) -> Self {
        let have_destructor = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasDestructorAnalysis {
            ctx,
            have_destructor,
            dependencies,
        }
    }

    /// Seed the worklist with all allowlisted items.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }

    /// One constraint step for `id`; returns whether the result set changed.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        if self.have_destructor.contains(&id) {
            // We've already computed that this type has a destructor and that can't
            // change.
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        // Only type items can have destructors; everything else is `Same`.
        let ty = match item.as_type() {
            None => return ConstrainResult::Same,
            Some(ty) => ty,
        };
        match *ty.kind() {
            // Aliases/refs inherit the referred-to type's destructor status.
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::ResolvedTypeRef(t) => {
                if self.have_destructor.contains(&t.into()) {
                    self.insert(id)
                } else {
                    ConstrainResult::Same
                }
            }
            TypeKind::Comp(ref info) => {
                if info.has_own_destructor() {
                    return self.insert(id);
                }
                match info.kind() {
                    CompKind::Union => ConstrainResult::Same,
                    CompKind::Struct => {
                        // A struct has a destructor if any base or any
                        // non-bitfield field does.
                        let base_or_field_destructor =
                            info.base_members().iter().any(|base| {
                                self.have_destructor.contains(&base.ty.into())
                            }) || info.fields().iter().any(
                                |field| match *field {
                                    Field::DataMember(ref data) => self
                                        .have_destructor
                                        .contains(&data.ty().into()),
                                    Field::Bitfields(_) => false,
                                },
                            );
                        if base_or_field_destructor {
                            self.insert(id)
                        } else {
                            ConstrainResult::Same
                        }
                    }
                }
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                // Either the template definition or any argument having a
                // destructor gives the instantiation one.
                let definition_or_arg_destructor = self
                    .have_destructor
                    .contains(&inst.template_definition().into()) ||
                    inst.template_arguments().iter().any(|arg| {
                        self.have_destructor.contains(&arg.into())
                    });
                if definition_or_arg_destructor {
                    self.insert(id)
                } else {
                    ConstrainResult::Same
                }
            }
            _ => ConstrainResult::Same,
        }
    }

    /// Requeue every item whose answer may depend on `id`.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasDestructorAnalysis<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self {
analysis.have_destructor
}
}

248
vendor/bindgen/ir/analysis/has_float.rs vendored Normal file
View File

@@ -0,0 +1,248 @@
//! Determining which types have float.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has float or not.
///
/// We use the monotone constraint function `has_float`,
/// defined as follows:
///
/// * If T is float or complex float, T trivially has.
/// * If T is a type alias, a templated alias or an indirection to another type,
///   it has float if the type T refers to has.
/// * If T is a compound type, it has float if any of its base members or fields
///   has.
/// * If T is an instantiation of an abstract template definition, T has
///   float if any of the template arguments or template definition
///   has.
#[derive(Debug, Clone)]
pub(crate) struct HasFloat<'ctx> {
    // The context this analysis runs over; resolves ids and item info.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set has float.
    has_float: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `has_float` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has float or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasFloat<'_> {
    /// Edge filter: only these edge kinds can propagate "has float".
    fn consider_edge(kind: EdgeKind) -> bool {
        matches!(
            kind,
            EdgeKind::BaseMember |
                EdgeKind::Field |
                EdgeKind::TypeReference |
                EdgeKind::VarType |
                EdgeKind::TemplateArgument |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TemplateParameterDefinition
        )
    }

    /// Record `id` in the result set; panics if inserted twice, since
    /// `constrain` bails out early for already-known ids.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        trace!("inserting {id:?} into the has_float set");
        let newly_added = self.has_float.insert(id);
        assert!(
            newly_added,
            "We shouldn't try and insert {id:?} twice because if it was \
             already in the set, `constrain` should have exited early."
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasFloat<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;

    /// Build the analysis: an empty result set plus the reversed dependency
    /// edges used to requeue items whose answer may change.
    fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> {
        let has_float = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasFloat {
            ctx,
            has_float,
            dependencies,
        }
    }

    /// Seed the worklist with all allowlisted items.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }

    /// One constraint step for `id`; returns whether the result set changed.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {id:?}");
        if self.has_float.contains(&id) {
            // Membership is monotone: once in the set, always in the set.
            trace!(" already know it do not have float");
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        // Only type items can contain floats; everything else is `Same`.
        let Some(ty) = item.as_type() else {
            trace!(" not a type; ignoring");
            return ConstrainResult::Same;
        };
        match *ty.kind() {
            // Simple kinds that can never contain a float.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::TypeParam |
            TypeKind::Opaque |
            TypeKind::Pointer(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => {
                trace!(" simple type that do not have float");
                ConstrainResult::Same
            }
            TypeKind::Float(..) | TypeKind::Complex(..) => {
                trace!(" float type has float");
                self.insert(id)
            }
            TypeKind::Array(t, _) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        " Array with type T that has float also has float"
                    );
                    return self.insert(id);
                }
                trace!(" Array with type T that do not have float also do not have float");
                ConstrainResult::Same
            }
            TypeKind::Vector(t, _) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        " Vector with type T that has float also has float"
                    );
                    return self.insert(id);
                }
                trace!(" Vector with type T that do not have float also do not have float");
                ConstrainResult::Same
            }
            // Aliases/refs inherit the referred-to type's answer.
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        " aliases and type refs to T which have float \
                        also have float"
                    );
                    self.insert(id)
                } else {
                    // Fixed: this message previously read "floaarrayt", a
                    // corruption of "float".
                    trace!(" aliases and type refs to T which do not have float \
                        also do not have float");
                    ConstrainResult::Same
                }
            }
            TypeKind::Comp(ref info) => {
                // A compound has float if any base or any field (including
                // any bitfield) does.
                let bases_have = info
                    .base_members()
                    .iter()
                    .any(|base| self.has_float.contains(&base.ty.into()));
                if bases_have {
                    trace!(" bases have float, so we also have");
                    return self.insert(id);
                }
                let fields_have = info.fields().iter().any(|f| match *f {
                    Field::DataMember(ref data) => {
                        self.has_float.contains(&data.ty().into())
                    }
                    Field::Bitfields(ref bfu) => bfu
                        .bitfields()
                        .iter()
                        .any(|b| self.has_float.contains(&b.ty().into())),
                });
                if fields_have {
                    trace!(" fields have float, so we also have");
                    return self.insert(id);
                }
                trace!(" comp doesn't have float");
                ConstrainResult::Same
            }
            TypeKind::TemplateInstantiation(ref template) => {
                // Either any argument or the definition having float gives
                // the instantiation float.
                let args_have = template
                    .template_arguments()
                    .iter()
                    .any(|arg| self.has_float.contains(&arg.into()));
                if args_have {
                    trace!(
                        " template args have float, so \
                        instantiation also has float"
                    );
                    return self.insert(id);
                }
                let def_has = self
                    .has_float
                    .contains(&template.template_definition().into());
                if def_has {
                    trace!(
                        " template definition has float, so \
                        instantiation also has"
                    );
                    return self.insert(id);
                }
                trace!(" template instantiation do not have float");
                ConstrainResult::Same
            }
        }
    }

    /// Requeue every item whose answer may depend on `id`.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasFloat<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasFloat<'ctx>) -> Self {
analysis.has_float
}
}

View File

@@ -0,0 +1,242 @@
//! Determining which types have type parameters in arrays.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has array or not.
///
/// We use the monotone constraint function `has_type_parameter_in_array`,
/// defined as follows:
///
/// * If T is Array type with type parameter, T trivially has.
/// * If T is a type alias, a templated alias or an indirection to another type,
///   it has type parameter in array if the type T refers to has.
/// * If T is a compound type, it has array if any of its base members or fields
///   has type parameter in array.
/// * If T is an instantiation of an abstract template definition, T has
///   type parameter in array if any of the template arguments or template definition
///   has.
#[derive(Debug, Clone)]
pub(crate) struct HasTypeParameterInArray<'ctx> {
    // The context this analysis runs over; resolves ids and item info.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set has array.
    has_type_parameter_in_array: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `has_type_parameter_in_array` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has array or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasTypeParameterInArray<'_> {
    /// Edge filter: only these edge kinds can propagate "has a type
    /// parameter in array".
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type has type parameter
        // in array or not.
        matches!(
            kind,
            EdgeKind::BaseMember |
                EdgeKind::Field |
                EdgeKind::TypeReference |
                EdgeKind::VarType |
                EdgeKind::TemplateArgument |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TemplateParameterDefinition
        )
    }

    /// Record `id` in the result set; panics if inserted twice, since
    /// `constrain` bails out early for already-known ids.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        trace!("inserting {id:?} into the has_type_parameter_in_array set");
        let newly_added = self.has_type_parameter_in_array.insert(id);
        assert!(
            newly_added,
            "We shouldn't try and insert {id:?} twice because if it was \
             already in the set, `constrain` should have exited early."
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;

    fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> {
        let has_type_parameter_in_array = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasTypeParameterInArray {
            ctx,
            has_type_parameter_in_array,
            dependencies,
        }
    }

    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }

    /// Compute whether `id` (transitively) contains an array whose element
    /// type is a named template type parameter.
    ///
    /// NOTE: several trace messages here previously asserted the opposite of
    /// the branch they were in; they have been corrected to describe what the
    /// code actually determined.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {id:?}");
        // Membership is monotone: once an item is in the set it never leaves,
        // so there is nothing more to learn about it.
        if self.has_type_parameter_in_array.contains(&id) {
            trace!("    already know it has a type parameter in an array");
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let Some(ty) = item.as_type() else {
            trace!("    not a type; ignoring");
            return ConstrainResult::Same;
        };
        match *ty.kind() {
            // Handle the simple cases. These can never contain an array of a
            // named type parameter.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Vector(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::TypeParam |
            TypeKind::Opaque |
            TypeKind::Pointer(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => {
                trace!("    simple type that cannot have a type parameter in an array");
                ConstrainResult::Same
            }
            // An array has the property iff its canonical element type is
            // itself a named type parameter.
            TypeKind::Array(t, _) => {
                let inner_ty =
                    self.ctx.resolve_type(t).canonical_type(self.ctx);
                if let TypeKind::TypeParam = *inner_ty.kind() {
                    trace!("    array of a named type parameter");
                    self.insert(id)
                } else {
                    trace!(
                        "    array element is not a named type parameter"
                    );
                    ConstrainResult::Same
                }
            }
            // Aliases and indirections inherit the property from the type
            // they refer to.
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                if self.has_type_parameter_in_array.contains(&t.into()) {
                    trace!(
                        "    aliases and type refs to T which have array \
                         also have array"
                    );
                    self.insert(id)
                } else {
                    trace!(
                        "    aliases and type refs to T which do not have array \
                         also do not have array"
                    );
                    ConstrainResult::Same
                }
            }
            // A compound type has the property if any base or any data-member
            // field has it. Bitfields can never be arrays.
            TypeKind::Comp(ref info) => {
                let bases_have = info.base_members().iter().any(|base| {
                    self.has_type_parameter_in_array.contains(&base.ty.into())
                });
                if bases_have {
                    trace!("    bases have array, so we also have");
                    return self.insert(id);
                }
                let fields_have = info.fields().iter().any(|f| match *f {
                    Field::DataMember(ref data) => self
                        .has_type_parameter_in_array
                        .contains(&data.ty().into()),
                    Field::Bitfields(..) => false,
                });
                if fields_have {
                    trace!("    fields have array, so we also have");
                    return self.insert(id);
                }
                trace!("    comp doesn't have array");
                ConstrainResult::Same
            }
            // A template instantiation has the property if any of its
            // arguments or its definition has it.
            TypeKind::TemplateInstantiation(ref template) => {
                let args_have =
                    template.template_arguments().iter().any(|arg| {
                        self.has_type_parameter_in_array.contains(&arg.into())
                    });
                if args_have {
                    trace!(
                        "    template args have array, so \
                         instantiation also has array"
                    );
                    return self.insert(id);
                }
                let def_has = self
                    .has_type_parameter_in_array
                    .contains(&template.template_definition().into());
                if def_has {
                    trace!(
                        "    template definition has array, so \
                         instantiation also has"
                    );
                    return self.insert(id);
                }
                trace!("    template instantiation does not have array");
                ConstrainResult::Same
            }
        }
    }

    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasTypeParameterInArray<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self {
analysis.has_type_parameter_in_array
}
}

235
vendor/bindgen/ir/analysis/has_vtable.rs vendored Normal file
View File

@@ -0,0 +1,235 @@
//! Determining which types have a vtable.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::cmp;
use std::ops;
/// The result of the `HasVtableAnalysis` for an individual item.
///
/// The variants form a chain lattice. They are declared in ascending order so
/// the derived `Ord` implementation agrees with the lattice, letting `join`
/// be implemented as `max`: `No < SelfHasVtable < BaseHasVtable`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub(crate) enum HasVtableResult {
    /// The item does not have a vtable pointer.
    #[default]
    No,
    /// The item has a vtable and the actual vtable pointer is within this item.
    SelfHasVtable,
    /// The item has a vtable, but the actual vtable pointer is in a base
    /// member.
    BaseHasVtable,
}
impl HasVtableResult {
    /// Take the least upper bound of `self` and `rhs`.
    pub(crate) fn join(self, rhs: Self) -> Self {
        // The derived `Ord` follows the lattice order, so the maximum of the
        // two values is exactly their join.
        if rhs > self {
            rhs
        } else {
            self
        }
    }
}

impl ops::BitOr for HasVtableResult {
    type Output = Self;

    /// `|` is shorthand for `join`.
    fn bitor(self, rhs: HasVtableResult) -> Self::Output {
        self.join(rhs)
    }
}

impl ops::BitOrAssign for HasVtableResult {
    /// `|=` joins `rhs` into `self` in place.
    fn bitor_assign(&mut self, rhs: HasVtableResult) {
        *self = self.join(rhs);
    }
}
/// An analysis that finds, for each IR item, whether it has a vtable or not.
///
/// We use the monotone function `has vtable`, defined as follows:
///
/// * If T is a type alias, a templated alias, an indirection to another type,
///   or a reference to a type, T has a vtable if the type T refers to has one.
/// * If T is a compound type, T has a vtable if we saw a virtual function when
///   parsing it or if any of its base members has a vtable.
/// * If T is an instantiation of an abstract template definition, T has a
///   vtable if the template definition has one.
#[derive(Debug, Clone)]
pub(crate) struct HasVtableAnalysis<'ctx> {
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set definitely has a vtable; absence of an entry means "no vtable".
    have_vtable: HashMap<ItemId, HasVtableResult>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `have_vtable` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has a vtable or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasVtableAnalysis<'_> {
    /// Which IR edges can propagate "has a vtable" from one item to another.
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type has a
        // vtable or not.
        matches!(
            kind,
            EdgeKind::TypeReference |
                EdgeKind::BaseMember |
                EdgeKind::TemplateDeclaration
        )
    }

    /// Record `result` for `id`, reporting whether the stored lattice value
    /// actually grew.
    fn insert<Id: Into<ItemId>>(
        &mut self,
        id: Id,
        result: HasVtableResult,
    ) -> ConstrainResult {
        // `No` is the implicit default for missing entries; storing it would
        // waste space and can never change the analysis's answer.
        if result == HasVtableResult::No {
            return ConstrainResult::Same;
        }
        let id = id.into();
        match self.have_vtable.entry(id) {
            // Nothing recorded yet: any non-`No` result is new information.
            Entry::Vacant(slot) => {
                slot.insert(result);
                ConstrainResult::Changed
            }
            // Already recorded: only a strictly greater lattice value counts
            // as progress.
            Entry::Occupied(mut slot) => {
                if result > *slot.get() {
                    slot.insert(result);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
        }
    }

    /// Copy the result already computed for `from` (if any) over to `to`.
    fn forward<Id1, Id2>(&mut self, from: Id1, to: Id2) -> ConstrainResult
    where
        Id1: Into<ItemId>,
        Id2: Into<ItemId>,
    {
        let from = from.into();
        let to = to.into();
        if let Some(&r) = self.have_vtable.get(&from) {
            self.insert(to, r)
        } else {
            ConstrainResult::Same
        }
    }
}
impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<ItemId, HasVtableResult>;

    fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> {
        let have_vtable = HashMap::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasVtableAnalysis {
            ctx,
            have_vtable,
            dependencies,
        }
    }

    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }

    /// Compute whether `id` has a vtable of its own or inherits one from a
    /// base member.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain {id:?}");
        let item = self.ctx.resolve_item(id);
        let ty = match item.as_type() {
            // Non-type items (functions, vars, modules, ...) never have a
            // vtable.
            None => return ConstrainResult::Same,
            Some(ty) => ty,
        };
        // TODO #851: figure out a way to handle deriving from template type parameters.
        match *ty.kind() {
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::Reference(t) => {
                trace!(
                    "    aliases and references forward to their inner type"
                );
                self.forward(t, id)
            }
            TypeKind::Comp(ref info) => {
                trace!("    comp considers its own methods and bases");
                let mut result = HasVtableResult::No;
                if info.has_own_virtual_method() {
                    trace!("    comp has its own virtual method");
                    result |= HasVtableResult::SelfHasVtable;
                }
                // Only log the claim for a base once membership is actually
                // confirmed; logging before the check would emit the message
                // for every base, vtable or not.
                let bases_has_vtable = info.base_members().iter().any(|base| {
                    let base_has =
                        self.have_vtable.contains_key(&base.ty.into());
                    if base_has {
                        trace!("    comp has a base with a vtable: {base:?}");
                    }
                    base_has
                });
                if bases_has_vtable {
                    result |= HasVtableResult::BaseHasVtable;
                }
                self.insert(id, result)
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                self.forward(inst.template_definition(), id)
            }
            _ => ConstrainResult::Same,
        }
    }

    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasVtableAnalysis<'ctx>> for HashMap<ItemId, HasVtableResult> {
    /// Finish the analysis and hand back the computed map.
    fn from(analysis: HasVtableAnalysis<'ctx>) -> Self {
        // We let the lack of an entry mean "No" to save space, so no explicit
        // `No` entry should ever survive to the output.
        extra_assert!(!analysis
            .have_vtable
            .values()
            .any(|v| *v == HasVtableResult::No));
        analysis.have_vtable
    }
}
/// A convenience trait for the things for which we might wonder if they have a
/// vtable during codegen.
///
/// This is not for _computing_ whether the thing has a vtable, it is for
/// looking up the results of the `HasVtableAnalysis`'s computations for a
/// specific thing.
pub(crate) trait HasVtable {
    /// Return `true` if this thing has a vtable, `false` otherwise.
    fn has_vtable(&self, ctx: &BindgenContext) -> bool;

    /// Return `true` if this thing has an actual vtable pointer in itself, as
    /// opposed to transitively in a base member.
    fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool;
}

395
vendor/bindgen/ir/analysis/mod.rs vendored Normal file
View File

@@ -0,0 +1,395 @@
//! Fix-point analyses on the IR using the "monotone framework".
//!
//! A lattice is a set with a partial ordering between elements, where there is
//! a single least upper bound and a single greatest lower bound for every
//! subset. We are dealing with finite lattices, which means that it has a
//! finite number of elements, and it follows that there exists a single top and
//! a single bottom member of the lattice. For example, the power set of a
//! finite set forms a finite lattice where partial ordering is defined by set
//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite
//! lattice constructed from the set {0,1,2}:
//!
//! ```text
//! .----- Top = {0,1,2} -----.
//! / | \
//! / | \
//! / | \
//! {0,1} -------. {0,2} .--------- {1,2}
//! | \ / \ / |
//! | / \ |
//! | / \ / \ |
//! {0} --------' {1} `---------- {2}
//! \ | /
//! \ | /
//! \ | /
//! `------ Bottom = {} ------'
//! ```
//!
//! A monotone function `f` is a function where if `x <= y`, then it holds that
//! `f(x) <= f(y)`. It should be clear that running a monotone function to a
//! fix-point on a finite lattice will always terminate: `f` can only "move"
//! along the lattice in a single direction, and therefore can only either find
//! a fix-point in the middle of the lattice or continue to the top or bottom
//! depending if it is ascending or descending the lattice respectively.
//!
//! For a deeper introduction to the general form of this kind of analysis, see
//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa].
//!
//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf
// Re-export individual analyses.
mod template_params;
pub(crate) use self::template_params::UsedTemplateParameters;
mod derive;
pub use self::derive::DeriveTrait;
pub(crate) use self::derive::{as_cannot_derive_set, CannotDerive};
mod has_vtable;
pub(crate) use self::has_vtable::{
HasVtable, HasVtableAnalysis, HasVtableResult,
};
mod has_destructor;
pub(crate) use self::has_destructor::HasDestructorAnalysis;
mod has_type_param_in_array;
pub(crate) use self::has_type_param_in_array::HasTypeParameterInArray;
mod has_float;
pub(crate) use self::has_float::HasFloat;
mod sizedness;
pub(crate) use self::sizedness::{
Sizedness, SizednessAnalysis, SizednessResult,
};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::HashMap;
use std::fmt;
use std::ops;
/// An analysis in the monotone framework.
///
/// Implementors of this trait must maintain the following two invariants:
///
/// 1. The concrete data must be a member of a finite-height lattice.
/// 2. The concrete `constrain` method must be monotone: that is,
/// if `x <= y`, then `constrain(x) <= constrain(y)`.
///
/// If these invariants do not hold, iteration to a fix-point might never
/// complete.
///
/// For a simple example analysis, see the `ReachableFrom` type in the `tests`
/// module below.
pub(crate) trait MonotoneFramework: Sized + fmt::Debug {
/// The type of node in our dependency graph.
///
/// This is just generic (and not `ItemId`) so that we can easily unit test
/// without constructing real `Item`s and their `ItemId`s.
type Node: Copy;
/// Any extra data that is needed during computation.
///
/// Again, this is just generic (and not `&BindgenContext`) so that we can
/// easily unit test without constructing real `BindgenContext`s full of
/// real `Item`s and real `ItemId`s.
type Extra: Sized;
/// The final output of this analysis. Once we have reached a fix-point, we
/// convert `self` into this type, and return it as the final result of the
/// analysis.
type Output: From<Self> + fmt::Debug;
/// Construct a new instance of this analysis.
fn new(extra: Self::Extra) -> Self;
/// Get the initial set of nodes from which to start the analysis. Unless
/// you are sure of some domain-specific knowledge, this should be the
/// complete set of nodes.
fn initial_worklist(&self) -> Vec<Self::Node>;
/// Update the analysis for the given node.
///
/// If this results in changing our internal state (ie, we discovered that
/// we have not reached a fix-point and iteration should continue), return
/// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`.
/// When `constrain` returns `ConstrainResult::Same` for all nodes in the
/// set, we have reached a fix-point and the analysis is complete.
fn constrain(&mut self, node: Self::Node) -> ConstrainResult;
/// For each node `d` that depends on the given `node`'s current answer when
/// running `constrain(d)`, call `f(d)`. This informs us which new nodes to
/// queue up in the worklist when `constrain(node)` reports updated
/// information.
fn each_depending_on<F>(&self, node: Self::Node, f: F)
where
F: FnMut(Self::Node);
}
/// Whether an analysis's `constrain` function modified the incremental results
/// or not.
///
/// Returned by `MonotoneFramework::constrain`; the driver loop in `analyze`
/// keeps iterating until every node reports `Same`.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub(crate) enum ConstrainResult {
    /// The incremental results were updated, and the fix-point computation
    /// should continue.
    Changed,
    /// The incremental results were not updated.
    #[default]
    Same,
}
impl ops::BitOr for ConstrainResult {
    type Output = Self;

    /// `Changed` is absorbing: the result is `Same` only when both operands
    /// are `Same`.
    fn bitor(self, rhs: ConstrainResult) -> Self::Output {
        match (self, rhs) {
            (ConstrainResult::Same, ConstrainResult::Same) => {
                ConstrainResult::Same
            }
            _ => ConstrainResult::Changed,
        }
    }
}

impl ops::BitOrAssign for ConstrainResult {
    /// Fold `rhs` into `self`, preserving any `Changed` already seen.
    fn bitor_assign(&mut self, rhs: ConstrainResult) {
        *self = *self | rhs;
    }
}
/// Run an analysis in the monotone framework.
///
/// Seeds a worklist from `initial_worklist`, repeatedly constrains nodes, and
/// re-enqueues dependents whenever a node's answer changes. Once the worklist
/// drains we have reached a fix-point, and the analysis is converted into its
/// output type.
pub(crate) fn analyze<Analysis>(extra: Analysis::Extra) -> Analysis::Output
where
    Analysis: MonotoneFramework,
{
    let mut analysis = Analysis::new(extra);
    let mut worklist = analysis.initial_worklist();
    while let Some(node) = worklist.pop() {
        match analysis.constrain(node) {
            ConstrainResult::Changed => {
                // Anything that depends on this node's answer may now be
                // stale; schedule it for another visit.
                analysis.each_depending_on(node, |dependent| {
                    worklist.push(dependent);
                });
            }
            ConstrainResult::Same => {}
        }
    }
    analysis.into()
}
/// Generate the reversed dependency map used by an analysis.
///
/// For every allowlisted item, records which other allowlisted items must be
/// re-`constrain`ed when that item's answer changes. Only edges for which
/// `consider_edge` returns `true` are included.
pub(crate) fn generate_dependencies<F>(
    ctx: &BindgenContext,
    consider_edge: F,
) -> HashMap<ItemId, Vec<ItemId>>
where
    F: Fn(EdgeKind) -> bool,
{
    let mut dependencies = HashMap::default();
    for &item in ctx.allowlisted_items() {
        // Make sure every item gets an entry, even if nothing depends on it.
        dependencies.entry(item).or_default();
        // We reverse our natural IR graph edges to find dependencies
        // between nodes.
        item.trace(
            ctx,
            &mut |sub_item: ItemId, edge_kind| {
                if ctx.allowlisted_items().contains(&sub_item) &&
                    consider_edge(edge_kind)
                {
                    dependencies
                        .entry(sub_item)
                        .or_default()
                        .push(item);
                }
            },
            &(),
        );
    }
    dependencies
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::HashSet;

    // Here we find the set of nodes that are reachable from any given
    // node. This is a lattice mapping nodes to subsets of all nodes. Our join
    // function is set union.
    //
    // This is our test graph:
    //
    //     +---+                    +---+
    //     |   |                    |   |
    //     | 1 |               .----| 2 |
    //     |   |               |    |   |
    //     +---+               |    +---+
    //       |                 |      ^
    //       |                 |      |
    //       |      +---+      '------'
    //       '----->|   |
    //              | 3 |
    //      .-------|   |------.
    //      |       +---+      |
    //      |         ^        |
    //      v         |        v
    //    +---+       |      +---+     +---+
    //    |   |       |      |   |     |   |
    //    | 4 |       |      | 5 |---->| 6 |
    //    |   |       |      |   |     |   |
    //    +---+       |      +---+     +---+
    //      |         |        |         |
    //      |         |        |         v
    //      |       +---+      |       +---+
    //      |       |   |      |       |   |
    //      '------>| 7 |<-----'       | 8 |
    //              |   |              |   |
    //              +---+              +---+
    //
    // And here is the mapping from a node to the set of nodes that are
    // reachable from it within the test graph:
    //
    // 1: {3,4,5,6,7,8}
    // 2: {2}
    // 3: {3,4,5,6,7,8}
    // 4: {3,4,5,6,7,8}
    // 5: {3,4,5,6,7,8}
    // 6: {8}
    // 7: {3,4,5,6,7,8}
    // 8: {}

    #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
    struct Node(usize);

    // Adjacency-list representation: node -> nodes it has edges to.
    #[derive(Clone, Debug, Default, PartialEq, Eq)]
    struct Graph(HashMap<Node, Vec<Node>>);

    impl Graph {
        fn make_test_graph() -> Graph {
            let mut g = Graph::default();
            g.0.insert(Node(1), vec![Node(3)]);
            g.0.insert(Node(2), vec![Node(2)]);
            g.0.insert(Node(3), vec![Node(4), Node(5)]);
            g.0.insert(Node(4), vec![Node(7)]);
            g.0.insert(Node(5), vec![Node(6), Node(7)]);
            g.0.insert(Node(6), vec![Node(8)]);
            g.0.insert(Node(7), vec![Node(3)]);
            g.0.insert(Node(8), vec![]);
            g
        }

        fn reverse(&self) -> Graph {
            let mut reversed = Graph::default();
            for (node, edges) in &self.0 {
                // Every node appears in the reversed graph, even without
                // incoming edges.
                reversed.0.entry(*node).or_default();
                for referent in edges {
                    reversed.0.entry(*referent).or_default().push(*node);
                }
            }
            reversed
        }
    }

    #[derive(Clone, Debug, PartialEq, Eq)]
    struct ReachableFrom<'a> {
        reachable: HashMap<Node, HashSet<Node>>,
        graph: &'a Graph,
        reversed: Graph,
    }

    impl<'a> MonotoneFramework for ReachableFrom<'a> {
        type Node = Node;
        type Extra = &'a Graph;
        type Output = HashMap<Node, HashSet<Node>>;

        fn new(graph: &'a Graph) -> Self {
            let reversed = graph.reverse();
            ReachableFrom {
                reachable: Default::default(),
                graph,
                reversed,
            }
        }

        fn initial_worklist(&self) -> Vec<Node> {
            self.graph.0.keys().copied().collect()
        }

        fn constrain(&mut self, node: Node) -> ConstrainResult {
            // The set of nodes reachable from a node `x` is
            //
            //     reachable(x) = s_0 U s_1 U ... U reachable(s_0) U reachable(s_1) U ...
            //
            // where there exist edges from `x` to each of `s_0, s_1, ...`.
            //
            // Yes, what follows is a **terribly** inefficient set union
            // implementation. Don't copy this code outside of this test!
            let original_size = self.reachable.entry(node).or_default().len();
            for sub_node in &self.graph.0[&node] {
                self.reachable.get_mut(&node).unwrap().insert(*sub_node);
                let sub_reachable =
                    self.reachable.entry(*sub_node).or_default().clone();
                for transitive in sub_reachable {
                    self.reachable.get_mut(&node).unwrap().insert(transitive);
                }
            }
            // Growth of the set is the only possible change; compare sizes to
            // detect it.
            let new_size = self.reachable[&node].len();
            if original_size == new_size {
                ConstrainResult::Same
            } else {
                ConstrainResult::Changed
            }
        }

        fn each_depending_on<F>(&self, node: Node, mut f: F)
        where
            F: FnMut(Node),
        {
            for dep in &self.reversed.0[&node] {
                f(*dep);
            }
        }
    }

    impl<'a> From<ReachableFrom<'a>> for HashMap<Node, HashSet<Node>> {
        fn from(reachable: ReachableFrom<'a>) -> Self {
            reachable.reachable
        }
    }

    #[test]
    fn monotone() {
        let g = Graph::make_test_graph();
        let reachable = analyze::<ReachableFrom>(&g);
        println!("reachable = {reachable:#?}");

        // Helper to build the expected `HashSet<Node>` from plain integers.
        fn nodes<A>(nodes: A) -> HashSet<Node>
        where
            A: AsRef<[usize]>,
        {
            nodes.as_ref().iter().copied().map(Node).collect()
        }

        let mut expected = HashMap::default();
        expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(2), nodes([2]));
        expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(6), nodes([8]));
        expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(8), nodes([]));
        println!("expected = {expected:#?}");
        assert_eq!(reachable, expected);
    }
}

353
vendor/bindgen/ir/analysis/sizedness.rs vendored Normal file
View File

@@ -0,0 +1,353 @@
//! Determining the sizedness of types (as base classes and otherwise).
use super::{
generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework,
};
use crate::ir::context::{BindgenContext, TypeId};
use crate::ir::item::IsOpaque;
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::{cmp, ops};
/// The result of the `Sizedness` analysis for an individual item.
///
/// This is a chain lattice of the form:
///
/// ```ignore
///                   NonZeroSized
///                        |
///                DependsOnTypeParam
///                        |
///                    ZeroSized
/// ```
///
/// We initially assume that all types are `ZeroSized` and then update our
/// understanding as we learn more about each type.
///
/// Note: the variants are declared in ascending lattice order so that the
/// derived `Ord` implementation agrees with the lattice and `join` can be
/// implemented as `max`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub(crate) enum SizednessResult {
    /// The type is zero-sized.
    ///
    /// This means that if it is a C++ type, and is not being used as a base
    /// member, then we must add an `_address` byte to enforce the
    /// unique-address-per-distinct-object-instance rule.
    #[default]
    ZeroSized,
    /// Whether this type is zero-sized or not depends on whether a type
    /// parameter is zero-sized or not.
    ///
    /// For example, given these definitions:
    ///
    /// ```c++
    /// template<class T>
    /// class Flongo : public T {};
    ///
    /// class Empty {};
    ///
    /// class NonEmpty { int x; };
    /// ```
    ///
    /// Then `Flongo<Empty>` is zero-sized, and needs an `_address` byte
    /// inserted, while `Flongo<NonEmpty>` is *not* zero-sized, and should *not*
    /// have an `_address` byte inserted.
    ///
    /// We don't properly handle this situation correctly right now:
    /// <https://github.com/rust-lang/rust-bindgen/issues/586>
    DependsOnTypeParam,
    /// Has some size that is known to be greater than zero. That doesn't mean
    /// it has a static size, but it is not zero sized for sure. In other words,
    /// it might contain an incomplete array or some other dynamically sized
    /// type.
    NonZeroSized,
}
impl SizednessResult {
    /// Take the least upper bound of `self` and `rhs`.
    pub(crate) fn join(self, rhs: Self) -> Self {
        // The derived `Ord` follows the lattice order, so the maximum of the
        // two values is exactly their join.
        if rhs > self {
            rhs
        } else {
            self
        }
    }
}

impl ops::BitOr for SizednessResult {
    type Output = Self;

    /// `|` is shorthand for `join`.
    fn bitor(self, rhs: SizednessResult) -> Self::Output {
        self.join(rhs)
    }
}

impl ops::BitOrAssign for SizednessResult {
    /// `|=` joins `rhs` into `self` in place.
    fn bitor_assign(&mut self, rhs: SizednessResult) {
        *self = self.join(rhs);
    }
}
/// An analysis that computes the sizedness of all types.
///
/// * For types with known sizes -- for example pointers, scalars, etc... --
///   they are assigned `NonZeroSized`.
///
/// * For compound structure types with one or more fields, they are assigned
///   `NonZeroSized`.
///
/// * For compound structure types without any fields, the results of the bases
///   are `join`ed.
///
/// * For type parameters, `DependsOnTypeParam` is assigned.
#[derive(Debug)]
pub(crate) struct SizednessAnalysis<'ctx> {
    ctx: &'ctx BindgenContext,
    // Reversed dependency graph: when a key's sizedness result changes, each
    // `TypeId` in the value needs to be re-`constrain`ed.
    dependencies: HashMap<TypeId, Vec<TypeId>>,
    // Incremental results of the analysis. Missing entries are implicitly
    // considered `ZeroSized`.
    sized: HashMap<TypeId, SizednessResult>,
}
impl SizednessAnalysis<'_> {
    /// Which IR edges can affect whether a type is zero-sized.
    fn consider_edge(kind: EdgeKind) -> bool {
        matches!(
            kind,
            EdgeKind::TemplateArgument |
                EdgeKind::TemplateParameterDefinition |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TypeReference |
                EdgeKind::BaseMember |
                EdgeKind::Field
        )
    }

    /// Insert an incremental result, and return whether this updated our
    /// knowledge of types and we should continue the analysis.
    fn insert(
        &mut self,
        id: TypeId,
        result: SizednessResult,
    ) -> ConstrainResult {
        trace!("inserting {result:?} for {id:?}");
        // `ZeroSized` is the implicit default for missing entries; storing it
        // would be pure noise.
        if result == SizednessResult::ZeroSized {
            return ConstrainResult::Same;
        }
        match self.sized.entry(id) {
            // Nothing recorded yet: any non-default result is progress.
            Entry::Vacant(slot) => {
                slot.insert(result);
                ConstrainResult::Changed
            }
            // Already recorded: only a strictly greater lattice value counts
            // as progress.
            Entry::Occupied(mut slot) => {
                if result > *slot.get() {
                    slot.insert(result);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
        }
    }

    /// Copy the result already computed for `from` (if any) over to `to`.
    fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult {
        if let Some(&r) = self.sized.get(&from) {
            self.insert(to, r)
        } else {
            ConstrainResult::Same
        }
    }
}
impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> {
    type Node = TypeId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<TypeId, SizednessResult>;
    fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> {
        // Sizedness is a property of types only, so narrow the generic
        // `ItemId` dependency map down to `TypeId` keys and values, dropping
        // any non-type entries.
        let dependencies = generate_dependencies(ctx, Self::consider_edge)
            .into_iter()
            .filter_map(|(id, sub_ids)| {
                id.as_type_id(ctx).map(|id| {
                    (
                        id,
                        sub_ids
                            .into_iter()
                            .filter_map(|s| s.as_type_id(ctx))
                            .collect::<Vec<_>>(),
                    )
                })
            })
            .collect();
        let sized = HashMap::default();
        SizednessAnalysis {
            ctx,
            dependencies,
            sized,
        }
    }
    fn initial_worklist(&self) -> Vec<TypeId> {
        // Only type items participate in this analysis.
        self.ctx
            .allowlisted_items()
            .iter()
            .filter_map(|id| id.as_type_id(self.ctx))
            .collect()
    }
    fn constrain(&mut self, id: TypeId) -> ConstrainResult {
        trace!("constrain {id:?}");
        // `NonZeroSized` is the lattice top: once there, nothing can change
        // the answer, so bail out early.
        if let Some(SizednessResult::NonZeroSized) = self.sized.get(&id) {
            trace!("    already know it is not zero-sized");
            return ConstrainResult::Same;
        }
        if id.has_vtable_ptr(self.ctx) {
            trace!("    has an explicit vtable pointer, therefore is not zero-sized");
            return self.insert(id, SizednessResult::NonZeroSized);
        }
        let ty = self.ctx.resolve_type(id);
        // Opaque types are decided purely by their known layout, if any.
        if id.is_opaque(self.ctx, &()) {
            trace!("    type is opaque; checking layout...");
            let result =
                ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| {
                    if l.size == 0 {
                        trace!("    ...layout has size == 0");
                        SizednessResult::ZeroSized
                    } else {
                        trace!("    ...layout has size > 0");
                        SizednessResult::NonZeroSized
                    }
                });
            return self.insert(id, result);
        }
        match *ty.kind() {
            TypeKind::Void => {
                trace!("    void is zero-sized");
                self.insert(id, SizednessResult::ZeroSized)
            }
            TypeKind::TypeParam => {
                trace!(
                    "    type params sizedness depends on what they're \
                     instantiated as"
                );
                self.insert(id, SizednessResult::DependsOnTypeParam)
            }
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::NullPtr |
            TypeKind::ObjCId |
            TypeKind::ObjCSel |
            TypeKind::Pointer(..) => {
                trace!("    {:?} is known not to be zero-sized", ty.kind());
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::ObjCInterface(..) => {
                trace!("    obj-c interfaces always have at least the `isa` pointer");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) |
            TypeKind::ResolvedTypeRef(t) => {
                trace!("    aliases and type refs forward to their inner type");
                self.forward(t, id)
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                trace!(
                    "    template instantiations are zero-sized if their \
                     definition is zero-sized"
                );
                self.forward(inst.template_definition(), id)
            }
            TypeKind::Array(_, 0) => {
                trace!("    arrays of zero elements are zero-sized");
                self.insert(id, SizednessResult::ZeroSized)
            }
            TypeKind::Array(..) => {
                trace!("    arrays of > 0 elements are not zero-sized");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::Vector(..) => {
                trace!("    vectors are not zero-sized");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::Comp(ref info) => {
                trace!("    comp considers its own fields and bases");
                // Any field at all makes the type non-zero-sized; otherwise
                // join the (current) results of all base members.
                if !info.fields().is_empty() {
                    return self.insert(id, SizednessResult::NonZeroSized);
                }
                let result = info
                    .base_members()
                    .iter()
                    .filter_map(|base| self.sized.get(&base.ty))
                    .fold(SizednessResult::ZeroSized, |a, b| a.join(*b));
                self.insert(id, result)
            }
            TypeKind::Opaque => {
                unreachable!("covered by the .is_opaque() check above")
            }
            TypeKind::UnresolvedTypeRef(..) => {
                unreachable!("Should have been resolved after parsing!");
            }
        }
    }
    fn each_depending_on<F>(&self, id: TypeId, mut f: F)
    where
        F: FnMut(TypeId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for ty in edges {
                trace!("enqueue {ty:?} into worklist");
                f(*ty);
            }
        }
    }
}
impl<'ctx> From<SizednessAnalysis<'ctx>> for HashMap<TypeId, SizednessResult> {
    /// Finish the analysis and hand back the computed map.
    fn from(analysis: SizednessAnalysis<'ctx>) -> Self {
        // We let the lack of an entry mean "ZeroSized" to save space, so no
        // explicit `ZeroSized` entry should ever survive to the output.
        extra_assert!(!analysis
            .sized
            .values()
            .any(|v| *v == SizednessResult::ZeroSized));
        analysis.sized
    }
}
/// A convenience trait for querying whether some type or ID is sized.
///
/// This is not for _computing_ whether the thing is sized, it is for looking up
/// the results of the `Sizedness` analysis's computations for a specific thing.
pub(crate) trait Sizedness {
    /// Get the sizedness of this type.
    fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult;

    /// Is the sizedness for this type `SizednessResult::ZeroSized`?
    fn is_zero_sized(&self, ctx: &BindgenContext) -> bool {
        self.sizedness(ctx) == SizednessResult::ZeroSized
    }
}

View File

@@ -0,0 +1,601 @@
//! Discover which template type parameters are actually used.
//!
//! ### Why do we care?
//!
//! C++ allows ignoring template parameters, while Rust does not. Usually we can
//! blindly stick a `PhantomData<T>` inside a generic Rust struct to make up for
//! this. That doesn't work for templated type aliases, however:
//!
//! ```C++
//! template <typename T>
//! using Fml = int;
//! ```
//!
//! If we generate the naive Rust code for this alias, we get:
//!
//! ```ignore
//! pub(crate) type Fml<T> = ::std::os::raw::int;
//! ```
//!
//! And this is rejected by `rustc` due to the unused type parameter.
//!
//! (Aside: in these simple cases, `libclang` will often just give us the
//! aliased type directly, and we will never even know we were dealing with
//! aliases, let alone templated aliases. It's the more convoluted scenarios
//! where we get to have some fun...)
//!
//! For such problematic template aliases, we could generate a tuple whose
//! second member is a `PhantomData<T>`. Or, if we wanted to go the extra mile,
//! we could even generate some smarter wrapper that implements `Deref`,
//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased
//! type. However, this is still lackluster:
//!
//! 1. Even with a billion conversion-trait implementations, using the generated
//! bindings is rather un-ergonomic.
//! 2. With either of these solutions, we need to keep track of which aliases
//! we've transformed like this in order to generate correct uses of the
//! wrapped type.
//!
//! Given that we have to properly track which template parameters ended up used
//! for (2), we might as well leverage that information to make ergonomic
//! bindings that don't contain any unused type parameters at all, and
//! completely avoid the pain of (1).
//!
//! ### How do we determine which template parameters are used?
//!
//! Determining which template parameters are actually used is a trickier
//! problem than it might seem at a glance. On the one hand, trivial uses are
//! easy to detect:
//!
//! ```C++
//! template <typename T>
//! class Foo {
//! T trivial_use_of_t;
//! };
//! ```
//!
//! It gets harder when determining if one template parameter is used depends on
//! determining if another template parameter is used. In this example, whether
//! `U` is used depends on whether `T` is used.
//!
//! ```C++
//! template <typename T>
//! class DoesntUseT {
//! int x;
//! };
//!
//! template <typename U>
//! class Fml {
//! DoesntUseT<U> lololol;
//! };
//! ```
//!
//! We can express the set of used template parameters as a constraint solving
//! problem (where the set of template parameters used by a given IR item is the
//! union of its sub-item's used template parameters) and iterate to a
//! fixed-point.
//!
//! We use the `ir::analysis::MonotoneFramework` infrastructure for this
//! fix-point analysis, where our lattice is the mapping from each IR item to
//! the powerset of the template parameters that appear in the input C++ header,
//! our join function is set union. The set of template parameters appearing in
//! the program is finite, as is the number of IR items. We start at our
//! lattice's bottom element: every item mapping to an empty set of template
//! parameters. Our analysis only adds members to each item's set of used
//! template parameters, never removes them, so it is monotone. Because our
//! lattice is finite and our constraint function is monotone, iteration to a
//! fix-point will terminate.
//!
//! See `src/ir/analysis.rs` for more.
use super::{ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::item::{Item, ItemSet};
use crate::ir::template::{TemplateInstantiation, TemplateParameters};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item its set of template parameters that
/// it uses.
///
/// We use the monotone constraint function `template_param_usage`, defined as
/// follows:
///
/// * If `T` is a named template type parameter, it trivially uses itself:
///
/// ```ignore
/// template_param_usage(T) = { T }
/// ```
///
/// * If `inst` is a template instantiation, `inst.args` are the template
/// instantiation's template arguments, `inst.def` is the template definition
/// being instantiated, and `inst.def.params` is the template definition's
/// template parameters, then the instantiation's usage is the union of each
/// of its arguments' usages *if* the corresponding template parameter is in
/// turn used by the template definition:
///
/// ```ignore
/// template_param_usage(inst) = union(
///     template_param_usage(inst.args[i])
///     for i in 0..length(inst.args.length)
///     if inst.def.params[i] in template_param_usage(inst.def)
/// )
/// ```
///
/// * Finally, for all other IR item kinds, we use our lattice's `join`
/// operation: set union with each successor of the given item's template
/// parameter usage:
///
/// ```ignore
/// template_param_usage(v) =
///     union(template_param_usage(w) for w in successors(v))
/// ```
///
/// Note that we ignore certain edges in the graph, such as edges from a
/// template declaration to its template parameters' definitions for this
/// analysis. If we didn't, then we would mistakenly determine that every
/// template parameter is always used.
///
/// The final wrinkle is handling of blocklisted types. Normally, we say that
/// the set of allowlisted items is the transitive closure of items explicitly
/// called out for allowlisting, *without* any items explicitly called out as
/// blocklisted. However, for the purposes of this analysis's correctness, we
/// simplify and run the analysis on the full transitive closure of
/// allowlisted items. We do, however, treat instantiations of blocklisted items
/// specially; see `constrain_instantiation_of_blocklisted_template` and its
/// documentation for details.
#[derive(Debug, Clone)]
pub(crate) struct UsedTemplateParameters<'ctx> {
    // The context used to resolve items and types during the analysis.
    ctx: &'ctx BindgenContext,
    // The Option is only there for temporary moves out of the hash map. See the
    // comments in `UsedTemplateParameters::constrain` below.
    used: HashMap<ItemId, Option<ItemSet>>,
    dependencies: HashMap<ItemId, Vec<ItemId>>,
    // The set of allowlisted items, without any blocklisted items reachable
    // from the allowlisted items which would otherwise be considered
    // allowlisted as well.
    allowlisted_items: HashSet<ItemId>,
}
impl UsedTemplateParameters<'_> {
    /// Should template parameter usage propagate across an edge of this kind?
    ///
    /// Every `EdgeKind` variant is enumerated explicitly (no `_` arm) so that
    /// authors of new edge kinds are forced to decide whether this analysis
    /// must consider them.
    fn consider_edge(kind: EdgeKind) -> bool {
        match kind {
            // Usage flows backwards along each of these edge kinds: if the
            // referent uses a template parameter, then so does the item that
            // owns the edge.
            EdgeKind::TemplateArgument |
            EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::Constructor |
            EdgeKind::Destructor |
            EdgeKind::VarType |
            EdgeKind::FunctionReturn |
            EdgeKind::FunctionParameter |
            EdgeKind::TypeReference => true,

            // Whether an inner var or type uses a template parameter is
            // orthogonal to whether we use it. See
            // template-param-usage-{6,11}.hpp.
            EdgeKind::InnerVar | EdgeKind::InnerType => false,

            // We can't emit machine code for new monomorphizations of class
            // templates' methods (and don't detect explicit instantiations),
            // so template parameters that are only used by methods must be
            // ignored. This doesn't apply to a function type's return or
            // parameter types, however, because of type aliases of function
            // pointers that use template parameters, eg
            // tests/headers/struct_with_typedef_template_arg.hpp
            EdgeKind::Method => false,

            // Considering these edges would mistakenly mark every template
            // parameter as always used.
            EdgeKind::TemplateDeclaration |
            EdgeKind::TemplateParameterDefinition => false,

            // Since we have to be careful about which edges we consider for
            // this analysis to be correct, generic edges are ignored.
            EdgeKind::Generic => false,
        }
    }

    /// Temporarily move `this_id`'s usage set out of `self.used` so it can be
    /// mutated while the other entries are read. The caller *must* put it
    /// back when done, restoring the all-entries-are-`Some` invariant.
    fn take_this_id_usage_set<Id: Into<ItemId>>(
        &mut self,
        this_id: Id,
    ) -> ItemSet {
        let id = this_id.into();
        let slot = self.used.get_mut(&id).expect(
            "Should have a set of used template params for every item \
             id",
        );
        slot.take().expect(
            "Should maintain the invariant that all used template param \
             sets are `Some` upon entry of `constrain`",
        )
    }

    /// We say that blocklisted items use all of their template parameters. The
    /// blocklisted type is most likely implemented explicitly by the user,
    /// since it won't be in the generated bindings, and we don't know exactly
    /// what they'll do with template parameters, but we can push the issue
    /// down the line to them.
    fn constrain_instantiation_of_blocklisted_template(
        &self,
        this_id: ItemId,
        used_by_this_id: &mut ItemSet,
        instantiation: &TemplateInstantiation,
    ) {
        trace!(
            "    instantiation of blocklisted template, uses all template \
             arguments"
        );

        for arg in instantiation.template_arguments() {
            // Follow type refs and aliases to the argument's canonical item.
            let arg_id = arg
                .into_resolver()
                .through_type_refs()
                .through_type_aliases()
                .resolve(self.ctx)
                .id();

            // Skip self-references: this_id's own entry is currently `None`
            // (it was taken out by `constrain`), and a union with ourself
            // would be a no-op anyway.
            if arg_id == this_id {
                continue;
            }

            let arg_usage = self
                .used
                .get(&arg_id)
                .expect("Should have a used entry for the template arg")
                .as_ref()
                .expect(
                    "Because a != this_id, and all used template \
                     param sets other than this_id's are `Some`, \
                     a's used template param set should be `Some`",
                )
                .iter();
            used_by_this_id.extend(arg_usage);
        }
    }

    /// A template instantiation's concrete template argument is only used if
    /// the template definition uses the corresponding template parameter.
    fn constrain_instantiation(
        &self,
        this_id: ItemId,
        used_by_this_id: &mut ItemSet,
        instantiation: &TemplateInstantiation,
    ) {
        trace!("    template instantiation");

        let definition = instantiation.template_definition();
        debug_assert!(this_id != definition);

        let decl = self.ctx.resolve_type(definition);
        let params = decl.self_template_params(self.ctx);
        let args = instantiation.template_arguments();

        let used_by_def = self.used
            .get(&definition.into())
            .expect("Should have a used entry for instantiation's template definition")
            .as_ref()
            .expect("And it should be Some because only this_id's set is None, and an \
                     instantiation's template definition should never be the \
                     instantiation itself");

        for (arg, param) in args.iter().zip(params.iter()) {
            trace!(
                "      instantiation's argument {arg:?} is used if definition's \
                 parameter {param:?} is used",
            );

            // Arguments whose corresponding parameter is unused by the
            // definition contribute nothing.
            if !used_by_def.contains(&param.into()) {
                continue;
            }
            trace!("      param is used by template definition");

            let arg_id = arg
                .into_resolver()
                .through_type_refs()
                .through_type_aliases()
                .resolve(self.ctx)
                .id();

            // As above, this_id's own entry is currently taken out of the
            // map, so self-edges must be skipped.
            if arg_id == this_id {
                continue;
            }

            let used_by_arg = self
                .used
                .get(&arg_id)
                .expect("Should have a used entry for the template arg")
                .as_ref()
                .expect(
                    "Because arg != this_id, and all used template \
                     param sets other than this_id's are `Some`, \
                     arg's used template param set should be \
                     `Some`",
                )
                .iter();
            used_by_this_id.extend(used_by_arg);
        }
    }

    /// The join operation on our lattice: the set union of all of this ID's
    /// successors.
    fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) {
        trace!("    other item: join with successors' usage");

        item.trace(
            self.ctx,
            &mut |sub_id, edge_kind| {
                // A union with ourself is a no-op, and some edge kinds are
                // deliberately excluded from this analysis.
                if sub_id == item.id() || !Self::consider_edge(edge_kind) {
                    return;
                }

                let sub_usage = self
                    .used
                    .get(&sub_id)
                    .expect("Should have a used set for the sub_id successor")
                    .as_ref()
                    .expect(
                        "Because sub_id != id, and all used template \
                         param sets other than id's are `Some`, \
                         sub_id's used template param set should be \
                         `Some`",
                    )
                    .iter();

                trace!(
                    "      union with {sub_id:?}'s usage: {:?}",
                    sub_usage.clone().collect::<Vec<_>>()
                );
                used_by_this_id.extend(sub_usage);
            },
            &(),
        );
    }
}
impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> {
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<ItemId, ItemSet>;

    /// Build the analysis's initial state: an empty usage set for every item
    /// in the transitive closure of the allowlisted items, plus the reversed
    /// dependency edges used to re-enqueue work when a set grows.
    fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> {
        let mut used = HashMap::default();
        let mut dependencies = HashMap::default();
        let allowlisted_items: HashSet<_> =
            ctx.allowlisted_items().iter().copied().collect();

        // The full transitive closure of the allowlisted items, including any
        // blocklisted items reachable from them (see the module docs for why
        // the analysis runs over this larger set).
        let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items
            .iter()
            .copied()
            .flat_map(|i| {
                let mut reachable = vec![i];
                i.trace(
                    ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect();

        for item in allowlisted_and_blocklisted_items {
            dependencies.entry(item).or_insert_with(Vec::new);
            used.entry(item).or_insert_with(|| Some(ItemSet::new()));

            {
                // We reverse our natural IR graph edges to find dependencies
                // between nodes.
                item.trace(
                    ctx,
                    &mut |sub_item: ItemId, _| {
                        used.entry(sub_item)
                            .or_insert_with(|| Some(ItemSet::new()));
                        dependencies
                            .entry(sub_item)
                            .or_insert_with(Vec::new)
                            .push(item);
                    },
                    &(),
                );
            }

            // Additionally, whether a template instantiation's template
            // arguments are used depends on whether the template declaration's
            // generic template parameters are used.
            let item_kind =
                ctx.resolve_item(item).as_type().map(|ty| ty.kind());
            if let Some(TypeKind::TemplateInstantiation(inst)) = item_kind {
                let decl = ctx.resolve_type(inst.template_definition());
                let args = inst.template_arguments();

                // Although template definitions should always have
                // template parameters, there is a single exception:
                // opaque templates, which have none. `zip` below simply
                // stops at the shorter of the two sequences.
                let params = decl.self_template_params(ctx);

                for (arg, param) in args.iter().zip(params.iter()) {
                    let arg = arg
                        .into_resolver()
                        .through_type_aliases()
                        .through_type_refs()
                        .resolve(ctx)
                        .id();

                    let param = param
                        .into_resolver()
                        .through_type_aliases()
                        .through_type_refs()
                        .resolve(ctx)
                        .id();

                    used.entry(arg).or_insert_with(|| Some(ItemSet::new()));
                    used.entry(param).or_insert_with(|| Some(ItemSet::new()));

                    // The argument's usage is derived from the parameter's
                    // (see `constrain_instantiation`), so the argument
                    // depends on the parameter.
                    dependencies
                        .entry(arg)
                        .or_insert_with(Vec::new)
                        .push(param);
                }
            }
        }

        if cfg!(feature = "__testing_only_extra_assertions") {
            // Invariant: The `used` map has an entry for every allowlisted
            // item, as well as all explicitly blocklisted items that are
            // reachable from allowlisted items.
            //
            // Invariant: the `dependencies` map has an entry for every
            // allowlisted item.
            //
            // (This is so that every item we call `constrain` on is guaranteed
            // to have a set of template parameters, and we can allow
            // blocklisted templates to use all of their parameters).
            for item in &allowlisted_items {
                extra_assert!(used.contains_key(item));
                extra_assert!(dependencies.contains_key(item));
                item.trace(
                    ctx,
                    &mut |sub_item, _| {
                        extra_assert!(used.contains_key(&sub_item));
                        extra_assert!(dependencies.contains_key(&sub_item));
                    },
                    &(),
                );
            }
        }

        UsedTemplateParameters {
            ctx,
            used,
            dependencies,
            allowlisted_items,
        }
    }

    /// Seed the worklist with every node the analysis tracks: the transitive
    /// closure of all allowlisted items, including explicitly blocklisted
    /// items reachable from them.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx
            .allowlisted_items()
            .iter()
            .copied()
            .flat_map(|i| {
                let mut reachable = vec![i];
                i.trace(
                    self.ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect()
    }

    /// The monotone constraint function: grow `id`'s usage set based on its
    /// kind, and report whether anything changed so the framework knows
    /// whether to re-enqueue dependents.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        // Invariant: all hash map entries' values are `Some` upon entering and
        // exiting this method.
        extra_assert!(self.used.values().all(|v| v.is_some()));

        // Take the set for this ID out of the hash map while we mutate it based
        // on other hash map entries. We *must* put it back into the hash map at
        // the end of this method. This allows us to side-step HashMap's lack of
        // an analog to slice::split_at_mut.
        let mut used_by_this_id = self.take_this_id_usage_set(id);
        trace!("constrain {id:?}");
        trace!("  initially, used set is {used_by_this_id:?}");

        let original_len = used_by_this_id.len();

        let item = self.ctx.resolve_item(id);
        let ty_kind = item.as_type().map(|ty| ty.kind());
        match ty_kind {
            // Named template type parameters trivially use themselves.
            Some(&TypeKind::TypeParam) => {
                trace!("    named type, trivially uses itself");
                used_by_this_id.insert(id);
            }
            // Template instantiations only use their template arguments if the
            // template definition uses the corresponding template parameter.
            Some(TypeKind::TemplateInstantiation(inst)) => {
                if self
                    .allowlisted_items
                    .contains(&inst.template_definition().into())
                {
                    self.constrain_instantiation(
                        id,
                        &mut used_by_this_id,
                        inst,
                    );
                } else {
                    self.constrain_instantiation_of_blocklisted_template(
                        id,
                        &mut used_by_this_id,
                        inst,
                    );
                }
            }
            // Otherwise, add the union of each of its referent item's template
            // parameter usage.
            _ => self.constrain_join(&mut used_by_this_id, item),
        }

        trace!("  finally, used set is {used_by_this_id:?}");

        let new_len = used_by_this_id.len();
        assert!(
            new_len >= original_len,
            "This is the property that ensures this function is monotone -- \
             if it doesn't hold, the analysis might never terminate!"
        );

        // Put the set back in the hash map and restore our invariant.
        debug_assert!(self.used[&id].is_none());
        self.used.insert(id, Some(used_by_this_id));
        extra_assert!(self.used.values().all(|v| v.is_some()));

        if new_len == original_len {
            ConstrainResult::Same
        } else {
            ConstrainResult::Changed
        }
    }

    /// Invoke `f` on every node whose usage set may need to grow when
    /// `item`'s set grows (the reversed edges computed in `new`).
    fn each_depending_on<F>(&self, item: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&item) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<UsedTemplateParameters<'ctx>> for HashMap<ItemId, ItemSet> {
fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self {
used_templ_params
.used
.into_iter()
.map(|(k, v)| (k, v.unwrap()))
.collect()
}
}