Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/bindgen/.cargo-checksum.json vendored Normal file

File diff suppressed because one or more lines are too long

485
vendor/bindgen/Cargo.lock generated vendored Normal file
View File

@@ -0,0 +1,485 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "annotate-snippets"
version = "0.11.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24e35ed54e5ea7997c14ed4c70ba043478db1112e98263b3b035907aa197d991"
dependencies = [
"anstyle",
"unicode-width",
]
[[package]]
name = "anstyle"
version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "bindgen"
version = "0.72.1"
dependencies = [
"annotate-snippets",
"bitflags 2.2.1",
"cexpr",
"clang-sys",
"clap",
"clap_complete",
"itertools",
"log",
"prettyplease",
"proc-macro2",
"quote",
"regex",
"rustc-hash",
"shlex",
"syn 2.0.90",
]
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24a6904aef64d73cf10ab17ebace7befb918b82164785cb89907993be7f83813"
[[package]]
name = "cexpr"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
dependencies = [
"nom",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clang-sys"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
dependencies = [
"glob",
"libc",
"libloading",
]
[[package]]
name = "clap"
version = "4.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f13b9c79b5d1dd500d20ef541215a6423c75829ef43117e1b4d17fd8af0b5d76"
dependencies = [
"bitflags 1.3.2",
"clap_derive",
"clap_lex",
"is-terminal",
"once_cell",
"strsim",
"termcolor",
]
[[package]]
name = "clap_complete"
version = "4.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "01c22dcfb410883764b29953103d9ef7bb8fe21b3fa1158bc99986c2067294bd"
dependencies = [
"clap",
]
[[package]]
name = "clap_derive"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "684a277d672e91966334af371f1a7b5833f9aa00b07c84e92fbce95e00208ce8"
dependencies = [
"heck",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "clap_lex"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "783fe232adfca04f90f56201b26d79682d4cd2625e0bc7290b95123afe558ade"
dependencies = [
"os_str_bytes",
]
[[package]]
name = "either"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
[[package]]
name = "glob"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "hermit-abi"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
[[package]]
name = "is-terminal"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
dependencies = [
"hermit-abi",
"libc",
"windows-sys",
]
[[package]]
name = "itertools"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
[[package]]
name = "libc"
version = "0.2.167"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc"
[[package]]
name = "libloading"
version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [
"cfg-if",
"windows-targets",
]
[[package]]
name = "log"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "minimal-lexical"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
[[package]]
name = "nom"
version = "7.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
dependencies = [
"memchr",
"minimal-lexical",
]
[[package]]
name = "once_cell"
version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "os_str_bytes"
version = "6.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee"
[[package]]
name = "prettyplease"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
dependencies = [
"proc-macro2",
"syn 2.0.90",
]
[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn 1.0.109",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2",
"quote",
"version_check",
]
[[package]]
name = "proc-macro2"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
dependencies = [
"proc-macro2",
]
[[package]]
name = "regex"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "rustc-hash"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497"
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "strsim"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "termcolor"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6"
dependencies = [
"winapi-util",
]
[[package]]
name = "unicode-ident"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "unicode-width"
version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

189
vendor/bindgen/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,189 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.70.0"
name = "bindgen"
version = "0.72.1"
authors = [
"Jyun-Yan You <jyyou.tw@gmail.com>",
"Emilio Cobos Álvarez <emilio@crisal.io>",
"Nick Fitzgerald <fitzgen@gmail.com>",
"The Servo project developers",
]
build = "build.rs"
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Automatically generates Rust FFI bindings to C and C++ libraries."
homepage = "https://rust-lang.github.io/rust-bindgen/"
documentation = "https://docs.rs/bindgen"
readme = "README.md"
keywords = [
"bindings",
"ffi",
"code-generation",
]
categories = [
"external-ffi-bindings",
"development-tools::ffi",
]
license = "BSD-3-Clause"
repository = "https://github.com/rust-lang/rust-bindgen"
[package.metadata.docs.rs]
features = ["experimental"]
[package.metadata.release]
pre-release-hook = [
"../node_modules/doctoc/doctoc.js",
"../CHANGELOG.md",
]
release = true
[[package.metadata.release.pre-release-replacements]]
file = "../CHANGELOG.md"
replace = """
# Unreleased
## Added
## Changed
## Removed
## Fixed
## Security
# {{version}} ({{date}})"""
search = "# Unreleased"
[features]
__cli = [
"dep:clap",
"dep:clap_complete",
]
__testing_only_extra_assertions = []
__testing_only_libclang_16 = []
__testing_only_libclang_9 = []
default = [
"logging",
"prettyplease",
"runtime",
]
experimental = ["dep:annotate-snippets"]
logging = ["dep:log"]
runtime = ["clang-sys/runtime"]
static = ["clang-sys/static"]
[lib]
name = "bindgen"
path = "lib.rs"
[dependencies.annotate-snippets]
version = "0.11.4"
optional = true
[dependencies.bitflags]
version = "2.2.1"
[dependencies.cexpr]
version = "0.6"
[dependencies.clang-sys]
version = "1"
features = ["clang_11_0"]
[dependencies.clap]
version = "4"
features = ["derive"]
optional = true
[dependencies.clap_complete]
version = "4"
optional = true
[dependencies.itertools]
version = ">=0.10,<0.14"
default-features = false
[dependencies.log]
version = "0.4"
optional = true
[dependencies.prettyplease]
version = "0.2.7"
features = ["verbatim"]
optional = true
[dependencies.proc-macro2]
version = "1.0.80"
[dependencies.quote]
version = "1"
default-features = false
[dependencies.regex]
version = "1.5.3"
features = [
"std",
"unicode-perl",
]
default-features = false
[dependencies.rustc-hash]
version = "2.1.0"
[dependencies.shlex]
version = "1"
[dependencies.syn]
version = "2.0"
features = [
"full",
"extra-traits",
"visit-mut",
]
[lints.clippy]
cast_possible_truncation = "allow"
cast_possible_wrap = "allow"
cast_precision_loss = "allow"
cast_sign_loss = "allow"
default_trait_access = "allow"
enum_glob_use = "allow"
ignored_unit_patterns = "allow"
implicit_hasher = "allow"
items_after_statements = "allow"
match_same_arms = "allow"
maybe_infinite_iter = "allow"
missing_errors_doc = "allow"
missing_panics_doc = "allow"
module_name_repetitions = "allow"
must_use_candidate = "allow"
redundant_closure_for_method_calls = "allow"
return_self_not_must_use = "allow"
similar_names = "allow"
struct_excessive_bools = "allow"
struct_field_names = "allow"
too_many_lines = "allow"
trivially_copy_pass_by_ref = "allow"
unnecessary_wraps = "allow"
unreadable_literal = "allow"
unused_self = "allow"
used_underscore_binding = "allow"
wildcard_imports = "allow"
[lints.clippy.pedantic]
level = "warn"
priority = -1
[lints.rust]
unused_qualifications = "warn"

29
vendor/bindgen/LICENSE vendored Normal file
View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2013, Jyun-Yan You
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

89
vendor/bindgen/README.md vendored Normal file
View File

@@ -0,0 +1,89 @@
[![crates.io](https://img.shields.io/crates/v/bindgen.svg)](https://crates.io/crates/bindgen)
[![docs.rs](https://docs.rs/bindgen/badge.svg)](https://docs.rs/bindgen/)
# `bindgen`
**`bindgen` automatically generates Rust FFI bindings to C (and some C++) libraries.**
For example, given the C header `doggo.h`:
```c
typedef struct Doggo {
int many;
char wow;
} Doggo;
void eleven_out_of_ten_majestic_af(Doggo* pupper);
```
`bindgen` produces Rust FFI code allowing you to call into the `doggo` library's
functions and use its types:
```rust
/* automatically generated by rust-bindgen 0.99.9 */
#[repr(C)]
pub struct Doggo {
pub many: ::std::os::raw::c_int,
pub wow: ::std::os::raw::c_char,
}
extern "C" {
pub fn eleven_out_of_ten_majestic_af(pupper: *mut Doggo);
}
```
## Users Guide
[📚 Read the `bindgen` users guide here! 📚](https://rust-lang.github.io/rust-bindgen)
## MSRV
The `bindgen` minimum supported Rust version is **1.70.0**.
The `bindgen-cli` minimum supported Rust version is **1.70.0**.
No MSRV bump policy has been established yet, so MSRV may increase in any release.
The MSRV is the minimum Rust version that can be used to *compile* each crate. However, `bindgen` and `bindgen-cli` can generate bindings that are compatible with Rust versions below the current MSRV.
Most of the time, the `bindgen-cli` crate will have a more recent MSRV than `bindgen` as crates such as `clap` require it.
## API Reference
[API reference documentation is on docs.rs](https://docs.rs/bindgen)
## Environment Variables
In addition to the [library API](https://docs.rs/bindgen) and [executable command-line API][bindgen-cmdline],
`bindgen` can be controlled through environment variables.
End-users should set these environment variables to modify `bindgen`'s behavior without modifying the source code of direct consumers of `bindgen`.
- `BINDGEN_EXTRA_CLANG_ARGS`: extra arguments to pass to `clang`
- Arguments are whitespace-separated
- Use shell-style quoting to pass through whitespace
- Examples:
- Specify alternate sysroot: `--sysroot=/path/to/sysroot`
- Add include search path with spaces: `-I"/path/with spaces"`
- `BINDGEN_EXTRA_CLANG_ARGS_<TARGET>`: similar to `BINDGEN_EXTRA_CLANG_ARGS`,
but used to set per-target arguments to pass to clang. Useful to set system include
directories in a target-specific way in cross-compilation environments with multiple targets.
Has precedence over `BINDGEN_EXTRA_CLANG_ARGS`.
Additionally, `bindgen` uses `libclang` to parse C and C++ header files.
To modify how `bindgen` searches for `libclang`, see the [`clang-sys` documentation][clang-sys-env].
For more details on how `bindgen` uses `libclang`, see the [`bindgen` users guide][bindgen-book-clang].
## Releases
We don't follow a specific release calendar, but if you need a release please
file an issue requesting that (ping `@emilio` for increased effectiveness).
## Contributing
[See `CONTRIBUTING.md` for hacking on `bindgen`!](./CONTRIBUTING.md)
[bindgen-cmdline]: https://rust-lang.github.io/rust-bindgen/command-line-usage.html
[clang-sys-env]: https://github.com/KyleMayes/clang-sys#environment-variables
[bindgen-book-clang]: https://rust-lang.github.io/rust-bindgen/requirements.html#clang

29
vendor/bindgen/build.rs vendored Normal file
View File

@@ -0,0 +1,29 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};
/// Cargo build script for `bindgen`.
///
/// Writes the build target triple into `$OUT_DIR/host-target.txt` so it can
/// be read back at compile time, and registers the environment variables
/// that influence libclang discovery so Cargo rebuilds when they change.
fn main() {
    let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
    // Read TARGET once; it is used both verbatim and underscore-mangled below.
    let target = env::var("TARGET").unwrap();

    // Record the target triple in OUT_DIR for later consumption.
    // `out_dir` is already a PathBuf, so join it directly (no Path::new wrap).
    let mut dst = File::create(out_dir.join("host-target.txt")).unwrap();
    dst.write_all(target.as_bytes()).unwrap();

    // On behalf of clang_sys, rebuild ourselves if important configuration
    // variables change, to ensure that bindings get rebuilt if the
    // underlying libclang changes.
    println!("cargo:rerun-if-env-changed=LLVM_CONFIG_PATH");
    println!("cargo:rerun-if-env-changed=LIBCLANG_PATH");
    println!("cargo:rerun-if-env-changed=LIBCLANG_STATIC_PATH");
    println!("cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS");
    // Both the raw triple and its underscore-mangled form are honored as
    // per-target variable suffixes.
    println!("cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{target}");
    println!(
        "cargo:rerun-if-env-changed=BINDGEN_EXTRA_CLANG_ARGS_{}",
        target.replace('-', "_")
    );
}

317
vendor/bindgen/callbacks.rs vendored Normal file
View File

@@ -0,0 +1,317 @@
//! A public API for more fine-grained customization of bindgen behavior.
pub use crate::ir::analysis::DeriveTrait;
pub use crate::ir::derive::CanDerive as ImplementsTrait;
pub use crate::ir::enum_ty::{EnumVariantCustomBehavior, EnumVariantValue};
pub use crate::ir::int::IntKind;
pub use cexpr::token::Kind as TokenKind;
pub use cexpr::token::Token;
use std::fmt;
/// An enum to allow ignoring parsing of macros.
///
/// Returned from [`ParseCallbacks::will_parse_macro`] to decide, per macro,
/// whether `bindgen` should process it at all.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
pub enum MacroParsingBehavior {
    /// Ignore the macro, generating no code for it, or anything that depends on
    /// it.
    Ignore,
    /// The default behavior bindgen would have otherwise.
    #[default]
    Default,
}
/// A trait to allow configuring different kinds of types in different
/// situations.
///
/// Every method has a no-op default implementation, so implementors only
/// override the hooks they care about.
pub trait ParseCallbacks: fmt::Debug {
    #[cfg(feature = "__cli")]
    #[doc(hidden)]
    fn cli_args(&self) -> Vec<String> {
        vec![]
    }

    /// This function will be run on every macro that is identified.
    fn will_parse_macro(&self, _name: &str) -> MacroParsingBehavior {
        MacroParsingBehavior::Default
    }

    /// This function will run for every extern variable and function. The returned value determines
    /// the name visible in the bindings.
    fn generated_name_override(
        &self,
        _item_info: ItemInfo<'_>,
    ) -> Option<String> {
        None
    }

    /// This function will run for every extern variable and function. The returned value determines
    /// the link name in the bindings.
    fn generated_link_name_override(
        &self,
        _item_info: ItemInfo<'_>,
    ) -> Option<String> {
        None
    }

    /// Modify the contents of a macro.
    fn modify_macro(&self, _name: &str, _tokens: &mut Vec<Token>) {}

    /// The integer kind an integer macro should have, given a name and the
    /// value of that macro, or `None` if you want the default to be chosen.
    fn int_macro(&self, _name: &str, _value: i64) -> Option<IntKind> {
        None
    }

    /// This will be run on every string macro. The callback cannot influence the further
    /// treatment of the macro, but may use the value to generate additional code or configuration.
    fn str_macro(&self, _name: &str, _value: &[u8]) {}

    /// This will be run on every function-like macro. The callback cannot
    /// influence the further treatment of the macro, but may use the value to
    /// generate additional code or configuration.
    ///
    /// The first parameter represents the name and argument list (including the
    /// parentheses) of the function-like macro. The second parameter represents
    /// the expansion of the macro as a sequence of tokens.
    fn func_macro(&self, _name: &str, _value: &[&[u8]]) {}

    /// This function should return whether, given an enum variant
    /// name, and value, this enum variant will forcibly be a constant.
    fn enum_variant_behavior(
        &self,
        _enum_name: Option<&str>,
        _original_variant_name: &str,
        _variant_value: EnumVariantValue,
    ) -> Option<EnumVariantCustomBehavior> {
        None
    }

    /// Allows renaming an enum variant, replacing `_original_variant_name`.
    fn enum_variant_name(
        &self,
        _enum_name: Option<&str>,
        _original_variant_name: &str,
        _variant_value: EnumVariantValue,
    ) -> Option<String> {
        None
    }

    /// Allows renaming an item, replacing `_item_info.name`.
    fn item_name(&self, _item_info: ItemInfo) -> Option<String> {
        None
    }

    /// This will be called on every header filename passed to [`Builder::header`](`crate::Builder::header`).
    fn header_file(&self, _filename: &str) {}

    /// This will be called on every file inclusion, with the full path of the included file.
    fn include_file(&self, _filename: &str) {}

    /// This will be called every time `bindgen` reads an environment variable, whether it has any
    /// content or not.
    fn read_env_var(&self, _key: &str) {}

    /// This will be called to determine whether a particular blocklisted type
    /// implements a trait or not. This will be used to implement traits on
    /// other types containing the blocklisted type.
    ///
    /// * `None`: use the default behavior
    /// * `Some(ImplementsTrait::Yes)`: `_name` implements `_derive_trait`
    /// * `Some(ImplementsTrait::Manually)`: any type including `_name` can't
    ///   derive `_derive_trait` but can implement it manually
    /// * `Some(ImplementsTrait::No)`: `_name` doesn't implement `_derive_trait`
    fn blocklisted_type_implements_trait(
        &self,
        _name: &str,
        _derive_trait: DeriveTrait,
    ) -> Option<ImplementsTrait> {
        None
    }

    /// Provide a list of custom derive attributes.
    ///
    /// If no additional attributes are wanted, this function should return an
    /// empty `Vec`.
    fn add_derives(&self, _info: &DeriveInfo<'_>) -> Vec<String> {
        vec![]
    }

    /// Provide a list of custom attributes.
    ///
    /// If no additional attributes are wanted, this function should return an
    /// empty `Vec`.
    fn add_attributes(&self, _info: &AttributeInfo<'_>) -> Vec<String> {
        vec![]
    }

    /// Process a source code comment.
    fn process_comment(&self, _comment: &str) -> Option<String> {
        None
    }

    /// Potentially override the visibility of a composite type field.
    ///
    /// Caution: This allows overriding standard C++ visibility inferred by
    /// `respect_cxx_access_specs`.
    fn field_visibility(
        &self,
        _info: FieldInfo<'_>,
    ) -> Option<crate::FieldVisibilityKind> {
        None
    }

    /// Process a function name that has exactly one `va_list` argument
    /// to be wrapped as a variadic function with the wrapped static function
    /// feature.
    ///
    /// The returned string is the new function name.
    #[cfg(feature = "experimental")]
    fn wrap_as_variadic_fn(&self, _name: &str) -> Option<String> {
        None
    }

    /// This will get called every time an item (currently struct, union, and alias) is found, with
    /// some information about it.
    fn new_item_found(&self, _id: DiscoveredItemId, _item: DiscoveredItem) {}

    // TODO: add a callback for ResolvedTypeRef
}
/// An identifier for a discovered item. Used to identify an aliased type (see [`DiscoveredItem::Alias`]).
#[derive(Ord, PartialOrd, PartialEq, Eq, Hash, Debug, Clone, Copy)]
pub struct DiscoveredItemId(usize);

impl DiscoveredItemId {
    /// Constructor.
    ///
    /// Made `const` so identifiers can also be built in constant contexts;
    /// otherwise identical to the previous non-const version, so all existing
    /// callers are unaffected.
    pub const fn new(value: usize) -> Self {
        Self(value)
    }
}
/// Struct passed to [`ParseCallbacks::new_item_found`] containing information about discovered
/// items (struct, union, and alias).
#[derive(Debug, Hash, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub enum DiscoveredItem {
    /// Represents a struct with its original name in C and its generated binding name
    Struct {
        /// The original name (learnt from C) of the structure.
        /// Can be None if the struct is anonymous.
        original_name: Option<String>,
        /// The name of the generated binding
        final_name: String,
    },
    /// Represents a union with its original name in C and its generated binding name
    Union {
        /// The original name (learnt from C) of the union.
        /// Can be None if the union is anonymous.
        original_name: Option<String>,
        /// The name of the generated binding
        final_name: String,
    },
    /// Represents an alias like a typedef
    /// ```c
    /// typedef struct MyStruct {
    ///     ...
    /// } StructAlias;
    /// ```
    /// Here, the name of the alias is `StructAlias` and it's an alias for `MyStruct`
    Alias {
        /// The name of the alias in C (`StructAlias`)
        alias_name: String,
        /// The identifier of the discovered type
        alias_for: DiscoveredItemId,
    },
    /// Represents an enum.
    Enum {
        /// The final name of the generated binding
        final_name: String,
    },
    /// A function or method.
    Function {
        /// The final name used.
        final_name: String,
    },
    /// A method.
    Method {
        /// The final name used.
        final_name: String,
        /// Type to which this method belongs.
        parent: DiscoveredItemId,
    }, // modules, etc.
}
/// Relevant information about a type to which new derive attributes will be added using
/// [`ParseCallbacks::add_derives`].
#[derive(Debug)]
#[non_exhaustive]
pub struct DeriveInfo<'a> {
    /// The name of the type.
    pub name: &'a str,
    /// The kind of the type (struct, enum, or union — see [`TypeKind`]).
    pub kind: TypeKind,
}
/// Relevant information about a type to which new attributes will be added using
/// [`ParseCallbacks::add_attributes`].
#[derive(Debug)]
#[non_exhaustive]
pub struct AttributeInfo<'a> {
    /// The name of the type.
    pub name: &'a str,
    /// The kind of the type (struct, enum, or union — see [`TypeKind`]).
    pub kind: TypeKind,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// The kind of the current type. Used by [`DeriveInfo`] and [`AttributeInfo`]
/// to describe the item attributes are being attached to.
pub enum TypeKind {
    /// The type is a Rust `struct`.
    Struct,
    /// The type is a Rust `enum`.
    Enum,
    /// The type is a Rust `union`.
    Union,
}
/// A struct providing information about the item being passed to [`ParseCallbacks::generated_name_override`].
///
/// Marked `#[non_exhaustive]` so fields may be added without a breaking change.
#[derive(Clone, Copy)]
#[non_exhaustive]
pub struct ItemInfo<'a> {
    /// The name of the item
    pub name: &'a str,
    /// The kind of item
    pub kind: ItemKind,
}
/// An enum indicating the kind of item for an `ItemInfo`.
#[derive(Clone, Copy)]
#[non_exhaustive]
pub enum ItemKind {
    /// A module
    Module,
    /// A type
    Type,
    /// A function
    Function,
    /// A variable
    Var,
}
/// Relevant information about a field for which visibility can be determined using
/// [`ParseCallbacks::field_visibility`].
#[derive(Debug)]
#[non_exhaustive]
pub struct FieldInfo<'a> {
    /// The name of the type.
    /// NOTE(review): presumably the composite type containing the field —
    /// confirm against the call sites that construct `FieldInfo`.
    pub type_name: &'a str,
    /// The name of the field.
    pub field_name: &'a str,
    /// The name of the type of the field.
    pub field_type_name: Option<&'a str>,
}

2448
vendor/bindgen/clang.rs vendored Normal file

File diff suppressed because it is too large Load Diff

112
vendor/bindgen/codegen/bitfield_unit.rs vendored Normal file
View File

@@ -0,0 +1,112 @@
/// A storage unit that packs one or more adjacent C bit-fields.
///
/// `Storage` is typically a fixed-size byte array matching the allocation
/// unit's layout.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
    storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
    /// Wraps raw backing storage in a bitfield unit.
    #[inline]
    pub const fn new(storage: Storage) -> Self {
        Self { storage }
    }
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
    Storage: AsRef<[u8]> + AsMut<[u8]>,
{
    /// Maps a unit-relative bit index to its position inside a byte,
    /// honoring the target's endianness.
    #[inline]
    fn bit_position(index: usize) -> usize {
        if cfg!(target_endian = "big") {
            7 - (index % 8)
        } else {
            index % 8
        }
    }
    /// Reads the bit at `index` within the unit.
    #[inline]
    pub fn get_bit(&self, index: usize) -> bool {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let byte = self.storage.as_ref()[index / 8];
        byte & (1u8 << Self::bit_position(index)) != 0
    }
    /// Writes the bit at `index` within the unit.
    #[inline]
    pub fn set_bit(&mut self, index: usize, val: bool) {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let mask = 1u8 << Self::bit_position(index);
        let byte = &mut self.storage.as_mut()[index / 8];
        if val {
            *byte |= mask;
        } else {
            *byte &= !mask;
        }
    }
    /// Reads a `bit_width`-wide field starting at `bit_offset`.
    ///
    /// Bit `i` of the returned value corresponds to field bit `i`, with
    /// big-endian targets reversing the mapping so the composed value is
    /// the same on either endianness.
    #[inline]
    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        let width = bit_width as usize;
        (0..width).fold(0u64, |acc, i| {
            if self.get_bit(bit_offset + i) {
                let dst = if cfg!(target_endian = "big") {
                    width - 1 - i
                } else {
                    i
                };
                acc | (1u64 << dst)
            } else {
                acc
            }
        })
    }
    /// Stores the low `bit_width` bits of `val` into the field starting at
    /// `bit_offset`, mirroring the mapping used by [`Self::get`].
    #[inline]
    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        let width = bit_width as usize;
        for i in 0..width {
            let bit_on = val & (1u64 << i) != 0;
            let dst = if cfg!(target_endian = "big") {
                width - 1 - i
            } else {
                i
            };
            self.set_bit(bit_offset + dst, bit_on);
        }
    }
}

View File

@@ -0,0 +1,191 @@
/// A storage unit that packs one or more adjacent C bit-fields.
///
/// This variant additionally exposes `raw_*` accessors that operate through
/// raw pointers, for use where a reference to the whole unit cannot be
/// formed.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
    storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
    /// Wraps raw backing storage in a bitfield unit.
    #[inline]
    pub const fn new(storage: Storage) -> Self {
        Self { storage }
    }
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
    Storage: AsRef<[u8]> + AsMut<[u8]>,
{
    /// Extracts the bit at `index` from `byte`, mapping the index through
    /// the target's endianness.
    #[inline]
    fn extract_bit(byte: u8, index: usize) -> bool {
        let bit_index = if cfg!(target_endian = "big") {
            7 - (index % 8)
        } else {
            index % 8
        };
        let mask = 1 << bit_index;
        byte & mask == mask
    }
    /// Reads the bit at `index` within the unit.
    #[inline]
    pub fn get_bit(&self, index: usize) -> bool {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let byte_index = index / 8;
        let byte = self.storage.as_ref()[byte_index];
        Self::extract_bit(byte, index)
    }
    /// Pointer-based version of [`Self::get_bit`].
    ///
    /// # Safety
    ///
    /// `this` must point to a valid, initialized `Self`, and `index / 8`
    /// must be within the bounds of `Storage`.
    #[inline]
    pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
        debug_assert!(index / 8 < core::mem::size_of::<Storage>());
        let byte_index = index / 8;
        // SAFETY (caller contract): `this` is valid and `byte_index` is in
        // bounds, so the byte read stays inside `storage`.
        let byte = unsafe { *(core::ptr::addr_of!((*this).storage) as *const u8)
            .offset(byte_index as isize) };
        Self::extract_bit(byte, index)
    }
    /// Returns `byte` with its bit at `index` set to `val`, mapping the
    /// index through the target's endianness.
    #[inline]
    fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
        let bit_index = if cfg!(target_endian = "big") {
            7 - (index % 8)
        } else {
            index % 8
        };
        let mask = 1 << bit_index;
        if val {
            byte | mask
        } else {
            byte & !mask
        }
    }
    /// Writes the bit at `index` within the unit.
    #[inline]
    pub fn set_bit(&mut self, index: usize, val: bool) {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let byte_index = index / 8;
        let byte = &mut self.storage.as_mut()[byte_index];
        *byte = Self::change_bit(*byte, index, val);
    }
    /// Pointer-based version of [`Self::set_bit`].
    ///
    /// # Safety
    ///
    /// `this` must point to a valid, initialized `Self` that may be written
    /// through, and `index / 8` must be within the bounds of `Storage`.
    #[inline]
    pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
        debug_assert!(index / 8 < core::mem::size_of::<Storage>());
        let byte_index = index / 8;
        // SAFETY (caller contract): `this` is valid for writes and
        // `byte_index` is in bounds, so the byte pointer stays inside
        // `storage`.
        let byte = unsafe {
            (core::ptr::addr_of_mut!((*this).storage) as *mut u8)
                .offset(byte_index as isize)
        };
        // SAFETY: `byte` was derived in bounds just above.
        unsafe { *byte = Self::change_bit(*byte, index, val) };
    }
    /// Reads a `bit_width`-wide field starting at `bit_offset`.
    #[inline]
    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        let mut val = 0;
        for i in 0..(bit_width as usize) {
            if self.get_bit(i + bit_offset) {
                // On big-endian targets the bit order is reversed so the
                // composed value matches the little-endian result.
                let index = if cfg!(target_endian = "big") {
                    bit_width as usize - 1 - i
                } else {
                    i
                };
                val |= 1 << index;
            }
        }
        val
    }
    /// Pointer-based version of [`Self::get`].
    ///
    /// # Safety
    ///
    /// `this` must point to a valid, initialized `Self`, and the field
    /// described by `bit_offset`/`bit_width` must lie within `Storage`.
    #[inline]
    pub unsafe fn raw_get(
        this: *const Self,
        bit_offset: usize,
        bit_width: u8,
    ) -> u64 {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                core::mem::size_of::<Storage>()
        );
        let mut val = 0;
        for i in 0..(bit_width as usize) {
            // SAFETY: forwarded caller contract of `raw_get_bit`.
            if unsafe { Self::raw_get_bit(this, i + bit_offset) } {
                let index = if cfg!(target_endian = "big") {
                    bit_width as usize - 1 - i
                } else {
                    i
                };
                val |= 1 << index;
            }
        }
        val
    }
    /// Stores the low `bit_width` bits of `val` into the field starting at
    /// `bit_offset`.
    #[inline]
    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                self.storage.as_ref().len()
        );
        for i in 0..(bit_width as usize) {
            let mask = 1 << i;
            let val_bit_is_set = val & mask == mask;
            let index = if cfg!(target_endian = "big") {
                bit_width as usize - 1 - i
            } else {
                i
            };
            self.set_bit(index + bit_offset, val_bit_is_set);
        }
    }
    /// Pointer-based version of [`Self::set`].
    ///
    /// # Safety
    ///
    /// `this` must point to a valid, initialized `Self` that may be written
    /// through, and the field described by `bit_offset`/`bit_width` must lie
    /// within `Storage`.
    #[inline]
    pub unsafe fn raw_set(
        this: *mut Self,
        bit_offset: usize,
        bit_width: u8,
        val: u64,
    ) {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
        debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <=
                core::mem::size_of::<Storage>()
        );
        for i in 0..(bit_width as usize) {
            let mask = 1 << i;
            let val_bit_is_set = val & mask == mask;
            let index = if cfg!(target_endian = "big") {
                bit_width as usize - 1 - i
            } else {
                i
            };
            // SAFETY: forwarded caller contract of `raw_set_bit`.
            unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) };
        }
    }
}

View File

@@ -0,0 +1,260 @@
//! Tests for `__BindgenBitfieldUnit`.
//!
//! Note that bit-fields are allocated right to left (least to most significant
//! bits).
//!
//! From the x86 PS ABI:
//!
//! ```c
//! struct {
//! int j : 5;
//! int k : 6;
//! int m : 7;
//! };
//! ```
//!
//! ```ignore
//! +------------------------------------------------------------+
//! | | | | |
//! | padding | m | k | j |
//! |31 18|17 11|10 5|4 0|
//! +------------------------------------------------------------+
//! ```
use super::bitfield_unit::__BindgenBitfieldUnit;
#[test]
fn bitfield_unit_get_bit() {
    let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]);
    // Collect all sixteen bits in allocation-unit order.
    let bits: Vec<bool> = (0..16).map(|bit| unit.get_bit(bit)).collect();
    println!();
    println!("bits = {bits:?}");
    assert_eq!(
        bits,
        &[
            // 0b10011101
            true, false, true, true, true, false, false, true,
            // 0b00011101
            true, false, true, true, true, false, false, false
        ]
    );
}
#[test]
fn bitfield_unit_set_bit() {
    // Start from all zeros and raise every third bit.
    let mut unit =
        __BindgenBitfieldUnit::<[u8; 2]>::new([0b00000000, 0b00000000]);
    for bit in (0..16).filter(|bit| bit % 3 == 0) {
        unit.set_bit(bit, true);
    }
    for bit in 0..16 {
        assert_eq!(unit.get_bit(bit), bit % 3 == 0);
    }
    // Start from all ones and clear every third bit.
    let mut unit =
        __BindgenBitfieldUnit::<[u8; 2]>::new([0b11111111, 0b11111111]);
    for bit in (0..16).filter(|bit| bit % 3 == 0) {
        unit.set_bit(bit, false);
    }
    for bit in 0..16 {
        assert_eq!(unit.get_bit(bit), bit % 3 != 0);
    }
}
// Generates the `bitfield_unit_get` test: each `With <storage>, then
// get(<start>, <len>) is <expected>;` clause builds a unit from `<storage>`
// and checks one `get` call against `<expected>`.
macro_rules! bitfield_unit_get {
    (
        $(
            With $storage:expr , then get($start:expr, $len:expr) is $expected:expr;
        )*
    ) => {
        #[test]
        fn bitfield_unit_get() {
            $({
                let expected = $expected;
                let unit = __BindgenBitfieldUnit::<_>::new($storage);
                let actual = unit.get($start, $len);
                println!();
                println!("expected = {expected:064b}");
                println!("actual = {actual:064b}");
                assert_eq!(expected, actual);
            })*
        }
    }
}
// Expected values read the field least-significant-bit-first starting at
// the given bit offset.
bitfield_unit_get! {
    // Let's just exhaustively test getting the bits from a single byte, since
    // there are few enough combinations...
    With [0b11100010], then get(0, 1) is 0;
    With [0b11100010], then get(1, 1) is 1;
    With [0b11100010], then get(2, 1) is 0;
    With [0b11100010], then get(3, 1) is 0;
    With [0b11100010], then get(4, 1) is 0;
    With [0b11100010], then get(5, 1) is 1;
    With [0b11100010], then get(6, 1) is 1;
    With [0b11100010], then get(7, 1) is 1;
    With [0b11100010], then get(0, 2) is 0b10;
    With [0b11100010], then get(1, 2) is 0b01;
    With [0b11100010], then get(2, 2) is 0b00;
    With [0b11100010], then get(3, 2) is 0b00;
    With [0b11100010], then get(4, 2) is 0b10;
    With [0b11100010], then get(5, 2) is 0b11;
    With [0b11100010], then get(6, 2) is 0b11;
    With [0b11100010], then get(0, 3) is 0b010;
    With [0b11100010], then get(1, 3) is 0b001;
    With [0b11100010], then get(2, 3) is 0b000;
    With [0b11100010], then get(3, 3) is 0b100;
    With [0b11100010], then get(4, 3) is 0b110;
    With [0b11100010], then get(5, 3) is 0b111;
    With [0b11100010], then get(0, 4) is 0b0010;
    With [0b11100010], then get(1, 4) is 0b0001;
    With [0b11100010], then get(2, 4) is 0b1000;
    With [0b11100010], then get(3, 4) is 0b1100;
    With [0b11100010], then get(4, 4) is 0b1110;
    With [0b11100010], then get(0, 5) is 0b00010;
    With [0b11100010], then get(1, 5) is 0b10001;
    With [0b11100010], then get(2, 5) is 0b11000;
    With [0b11100010], then get(3, 5) is 0b11100;
    With [0b11100010], then get(0, 6) is 0b100010;
    With [0b11100010], then get(1, 6) is 0b110001;
    With [0b11100010], then get(2, 6) is 0b111000;
    With [0b11100010], then get(0, 7) is 0b1100010;
    With [0b11100010], then get(1, 7) is 0b1110001;
    With [0b11100010], then get(0, 8) is 0b11100010;
    // OK. Now let's test getting bits from across byte boundaries.
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(0, 16) is 0b1111111101010101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(1, 16) is 0b0111111110101010;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(2, 16) is 0b0011111111010101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(3, 16) is 0b0001111111101010;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(4, 16) is 0b0000111111110101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(5, 16) is 0b0000011111111010;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(6, 16) is 0b0000001111111101;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(7, 16) is 0b0000000111111110;
    With [0b01010101, 0b11111111, 0b00000000, 0b11111111],
    then get(8, 16) is 0b0000000011111111;
}
// Generates the `bitfield_unit_set` test: each `set(<start>, <len>, <val>)
// is <expected>;` clause applies a single `set` call to an all-zero 4-byte
// unit and compares the whole unit against `<expected>`.
macro_rules! bitfield_unit_set {
    (
        $(
            set($start:expr, $len:expr, $val:expr) is $expected:expr;
        )*
    ) => {
        #[test]
        fn bitfield_unit_set() {
            $(
                let mut unit = __BindgenBitfieldUnit::<[u8; 4]>::new([0, 0, 0, 0]);
                unit.set($start, $len, $val);
                let actual = unit.get(0, 32);
                println!();
                // Close the parenthesis opened after `set` (the original
                // format string was missing it, producing unbalanced output).
                println!("set({}, {}, {:032b})", $start, $len, $val);
                println!("expected = {:064b}", $expected);
                println!("actual = {actual:064b}");
                assert_eq!($expected, actual);
            )*
        }
    }
}
// Each expected value is the whole 32-bit unit after a single `set` on an
// all-zero unit.
bitfield_unit_set! {
    // Once again, let's exhaustively test single byte combinations.
    set(0, 1, 0b11111111) is 0b00000001;
    set(1, 1, 0b11111111) is 0b00000010;
    set(2, 1, 0b11111111) is 0b00000100;
    set(3, 1, 0b11111111) is 0b00001000;
    set(4, 1, 0b11111111) is 0b00010000;
    set(5, 1, 0b11111111) is 0b00100000;
    set(6, 1, 0b11111111) is 0b01000000;
    set(7, 1, 0b11111111) is 0b10000000;
    set(0, 2, 0b11111111) is 0b00000011;
    set(1, 2, 0b11111111) is 0b00000110;
    set(2, 2, 0b11111111) is 0b00001100;
    set(3, 2, 0b11111111) is 0b00011000;
    set(4, 2, 0b11111111) is 0b00110000;
    set(5, 2, 0b11111111) is 0b01100000;
    set(6, 2, 0b11111111) is 0b11000000;
    set(0, 3, 0b11111111) is 0b00000111;
    set(1, 3, 0b11111111) is 0b00001110;
    set(2, 3, 0b11111111) is 0b00011100;
    set(3, 3, 0b11111111) is 0b00111000;
    set(4, 3, 0b11111111) is 0b01110000;
    set(5, 3, 0b11111111) is 0b11100000;
    set(0, 4, 0b11111111) is 0b00001111;
    set(1, 4, 0b11111111) is 0b00011110;
    set(2, 4, 0b11111111) is 0b00111100;
    set(3, 4, 0b11111111) is 0b01111000;
    set(4, 4, 0b11111111) is 0b11110000;
    set(0, 5, 0b11111111) is 0b00011111;
    set(1, 5, 0b11111111) is 0b00111110;
    set(2, 5, 0b11111111) is 0b01111100;
    set(3, 5, 0b11111111) is 0b11111000;
    set(0, 6, 0b11111111) is 0b00111111;
    set(1, 6, 0b11111111) is 0b01111110;
    set(2, 6, 0b11111111) is 0b11111100;
    set(0, 7, 0b11111111) is 0b01111111;
    set(1, 7, 0b11111111) is 0b11111110;
    set(0, 8, 0b11111111) is 0b11111111;
    // And, now let's cross byte boundaries.
    set(0, 16, 0b1111111111111111) is 0b00000000000000001111111111111111;
    set(1, 16, 0b1111111111111111) is 0b00000000000000011111111111111110;
    set(2, 16, 0b1111111111111111) is 0b00000000000000111111111111111100;
    set(3, 16, 0b1111111111111111) is 0b00000000000001111111111111111000;
    set(4, 16, 0b1111111111111111) is 0b00000000000011111111111111110000;
    set(5, 16, 0b1111111111111111) is 0b00000000000111111111111111100000;
    set(6, 16, 0b1111111111111111) is 0b00000000001111111111111111000000;
    set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000;
    set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000;
}

258
vendor/bindgen/codegen/dyngen.rs vendored Normal file
View File

@@ -0,0 +1,258 @@
use crate::codegen;
use crate::ir::context::BindgenContext;
use crate::ir::function::ClangAbi;
use proc_macro2::{Ident, TokenStream};
/// Used to build the output tokens for dynamic bindings.
///
/// Each `push_*` call appends to all four lists in lockstep; `get_tokens`
/// assembles them into the final library struct.
#[derive(Default)]
pub(crate) struct DynamicItems {
    /// Tracks the tokens that will appears inside the library struct -- e.g.:
    /// ```ignore
    /// struct Lib {
    ///    __library: ::libloading::Library,
    ///    pub x: Result<unsafe extern ..., ::libloading::Error>, // <- tracks these
    ///    ...
    /// }
    /// ```
    struct_members: Vec<TokenStream>,
    /// Tracks the tokens that will appear inside the library struct's implementation, e.g.:
    ///
    /// ```ignore
    /// impl Lib {
    ///     ...
    ///     pub unsafe fn foo(&self, ...) { // <- tracks these
    ///         ...
    ///     }
    /// }
    /// ```
    struct_implementation: Vec<TokenStream>,
    /// Tracks the initialization of the fields inside the `::new` constructor of the library
    /// struct, e.g.:
    /// ```ignore
    /// impl Lib {
    ///
    ///     pub unsafe fn new<P>(path: P) -> Result<Self, ::libloading::Error>
    ///     where
    ///         P: AsRef<::std::ffi::OsStr>,
    ///     {
    ///         ...
    ///         let foo = __library.get(...) ...; // <- tracks these
    ///         ...
    ///     }
    ///
    ///     ...
    /// }
    /// ```
    constructor_inits: Vec<TokenStream>,
    /// Tracks the information that is passed to the library struct at the end of the `::new`
    /// constructor, e.g.:
    /// ```ignore
    /// impl LibFoo {
    ///     pub unsafe fn new<P>(path: P) -> Result<Self, ::libloading::Error>
    ///     where
    ///         P: AsRef<::std::ffi::OsStr>,
    ///     {
    ///         ...
    ///         Ok(LibFoo {
    ///             __library: __library,
    ///             foo,
    ///             bar, // <- tracks these
    ///             ...
    ///         })
    ///     }
    /// }
    /// ```
    init_fields: Vec<TokenStream>,
}
impl DynamicItems {
    /// Creates an empty set of dynamic items.
    pub(crate) fn new() -> Self {
        Self::default()
    }
    /// Assembles the accumulated members, constructors and methods into the
    /// final token stream for the `#lib_ident` library struct.
    pub(crate) fn get_tokens(
        &self,
        lib_ident: &Ident,
        ctx: &BindgenContext,
    ) -> TokenStream {
        let struct_members = &self.struct_members;
        let constructor_inits = &self.constructor_inits;
        let init_fields = &self.init_fields;
        let struct_implementation = &self.struct_implementation;
        // With --wrap-unsafe-ops the generated unsafe calls are wrapped in
        // explicit `unsafe { ... }` blocks.
        let library_new = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { ::libloading::Library::new(path) })
        } else {
            quote!(::libloading::Library::new(path))
        };
        let from_library = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { Self::from_library(library) })
        } else {
            quote!(Self::from_library(library))
        };
        quote! {
            pub struct #lib_ident {
                __library: ::libloading::Library,
                #(#struct_members)*
            }
            impl #lib_ident {
                pub unsafe fn new<P>(
                    path: P
                ) -> Result<Self, ::libloading::Error>
                where P: AsRef<::std::ffi::OsStr> {
                    let library = #library_new?;
                    #from_library
                }
                pub unsafe fn from_library<L>(
                    library: L
                ) -> Result<Self, ::libloading::Error>
                where L: Into<::libloading::Library> {
                    let __library = library.into();
                    #( #constructor_inits )*
                    Ok(#lib_ident {
                        __library,
                        #( #init_fields ),*
                    })
                }
                #( #struct_implementation )*
            }
        }
    }
    /// Registers a dynamically-loaded function: a struct field holding the
    /// symbol, a forwarding method (unless the function is variadic), and the
    /// constructor statement that resolves the symbol.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn push_func(
        &mut self,
        ident: &Ident,
        symbol: &str,
        abi: ClangAbi,
        is_variadic: bool,
        is_required: bool,
        args: &[TokenStream],
        args_identifiers: &[TokenStream],
        ret: &TokenStream,
        ret_ty: &TokenStream,
        attributes: &[TokenStream],
        ctx: &BindgenContext,
    ) {
        // Variadic signatures have more identifiers than declared args, so
        // the lengths only need to agree for non-variadic functions.
        if !is_variadic {
            assert_eq!(args.len(), args_identifiers.len());
        }
        let signature = quote! { unsafe extern #abi fn ( #( #args),* ) #ret };
        let member = if is_required {
            signature
        } else {
            quote! { Result<#signature, ::libloading::Error> }
        };
        self.struct_members.push(quote! {
            pub #ident: #member,
        });
        // N.B: If the signature was required, it won't be wrapped in a Result<...>
        // and we can simply call it directly.
        let fn_ = if is_required {
            quote! { self.#ident }
        } else {
            quote! { self.#ident.as_ref().expect("Expected function, got error.") }
        };
        let call_body = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { (#fn_)(#( #args_identifiers ),*) })
        } else {
            quote!((#fn_)(#( #args_identifiers ),*) )
        };
        // We can't implement variadic functions from C easily, so we allow to
        // access the function pointer so that the user can call it just fine.
        if !is_variadic {
            self.struct_implementation.push(quote! {
                #(#attributes)*
                pub unsafe fn #ident ( &self, #( #args ),* ) #ret_ty {
                    #call_body
                }
            });
        }
        // N.B: Unwrap the signature upon construction if it is required to be resolved.
        let symbol_cstr =
            codegen::helpers::ast_ty::cstr_expr(symbol.to_string());
        let library_get = if ctx.options().wrap_unsafe_ops {
            quote!(unsafe { __library.get(#symbol_cstr) })
        } else {
            quote!(__library.get(#symbol_cstr))
        };
        self.constructor_inits.push(if is_required {
            quote! {
                let #ident = #library_get.map(|sym| *sym)?;
            }
        } else {
            quote! {
                let #ident = #library_get.map(|sym| *sym);
            }
        });
        self.init_fields.push(quote! {
            #ident
        });
    }
    /// Registers a dynamically-loaded global variable: a `*mut` struct field,
    /// an accessor method, and the constructor statement that resolves the
    /// symbol.
    ///
    /// NOTE(review): declared `pub` while the other methods are `pub(crate)`;
    /// the struct itself is `pub(crate)`, so this is effectively
    /// crate-visible either way.
    pub fn push_var(
        &mut self,
        ident: &Ident,
        symbol: &str,
        ty: &TokenStream,
        is_required: bool,
        wrap_unsafe_ops: bool,
    ) {
        let member = if is_required {
            quote! { *mut #ty }
        } else {
            quote! { Result<*mut #ty, ::libloading::Error> }
        };
        self.struct_members.push(quote! {
            pub #ident: #member,
        });
        let deref = if is_required {
            quote! { self.#ident }
        } else {
            quote! { *self.#ident.as_ref().expect("Expected variable, got error.") }
        };
        self.struct_implementation.push(quote! {
            pub unsafe fn #ident (&self) -> *mut #ty {
                #deref
            }
        });
        let symbol_cstr =
            codegen::helpers::ast_ty::cstr_expr(symbol.to_string());
        let library_get = if wrap_unsafe_ops {
            quote!(unsafe { __library.get::<*mut #ty>(#symbol_cstr) })
        } else {
            quote!(__library.get::<*mut #ty>(#symbol_cstr))
        };
        // Only required variables propagate resolution failure with `?`.
        let qmark = if is_required { quote!(?) } else { quote!() };
        let var_get = quote! {
            let #ident = #library_get.map(|sym| *sym)#qmark;
        };
        self.constructor_inits.push(var_get);
        self.init_fields.push(quote! {
            #ident
        });
    }
}

52
vendor/bindgen/codegen/error.rs vendored Normal file
View File

@@ -0,0 +1,52 @@
use std::error;
use std::fmt;
/// Errors that can occur during code generation.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum Error {
    /// An opaque blob was requested for a type whose layout is unknown.
    NoLayoutForOpaqueBlob,
    /// An opaque template definition — or a template definition too hard for
    /// us to understand, like a partial specialization — was instantiated.
    InstantiationOfOpaqueType,
    /// The function's ABI is not supported.
    UnsupportedAbi(&'static str),
    /// A pointer type whose size disagrees with the target's pointer size.
    InvalidPointerSize {
        /// Name of the offending pointer type.
        ty_name: String,
        /// Size of the pointer type, in bytes.
        ty_size: usize,
        /// The target's pointer size, in bytes.
        ptr_size: usize,
    },
}
impl fmt::Display for Error {
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        match self {
            Error::NoLayoutForOpaqueBlob => formatter
                .write_str("Tried to generate an opaque blob, but had no layout."),
            Error::InstantiationOfOpaqueType => formatter.write_str(
                "Instantiation of opaque template type or partial template specialization.",
            ),
            Error::UnsupportedAbi(abi) => write!(
                formatter,
                "{abi} ABI is not supported by the configured Rust target."
            ),
            Error::InvalidPointerSize { ty_name, ty_size, ptr_size } => {
                write!(formatter, "The {ty_name} pointer type has size {ty_size} but the current target's pointer size is {ptr_size}.")
            }
        }
    }
}
impl error::Error for Error {}
/// A `Result` whose error type is [`Error`].
pub(crate) type Result<T> = ::std::result::Result<T, Error>;

395
vendor/bindgen/codegen/helpers.rs vendored Normal file
View File

@@ -0,0 +1,395 @@
//! Helpers for code generation that don't need macro expansion.
use proc_macro2::{Ident, Span};
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
/// Builders for commonly-emitted Rust attributes.
pub(crate) mod attributes {
    use proc_macro2::{Ident, Span, TokenStream};
    use std::{borrow::Cow, str::FromStr};
    /// `#[repr(<which>)]` with a single identifier argument.
    pub(crate) fn repr(which: &str) -> TokenStream {
        let which = Ident::new(which, Span::call_site());
        quote! {
            #[repr( #which )]
        }
    }
    /// `#[repr(a, b, ...)]`; each entry must parse as a token stream
    /// (e.g. `align(4)`).
    pub(crate) fn repr_list(which_ones: &[&str]) -> TokenStream {
        let which_ones = which_ones
            .iter()
            .map(|one| TokenStream::from_str(one).expect("repr to be valid"));
        quote! {
            #[repr( #( #which_ones ),* )]
        }
    }
    /// `#[derive(A, B, ...)]`; each entry must parse as a token stream.
    pub(crate) fn derives(which_ones: &[&str]) -> TokenStream {
        let which_ones = which_ones
            .iter()
            .map(|one| TokenStream::from_str(one).expect("derive to be valid"));
        quote! {
            #[derive( #( #which_ones ),* )]
        }
    }
    /// `#[inline]`.
    pub(crate) fn inline() -> TokenStream {
        quote! {
            #[inline]
        }
    }
    /// `#[must_use]`.
    pub(crate) fn must_use() -> TokenStream {
        quote! {
            #[must_use]
        }
    }
    /// `#[non_exhaustive]`.
    pub(crate) fn non_exhaustive() -> TokenStream {
        quote! {
            #[non_exhaustive]
        }
    }
    /// `#[doc = "<comment>"]`, or nothing for an empty comment.
    pub(crate) fn doc(comment: &str) -> TokenStream {
        if comment.is_empty() {
            quote!()
        } else {
            quote!(#[doc = #comment])
        }
    }
    /// `#[link_name = ...]`, optionally suppressing LLVM's own mangling.
    pub(crate) fn link_name<const MANGLE: bool>(name: &str) -> TokenStream {
        // LLVM mangles the name by default but it's already mangled.
        // Prefixing the name with \u{1} should tell LLVM to not mangle it.
        let name: Cow<'_, str> = if MANGLE {
            name.into()
        } else {
            format!("\u{1}{name}").into()
        };
        quote! {
            #[link_name = #name]
        }
    }
}
/// Builds an opaque "blob" type covering the given `layout`.
///
/// `ffi_safe` should be true for types the user might reasonably use (i.e.
/// not struct padding, where the `__BindgenOpaqueArray` wrapper is just
/// noise).
/// TODO: Should this be `MaybeUninit`, since padding bytes are effectively
/// uninitialized?
pub(crate) fn blob(
    ctx: &BindgenContext,
    layout: Layout,
    ffi_safe: bool,
) -> syn::Type {
    let opaque = layout.opaque();
    // FIXME(emilio, #412): We fall back to byte alignment, but there are
    // some things that legitimately are more than 8-byte aligned.
    //
    // Eventually we should be able to `unwrap` here, but...
    let elem_ty = opaque.known_rust_type_for_array().unwrap_or_else(|| {
        warn!("Found unknown alignment on code generation!");
        syn::parse_quote! { u8 }
    });
    let len = opaque.array_size().unwrap_or(layout.size);
    if len == 1 {
        return elem_ty;
    }
    if ffi_safe && ctx.options().rust_features().min_const_generics {
        ctx.generated_opaque_array();
        return if ctx.options().enable_cxx_namespaces {
            syn::parse_quote! { root::__BindgenOpaqueArray<#elem_ty, #len> }
        } else {
            syn::parse_quote! { __BindgenOpaqueArray<#elem_ty, #len> }
        };
    }
    // A bare array is not FFI safe as an argument; the wrapper above is
    // preferable.
    syn::parse_quote! { [ #elem_ty ; #len ] }
}
/// Integer type of the same size as the given `Layout`, if one is known.
pub(crate) fn integer_type(layout: Layout) -> Option<syn::Type> {
    Layout::known_type_for_size(layout.size)
}
/// Name of the generated helper type that backs bit-field allocation units.
pub(crate) const BITFIELD_UNIT: &str = "__BindgenBitfieldUnit";
/// Generates a bitfield allocation unit type for a type with the given `Layout`.
pub(crate) fn bitfield_unit(ctx: &BindgenContext, layout: Layout) -> syn::Type {
    let size = layout.size;
    let bitfield_unit_name = Ident::new(BITFIELD_UNIT, Span::call_site());
    let ty = syn::parse_quote! { #bitfield_unit_name<[u8; #size]> };
    // With C++ namespaces enabled the helper lives under the `root` module.
    if ctx.options().enable_cxx_namespaces {
        return syn::parse_quote! { root::#ty };
    }
    ty
}
/// Builders for small Rust AST fragments: references to primitive C types
/// and literal expressions emitted during code generation.
pub(crate) mod ast_ty {
    use crate::ir::context::BindgenContext;
    use crate::ir::function::FunctionSig;
    use crate::ir::layout::Layout;
    use crate::ir::ty::{FloatKind, IntKind};
    use crate::RustTarget;
    use proc_macro2::TokenStream;
    use std::str::FromStr;
    /// The Rust spelling of C's `void`, honoring the `ctypes_prefix` and
    /// `use_core` options.
    pub(crate) fn c_void(ctx: &BindgenContext) -> syn::Type {
        // ctypes_prefix takes precedence
        match ctx.options().ctypes_prefix {
            Some(ref prefix) => {
                let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
                syn::parse_quote! { #prefix::c_void }
            }
            None => {
                if ctx.options().use_core {
                    syn::parse_quote! { ::core::ffi::c_void }
                } else {
                    syn::parse_quote! { ::std::os::raw::c_void }
                }
            }
        }
    }
    /// Resolves a raw C type name (e.g. `c_int`) to a path under the
    /// configured `ctypes_prefix`, `::core::ffi`, or `::std::os::raw`.
    pub(crate) fn raw_type(ctx: &BindgenContext, name: &str) -> syn::Type {
        let ident = ctx.rust_ident_raw(name);
        match ctx.options().ctypes_prefix {
            Some(ref prefix) => {
                let prefix = TokenStream::from_str(prefix.as_str()).unwrap();
                syn::parse_quote! { #prefix::#ident }
            }
            None => {
                if ctx.options().use_core &&
                    ctx.options().rust_features().core_ffi_c
                {
                    syn::parse_quote! { ::core::ffi::#ident }
                } else {
                    syn::parse_quote! { ::std::os::raw::#ident }
                }
            }
        }
    }
    /// Maps an `IntKind` to the Rust type used to represent it.
    ///
    /// `layout` is only consulted for kinds whose size is target-dependent
    /// (currently `WChar`).
    pub(crate) fn int_kind_rust_type(
        ctx: &BindgenContext,
        ik: IntKind,
        layout: Option<Layout>,
    ) -> syn::Type {
        match ik {
            IntKind::Bool => syn::parse_quote! { bool },
            IntKind::Char { .. } => raw_type(ctx, "c_char"),
            // The following is used only when an unusual command-line
            // argument is used. bindgen_cchar16_t is not a real type;
            // but this allows downstream postprocessors to distinguish
            // this case and do something special for C++ bindings
            // containing the C++ type char16_t.
            IntKind::Char16 => syn::parse_quote! { bindgen_cchar16_t },
            IntKind::SChar => raw_type(ctx, "c_schar"),
            IntKind::UChar => raw_type(ctx, "c_uchar"),
            IntKind::Short => raw_type(ctx, "c_short"),
            IntKind::UShort => raw_type(ctx, "c_ushort"),
            IntKind::Int => raw_type(ctx, "c_int"),
            IntKind::UInt => raw_type(ctx, "c_uint"),
            IntKind::Long => raw_type(ctx, "c_long"),
            IntKind::ULong => raw_type(ctx, "c_ulong"),
            IntKind::LongLong => raw_type(ctx, "c_longlong"),
            IntKind::ULongLong => raw_type(ctx, "c_ulonglong"),
            IntKind::WChar => {
                let layout =
                    layout.expect("Couldn't compute wchar_t's layout?");
                Layout::known_type_for_size(layout.size)
                    .expect("Non-representable wchar_t?")
            }
            IntKind::I8 => syn::parse_quote! { i8 },
            IntKind::U8 => syn::parse_quote! { u8 },
            IntKind::I16 => syn::parse_quote! { i16 },
            IntKind::U16 => syn::parse_quote! { u16 },
            IntKind::I32 => syn::parse_quote! { i32 },
            IntKind::U32 => syn::parse_quote! { u32 },
            IntKind::I64 => syn::parse_quote! { i64 },
            IntKind::U64 => syn::parse_quote! { u64 },
            IntKind::Custom { name, .. } => {
                syn::parse_str(name).expect("Invalid integer type.")
            }
            // NOTE(review): the `if true` below means the `[u64; 2]`
            // fallback is dead code — presumably a leftover from an old
            // feature gate; confirm before removing.
            IntKind::U128 => {
                if true {
                    syn::parse_quote! { u128 }
                } else {
                    // Best effort thing, but wrong alignment
                    // unfortunately.
                    syn::parse_quote! { [u64; 2] }
                }
            }
            IntKind::I128 => {
                if true {
                    syn::parse_quote! { i128 }
                } else {
                    syn::parse_quote! { [u64; 2] }
                }
            }
        }
    }
    /// Maps a `FloatKind` to the Rust type used to represent it, taking the
    /// `convert_floats` option (and, for `long double`, the layout) into
    /// account.
    pub(crate) fn float_kind_rust_type(
        ctx: &BindgenContext,
        fk: FloatKind,
        layout: Option<Layout>,
    ) -> syn::Type {
        // TODO: we probably should take the type layout into account more
        // often?
        //
        // Also, maybe this one shouldn't be the default?
        match (fk, ctx.options().convert_floats) {
            (FloatKind::Float16, _) => {
                // TODO: do f16 when rust lands it
                ctx.generated_bindgen_float16();
                if ctx.options().enable_cxx_namespaces {
                    syn::parse_quote! { root::__BindgenFloat16 }
                } else {
                    syn::parse_quote! { __BindgenFloat16 }
                }
            }
            (FloatKind::Float, true) => syn::parse_quote! { f32 },
            (FloatKind::Double, true) => syn::parse_quote! { f64 },
            (FloatKind::Float, false) => raw_type(ctx, "c_float"),
            (FloatKind::Double, false) => raw_type(ctx, "c_double"),
            (FloatKind::LongDouble, _) => {
                if let Some(layout) = layout {
                    match layout.size {
                        4 => syn::parse_quote! { f32 },
                        8 => syn::parse_quote! { f64 },
                        // TODO(emilio): If rust ever gains f128 we should
                        // use it here and below.
                        _ => super::integer_type(layout)
                            .unwrap_or(syn::parse_quote! { f64 }),
                    }
                } else {
                    debug_assert!(
                        false,
                        "How didn't we know the layout for a primitive type?"
                    );
                    syn::parse_quote! { f64 }
                }
            }
            // NOTE(review): the `[u64; 2]` fallback is dead here as well.
            (FloatKind::Float128, _) => {
                if true {
                    syn::parse_quote! { u128 }
                } else {
                    syn::parse_quote! { [u64; 2] }
                }
            }
        }
    }
    /// Unsuffixed integer literal expression for `val`.
    pub(crate) fn int_expr(val: i64) -> TokenStream {
        // Don't use quote! { #val } because that adds the type suffix.
        let val = proc_macro2::Literal::i64_unsuffixed(val);
        quote!(#val)
    }
    /// Unsuffixed unsigned integer literal expression for `val`.
    pub(crate) fn uint_expr(val: u64) -> TokenStream {
        // Don't use quote! { #val } because that adds the type suffix.
        let val = proc_macro2::Literal::u64_unsuffixed(val);
        quote!(#val)
    }
    /// NUL-terminated byte-string literal expression for `string`.
    pub(crate) fn cstr_expr(mut string: String) -> TokenStream {
        string.push('\0');
        let b = proc_macro2::Literal::byte_string(string.as_bytes());
        quote! {
            #b
        }
    }
    /// Literal expression for `f`, handling the non-finite values `NAN`,
    /// `INFINITY` and `NEG_INFINITY`; errors on any other non-finite float.
    pub(crate) fn float_expr(
        ctx: &BindgenContext,
        f: f64,
    ) -> Result<TokenStream, ()> {
        if f.is_finite() {
            let val = proc_macro2::Literal::f64_unsuffixed(f);
            return Ok(quote!(#val));
        }
        let prefix = ctx.trait_prefix();
        let rust_target = ctx.options().rust_target;
        if f.is_nan() {
            // FIXME: This should be done behind a `RustFeature` instead
            #[allow(deprecated)]
            let tokens = if rust_target >= RustTarget::Stable_1_43 {
                quote! {
                    f64::NAN
                }
            } else {
                quote! {
                    ::#prefix::f64::NAN
                }
            };
            return Ok(tokens);
        }
        if f.is_infinite() {
            let tokens = if f.is_sign_positive() {
                // FIXME: This should be done behind a `RustFeature` instead
                #[allow(deprecated)]
                if rust_target >= RustTarget::Stable_1_43 {
                    quote! {
                        f64::INFINITY
                    }
                } else {
                    quote! {
                        ::#prefix::f64::INFINITY
                    }
                }
            } else {
                // FIXME: This should be done behind a `RustFeature` instead
                #[allow(deprecated)]
                // Negative infinity
                if rust_target >= RustTarget::Stable_1_43 {
                    quote! {
                        f64::NEG_INFINITY
                    }
                } else {
                    quote! {
                        ::#prefix::f64::NEG_INFINITY
                    }
                }
            };
            return Ok(tokens);
        }
        warn!("Unknown non-finite float number: {f:?}");
        Err(())
    }
    /// Returns one identifier token per argument in `signature`,
    /// synthesizing `argN` names for unnamed arguments.
    pub(crate) fn arguments_from_signature(
        signature: &FunctionSig,
        ctx: &BindgenContext,
    ) -> Vec<TokenStream> {
        let mut unnamed_arguments = 0;
        signature
            .argument_types()
            .iter()
            .map(|&(ref name, _ty)| {
                let name = if let Some(ref name) = *name {
                    ctx.rust_ident(name)
                } else {
                    unnamed_arguments += 1;
                    ctx.rust_ident(format!("arg{unnamed_arguments}"))
                };
                quote! { #name }
            })
            .collect()
    }
}

243
vendor/bindgen/codegen/impl_debug.rs vendored Normal file
View File

@@ -0,0 +1,243 @@
use crate::ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods};
use crate::ir::context::BindgenContext;
use crate::ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName};
use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
use std::fmt::Write as _;
/// Builds the body of a `Debug::fmt` implementation for the composite
/// `item`: a single `write!` whose format string lists every printable
/// field, followed by the tokens that produce each field's value.
///
/// The brace escapes are doubled twice: `{{{{` survives the `format!` here
/// as `{{`, and that string is itself used as a format string inside the
/// generated `write!`, where `{{` finally renders as a literal `{`.
pub(crate) fn gen_debug_impl(
    ctx: &BindgenContext,
    fields: &[Field],
    item: &Item,
    kind: CompKind,
) -> proc_macro2::TokenStream {
    let struct_name = item.canonical_name(ctx);
    let mut format_string = format!("{struct_name} {{{{ ");
    let mut tokens = vec![];
    if item.is_opaque(ctx, &()) {
        format_string.push_str("opaque");
    } else {
        match kind {
            CompKind::Union => {
                format_string.push_str("union");
            }
            CompKind::Struct => {
                // Fields that cannot contribute (e.g. blocklisted or
                // unnamed) are filtered out here.
                let processed_fields = fields.iter().filter_map(|f| match f {
                    Field::DataMember(ref fd) => fd.impl_debug(ctx, ()),
                    Field::Bitfields(ref bu) => bu.impl_debug(ctx, ()),
                });
                for (i, (fstring, toks)) in processed_fields.enumerate() {
                    if i > 0 {
                        format_string.push_str(", ");
                    }
                    tokens.extend(toks);
                    format_string.push_str(&fstring);
                }
            }
        }
    }
    format_string.push_str(" }}");
    // The format string becomes the first `write!` argument; the field
    // value tokens follow it.
    tokens.insert(0, quote! { #format_string });
    let prefix = ctx.trait_prefix();
    quote! {
        fn fmt(&self, f: &mut ::#prefix::fmt::Formatter<'_>) -> ::#prefix ::fmt::Result {
            write!(f, #( #tokens ),*)
        }
    }
}
/// A trait for the things which we can codegen tokens that contribute towards a
/// generated `impl Debug`.
pub(crate) trait ImplDebug<'a> {
    /// Any extra parameter required by a particular `ImplDebug` implementation.
    type Extra;
    /// Generate a format string snippet to be included in the larger `impl Debug`
    /// format string, and the code to get the format string's interpolation values.
    ///
    /// Returns `None` when the value cannot contribute to the output (for
    /// example, a field without a name or an item that is not allowlisted).
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        extra: Self::Extra,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)>;
}
impl ImplDebug<'_> for FieldData {
    type Extra = ();

    /// A data member delegates to the item of its type, using the member's
    /// own name; unnamed members cannot be debug-printed.
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        _: Self::Extra,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
        let member_name = self.name()?;
        ctx.resolve_item(self.ty()).impl_debug(ctx, member_name)
    }
}
impl ImplDebug<'_> for BitfieldUnit {
    type Extra = ();

    /// Print each named bitfield as `name : {:?}` via its generated getter.
    /// A separator is emitted between every pair of bitfields, named or not.
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        _: Self::Extra,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
        let mut fmt = String::new();
        let mut exprs = vec![];
        for (idx, bitfield) in self.bitfields().iter().enumerate() {
            if idx != 0 {
                fmt.push_str(", ");
            }
            let Some(bitfield_name) = bitfield.name() else {
                // Anonymous bitfields have no getter, so nothing to print.
                continue;
            };
            let _ = write!(fmt, "{bitfield_name} : {{:?}}");
            let getter = ctx.rust_ident_raw(bitfield.getter_name());
            exprs.push(quote! {
                self.#getter ()
            });
        }
        Some((fmt, exprs))
    }
}
impl<'a> ImplDebug<'a> for Item {
    /// The name the field is printed under.
    type Extra = &'a str;
    fn impl_debug(
        &self,
        ctx: &BindgenContext,
        name: &str,
    ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
        let name_ident = ctx.rust_ident(name);
        // We don't know if blocklisted items `impl Debug` or not, so we can't
        // add them to the format string we're building up.
        if !ctx.allowlisted_items().contains(&self.id()) {
            return None;
        }
        let ty = self.as_type()?;
        // Common case: `name: {:?}` interpolated with `self.name`.
        fn debug_print(
            name: &str,
            name_ident: &proc_macro2::TokenStream,
        ) -> Option<(String, Vec<proc_macro2::TokenStream>)> {
            Some((
                format!("{name}: {{:?}}"),
                vec![quote! {
                    self.#name_ident
                }],
            ))
        }
        match *ty.kind() {
            // Handle the simple cases.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::Comp(..) |
            TypeKind::ObjCSel => debug_print(name, &quote! { #name_ident }),
            TypeKind::TemplateInstantiation(ref inst) => {
                // Opaque instantiations are emitted as blobs, which don't
                // implement `Debug`.
                if inst.is_opaque(ctx, self) {
                    Some((format!("{name}: opaque"), vec![]))
                } else {
                    debug_print(name, &quote! { #name_ident })
                }
            }
            // The generic is not required to implement Debug, so we can not debug print that type
            TypeKind::TypeParam => {
                Some((format!("{name}: Non-debuggable generic"), vec![]))
            }
            TypeKind::Array(_, len) => {
                // Generics are not required to implement Debug
                if self.has_type_param_in_array(ctx) {
                    Some((format!("{name}: Array with length {len}"), vec![]))
                } else if len < RUST_DERIVE_IN_ARRAY_LIMIT ||
                    ctx.options().rust_features().larger_arrays
                {
                    // The simple case
                    debug_print(name, &quote! { #name_ident })
                } else if ctx.options().use_core {
                    // There is no String in core; reducing field visibility to avoid breaking
                    // no_std setups.
                    Some((format!("{name}: [...]"), vec![]))
                } else {
                    // Let's implement our own print function
                    Some((
                        format!("{name}: [{{}}]"),
                        vec![quote! {{
                            use std::fmt::Write as _;
                            let mut output = String::new();
                            let mut iter = self.#name_ident.iter();
                            if let Some(value) = iter.next() {
                                let _ = write!(output, "{value:?}");
                                for value in iter {
                                    let _ = write!(output, ", {value:?}");
                                }
                            }
                            output
                        }}],
                    ))
                }
            }
            TypeKind::Vector(_, len) => {
                if ctx.options().use_core {
                    // There is no format! in core; reducing field visibility to avoid breaking
                    // no_std setups.
                    Some((format!("{name}(...)"), vec![]))
                } else {
                    // Print each lane `self.0 .. self.len-1` individually.
                    let self_ids = 0..len;
                    Some((
                        format!("{name}({{}})"),
                        vec![quote! {
                            #(format!("{:?}", self.#self_ids)),*
                        }],
                    ))
                }
            }
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                // We follow the aliases
                ctx.resolve_item(t).impl_debug(ctx, name)
            }
            TypeKind::Pointer(inner) => {
                let inner_type = ctx.resolve_type(inner).canonical_type(ctx);
                match *inner_type.kind() {
                    // Non-derivable function pointers (e.g. too many args)
                    // can't be `{:?}`-printed; print a marker instead.
                    TypeKind::Function(ref sig)
                        if !sig.function_pointers_can_derive() =>
                    {
                        Some((format!("{name}: FunctionPointer"), vec![]))
                    }
                    _ => debug_print(name, &quote! { #name_ident }),
                }
            }
            TypeKind::Opaque => None,
        }
    }
}

142
vendor/bindgen/codegen/impl_partialeq.rs vendored Normal file
View File

@@ -0,0 +1,142 @@
use crate::ir::comp::{CompInfo, CompKind, Field, FieldMethods};
use crate::ir::context::BindgenContext;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
/// Generate a manual implementation of `PartialEq` trait for the
/// specified compound type.
///
/// The generated `eq` body is the `&&`-join of one comparison per base
/// member, field, and named bitfield.
pub(crate) fn gen_partialeq_impl(
    ctx: &BindgenContext,
    comp_info: &CompInfo,
    item: &Item,
    ty_for_impl: &proc_macro2::TokenStream,
) -> Option<proc_macro2::TokenStream> {
    let mut tokens = vec![];
    if item.is_opaque(ctx, &()) {
        // Opaque types are compared by their raw storage blob.
        tokens.push(quote! {
            &self._bindgen_opaque_blob[..] == &other._bindgen_opaque_blob[..]
        });
    } else if comp_info.kind() == CompKind::Union {
        // Only bindgen-style unions (backed by a byte array) reach this
        // path; Rust `untagged_union`s must not.
        assert!(!ctx.options().untagged_union);
        tokens.push(quote! {
            &self.bindgen_union_field[..] == &other.bindgen_union_field[..]
        });
    } else {
        for base in comp_info.base_members() {
            // Bases without storage contribute no bytes, nothing to compare.
            if !base.requires_storage(ctx) {
                continue;
            }
            let ty_item = ctx.resolve_item(base.ty);
            let field_name = &base.field_name;
            if ty_item.is_opaque(ctx, &()) {
                // Opaque base: compare the underlying byte array.
                let field_name = ctx.rust_ident(field_name);
                tokens.push(quote! {
                    &self. #field_name [..] == &other. #field_name [..]
                });
            } else {
                tokens.push(gen_field(ctx, ty_item, field_name));
            }
        }
        for field in comp_info.fields() {
            match *field {
                Field::DataMember(ref fd) => {
                    let ty_item = ctx.resolve_item(fd.ty());
                    let name = fd.name().unwrap();
                    tokens.push(gen_field(ctx, ty_item, name));
                }
                Field::Bitfields(ref bu) => {
                    // Bitfields are compared through their getters; unnamed
                    // ones have no getter and are skipped.
                    for bitfield in bu.bitfields() {
                        if bitfield.name().is_some() {
                            let getter_name = bitfield.getter_name();
                            let name_ident = ctx.rust_ident_raw(getter_name);
                            tokens.push(quote! {
                                self.#name_ident () == other.#name_ident ()
                            });
                        }
                    }
                }
            }
        }
    }
    // NOTE(review): if `tokens` ends up empty the generated body is empty,
    // which would not typecheck — presumably callers never hit that case;
    // verify before relying on it.
    Some(quote! {
        fn eq(&self, other: & #ty_for_impl) -> bool {
            #( #tokens )&&*
        }
    })
}
/// Generate the equality comparison expression for a single field `name` of
/// type `ty_item`, choosing between direct `==` and slice comparison
/// depending on the type kind.
fn gen_field(
    ctx: &BindgenContext,
    ty_item: &Item,
    name: &str,
) -> proc_macro2::TokenStream {
    // Plain member-wise comparison.
    fn quote_equals(
        name_ident: &proc_macro2::Ident,
    ) -> proc_macro2::TokenStream {
        quote! { self.#name_ident == other.#name_ident }
    }
    let name_ident = ctx.rust_ident(name);
    let ty = ty_item.expect_type();
    match *ty.kind() {
        TypeKind::Void |
        TypeKind::NullPtr |
        TypeKind::Int(..) |
        TypeKind::Complex(..) |
        TypeKind::Float(..) |
        TypeKind::Enum(..) |
        TypeKind::TypeParam |
        TypeKind::UnresolvedTypeRef(..) |
        TypeKind::Reference(..) |
        TypeKind::ObjCInterface(..) |
        TypeKind::ObjCId |
        TypeKind::ObjCSel |
        TypeKind::Comp(..) |
        TypeKind::Pointer(_) |
        TypeKind::Function(..) |
        TypeKind::Opaque => quote_equals(&name_ident),
        TypeKind::TemplateInstantiation(ref inst) => {
            if inst.is_opaque(ctx, ty_item) {
                // Opaque instantiations are emitted as byte blobs; compare
                // them as slices.
                quote! {
                    &self. #name_ident [..] == &other. #name_ident [..]
                }
            } else {
                quote_equals(&name_ident)
            }
        }
        TypeKind::Array(_, len) => {
            // Large arrays only implement `PartialEq` via slices unless the
            // target Rust supports larger-array impls.
            if len <= RUST_DERIVE_IN_ARRAY_LIMIT ||
                ctx.options().rust_features().larger_arrays
            {
                quote_equals(&name_ident)
            } else {
                quote! {
                    &self. #name_ident [..] == &other. #name_ident [..]
                }
            }
        }
        TypeKind::Vector(_, len) => {
            // SIMD vectors: compare lane by lane.
            let self_ids = 0..len;
            let other_ids = 0..len;
            quote! {
                #(self.#self_ids == other.#other_ids &&)* true
            }
        }
        TypeKind::ResolvedTypeRef(t) |
        TypeKind::TemplateAlias(t, _) |
        TypeKind::Alias(t) |
        TypeKind::BlockPointer(t) => {
            // Follow aliases down to the underlying type.
            let inner_item = ctx.resolve_item(t);
            gen_field(ctx, inner_item, name)
        }
    }
}

5991
vendor/bindgen/codegen/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,72 @@
use syn::{
visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut},
File, Item, ItemForeignMod, ItemMod,
};
/// Merge `extern` blocks that share the same ABI and attributes, at the file
/// top level and inside every inline module.
pub(super) fn merge_extern_blocks(file: &mut File) {
    Visitor.visit_file_mut(file);
}
/// Stateless visitor that applies `visit_items` to the file root and to the
/// contents of every inline module.
struct Visitor;
impl VisitMut for Visitor {
    fn visit_file_mut(&mut self, file: &mut File) {
        visit_items(&mut file.items);
        // Recurse so nested modules are processed too.
        visit_file_mut(self, file);
    }
    fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) {
        // Only inline modules (`mod foo { ... }`) carry content.
        if let Some((_, ref mut items)) = item_mod.content {
            visit_items(items);
        }
        visit_item_mod_mut(self, item_mod);
    }
}
/// Merge all `extern` blocks in `items` that share the same attributes and
/// ABI into a single block each, moving merged blocks to the end of `items`.
fn visit_items(items: &mut Vec<Item>) {
    // Keep all the extern blocks in a different `Vec` for faster search.
    let mut extern_blocks = Vec::<ItemForeignMod>::new();
    for item in std::mem::take(items) {
        if let Item::ForeignMod(foreign_mod) = item {
            // Check if there is an extern block with the same ABI and
            // attributes.
            let existing = extern_blocks.iter().position(|block| {
                block.attrs == foreign_mod.attrs && block.abi == foreign_mod.abi
            });
            if let Some(idx) = existing {
                // Merge the items of the two blocks. `extend` moves the
                // foreign items instead of cloning them (the original
                // `extend_from_slice` cloned every item needlessly).
                extern_blocks[idx].items.extend(foreign_mod.items);
            } else {
                // No existing extern block had the same ABI and attributes;
                // store this one.
                extern_blocks.push(foreign_mod);
            }
        } else {
            // If the item is not an extern block, we don't have to do anything and just
            // push it back.
            items.push(item);
        }
    }
    // Move all the extern blocks alongside the rest of the items.
    items.extend(extern_blocks.into_iter().map(Item::ForeignMod));
}

View File

@@ -0,0 +1,57 @@
use proc_macro2::TokenStream;
use quote::ToTokens;
use syn::{parse2, File};
use crate::BindgenOptions;
mod merge_extern_blocks;
mod sort_semantically;
use merge_extern_blocks::merge_extern_blocks;
use sort_semantically::sort_semantically;
/// A single post-processing step: a predicate deciding whether the step is
/// enabled for the given options, plus the transformation itself.
struct PostProcessingPass {
    should_run: fn(&BindgenOptions) -> bool,
    run: fn(&mut File),
}
// TODO: This can be a const fn when mutable references are allowed in const
// context.
/// Build a `PostProcessingPass` from a function whose name doubles as the
/// boolean `BindgenOptions` field enabling it.
macro_rules! pass {
    ($pass:ident) => {
        PostProcessingPass {
            should_run: |options| options.$pass,
            run: |file| $pass(file),
        }
    };
}
// All available passes, in the order they are applied.
const PASSES: &[PostProcessingPass] =
    &[pass!(merge_extern_blocks), pass!(sort_semantically)];
/// Run every enabled post-processing pass over the generated items and
/// return the resulting token stream.
///
/// If no pass is enabled, the items are returned unchanged without the
/// expensive re-parse through `syn`.
pub(crate) fn postprocessing(
    items: Vec<TokenStream>,
    options: &BindgenOptions,
) -> TokenStream {
    let items = items.into_iter().collect();
    let require_syn = PASSES.iter().any(|pass| (pass.should_run)(options));
    if !require_syn {
        return items;
    }
    // This syn business is a hack, for now. This means that we are re-parsing already
    // generated code using `syn` (as opposed to `quote`) because `syn` provides us more
    // control over the elements.
    // The `unwrap` here is deliberate because bindgen should generate valid rust items at all
    // times.
    let mut file = parse2::<File>(items).unwrap();
    for pass in PASSES {
        if (pass.should_run)(options) {
            (pass.run)(&mut file);
        }
    }
    file.into_token_stream()
}

View File

@@ -0,0 +1,46 @@
use syn::{
visit_mut::{visit_file_mut, visit_item_mod_mut, VisitMut},
File, Item, ItemMod,
};
/// Sort items by kind (types first, then structs, consts, functions, ...) at
/// the file top level and inside every inline module.
pub(super) fn sort_semantically(file: &mut File) {
    Visitor.visit_file_mut(file);
}
/// Stateless visitor that applies `visit_items` to the file root and to the
/// contents of every inline module.
struct Visitor;
impl VisitMut for Visitor {
    fn visit_file_mut(&mut self, file: &mut File) {
        visit_items(&mut file.items);
        // Recurse so nested modules are processed too.
        visit_file_mut(self, file);
    }
    fn visit_item_mod_mut(&mut self, item_mod: &mut ItemMod) {
        // Only inline modules (`mod foo { ... }`) carry content.
        if let Some((_, ref mut items)) = item_mod.content {
            visit_items(items);
        }
        visit_item_mod_mut(self, item_mod);
    }
}
/// Reorder `items` by item kind, keeping the original relative order within
/// each kind (`sort_by_key` is a stable sort).
fn visit_items(items: &mut [Item]) {
    // Lower rank sorts earlier; any kind not listed sorts last.
    fn sort_rank(item: &Item) -> u8 {
        match item {
            Item::Type(_) => 0,
            Item::Struct(_) => 1,
            Item::Const(_) => 2,
            Item::Fn(_) => 3,
            Item::Enum(_) => 4,
            Item::Union(_) => 5,
            Item::Static(_) => 6,
            Item::Trait(_) => 7,
            Item::TraitAlias(_) => 8,
            Item::Impl(_) => 9,
            Item::Mod(_) => 10,
            Item::Use(_) => 11,
            Item::Verbatim(_) => 12,
            Item::ExternCrate(_) => 13,
            Item::ForeignMod(_) => 14,
            Item::Macro(_) => 15,
            _ => 18,
        }
    }
    items.sort_by_key(sort_rank);
}

443
vendor/bindgen/codegen/serialize.rs vendored Normal file
View File

@@ -0,0 +1,443 @@
use std::io::Write;
use crate::callbacks::IntKind;
use crate::ir::comp::CompKind;
use crate::ir::context::{BindgenContext, TypeId};
use crate::ir::function::{Function, FunctionKind};
use crate::ir::item::Item;
use crate::ir::item::ItemCanonicalName;
use crate::ir::item_kind::ItemKind;
use crate::ir::ty::{FloatKind, Type, TypeKind};
use super::{CodegenError, WrapAsVariadic};
/// Best-effort source location of `item` for error messages; `"unknown"`
/// when the item carries no location.
fn get_loc(item: &Item) -> String {
    match item.location() {
        Some(loc) => loc.to_string(),
        None => "unknown".to_owned(),
    }
}
/// Things that can be written out as C source code.
pub(super) trait CSerialize<'a> {
    /// Extra data the serializer needs (e.g. the `Item` owning the value).
    type Extra;
    /// Write `self` as C code to `writer`.
    ///
    /// `stack` collects declarator fragments (pointer stars, argument names)
    /// that must be emitted after the base type has been written.
    fn serialize<W: Write>(
        &self,
        ctx: &BindgenContext,
        extra: Self::Extra,
        stack: &mut Vec<String>,
        writer: &mut W,
    ) -> Result<(), CodegenError>;
}
impl<'a> CSerialize<'a> for Item {
    type Extra = &'a Option<WrapAsVariadic>;
    fn serialize<W: Write>(
        &self,
        ctx: &BindgenContext,
        extra: Self::Extra,
        stack: &mut Vec<String>,
        writer: &mut W,
    ) -> Result<(), CodegenError> {
        // Only function items can be serialized; anything else is an error.
        match self.kind() {
            ItemKind::Function(func) => {
                func.serialize(ctx, (self, extra), stack, writer)
            }
            kind => Err(CodegenError::Serialize {
                msg: format!("Cannot serialize item kind {kind:?}"),
                loc: get_loc(self),
            }),
        }
    }
}
impl<'a> CSerialize<'a> for Function {
    type Extra = (&'a Item, &'a Option<WrapAsVariadic>);
    /// Emit a C wrapper function `NAME__extern_suffix(...)` that forwards to
    /// `NAME`. When `wrap_as_variadic` is set, the pruned `va_list` argument
    /// is replaced by `...` and re-materialized via `va_start`/`va_end`.
    fn serialize<W: Write>(
        &self,
        ctx: &BindgenContext,
        (item, wrap_as_variadic): Self::Extra,
        stack: &mut Vec<String>,
        writer: &mut W,
    ) -> Result<(), CodegenError> {
        if self.kind() != FunctionKind::Function {
            return Err(CodegenError::Serialize {
                msg: format!(
                    "Cannot serialize function kind {:?}",
                    self.kind(),
                ),
                loc: get_loc(item),
            });
        }
        let TypeKind::Function(signature) =
            ctx.resolve_type(self.signature()).kind()
        else {
            unreachable!()
        };
        assert!(!signature.is_variadic());
        let name = self.name();
        // Function arguments stored as `(name, type_id)` tuples.
        let args = {
            let mut count = 0;
            // Index of the `va_list` argument to drop, if wrapping variadic.
            let idx_to_prune = wrap_as_variadic.as_ref().map(
                |WrapAsVariadic {
                     idx_of_va_list_arg, ..
                 }| *idx_of_va_list_arg,
            );
            signature
                .argument_types()
                .iter()
                .cloned()
                .enumerate()
                .filter_map(|(idx, (opt_name, type_id))| {
                    if Some(idx) == idx_to_prune {
                        None
                    } else {
                        Some((
                            // Unnamed parameters get synthetic `arg_N` names.
                            opt_name.unwrap_or_else(|| {
                                let name = format!("arg_{count}");
                                count += 1;
                                name
                            }),
                            type_id,
                        ))
                    }
                })
                .collect::<Vec<_>>()
        };
        // The name used for the wrapper self.
        let wrap_name = format!("{name}{}", ctx.wrap_static_fns_suffix());
        // The function's return type
        let (ret_item, ret_ty) = {
            let type_id = signature.return_type();
            let ret_item = ctx.resolve_item(type_id);
            let ret_ty = ret_item.expect_type();
            // Write `ret_ty`.
            ret_ty.serialize(ctx, ret_item, stack, writer)?;
            (ret_item, ret_ty)
        };
        const INDENT: &str = "    ";
        // Write `wrap_name(args`.
        write!(writer, " {wrap_name}(")?;
        serialize_args(&args, ctx, writer)?;
        if wrap_as_variadic.is_none() {
            // Write `) { name(` if the function returns void and `) { return name(` if it does not.
            if ret_ty.is_void() {
                write!(writer, ") {{ {name}(")?;
            } else {
                write!(writer, ") {{ return {name}(")?;
            }
        } else {
            // Write `, ...) {`
            writeln!(writer, ", ...) {{")?;
            // Declare the return type `RET_TY ret;` if their is a need to do so
            if !ret_ty.is_void() {
                write!(writer, "{INDENT}")?;
                ret_ty.serialize(ctx, ret_item, stack, writer)?;
                writeln!(writer, " ret;")?;
            }
            // Setup va_list
            writeln!(writer, "{INDENT}va_list ap;\n")?;
            writeln!(
                writer,
                "{INDENT}va_start(ap, {});",
                args.last().unwrap().0
            )?;
            write!(writer, "{INDENT}")?;
            // Write `ret = name(` or `name(` depending if the function returns something
            if !ret_ty.is_void() {
                write!(writer, "ret = ")?;
            }
            write!(writer, "{name}(")?;
        }
        // Get the arguments names and insert at the right place if necessary `ap`
        let mut args: Vec<_> = args.into_iter().map(|(name, _)| name).collect();
        if let Some(WrapAsVariadic {
            idx_of_va_list_arg, ..
        }) = wrap_as_variadic
        {
            args.insert(*idx_of_va_list_arg, "ap".to_owned());
        }
        // Write `arg_names);`.
        serialize_sep(", ", args.iter(), ctx, writer, |name, _, buf| {
            write!(buf, "{name}").map_err(From::from)
        })?;
        #[rustfmt::skip]
        write!(writer, ");{}", if wrap_as_variadic.is_none() { " " } else { "\n" })?;
        if wrap_as_variadic.is_some() {
            // End va_list and return the result if their is one
            writeln!(writer, "{INDENT}va_end(ap);")?;
            if !ret_ty.is_void() {
                writeln!(writer, "{INDENT}return ret;")?;
            }
        }
        writeln!(writer, "}}")?;
        Ok(())
    }
}
impl CSerialize<'_> for TypeId {
    type Extra = ();

    /// Resolve the id to its item and delegate to the type's serializer.
    fn serialize<W: Write>(
        &self,
        ctx: &BindgenContext,
        (): Self::Extra,
        stack: &mut Vec<String>,
        writer: &mut W,
    ) -> Result<(), CodegenError> {
        let item = ctx.resolve_item(*self);
        let ty = item.expect_type();
        ty.serialize(ctx, item, stack, writer)
    }
}
impl<'a> CSerialize<'a> for Type {
    type Extra = &'a Item;
    /// Write this type as C source. Pointer stars and argument names are
    /// pushed onto `stack` and flushed (in reverse push order) after the
    /// base type, mimicking C declarator syntax.
    fn serialize<W: Write>(
        &self,
        ctx: &BindgenContext,
        item: Self::Extra,
        stack: &mut Vec<String>,
        writer: &mut W,
    ) -> Result<(), CodegenError> {
        match self.kind() {
            TypeKind::Void => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                write!(writer, "void")?;
            }
            TypeKind::NullPtr => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                write!(writer, "nullptr_t")?;
            }
            TypeKind::Int(int_kind) => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                match int_kind {
                    IntKind::Bool => write!(writer, "bool")?,
                    IntKind::SChar => write!(writer, "signed char")?,
                    IntKind::UChar => write!(writer, "unsigned char")?,
                    IntKind::WChar => write!(writer, "wchar_t")?,
                    IntKind::Short => write!(writer, "short")?,
                    IntKind::UShort => write!(writer, "unsigned short")?,
                    IntKind::Int => write!(writer, "int")?,
                    IntKind::UInt => write!(writer, "unsigned int")?,
                    IntKind::Long => write!(writer, "long")?,
                    IntKind::ULong => write!(writer, "unsigned long")?,
                    IntKind::LongLong => write!(writer, "long long")?,
                    IntKind::ULongLong => write!(writer, "unsigned long long")?,
                    IntKind::Char { .. } => write!(writer, "char")?,
                    // Remaining integer kinds have no C spelling here.
                    int_kind => {
                        return Err(CodegenError::Serialize {
                            msg: format!(
                                "Cannot serialize integer kind {int_kind:?}"
                            ),
                            loc: get_loc(item),
                        })
                    }
                }
            }
            TypeKind::Float(float_kind) => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                match float_kind {
                    FloatKind::Float16 => write!(writer, "_Float16")?,
                    FloatKind::Float => write!(writer, "float")?,
                    FloatKind::Double => write!(writer, "double")?,
                    FloatKind::LongDouble => write!(writer, "long double")?,
                    FloatKind::Float128 => write!(writer, "__float128")?,
                }
            }
            TypeKind::Complex(float_kind) => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                match float_kind {
                    FloatKind::Float16 => write!(writer, "_Float16 complex")?,
                    FloatKind::Float => write!(writer, "float complex")?,
                    FloatKind::Double => write!(writer, "double complex")?,
                    FloatKind::LongDouble => {
                        write!(writer, "long double complex")?;
                    }
                    FloatKind::Float128 => write!(writer, "__complex128")?,
                }
            }
            TypeKind::Alias(type_id) => {
                // Prefer the alias' own name; fall back to the aliased type.
                if let Some(name) = self.name() {
                    if self.is_const() {
                        write!(writer, "const {name}")?;
                    } else {
                        write!(writer, "{name}")?;
                    }
                } else {
                    type_id.serialize(ctx, (), stack, writer)?;
                }
            }
            TypeKind::Array(type_id, length) => {
                type_id.serialize(ctx, (), stack, writer)?;
                write!(writer, " [{length}]")?;
            }
            TypeKind::Function(signature) => {
                if self.is_const() {
                    stack.push("const ".to_string());
                }
                // Return type first; its own declarator fragments must not
                // leak into ours, hence the fresh stack.
                signature.return_type().serialize(
                    ctx,
                    (),
                    &mut vec![],
                    writer,
                )?;
                // Flush pending fragments inside the parenthesized
                // declarator: `RET (frags) (args)`.
                write!(writer, " (")?;
                while let Some(item) = stack.pop() {
                    write!(writer, "{item}")?;
                }
                write!(writer, ")")?;
                let args = signature.argument_types();
                if args.is_empty() {
                    write!(writer, " (void)")?;
                } else {
                    write!(writer, " (")?;
                    serialize_sep(
                        ", ",
                        args.iter(),
                        ctx,
                        writer,
                        |(name, type_id), ctx, buf| {
                            // Each argument gets its own declarator stack,
                            // seeded with its name (if any).
                            let mut stack = vec![];
                            if let Some(name) = name {
                                stack.push(name.clone());
                            }
                            type_id.serialize(ctx, (), &mut stack, buf)
                        },
                    )?;
                    write!(writer, ")")?;
                }
            }
            TypeKind::ResolvedTypeRef(type_id) => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                type_id.serialize(ctx, (), stack, writer)?;
            }
            TypeKind::Pointer(type_id) => {
                // The star is a declarator fragment: emitted after the
                // pointee's base type.
                if self.is_const() {
                    stack.push("*const ".to_owned());
                } else {
                    stack.push("*".to_owned());
                }
                type_id.serialize(ctx, (), stack, writer)?;
            }
            TypeKind::Comp(comp_info) => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                let name = item.canonical_name(ctx);
                match comp_info.kind() {
                    CompKind::Struct => write!(writer, "struct {name}")?,
                    CompKind::Union => write!(writer, "union {name}")?,
                }
            }
            TypeKind::Enum(_enum_ty) => {
                if self.is_const() {
                    write!(writer, "const ")?;
                }
                let name = item.canonical_name(ctx);
                write!(writer, "enum {name}")?;
            }
            ty => {
                return Err(CodegenError::Serialize {
                    msg: format!("Cannot serialize type kind {ty:?}"),
                    loc: get_loc(item),
                })
            }
        }
        // Flush any remaining declarator fragments (names, stars) after the
        // base type, in reverse push order.
        if !stack.is_empty() {
            write!(writer, " ")?;
            while let Some(item) = stack.pop() {
                write!(writer, "{item}")?;
            }
        }
        Ok(())
    }
}
/// Write a comma-separated C parameter list (`void` when empty), with each
/// argument's name seeded into its declarator stack.
fn serialize_args<W: Write>(
    args: &[(String, TypeId)],
    ctx: &BindgenContext,
    writer: &mut W,
) -> Result<(), CodegenError> {
    if args.is_empty() {
        // C spelling for an empty, fixed-arity parameter list.
        write!(writer, "void")?;
    } else {
        serialize_sep(
            ", ",
            args.iter(),
            ctx,
            writer,
            |(name, type_id), ctx, buf| {
                type_id.serialize(ctx, (), &mut vec![name.clone()], buf)
            },
        )?;
    }
    Ok(())
}
/// Apply `f` to every element of `iter`, writing `sep` between consecutive
/// elements (and nothing for an empty iterator).
fn serialize_sep<
    W: Write,
    F: FnMut(I::Item, &BindgenContext, &mut W) -> Result<(), CodegenError>,
    I: Iterator,
>(
    sep: &str,
    iter: I,
    ctx: &BindgenContext,
    buf: &mut W,
    mut f: F,
) -> Result<(), CodegenError> {
    let sep_bytes = sep.as_bytes();
    for (idx, item) in iter.enumerate() {
        // Separator goes before every element except the first.
        if idx != 0 {
            buf.write_all(sep_bytes)?;
        }
        f(item, ctx, buf)?;
    }
    Ok(())
}

458
vendor/bindgen/codegen/struct_layout.rs vendored Normal file
View File

@@ -0,0 +1,458 @@
//! Helpers for code generation that need struct layout
use super::helpers;
use crate::ir::comp::CompInfo;
use crate::ir::context::BindgenContext;
use crate::ir::layout::Layout;
use crate::ir::ty::{Type, TypeKind};
use crate::FieldVisibilityKind;
use proc_macro2::{Ident, Span};
use std::cmp;
// Largest alignment (in bytes) treated as reliably expressible without an
// explicit `repr(align)`; see `requires_explicit_align` below.
const MAX_GUARANTEED_ALIGN: usize = 8;
/// Trace the layout of struct.
#[derive(Debug)]
pub(crate) struct StructLayoutTracker<'a> {
    /// Name of the composite type being tracked (used in log messages).
    name: &'a str,
    ctx: &'a BindgenContext,
    comp: &'a CompInfo,
    /// Whether the type is packed (no implicit inter-field padding).
    is_packed: bool,
    /// The layout reported for the whole type, if known.
    known_type_layout: Option<Layout>,
    is_rust_union: bool,
    can_copy_union_fields: bool,
    /// Byte offset reached so far while walking the fields.
    latest_offset: usize,
    /// Number of padding fields generated so far (used to name the next one).
    padding_count: usize,
    /// Layout of the most recently processed field, if any.
    latest_field_layout: Option<Layout>,
    /// Largest field alignment seen so far.
    max_field_align: usize,
    last_field_was_bitfield: bool,
    /// Visibility applied to generated padding fields.
    visibility: FieldVisibilityKind,
    last_field_was_flexible_array: bool,
}
/// Returns a size aligned to a given value.
pub(crate) fn align_to(size: usize, align: usize) -> usize {
if align == 0 {
return size;
}
let rem = size % align;
if rem == 0 {
return size;
}
size + align - rem
}
/// Returns the lower power of two byte count that can hold at most n bits.
pub(crate) fn bytes_from_bits_pow2(mut n: usize) -> usize {
if n == 0 {
return 0;
}
if n <= 8 {
return 1;
}
if !n.is_power_of_two() {
n = n.next_power_of_two();
}
n / 8
}
// Sanity checks for `align_to`, including the non-multiple case (17 -> 20).
#[test]
fn test_align_to() {
    assert_eq!(align_to(1, 1), 1);
    assert_eq!(align_to(1, 2), 2);
    assert_eq!(align_to(1, 4), 4);
    assert_eq!(align_to(5, 1), 5);
    assert_eq!(align_to(17, 4), 20);
}
// Exhaustively checks the byte count for every bit count up to 32.
#[test]
fn test_bytes_from_bits_pow2() {
    assert_eq!(bytes_from_bits_pow2(0), 0);
    for i in 1..9 {
        assert_eq!(bytes_from_bits_pow2(i), 1);
    }
    for i in 9..17 {
        assert_eq!(bytes_from_bits_pow2(i), 2);
    }
    for i in 17..33 {
        assert_eq!(bytes_from_bits_pow2(i), 4);
    }
}
impl<'a> StructLayoutTracker<'a> {
    /// Create a tracker for `comp`/`ty`, starting at offset zero.
    pub(crate) fn new(
        ctx: &'a BindgenContext,
        comp: &'a CompInfo,
        ty: &'a Type,
        name: &'a str,
        visibility: FieldVisibilityKind,
        is_packed: bool,
    ) -> Self {
        let known_type_layout = ty.layout(ctx);
        let (is_rust_union, can_copy_union_fields) =
            comp.is_rust_union(ctx, known_type_layout.as_ref(), name);
        StructLayoutTracker {
            name,
            ctx,
            comp,
            visibility,
            is_packed,
            known_type_layout,
            is_rust_union,
            can_copy_union_fields,
            latest_offset: 0,
            padding_count: 0,
            latest_field_layout: None,
            max_field_align: 0,
            last_field_was_bitfield: false,
            last_field_was_flexible_array: false,
        }
    }
    pub(crate) fn can_copy_union_fields(&self) -> bool {
        self.can_copy_union_fields
    }
    pub(crate) fn is_rust_union(&self) -> bool {
        self.is_rust_union
    }
    /// Record that the last field seen was a flexible array member.
    pub(crate) fn saw_flexible_array(&mut self) {
        self.last_field_was_flexible_array = true;
    }
    /// Account for a vtable pointer at the start of the struct.
    pub(crate) fn saw_vtable(&mut self) {
        debug!("saw vtable for {}", self.name);
        let ptr_size = self.ctx.target_pointer_size();
        self.latest_offset += ptr_size;
        self.latest_field_layout = Some(Layout::new(ptr_size, ptr_size));
        self.max_field_align = ptr_size;
    }
    /// Account for a base-class subobject, if its layout is known.
    pub(crate) fn saw_base(&mut self, base_ty: &Type) {
        debug!("saw base for {}", self.name);
        if let Some(layout) = base_ty.layout(self.ctx) {
            self.align_to_latest_field(layout);
            self.latest_offset += self.padding_bytes(layout) + layout.size;
            self.latest_field_layout = Some(layout);
            self.max_field_align = cmp::max(self.max_field_align, layout.align);
        }
    }
    /// Account for a bitfield allocation unit with the given layout.
    pub(crate) fn saw_bitfield_unit(&mut self, layout: Layout) {
        debug!("saw bitfield unit for {}: {layout:?}", self.name);
        self.align_to_latest_field(layout);
        self.latest_offset += layout.size;
        debug!(
            "Offset: <bitfield>: {} -> {}",
            self.latest_offset - layout.size,
            self.latest_offset
        );
        self.latest_field_layout = Some(layout);
        self.last_field_was_bitfield = true;
        self.max_field_align = cmp::max(self.max_field_align, layout.align);
    }
    /// Returns a padding field if necessary for a given new field _before_
    /// adding that field.
    pub(crate) fn saw_field(
        &mut self,
        field_name: &str,
        field_ty: &Type,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let mut field_layout = field_ty.layout(self.ctx)?;
        if let TypeKind::Array(inner, len) =
            *field_ty.canonical_type(self.ctx).kind()
        {
            // FIXME(emilio): As an _ultra_ hack, we correct the layout returned
            // by arrays of structs that have a bigger alignment than what we
            // can support.
            //
            // This means that the structs in the array are super-unsafe to
            // access, since they won't be properly aligned, but there's not too
            // much we can do about it.
            if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
            {
                if layout.align > MAX_GUARANTEED_ALIGN {
                    field_layout.size =
                        align_to(layout.size, layout.align) * len;
                    field_layout.align = MAX_GUARANTEED_ALIGN;
                }
            }
        }
        self.saw_field_with_layout(field_name, field_layout, field_offset)
    }
    /// Like `saw_field`, but with the field layout already computed. Returns
    /// a padding field to insert before this field, if one is needed.
    pub(crate) fn saw_field_with_layout(
        &mut self,
        field_name: &str,
        field_layout: Layout,
        field_offset: Option<usize>,
    ) -> Option<proc_macro2::TokenStream> {
        let will_merge_with_bitfield = self.align_to_latest_field(field_layout);
        let is_union = self.comp.is_union();
        // How many bytes of padding this field needs in front of it:
        // trust Clang's bit offset when it is ahead of our own bookkeeping,
        // otherwise derive it from the alignment rules.
        let padding_bytes = match field_offset {
            Some(offset) if offset / 8 > self.latest_offset => {
                offset / 8 - self.latest_offset
            }
            _ => {
                if will_merge_with_bitfield ||
                    field_layout.align == 0 ||
                    is_union
                {
                    0
                } else if !self.is_packed {
                    self.padding_bytes(field_layout)
                } else if let Some(mut l) = self.known_type_layout {
                    if field_layout.align < l.align {
                        l.align = field_layout.align;
                    }
                    self.padding_bytes(l)
                } else {
                    0
                }
            }
        };
        self.latest_offset += padding_bytes;
        let padding_layout = if self.is_packed || is_union {
            None
        } else {
            let force_padding = self.ctx.options().force_explicit_padding;
            // Otherwise the padding is useless.
            let need_padding = force_padding ||
                padding_bytes >= field_layout.align ||
                field_layout.align > MAX_GUARANTEED_ALIGN;
            debug!(
                "Offset: <padding>: {} -> {}",
                self.latest_offset - padding_bytes,
                self.latest_offset
            );
            debug!(
                "align field {field_name} to {}/{} with {padding_bytes} padding bytes {field_layout:?}",
                self.latest_offset,
                field_offset.unwrap_or(0) / 8,
            );
            let padding_align = if force_padding {
                1
            } else {
                cmp::min(field_layout.align, MAX_GUARANTEED_ALIGN)
            };
            if need_padding && padding_bytes != 0 {
                Some(Layout::new(padding_bytes, padding_align))
            } else {
                None
            }
        };
        if is_union {
            // Union fields overlap; track only the widest one.
            self.latest_offset =
                cmp::max(self.latest_offset, field_layout.size);
        } else {
            self.latest_offset += field_layout.size;
        }
        self.latest_field_layout = Some(field_layout);
        self.max_field_align =
            cmp::max(self.max_field_align, field_layout.align);
        self.last_field_was_bitfield = false;
        debug!(
            "Offset: {field_name}: {} -> {}",
            self.latest_offset - field_layout.size,
            self.latest_offset
        );
        padding_layout.map(|layout| self.padding_field(layout))
    }
    /// Emit trailing padding up to the type's total size, when explicit
    /// padding is requested and applicable.
    pub(crate) fn add_tail_padding(
        &mut self,
        comp_name: &str,
        comp_layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        // Only emit an padding field at the end of a struct if the
        // user configures explicit padding.
        if !self.ctx.options().force_explicit_padding {
            return None;
        }
        // Padding doesn't make sense for rust unions.
        if self.is_rust_union {
            return None;
        }
        // Also doesn't make sense for structs with flexible array members
        if self.last_field_was_flexible_array {
            return None;
        }
        if self.latest_offset == comp_layout.size {
            // This struct does not contain tail padding.
            return None;
        }
        trace!(
            "need a tail padding field for {comp_name}: offset {} -> size {}",
            self.latest_offset,
            comp_layout.size
        );
        let size = comp_layout.size - self.latest_offset;
        Some(self.padding_field(Layout::new(size, 0)))
    }
    /// Pad the struct up to its expected total `layout.size`, if needed.
    pub(crate) fn pad_struct(
        &mut self,
        layout: Layout,
    ) -> Option<proc_macro2::TokenStream> {
        debug!("pad_struct:\n\tself = {self:#?}\n\tlayout = {layout:#?}");
        if layout.size < self.latest_offset {
            warn!(
                "Calculated wrong layout for {}, too more {} bytes",
                self.name,
                self.latest_offset - layout.size
            );
            return None;
        }
        let padding_bytes = layout.size - self.latest_offset;
        if padding_bytes == 0 {
            return None;
        }
        let repr_align = true;
        // We always pad to get to the correct size if the struct is one of
        // those we can't align properly.
        //
        // Note that if the last field we saw was a bitfield, we may need to pad
        // regardless, because bitfields don't respect alignment as strictly as
        // other fields.
        if padding_bytes >= layout.align ||
            (self.last_field_was_bitfield &&
                padding_bytes >= self.latest_field_layout.unwrap().align) ||
            (!repr_align && layout.align > MAX_GUARANTEED_ALIGN)
        {
            let layout = if self.is_packed {
                Layout::new(padding_bytes, 1)
            } else if self.last_field_was_bitfield ||
                layout.align > MAX_GUARANTEED_ALIGN
            {
                // We've already given up on alignment here.
                Layout::for_size(self.ctx, padding_bytes)
            } else {
                Layout::new(padding_bytes, layout.align)
            };
            debug!("pad bytes to struct {}, {layout:?}", self.name);
            Some(self.padding_field(layout))
        } else {
            None
        }
    }
    /// Whether the generated type needs an explicit `repr(align)` attribute.
    pub(crate) fn requires_explicit_align(&self, layout: Layout) -> bool {
        let repr_align = true;
        // Always force explicit repr(align) for stuff more than 16-byte aligned
        // to work-around https://github.com/rust-lang/rust/issues/54341.
        //
        // Worst-case this just generates redundant alignment attributes.
        if repr_align && self.max_field_align >= 16 {
            return true;
        }
        if self.max_field_align >= layout.align {
            return false;
        }
        // We can only generate up-to a 8-bytes of alignment unless we support
        // repr(align).
        repr_align || layout.align <= MAX_GUARANTEED_ALIGN
    }
    /// Bytes needed to align the current offset to `layout.align`.
    fn padding_bytes(&self, layout: Layout) -> usize {
        align_to(self.latest_offset, layout.align) - self.latest_offset
    }
    /// Build a `__bindgen_padding_N` field of the given layout.
    fn padding_field(&mut self, layout: Layout) -> proc_macro2::TokenStream {
        let ty = helpers::blob(self.ctx, layout, false);
        let padding_count = self.padding_count;
        self.padding_count += 1;
        let padding_field_name = Ident::new(
            &format!("__bindgen_padding_{padding_count}"),
            Span::call_site(),
        );
        self.max_field_align = cmp::max(self.max_field_align, layout.align);
        let vis = super::access_specifier(self.visibility);
        quote! {
            #vis #padding_field_name : #ty ,
        }
    }
    /// Returns whether the new field is known to merge with a bitfield.
    ///
    /// This is just to avoid doing the same check also in `pad_field`.
    fn align_to_latest_field(&mut self, new_field_layout: Layout) -> bool {
        if self.is_packed {
            // Skip to align fields when packed.
            return false;
        }
        let Some(layout) = self.latest_field_layout else {
            return false;
        };
        // If it was, we may or may not need to align, depending on what the
        // current field alignment and the bitfield size and alignment are.
        debug!(
            "align_to_bitfield? {}: {layout:?} {new_field_layout:?}",
            self.last_field_was_bitfield,
        );
        // Avoid divide-by-zero errors if align is 0.
        let align = cmp::max(1, layout.align);
        if self.last_field_was_bitfield &&
            new_field_layout.align <= layout.size % align &&
            new_field_layout.size <= layout.size % align
        {
            // The new field will be coalesced into some of the remaining bits.
            //
            // FIXME(emilio): I think this may not catch everything?
            debug!("Will merge with bitfield");
            return true;
        }
        // Else, just align the obvious way.
        self.latest_offset += self.padding_bytes(layout);
        false
    }
}

61
vendor/bindgen/deps.rs vendored Normal file
View File

@@ -0,0 +1,61 @@
/// Generating build depfiles from parsed bindings.
use std::{collections::BTreeSet, path::PathBuf};
#[derive(Clone, Debug)]
pub(crate) struct DepfileSpec {
    /// Name of the output module; used as the depfile's make target.
    pub output_module: String,
    /// Path the depfile is written to.
    pub depfile_path: PathBuf,
}
impl DepfileSpec {
pub fn write(&self, deps: &BTreeSet<Box<str>>) -> std::io::Result<()> {
std::fs::write(&self.depfile_path, self.to_string(deps))
}
fn to_string(&self, deps: &BTreeSet<Box<str>>) -> String {
// Transforms a string by escaping spaces and backslashes.
let escape = |s: &str| s.replace('\\', "\\\\").replace(' ', "\\ ");
let mut buf = format!("{}:", escape(&self.output_module));
for file in deps {
buf = format!("{buf} {}", escape(file));
}
buf
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn escaping_depfile() {
        let spec = DepfileSpec {
            output_module: "Mod Name".to_owned(),
            depfile_path: PathBuf::new(),
        };
        // `BTreeSet` iterates in sorted order, which is why the expected
        // output below is not in insertion order.
        let deps: BTreeSet<_> = vec![
            r"/absolute/path".into(),
            r"C:\win\absolute\path".into(),
            r"../relative/path".into(),
            r"..\win\relative\path".into(),
            r"../path/with spaces/in/it".into(),
            r"..\win\path\with spaces\in\it".into(),
            r"path\with/mixed\separators".into(),
        ]
        .into_iter()
        .collect();
        // Spaces escape to `\ ` and backslashes to `\\` (doubled again in the
        // Rust string literal below).
        assert_eq!(
            spec.to_string(&deps),
            "Mod\\ Name: \
            ../path/with\\ spaces/in/it \
            ../relative/path \
            ..\\\\win\\\\path\\\\with\\ spaces\\\\in\\\\it \
            ..\\\\win\\\\relative\\\\path \
            /absolute/path \
            C:\\\\win\\\\absolute\\\\path \
            path\\\\with/mixed\\\\separators"
        );
    }
}

146
vendor/bindgen/diagnostics.rs vendored Normal file
View File

@@ -0,0 +1,146 @@
//! Types and function used to emit pretty diagnostics for `bindgen`.
//!
//! The entry point of this module is the [`Diagnostic`] type.
use std::fmt::Write;
use std::io::{self, BufRead, BufReader};
use std::{borrow::Cow, fs::File};
use annotate_snippets::{Renderer, Snippet};
pub(crate) use annotate_snippets::Level;
/// A `bindgen` diagnostic.
///
/// Nothing is rendered until [`Diagnostic::display`] is called, and only if a
/// title has been set.
#[derive(Default)]
pub(crate) struct Diagnostic<'a> {
    /// Main message and its severity; diagnostics without a title are dropped.
    title: Option<(Cow<'a, str>, Level)>,
    /// Source-code excerpts attached to the diagnostic.
    slices: Vec<Slice<'a>>,
    /// Trailing annotations, each with its own severity.
    footer: Vec<(Cow<'a, str>, Level)>,
}
impl<'a> Diagnostic<'a> {
    /// Add a title to the diagnostic and set its type.
    pub(crate) fn with_title(
        &mut self,
        title: impl Into<Cow<'a, str>>,
        level: Level,
    ) -> &mut Self {
        self.title = Some((title.into(), level));
        self
    }
    /// Add a slice of source code to the diagnostic.
    pub(crate) fn add_slice(&mut self, slice: Slice<'a>) -> &mut Self {
        self.slices.push(slice);
        self
    }
    /// Add a footer annotation to the diagnostic. This annotation will have its own type.
    pub(crate) fn add_annotation(
        &mut self,
        msg: impl Into<Cow<'a, str>>,
        level: Level,
    ) -> &mut Self {
        self.footer.push((msg.into(), level));
        self
    }
    /// Print this diagnostic.
    ///
    /// The diagnostic is printed using `cargo:warning` if `bindgen` is being invoked by a build
    /// script or using `eprintln` otherwise.
    pub(crate) fn display(&self) {
        // NOTE(review): the presence of `CARGO_CFG_TARGET_ARCH` is used as a
        // "running inside a build script" heuristic — confirm this holds for
        // all supported cargo versions.
        std::thread_local! {
            static INVOKED_BY_BUILD_SCRIPT: bool = std::env::var_os("CARGO_CFG_TARGET_ARCH").is_some();
        }
        let mut footer = vec![];
        let mut slices = vec![];
        // A diagnostic without a title is silently dropped.
        let snippet = if let Some((msg, level)) = &self.title {
            (*level).title(msg)
        } else {
            return;
        };
        for (msg, level) in &self.footer {
            footer.push((*level).title(msg));
        }
        // add additional info that this is generated by bindgen
        // so as to not confuse with rustc warnings
        footer.push(
            Level::Info.title("This diagnostic was generated by bindgen."),
        );
        // Slices without source text cannot be rendered, so they are skipped.
        for slice in &self.slices {
            if let Some(source) = &slice.source {
                let mut snippet = Snippet::source(source)
                    .line_start(slice.line.unwrap_or_default());
                if let Some(origin) = &slice.filename {
                    snippet = snippet.origin(origin);
                }
                slices.push(snippet);
            }
        }
        let renderer = Renderer::styled();
        let dl = renderer.render(snippet.snippets(slices).footers(footer));
        if INVOKED_BY_BUILD_SCRIPT.with(Clone::clone) {
            // This is just a hack which hides the `warning:` added by cargo at the beginning of
            // every line. This should be fine as our diagnostics already have a colorful title.
            // FIXME (pvdrz): Could it be that this doesn't work in other languages?
            let hide_warning = "\r \r";
            let string = dl.to_string();
            for line in string.lines() {
                println!("cargo:warning={hide_warning}{line}");
            }
        } else {
            eprintln!("{dl}\n");
        }
    }
}
/// A slice of source code.
#[derive(Default)]
pub(crate) struct Slice<'a> {
    /// The source text to render; slices without it are skipped by `display`.
    source: Option<Cow<'a, str>>,
    /// Origin label, formatted as `file:line:col` by `with_location`.
    filename: Option<String>,
    /// Line number the source text starts at (as passed to `with_location`).
    line: Option<usize>,
}
impl<'a> Slice<'a> {
    /// Set the source code.
    pub(crate) fn with_source(
        &mut self,
        source: impl Into<Cow<'a, str>>,
    ) -> &mut Self {
        self.source = Some(source.into());
        self
    }
    /// Set the file, line and column.
    pub(crate) fn with_location(
        &mut self,
        mut name: String,
        line: usize,
        col: usize,
    ) -> &mut Self {
        // The filename is extended in place into a `file:line:col` label.
        write!(name, ":{line}:{col}").expect("Writing to a string cannot fail");
        self.filename = Some(name);
        self.line = Some(line);
        self
    }
}
/// Return the 1-based `line`-th line of `filename`, or `None` if the file
/// has fewer lines.
pub(crate) fn get_line(
    filename: &str,
    line: usize,
) -> io::Result<Option<String>> {
    let reader = BufReader::new(File::open(filename)?);
    // `line` is 1-based; `wrapping_sub` keeps `line == 0` from panicking —
    // it simply produces an index that never matches.
    let target = line.wrapping_sub(1);
    for (index, candidate) in reader.lines().enumerate() {
        if index == target {
            return candidate.map(Some);
        }
    }
    Ok(None)
}

17
vendor/bindgen/extra_assertions.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
//! Macros for defining extra assertions that should only be checked in testing
//! and/or CI when the `__testing_only_extra_assertions` feature is enabled.
/// Simple macro that forwards to assert! when using
/// `__testing_only_extra_assertions`.
macro_rules! extra_assert {
    // Bare condition form: `extra_assert!(cond)`.
    ( $cond:expr ) => {
        if cfg!(feature = "__testing_only_extra_assertions") {
            assert!($cond);
        }
    };
    // With a custom panic message: `extra_assert!(cond, "msg", args...)`.
    ( $cond:expr , $( $arg:tt )+ ) => {
        if cfg!(feature = "__testing_only_extra_assertions") {
            assert!($cond, $( $arg )* )
        }
    };
}

570
vendor/bindgen/features.rs vendored Normal file
View File

@@ -0,0 +1,570 @@
//! Contains code for selecting features
#![deny(unused_extern_crates)]
#![deny(clippy::missing_docs_in_private_items)]
#![allow(deprecated)]
use std::str::FromStr;
use std::{fmt, io};
/// Represents the version of the Rust language to target.
// The derived `Ord` comes from `Version`: stable releases compare by
// (minor, patch) and `Nightly` sorts after every stable release.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[repr(transparent)]
pub struct RustTarget(Version);
impl RustTarget {
    /// Create a new [`RustTarget`] for a stable release of Rust.
    ///
    /// Fails with [`InvalidRustTarget::TooEarly`] if the version predates
    /// [`EARLIEST_STABLE_RUST`].
    pub fn stable(minor: u64, patch: u64) -> Result<Self, InvalidRustTarget> {
        let target = Self(Version::Stable(minor, patch));
        if target < EARLIEST_STABLE_RUST {
            return Err(InvalidRustTarget::TooEarly);
        }
        Ok(target)
    }
    /// The minor version number of this target, or `None` for nightly.
    const fn minor(&self) -> Option<u64> {
        match self.0 {
            Version::Nightly => None,
            Version::Stable(minor, _) => Some(minor),
        }
    }
    /// Whether `self` is at least as recent as `other`, i.e. whether code
    /// targeting `other` can be generated for `self`.
    const fn is_compatible(&self, other: &Self) -> bool {
        match (self.0, other.0) {
            (Version::Stable(minor, _), Version::Stable(other_minor, _)) => {
                // We ignore the patch version number as they only include backwards compatible bug
                // fixes.
                minor >= other_minor
            }
            // Nightly is compatible with everything
            (Version::Nightly, _) => true,
            // No stable release is compatible with nightly
            (Version::Stable { .. }, Version::Nightly) => false,
        }
    }
}
impl Default for RustTarget {
    fn default() -> Self {
        // Bindgen from build script: default to generating bindings compatible
        // with the Rust version currently performing this build.
        #[cfg(not(feature = "__cli"))]
        {
            use std::env;
            use std::iter;
            use std::process::Command;
            use std::sync::OnceLock;
            // The detected compiler version is cached for the lifetime of the
            // process; the detection below only runs once.
            static CURRENT_RUST: OnceLock<Option<RustTarget>> = OnceLock::new();
            if let Some(current_rust) = *CURRENT_RUST.get_or_init(|| {
                // NOTE(review): `CARGO_CFG_TARGET_ARCH` presence is used as a
                // "we are inside a build script" heuristic — confirm it holds
                // for all supported cargo versions.
                let is_build_script =
                    env::var_os("CARGO_CFG_TARGET_ARCH").is_some();
                if !is_build_script {
                    return None;
                }
                let rustc = env::var_os("RUSTC")?;
                // An empty RUSTC_WRAPPER is treated as unset.
                let rustc_wrapper = env::var_os("RUSTC_WRAPPER")
                    .filter(|wrapper| !wrapper.is_empty());
                let wrapped_rustc =
                    rustc_wrapper.iter().chain(iter::once(&rustc));
                let mut is_clippy_driver = false;
                loop {
                    let mut wrapped_rustc = wrapped_rustc.clone();
                    let mut command =
                        Command::new(wrapped_rustc.next().unwrap());
                    command.args(wrapped_rustc);
                    if is_clippy_driver {
                        // Ask clippy-driver for the underlying rustc version.
                        command.arg("--rustc");
                    }
                    command.arg("--version");
                    let output = command.output().ok()?;
                    let string = String::from_utf8(output.stdout).ok()?;
                    // Version string like "rustc 1.100.0-beta.5 (f0e1d2c3b 2026-10-17)"
                    let last_line = string.lines().last().unwrap_or(&string);
                    let (program, rest) = last_line.trim().split_once(' ')?;
                    if program != "rustc" {
                        // If the wrapper turned out to be clippy, retry once
                        // with `--rustc` to get the real compiler version.
                        if program.starts_with("clippy") && !is_clippy_driver {
                            is_clippy_driver = true;
                            continue;
                        }
                        return None;
                    }
                    // Strip pre-release/build metadata: "1.71.0-beta.5+x" -> "1.71.0".
                    let number = rest.split([' ', '-', '+']).next()?;
                    break RustTarget::from_str(number).ok();
                }
            }) {
                return current_rust;
            }
        }
        // Bindgen from CLI, or cannot determine compiler version: default to
        // generating bindings compatible with the latest stable release of Rust
        // that Bindgen knows about.
        LATEST_STABLE_RUST
    }
}
impl fmt::Display for RustTarget {
    /// Formats as `1.<minor>.<patch>` for stable targets or `nightly`,
    /// matching the syntax accepted by the `FromStr` implementation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.0 {
            Version::Nightly => "nightly".fmt(f),
            Version::Stable(minor, patch) => {
                write!(f, "1.{minor}.{patch}")
            }
        }
    }
}
// Internal representation of a target. Variant order matters: the derived
// `Ord` makes every stable release sort before `Nightly`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
enum Version {
    // A stable release, as the (minor, patch) of version 1.minor.patch.
    Stable(u64, u64),
    // The nightly channel.
    Nightly,
}
/// Error returned when constructing a [`RustTarget`] fails.
#[derive(Debug, PartialEq, Eq, Hash)]
pub enum InvalidRustTarget {
    /// The requested version predates the earliest release bindgen supports.
    TooEarly,
}
impl fmt::Display for InvalidRustTarget {
    /// Explains the rejection, quoting the earliest supported release.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `TooEarly` is currently the only variant, so the pattern is
        // irrefutable.
        let Self::TooEarly = self;
        write!(f, "the earliest Rust version supported by bindgen is {EARLIEST_STABLE_RUST}")
    }
}
/// This macro defines the Rust editions supported by bindgen.
///
/// Each entry maps an enum variant and its edition year to the Rust minor
/// version that introduced the edition.
macro_rules! define_rust_editions {
    ($($variant:ident($value:literal) => $minor:literal,)*) => {
        #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
        #[doc = "Represents Rust Edition for the generated bindings"]
        pub enum RustEdition {
            $(
                #[doc = concat!("The ", stringify!($value), " edition of Rust.")]
                $variant,
            )*
        }
        impl FromStr for RustEdition {
            type Err = InvalidRustEdition;
            // Parses the edition year, e.g. "2021".
            fn from_str(s: &str) -> Result<Self, Self::Err> {
                match s {
                    $(stringify!($value) => Ok(Self::$variant),)*
                    _ => Err(InvalidRustEdition(s.to_owned())),
                }
            }
        }
        impl fmt::Display for RustEdition {
            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                match self {
                    $(Self::$variant => stringify!($value).fmt(f),)*
                }
            }
        }
        impl RustEdition {
            // All editions, in declaration order (oldest first).
            pub(crate) const ALL: [Self; [$($value,)*].len()] = [$(Self::$variant,)*];
            // An edition is available when the target's minor version is at
            // least the one that introduced it; nightly (no minor) has all.
            pub(crate) fn is_available(self, target: RustTarget) -> bool {
                let Some(minor) = target.minor() else {
                    return true;
                };
                match self {
                    $(Self::$variant => $minor <= minor,)*
                }
            }
        }
    }
}
/// Error returned when parsing an unrecognized Rust edition string; carries
/// the offending input.
#[derive(Debug)]
pub struct InvalidRustEdition(String);
impl fmt::Display for InvalidRustEdition {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "\"{}\" is not a valid Rust edition", self.0)
    }
}
impl std::error::Error for InvalidRustEdition {}
// The editions bindgen supports, each mapped to the Rust minor version that
// introduced it (e.g. the 2024 edition arrived in Rust 1.85).
define_rust_editions! {
    Edition2018(2018) => 31,
    Edition2021(2021) => 56,
    Edition2024(2024) => 85,
}
impl RustTarget {
    /// Returns the latest edition supported by this target.
    pub(crate) fn latest_edition(self) -> RustEdition {
        // `ALL` is ordered oldest-first, so a forward scan that keeps the
        // last available edition yields the newest supported one.
        let mut newest = None;
        for edition in RustEdition::ALL {
            if edition.is_available(self) {
                newest = Some(edition);
            }
        }
        newest.expect("bindgen should always support at least one edition")
    }
}
impl Default for RustEdition {
    /// The latest edition available in the default [`RustTarget`].
    fn default() -> Self {
        let target = RustTarget::default();
        target.latest_edition()
    }
}
/// This macro defines the [`RustTarget`] and [`RustFeatures`] types.
///
/// Each stable entry maps a `Stable_1_NN` constant to the features that
/// release introduced; features may optionally be restricted to specific
/// editions and carry a stabilization PR / tracking issue number.
macro_rules! define_rust_targets {
    (
        Nightly => {$($nightly_feature:ident $(($nightly_edition:literal))|* $(: #$issue:literal)?),* $(,)?} $(,)?
        $(
            $variant:ident($minor:literal) => {$($feature:ident $(($edition:literal))|* $(: #$pull:literal)?),* $(,)?},
        )*
        $(,)?
    ) => {
        impl RustTarget {
            /// The nightly version of Rust, which introduces the following features:"
            $(#[doc = concat!(
                "- [`", stringify!($nightly_feature), "`]",
                "(", $("https://github.com/rust-lang/rust/pull/", stringify!($issue),)* ")",
            )])*
            #[deprecated = "The use of this constant is deprecated, please use `RustTarget::nightly` instead."]
            pub const Nightly: Self = Self::nightly();
            /// The nightly version of Rust, which introduces the following features:"
            $(#[doc = concat!(
                "- [`", stringify!($nightly_feature), "`]",
                "(", $("https://github.com/rust-lang/rust/pull/", stringify!($issue),)* ")",
            )])*
            pub const fn nightly() -> Self {
                Self(Version::Nightly)
            }
            $(
                #[doc = concat!("Version 1.", stringify!($minor), " of Rust, which introduced the following features:")]
                $(#[doc = concat!(
                    "- [`", stringify!($feature), "`]",
                    "(", $("https://github.com/rust-lang/rust/pull/", stringify!($pull),)* ")",
                )])*
                #[deprecated = "The use of this constant is deprecated, please use `RustTarget::stable` instead."]
                pub const $variant: Self = Self(Version::Stable($minor, 0));
            )*
            // Every known stable release paired with its minor version, used
            // by the LATEST/EARLIEST_STABLE_RUST const scans.
            const fn stable_releases() -> [(Self, u64); [$($minor,)*].len()] {
                [$((Self::$variant, $minor),)*]
            }
        }
        // One boolean flag per known feature (stable and nightly).
        #[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
        pub(crate) struct RustFeatures {
            $($(pub(crate) $feature: bool,)*)*
            $(pub(crate) $nightly_feature: bool,)*
        }
        impl RustFeatures {
            /// Compute the features that must be enabled in a specific Rust target with a specific edition.
            pub(crate) fn new(target: RustTarget, edition: RustEdition) -> Self {
                let mut features = Self {
                    $($($feature: false,)*)*
                    $($nightly_feature: false,)*
                };
                if target.is_compatible(&RustTarget::nightly()) {
                    $(
                        // An empty edition list means "all editions".
                        let editions: &[RustEdition] = &[$(stringify!($nightly_edition).parse::<RustEdition>().ok().expect("invalid edition"),)*];
                        if editions.is_empty() || editions.contains(&edition) {
                            features.$nightly_feature = true;
                        }
                    )*
                }
                $(
                    if target.is_compatible(&RustTarget::$variant) {
                        $(
                            let editions: &[RustEdition] = &[$(stringify!($edition).parse::<RustEdition>().ok().expect("invalid edition"),)*];
                            if editions.is_empty() || editions.contains(&edition) {
                                features.$feature = true;
                            }
                        )*
                    }
                )*
                features
            }
        }
    };
}
// NOTE: When adding or removing features here, make sure to add the stabilization PR
// number for the feature if it has been stabilized or the tracking issue number if the feature is
// not stable.
//
// Entries are kept in descending order of minor version. The order does not
// affect behavior (LATEST/EARLIEST_STABLE_RUST scan for min/max), but keeping
// it sorted makes the list easy to audit. The 1.59/1.51 entries were
// previously swapped; restored to descending order.
define_rust_targets! {
    Nightly => {
        vectorcall_abi: #124485,
        ptr_metadata: #81513,
        layout_for_ptr: #69835,
    },
    Stable_1_82(82) => {
        unsafe_extern_blocks: #127921,
    },
    Stable_1_77(77) => {
        offset_of: #106655,
        literal_cstr(2021)|(2024): #117472,
    },
    Stable_1_73(73) => { thiscall_abi: #42202 },
    Stable_1_71(71) => { c_unwind_abi: #106075 },
    Stable_1_68(68) => { abi_efiapi: #105795 },
    Stable_1_64(64) => { core_ffi_c: #94503 },
    Stable_1_59(59) => { const_cstr: #54745 },
    Stable_1_51(51) => {
        raw_ref_macros: #80886,
        min_const_generics: #74878,
    },
    Stable_1_47(47) => { larger_arrays: #74060 },
    Stable_1_43(43) => { associated_constants: #68952 },
    Stable_1_40(40) => { non_exhaustive: #44109 },
    Stable_1_36(36) => { maybe_uninit: #60445 },
    Stable_1_33(33) => { repr_packed_n: #57049 },
}
/// Latest stable release of Rust that is supported by bindgen
pub const LATEST_STABLE_RUST: RustTarget = {
    // FIXME: replace all this code by
    // ```
    // RustTarget::stable_releases()
    //     .into_iter()
    //     .max_by_key(|(_, m)| m)
    //     .map(|(t, _)| t)
    //     .unwrap()
    // ```
    // once those operations can be used in constants.
    let targets = RustTarget::stable_releases();
    let mut i = 0;
    let mut latest_target = None;
    let mut latest_minor = 0;
    // Manual max-by-minor scan, written with `while` so it is const-evaluable.
    while i < targets.len() {
        let (target, minor) = targets[i];
        if latest_minor < minor {
            latest_minor = minor;
            latest_target = Some(target);
        }
        i += 1;
    }
    match latest_target {
        Some(target) => target,
        // The release list is never empty, so a target is always found.
        None => unreachable!(),
    }
};
/// Earliest stable release of Rust that is supported by bindgen
pub const EARLIEST_STABLE_RUST: RustTarget = {
    // FIXME: replace all this code by
    // ```
    // RustTarget::stable_releases()
    //     .into_iter()
    //     .min_by_key(|(_, m)| m)
    //     .map(|(t, _)| t)
    //     .unwrap_or(LATEST_STABLE_RUST)
    // ```
    // once those operations can be used in constants.
    let targets = RustTarget::stable_releases();
    let mut i = 0;
    let mut earliest_target = None;
    // Seed the scan with the latest release's minor version; anything
    // strictly smaller replaces it.
    let Some(mut earliest_minor) = LATEST_STABLE_RUST.minor() else {
        unreachable!()
    };
    while i < targets.len() {
        let (target, minor) = targets[i];
        if earliest_minor > minor {
            earliest_minor = minor;
            earliest_target = Some(target);
        }
        i += 1;
    }
    match earliest_target {
        Some(target) => target,
        // NOTE(review): if the release list ever contained only the latest
        // release, no entry would be strictly earlier and this would hit
        // `unreachable!` at const-eval — relies on there being >= 2 releases.
        None => unreachable!(),
    }
};
/// Build an `InvalidInput` I/O error explaining why `input` is not a valid
/// Rust target, with `msg` giving the specific reason.
fn invalid_input(input: &str, msg: impl fmt::Display) -> io::Error {
    let description = format!("\"{input}\" is not a valid Rust target, {msg}");
    io::Error::new(io::ErrorKind::InvalidInput, description)
}
impl FromStr for RustTarget {
    type Err = io::Error;
    /// Parse a target from `"nightly"`, `"1.71"`, or `"1.71.1"` style input.
    ///
    /// The patch component is optional and defaults to 0; validity of the
    /// resulting version is delegated to [`RustTarget::stable`].
    fn from_str(input: &str) -> Result<Self, Self::Err> {
        if input == "nightly" {
            return Ok(Self::Nightly);
        }
        let Some((major_str, tail)) = input.split_once('.') else {
            return Err(invalid_input(input, "accepted values are of the form \"1.71\", \"1.71.1\" or \"nightly\"." ) );
        };
        // Rust has never shipped a major version other than 1.
        if major_str != "1" {
            return Err(invalid_input(
                input,
                "The largest major version of Rust released is \"1\"",
            ));
        }
        let (minor, patch) = if let Some((minor_str, patch_str)) =
            tail.split_once('.')
        {
            let Ok(minor) = minor_str.parse::<u64>() else {
                return Err(invalid_input(input, "the minor version number must be an unsigned 64-bit integer"));
            };
            let Ok(patch) = patch_str.parse::<u64>() else {
                return Err(invalid_input(input, "the patch version number must be an unsigned 64-bit integer"));
            };
            (minor, patch)
        } else {
            // No patch component given: treat "1.71" as "1.71.0".
            let Ok(minor) = tail.parse::<u64>() else {
                return Err(invalid_input(input, "the minor version number must be an unsigned 64-bit integer"));
            };
            (minor, 0)
        };
        Self::stable(minor, patch).map_err(|err| invalid_input(input, err))
    }
}
impl RustFeatures {
    /// Compute the features that must be enabled in a specific Rust target with the latest edition
    /// available in that target.
    pub(crate) fn new_with_latest_edition(target: RustTarget) -> Self {
        let edition = target.latest_edition();
        Self::new(target, edition)
    }
}
impl Default for RustFeatures {
    /// The feature set of the default target under its latest edition.
    fn default() -> Self {
        let target = RustTarget::default();
        Self::new_with_latest_edition(target)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Each release maps to the newest edition it can use.
    #[test]
    fn release_versions_for_editions() {
        assert_eq!(
            "1.33".parse::<RustTarget>().unwrap().latest_edition(),
            RustEdition::Edition2018
        );
        assert_eq!(
            "1.56".parse::<RustTarget>().unwrap().latest_edition(),
            RustEdition::Edition2021
        );
        assert_eq!(
            "1.85".parse::<RustTarget>().unwrap().latest_edition(),
            RustEdition::Edition2024
        );
        assert_eq!(
            "nightly".parse::<RustTarget>().unwrap().latest_edition(),
            RustEdition::Edition2024
        );
    }
    // Feature flags follow target version and, for edition-gated features,
    // the selected edition.
    #[test]
    fn target_features() {
        let features =
            RustFeatures::new_with_latest_edition(RustTarget::Stable_1_71);
        assert!(
            features.c_unwind_abi &&
                features.abi_efiapi &&
                !features.thiscall_abi
        );
        // `literal_cstr` is gated on the 2021+ editions.
        let features = RustFeatures::new(
            RustTarget::Stable_1_77,
            RustEdition::Edition2018,
        );
        assert!(!features.literal_cstr);
        let features =
            RustFeatures::new_with_latest_edition(RustTarget::Stable_1_77);
        assert!(features.literal_cstr);
        let f_nightly =
            RustFeatures::new_with_latest_edition(RustTarget::Nightly);
        assert!(
            f_nightly.vectorcall_abi &&
                f_nightly.ptr_metadata &&
                f_nightly.layout_for_ptr
        );
    }
    // Two targets are considered equivalent when they enable the same set of
    // features.
    fn test_target(input: &str, expected: RustTarget) {
        // Two targets are equivalent if they enable the same set of features
        let expected = RustFeatures::new_with_latest_edition(expected);
        let found = RustFeatures::new_with_latest_edition(
            input.parse::<RustTarget>().unwrap(),
        );
        assert_eq!(
            expected,
            found,
            "target {input} enables features:\n{found:#?}\nand should enable features:\n{expected:#?}"
        );
    }
    fn test_invalid_target(input: &str) {
        assert!(
            input.parse::<RustTarget>().is_err(),
            "{input} should be an invalid target"
        );
    }
    #[test]
    fn valid_targets() {
        test_target("1.71", RustTarget::Stable_1_71);
        test_target("1.71.0", RustTarget::Stable_1_71);
        test_target("1.71.1", RustTarget::Stable_1_71);
        // Versions between known releases fall back to the nearest older one.
        test_target("1.72", RustTarget::Stable_1_71);
        test_target("1.73", RustTarget::Stable_1_73);
        test_target("1.18446744073709551615", LATEST_STABLE_RUST);
        test_target("nightly", RustTarget::Nightly);
    }
    #[test]
    fn invalid_targets() {
        test_invalid_target("2.0");
        test_invalid_target("1.cat");
        test_invalid_target("1.0.cat");
        test_invalid_target("1.18446744073709551616");
        test_invalid_target("1.0.18446744073709551616");
        test_invalid_target("1.-1.0");
        test_invalid_target("1.0.-1");
        test_invalid_target("beta");
        // Versions earlier than EARLIEST_STABLE_RUST are rejected.
        test_invalid_target("1.0.0");
        test_invalid_target("1.32.0");
    }
}

726
vendor/bindgen/ir/analysis/derive.rs vendored Normal file
View File

@@ -0,0 +1,726 @@
//! Determining which types for which we cannot emit `#[derive(Trait)]`.
use std::fmt;
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::analysis::has_vtable::HasVtable;
use crate::ir::comp::CompKind;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::derive::CanDerive;
use crate::ir::function::FunctionSig;
use crate::ir::item::{IsOpaque, Item};
use crate::ir::layout::Layout;
use crate::ir::template::TemplateParameters;
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
use crate::ir::ty::{Type, TypeKind};
use crate::{Entry, HashMap, HashSet};
/// Which trait to consider when doing the `CannotDerive` analysis.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum DeriveTrait {
    /// The `Copy` trait.
    Copy,
    /// The `Debug` trait.
    Debug,
    /// The `Default` trait.
    Default,
    /// The `Hash` trait.
    Hash,
    /// The `PartialEq` and `PartialOrd` traits, tracked together by a single
    /// analysis pass.
    PartialEqOrPartialOrd,
}
/// An analysis that finds for each IR item whether a trait cannot be derived.
///
/// We use the monotone constraint function `cannot_derive`, defined as follows
/// for type T:
///
/// * If T is Opaque and the layout of the type is known, get this layout as an
///   opaquetype and check whether it can derive using trivial checks.
///
/// * If T is Array, a trait cannot be derived if the array is incomplete,
///   if the length of the array is larger than the limit (unless the trait
///   allows it), or the trait cannot be derived for the type of data the array
///   contains.
///
/// * If T is Vector, a trait cannot be derived if the trait cannot be derived
///   for the type of data the vector contains.
///
/// * If T is a type alias, a templated alias or an indirection to another type,
///   the trait cannot be derived if the trait cannot be derived for type T
///   refers to.
///
/// * If T is a compound type, the trait cannot be derived if the trait cannot
///   be derived for any of its base members or fields.
///
/// * If T is an instantiation of an abstract template definition, the trait
///   cannot be derived if any of the template arguments or template definition
///   cannot derive the trait.
///
/// * For all other (simple) types, compiler and standard library limitations
///   dictate whether the trait is implemented.
#[derive(Debug, Clone)]
pub(crate) struct CannotDerive<'ctx> {
    /// The IR context the analysis runs over.
    ctx: &'ctx BindgenContext,
    /// The single trait this analysis instance is checking.
    derive_trait: DeriveTrait,
    // The incremental result of this analysis's computation.
    // Contains information whether particular item can derive `derive_trait`
    can_derive: HashMap<ItemId, CanDerive>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `cannot_derive_partialeq_or_partialord` set, then each of the ids
    // in Vec<ItemId> need to be considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // can derive `derive_trait`.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
/// Predicate deciding whether a given IR edge kind should be followed during
/// the analysis.
type EdgePredicate = fn(EdgeKind) -> bool;

/// Default edge filter: follow exactly the edges that can influence whether
/// a type can derive a trait (members, fields, references and template
/// relationships); function/method and inner-item edges are ignored.
fn consider_edge_default(kind: EdgeKind) -> bool {
    matches!(
        kind,
        EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::TypeReference |
            EdgeKind::VarType |
            EdgeKind::TemplateArgument |
            EdgeKind::TemplateDeclaration |
            EdgeKind::TemplateParameterDefinition
    )
}
impl CannotDerive<'_> {
    /// Record `can_derive` as the analysis result for `id`, returning whether
    /// the stored value changed (which drives the fixpoint iteration).
    ///
    /// `CanDerive::Yes` is the implicit default for missing entries and is
    /// never stored; results only move "up" the lattice (Yes < Manually < No).
    fn insert<Id: Into<ItemId>>(
        &mut self,
        id: Id,
        can_derive: CanDerive,
    ) -> ConstrainResult {
        let id = id.into();
        trace!(
            "inserting {id:?} can_derive<{}>={can_derive:?}",
            self.derive_trait,
        );
        if let CanDerive::Yes = can_derive {
            return ConstrainResult::Same;
        }
        match self.can_derive.entry(id) {
            Entry::Occupied(mut entry) => {
                // Only a strictly "worse" result counts as a change; the
                // analysis is monotone, so values never move back down.
                if *entry.get() < can_derive {
                    entry.insert(can_derive);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
            Entry::Vacant(entry) => {
                entry.insert(can_derive);
                ConstrainResult::Changed
            }
        }
    }
    /// The constraint function: compute whether `derive_trait` can be derived
    /// for `ty`, consulting the partial results gathered so far for the types
    /// it depends on.
    fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive {
        // Blocklisted types are not analyzed; the user-configured answer
        // (if any) is taken at face value.
        if !self.ctx.allowlisted_items().contains(&item.id()) {
            let can_derive = self
                .ctx
                .blocklisted_type_implements_trait(item, self.derive_trait);
            match can_derive {
                CanDerive::Yes => trace!(
                    " blocklisted type explicitly implements {}",
                    self.derive_trait
                ),
                CanDerive::Manually => trace!(
                    " blocklisted type requires manual implementation of {}",
                    self.derive_trait
                ),
                CanDerive::No => trace!(
                    " cannot derive {} for blocklisted type",
                    self.derive_trait
                ),
            }
            return can_derive;
        }
        // Respect the user's explicit `no_copy`/`no_debug`/... opt-outs.
        if self.derive_trait.not_by_name(self.ctx, item) {
            trace!(
                " cannot derive {} for explicitly excluded type",
                self.derive_trait
            );
            return CanDerive::No;
        }
        trace!("ty: {ty:?}");
        // Opaque items are judged purely by their layout blob.
        if item.is_opaque(self.ctx, &()) {
            if !self.derive_trait.can_derive_union() &&
                ty.is_union() &&
                self.ctx.options().untagged_union
            {
                trace!(
                    " cannot derive {} for Rust unions",
                    self.derive_trait
                );
                return CanDerive::No;
            }
            let layout_can_derive =
                ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
                    l.opaque().array_size_within_derive_limit()
                });
            match layout_can_derive {
                CanDerive::Yes => {
                    trace!(
                        " we can trivially derive {} for the layout",
                        self.derive_trait
                    );
                }
                _ => {
                    trace!(
                        " we cannot derive {} for the layout",
                        self.derive_trait
                    );
                }
            }
            return layout_can_derive;
        }
        match *ty.kind() {
            // Handle the simple cases. These can derive traits without further
            // information.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Complex(..) |
            TypeKind::Float(..) |
            TypeKind::Enum(..) |
            TypeKind::TypeParam |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::Reference(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => self.derive_trait.can_derive_simple(ty.kind()),
            TypeKind::Pointer(inner) => {
                // Function pointers have derive constraints of their own
                // (arity limits); data pointers are handled separately.
                let inner_type =
                    self.ctx.resolve_type(inner).canonical_type(self.ctx);
                if let TypeKind::Function(ref sig) = *inner_type.kind() {
                    self.derive_trait.can_derive_fnptr(sig)
                } else {
                    self.derive_trait.can_derive_pointer()
                }
            }
            TypeKind::Function(ref sig) => {
                self.derive_trait.can_derive_fnptr(sig)
            }
            // Complex cases need more information
            TypeKind::Array(t, len) => {
                // Missing entries default to `CanDerive::Yes` (lattice bottom).
                let inner_type =
                    self.can_derive.get(&t.into()).copied().unwrap_or_default();
                if inner_type != CanDerive::Yes {
                    trace!(
                        " arrays of T for which we cannot derive {} \
                         also cannot derive {}",
                        self.derive_trait,
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // `len == 0` denotes an incomplete (flexible) array member.
                if len == 0 && !self.derive_trait.can_derive_incomplete_array()
                {
                    trace!(
                        " cannot derive {} for incomplete arrays",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                if self.derive_trait.can_derive_large_array(self.ctx) {
                    trace!(" array can derive {}", self.derive_trait);
                    return CanDerive::Yes;
                }
                if len > RUST_DERIVE_IN_ARRAY_LIMIT {
                    trace!(
                        " array is too large to derive {}, but it may be implemented", self.derive_trait
                    );
                    return CanDerive::Manually;
                }
                trace!(
                    " array is small enough to derive {}",
                    self.derive_trait
                );
                CanDerive::Yes
            }
            TypeKind::Vector(t, len) => {
                let inner_type =
                    self.can_derive.get(&t.into()).copied().unwrap_or_default();
                if inner_type != CanDerive::Yes {
                    trace!(
                        " vectors of T for which we cannot derive {} \
                         also cannot derive {}",
                        self.derive_trait,
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                assert_ne!(len, 0, "vectors cannot have zero length");
                self.derive_trait.can_derive_vector()
            }
            TypeKind::Comp(ref info) => {
                assert!(
                    !info.has_non_type_template_params(),
                    "The early ty.is_opaque check should have handled this case"
                );
                if !self.derive_trait.can_derive_compound_forward_decl() &&
                    info.is_forward_declaration()
                {
                    trace!(
                        " cannot derive {} for forward decls",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // NOTE: Take into account that while unions in C and C++ are copied by
                // default, the may have an explicit destructor in C++, so we can't
                // defer this check just for the union case.
                if !self.derive_trait.can_derive_compound_with_destructor() &&
                    self.ctx.lookup_has_destructor(
                        item.id().expect_type_id(self.ctx),
                    )
                {
                    trace!(
                        " comp has destructor which cannot derive {}",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                if info.kind() == CompKind::Union {
                    if self.derive_trait.can_derive_union() {
                        if self.ctx.options().untagged_union &&
                            // https://github.com/rust-lang/rust/issues/36640
                            (!info.self_template_params(self.ctx).is_empty() ||
                                !item.all_template_params(self.ctx).is_empty())
                        {
                            trace!(
                                " cannot derive {} for Rust union because issue 36640", self.derive_trait
                            );
                            return CanDerive::No;
                        }
                        // fall through to be same as non-union handling
                    } else {
                        if self.ctx.options().untagged_union {
                            trace!(
                                " cannot derive {} for Rust unions",
                                self.derive_trait
                            );
                            return CanDerive::No;
                        }
                        // Bindgen-generated (non-untagged) unions are opaque
                        // blobs, so judge them by their layout alone.
                        let layout_can_derive =
                            ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
                                l.opaque().array_size_within_derive_limit()
                            });
                        match layout_can_derive {
                            CanDerive::Yes => {
                                trace!(
                                    " union layout can trivially derive {}",
                                    self.derive_trait
                                );
                            }
                            _ => {
                                trace!(
                                    " union layout cannot derive {}",
                                    self.derive_trait
                                );
                            }
                        }
                        return layout_can_derive;
                    }
                }
                if !self.derive_trait.can_derive_compound_with_vtable() &&
                    item.has_vtable(self.ctx)
                {
                    trace!(
                        " cannot derive {} for comp with vtable",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                // Bitfield units are always represented as arrays of u8, but
                // they're not traced as arrays, so we need to check here
                // instead.
                if !self.derive_trait.can_derive_large_array(self.ctx) &&
                    info.has_too_large_bitfield_unit() &&
                    !item.is_opaque(self.ctx, &())
                {
                    trace!(
                        " cannot derive {} for comp with too large bitfield unit",
                        self.derive_trait
                    );
                    return CanDerive::No;
                }
                let pred = self.derive_trait.consider_edge_comp();
                self.constrain_join(item, pred)
            }
            TypeKind::ResolvedTypeRef(..) |
            TypeKind::TemplateAlias(..) |
            TypeKind::Alias(..) |
            TypeKind::BlockPointer(..) => {
                let pred = self.derive_trait.consider_edge_typeref();
                self.constrain_join(item, pred)
            }
            TypeKind::TemplateInstantiation(..) => {
                let pred = self.derive_trait.consider_edge_tmpl_inst();
                self.constrain_join(item, pred)
            }
            TypeKind::Opaque => unreachable!(
                "The early ty.is_opaque check should have handled this case"
            ),
        }
    }
    /// Join the partial results of all of `item`'s sub-items reachable via
    /// edges accepted by `consider_edge`, taking the "worst" value.
    fn constrain_join(
        &mut self,
        item: &Item,
        consider_edge: EdgePredicate,
    ) -> CanDerive {
        let mut candidate = None;
        item.trace(
            self.ctx,
            &mut |sub_id, edge_kind| {
                // Ignore ourselves, since union with ourself is a
                // no-op. Ignore edges that aren't relevant to the
                // analysis.
                if sub_id == item.id() || !consider_edge(edge_kind) {
                    return;
                }
                let can_derive = self.can_derive
                    .get(&sub_id)
                    .copied()
                    .unwrap_or_default();
                match can_derive {
                    CanDerive::Yes => trace!(" member {sub_id:?} can derive {}", self.derive_trait),
                    CanDerive::Manually => trace!(" member {sub_id:?} cannot derive {}, but it may be implemented", self.derive_trait),
                    CanDerive::No => trace!(" member {sub_id:?} cannot derive {}", self.derive_trait),
                }
                // `|=` is the lattice join; any member's "No" poisons the
                // whole compound.
                *candidate.get_or_insert(CanDerive::Yes) |= can_derive;
            },
            &(),
        );
        if candidate.is_none() {
            trace!(
                " can derive {} because there are no members",
                self.derive_trait
            );
        }
        candidate.unwrap_or_default()
    }
}
impl DeriveTrait {
    /// Whether the user explicitly opted this item out of deriving `self`
    /// via the corresponding name-based `no_*` option.
    fn not_by_name(self, ctx: &BindgenContext, item: &Item) -> bool {
        match self {
            DeriveTrait::Copy => ctx.no_copy_by_name(item),
            DeriveTrait::Debug => ctx.no_debug_by_name(item),
            DeriveTrait::Default => ctx.no_default_by_name(item),
            DeriveTrait::Hash => ctx.no_hash_by_name(item),
            DeriveTrait::PartialEqOrPartialOrd => {
                ctx.no_partialeq_by_name(item)
            }
        }
    }
    /// Edge filter used when joining over a compound type's members;
    /// `PartialEqOrPartialOrd` follows the wider default edge set.
    fn consider_edge_comp(self) -> EdgePredicate {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
            _ => |kind| matches!(kind, EdgeKind::BaseMember | EdgeKind::Field),
        }
    }
    /// Edge filter used when following type references and aliases.
    fn consider_edge_typeref(self) -> EdgePredicate {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
            _ => |kind| kind == EdgeKind::TypeReference,
        }
    }
    /// Edge filter used when joining over a template instantiation.
    fn consider_edge_tmpl_inst(self) -> EdgePredicate {
        match self {
            DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
            _ => |kind| {
                matches!(
                    kind,
                    EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration
                )
            },
        }
    }
    /// Whether `self` can be derived for arrays longer than
    /// `RUST_DERIVE_IN_ARRAY_LIMIT`.
    ///
    /// With the `larger_arrays` feature everything but `Default` works;
    /// without it only `Copy` does.
    fn can_derive_large_array(self, ctx: &BindgenContext) -> bool {
        if ctx.options().rust_features().larger_arrays {
            !matches!(self, DeriveTrait::Default)
        } else {
            matches!(self, DeriveTrait::Copy)
        }
    }
fn can_derive_union(self) -> bool {
matches!(self, DeriveTrait::Copy)
}
fn can_derive_compound_with_destructor(self) -> bool {
!matches!(self, DeriveTrait::Copy)
}
fn can_derive_compound_with_vtable(self) -> bool {
!matches!(self, DeriveTrait::Default)
}
fn can_derive_compound_forward_decl(self) -> bool {
matches!(self, DeriveTrait::Copy | DeriveTrait::Debug)
}
fn can_derive_incomplete_array(self) -> bool {
!matches!(
self,
DeriveTrait::Copy |
DeriveTrait::Hash |
DeriveTrait::PartialEqOrPartialOrd
)
}
fn can_derive_fnptr(self, f: &FunctionSig) -> CanDerive {
match (self, f.function_pointers_can_derive()) {
(DeriveTrait::Copy | DeriveTrait::Default, _) | (_, true) => {
trace!(" function pointer can derive {self}");
CanDerive::Yes
}
(DeriveTrait::Debug, false) => {
trace!(" function pointer cannot derive {self}, but it may be implemented");
CanDerive::Manually
}
(_, false) => {
trace!(" function pointer cannot derive {self}");
CanDerive::No
}
}
}
fn can_derive_vector(self) -> CanDerive {
if self == DeriveTrait::PartialEqOrPartialOrd {
// FIXME: vectors always can derive PartialEq, but they should
// not derive PartialOrd:
// https://github.com/rust-lang-nursery/packed_simd/issues/48
trace!(" vectors cannot derive PartialOrd");
CanDerive::No
} else {
trace!(" vector can derive {self}");
CanDerive::Yes
}
}
fn can_derive_pointer(self) -> CanDerive {
if self == DeriveTrait::Default {
trace!(" pointer cannot derive Default");
CanDerive::No
} else {
trace!(" pointer can derive {self}");
CanDerive::Yes
}
}
fn can_derive_simple(self, kind: &TypeKind) -> CanDerive {
match (self, kind) {
// === Default ===
(
DeriveTrait::Default,
TypeKind::Void |
TypeKind::NullPtr |
TypeKind::Enum(..) |
TypeKind::Reference(..) |
TypeKind::TypeParam |
TypeKind::ObjCInterface(..) |
TypeKind::ObjCId |
TypeKind::ObjCSel,
) => {
trace!(" types that always cannot derive Default");
CanDerive::No
}
(DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => {
unreachable!(
"Type with unresolved type ref can't reach derive default"
)
}
// === Hash ===
(
DeriveTrait::Hash,
TypeKind::Float(..) | TypeKind::Complex(..),
) => {
trace!(" float cannot derive Hash");
CanDerive::No
}
// === others ===
_ => {
trace!(" simple type that can always derive {self}");
CanDerive::Yes
}
}
}
}
impl fmt::Display for DeriveTrait {
    /// Render the trait's user-facing name, as used in trace messages and
    /// "could not derive" diagnostics.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let name = match *self {
            DeriveTrait::Copy => "Copy",
            DeriveTrait::Debug => "Debug",
            DeriveTrait::Default => "Default",
            DeriveTrait::Hash => "Hash",
            DeriveTrait::PartialEqOrPartialOrd => "PartialEq/PartialOrd",
        };
        // Delegate to `str`'s Display impl so formatting flags (width,
        // alignment) keep working exactly as before.
        fmt::Display::fmt(name, f)
    }
}
impl<'ctx> MonotoneFramework for CannotDerive<'ctx> {
    // Nodes are IR items; the extra data is the context plus which trait
    // this particular run of the analysis is deciding.
    type Node = ItemId;
    type Extra = (&'ctx BindgenContext, DeriveTrait);
    type Output = HashMap<ItemId, CanDerive>;
    /// Construct the analysis with an empty result map and the reversed
    /// dependency edges over which derive-ability can propagate.
    fn new(
        (ctx, derive_trait): (&'ctx BindgenContext, DeriveTrait),
    ) -> CannotDerive<'ctx> {
        let can_derive = HashMap::default();
        let dependencies = generate_dependencies(ctx, consider_edge_default);
        CannotDerive {
            ctx,
            derive_trait,
            can_derive,
            dependencies,
        }
    }
    fn initial_worklist(&self) -> Vec<ItemId> {
        // The transitive closure of all allowlisted items, including explicitly
        // blocklisted items.
        self.ctx
            .allowlisted_items()
            .iter()
            .copied()
            .flat_map(|i| {
                // Seed with the item itself, then append everything
                // reachable from it through any traversal edge.
                let mut reachable = vec![i];
                i.trace(
                    self.ctx,
                    &mut |s, _| {
                        reachable.push(s);
                    },
                    &(),
                );
                reachable
            })
            .collect()
    }
    /// Re-evaluate whether `id` can derive the trait, returning whether the
    /// stored answer changed. `CanDerive::No` is a fixed point: once an item
    /// reaches it, it can never move back up the lattice.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {id:?}");
        if let Some(CanDerive::No) = self.can_derive.get(&id) {
            trace!("    already know it cannot derive {}", self.derive_trait);
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let can_derive = match item.as_type() {
            Some(ty) => {
                let mut can_derive = self.constrain_type(item, ty);
                if let CanDerive::Yes = can_derive {
                    // Even if the type itself is derivable, an over-aligned
                    // layout may force padding arrays longer than the
                    // derive limit, so downgrade to `Manually`.
                    let is_reached_limit =
                        |l: Layout| l.align > RUST_DERIVE_IN_ARRAY_LIMIT;
                    if !self.derive_trait.can_derive_large_array(self.ctx) &&
                        ty.layout(self.ctx).is_some_and(is_reached_limit)
                    {
                        // We have to be conservative: the struct *could* have enough
                        // padding that we emit an array that is longer than
                        // `RUST_DERIVE_IN_ARRAY_LIMIT`. If we moved padding calculations
                        // into the IR and computed them before this analysis, then we could
                        // be precise rather than conservative here.
                        can_derive = CanDerive::Manually;
                    }
                }
                can_derive
            }
            // Non-type items (e.g. modules) just join over their children.
            None => self.constrain_join(item, consider_edge_default),
        };
        self.insert(id, can_derive)
    }
    /// Enqueue every item whose answer may depend on `id`'s answer.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<CannotDerive<'ctx>> for HashMap<ItemId, CanDerive> {
    /// Consume the finished analysis and hand back its result map.
    ///
    /// Items that can derive unconditionally are never stored, so every
    /// value remaining in the map must be something other than
    /// `CanDerive::Yes`.
    fn from(analysis: CannotDerive<'ctx>) -> Self {
        let result = analysis.can_derive;
        extra_assert!(result.values().all(|v| *v != CanDerive::Yes));
        result
    }
}
/// Convert a `HashMap<ItemId, CanDerive>` into a `HashSet<ItemId>`.
///
/// Elements that are not `CanDerive::Yes` are kept in the set, so that it
/// represents all items that cannot derive.
pub(crate) fn as_cannot_derive_set(
    can_derive: HashMap<ItemId, CanDerive>,
) -> HashSet<ItemId> {
    can_derive
        .into_iter()
        .filter(|(_, can)| *can != CanDerive::Yes)
        .map(|(id, _)| id)
        .collect()
}

View File

@@ -0,0 +1,175 @@
//! Determining which types have destructors
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::{CompKind, Field, FieldMethods};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has a destructor or not
///
/// We use the monotone function `has destructor`, defined as follows:
///
/// * If T is a type alias, a templated alias, or an indirection to another type,
///   T has a destructor if the type T refers to has a destructor.
/// * If T is a compound type, T has a destructor if we saw a destructor when parsing it,
///   or if it's a struct, T has a destructor if any of its base members has a destructor,
///   or if any of its fields have a destructor.
/// * If T is an instantiation of an abstract template definition, T has
///   a destructor if its template definition has a destructor,
///   or if any of the template arguments has a destructor.
/// * If T is the type of a field, that field has a destructor if it's not a bitfield,
///   and if T has a destructor.
#[derive(Debug, Clone)]
pub(crate) struct HasDestructorAnalysis<'ctx> {
    // Context used to resolve `ItemId`s while constraining nodes.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set definitely has a destructor. Membership only ever grows (monotone).
    have_destructor: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `have_destructor` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has a destructor or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasDestructorAnalysis<'_> {
    /// Is this IR edge kind relevant to destructor propagation?
    fn consider_edge(kind: EdgeKind) -> bool {
        // Destructor-ness only flows through aliases/typerefs, base
        // classes, fields, and template definitions/arguments; every other
        // edge kind is irrelevant to this analysis.
        match kind {
            EdgeKind::TypeReference |
            EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::TemplateArgument |
            EdgeKind::TemplateDeclaration => true,
            _ => false,
        }
    }
    /// Mark `id` as having a destructor and report that the analysis state
    /// changed. Must only be called for ids not already in the set.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        let newly_inserted = self.have_destructor.insert(id);
        assert!(
            newly_inserted,
            "We shouldn't try and insert {id:?} twice because if it was \
             already in the set, `constrain` should have exited early."
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasDestructorAnalysis<'ctx> {
    // Nodes are IR items; the only extra data needed is the context.
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;
    /// Start with an empty "has destructor" set and the reversed dependency
    /// edges along which destructor-ness can propagate.
    fn new(ctx: &'ctx BindgenContext) -> Self {
        let have_destructor = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasDestructorAnalysis {
            ctx,
            have_destructor,
            dependencies,
        }
    }
    /// Seed the worklist with every allowlisted item.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }
    /// Re-evaluate whether `id` has a destructor. Returns `Changed` exactly
    /// when `id` is newly added to the set.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        if self.have_destructor.contains(&id) {
            // We've already computed that this type has a destructor and that can't
            // change.
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let ty = match item.as_type() {
            // Non-type items never have a destructor.
            None => return ConstrainResult::Same,
            Some(ty) => ty,
        };
        match *ty.kind() {
            // Aliases and type references inherit destructor-ness from the
            // type they point at.
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::ResolvedTypeRef(t) => {
                if self.have_destructor.contains(&t.into()) {
                    self.insert(id)
                } else {
                    ConstrainResult::Same
                }
            }
            TypeKind::Comp(ref info) => {
                // A compound type with an explicitly-parsed destructor has
                // one, full stop.
                if info.has_own_destructor() {
                    return self.insert(id);
                }
                match info.kind() {
                    // Unions never run member destructors.
                    CompKind::Union => ConstrainResult::Same,
                    // Structs inherit destructor-ness from bases and
                    // non-bitfield data members.
                    CompKind::Struct => {
                        let base_or_field_destructor =
                            info.base_members().iter().any(|base| {
                                self.have_destructor.contains(&base.ty.into())
                            }) || info.fields().iter().any(
                                |field| match *field {
                                    Field::DataMember(ref data) => self
                                        .have_destructor
                                        .contains(&data.ty().into()),
                                    Field::Bitfields(_) => false,
                                },
                            );
                        if base_or_field_destructor {
                            self.insert(id)
                        } else {
                            ConstrainResult::Same
                        }
                    }
                }
            }
            // Instantiations inherit from the template definition or any
            // template argument.
            TypeKind::TemplateInstantiation(ref inst) => {
                let definition_or_arg_destructor = self
                    .have_destructor
                    .contains(&inst.template_definition().into()) ||
                    inst.template_arguments().iter().any(|arg| {
                        self.have_destructor.contains(&arg.into())
                    });
                if definition_or_arg_destructor {
                    self.insert(id)
                } else {
                    ConstrainResult::Same
                }
            }
            // All other type kinds never have a destructor.
            _ => ConstrainResult::Same,
        }
    }
    /// Enqueue every item whose answer may depend on `id`'s answer.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasDestructorAnalysis<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasDestructorAnalysis<'ctx>) -> Self {
analysis.have_destructor
}
}

248
vendor/bindgen/ir/analysis/has_float.rs vendored Normal file
View File

@@ -0,0 +1,248 @@
//! Determining which types have floats.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has float or not.
///
/// We use the monotone constraint function `has_float`,
/// defined as follows:
///
/// * If T is float or complex float, T trivially has.
/// * If T is a type alias, a templated alias or an indirection to another type,
///   it has float if the type T refers to has.
/// * If T is a compound type, it has float if any of base member or field
///   has.
/// * If T is an instantiation of an abstract template definition, T has
///   float if any of the template arguments or template definition
///   has.
#[derive(Debug, Clone)]
pub(crate) struct HasFloat<'ctx> {
    // Context used to resolve `ItemId`s while constraining nodes.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set has float. Membership only ever grows (monotone).
    has_float: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `has_float` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has float or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasFloat<'_> {
    /// Does this IR edge kind propagate float-ness from referent to
    /// referrer?
    fn consider_edge(kind: EdgeKind) -> bool {
        // Float-ness flows through member/field/alias/variable/template
        // edges; function signatures, methods, and inner items do not make
        // their parent "contain" a float.
        matches!(
            kind,
            EdgeKind::BaseMember |
                EdgeKind::Field |
                EdgeKind::TypeReference |
                EdgeKind::VarType |
                EdgeKind::TemplateArgument |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TemplateParameterDefinition
        )
    }
    /// Record that `id` contains a float and signal that the fix-point
    /// computation must continue. Must only be called for ids not already
    /// in the set.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        trace!("inserting {id:?} into the has_float set");
        let newly_inserted = self.has_float.insert(id);
        assert!(
            newly_inserted,
            "We shouldn't try and insert {id:?} twice because if it was \
             already in the set, `constrain` should have exited early."
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasFloat<'ctx> {
    // Nodes are IR items; the only extra data needed is the context.
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;
    /// Start with an empty `has_float` set and the reversed dependency
    /// edges along which float-ness can propagate.
    fn new(ctx: &'ctx BindgenContext) -> HasFloat<'ctx> {
        let has_float = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasFloat {
            ctx,
            has_float,
            dependencies,
        }
    }
    /// Seed the worklist with every allowlisted item.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }
    /// Re-evaluate whether `id` contains a float. Returns `Changed` exactly
    /// when `id` is newly added to the set.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {id:?}");
        if self.has_float.contains(&id) {
            // Fixed the trace message: reaching this branch means the item
            // is already known to HAVE float (the old message claimed the
            // opposite).
            trace!("    already know it has float");
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let Some(ty) = item.as_type() else {
            trace!("    not a type; ignoring");
            return ConstrainResult::Same;
        };
        match *ty.kind() {
            // Leaf kinds that can never contain a float.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::TypeParam |
            TypeKind::Opaque |
            TypeKind::Pointer(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => {
                trace!("    simple type that do not have float");
                ConstrainResult::Same
            }
            TypeKind::Float(..) | TypeKind::Complex(..) => {
                trace!("    float type has float");
                self.insert(id)
            }
            TypeKind::Array(t, _) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        "    Array with type T that has float also has float"
                    );
                    return self.insert(id);
                }
                trace!("    Array with type T that do not have float also do not have float");
                ConstrainResult::Same
            }
            TypeKind::Vector(t, _) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        "    Vector with type T that has float also has float"
                    );
                    return self.insert(id);
                }
                trace!("    Vector with type T that do not have float also do not have float");
                ConstrainResult::Same
            }
            // Aliases and type refs inherit float-ness from their referent.
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                if self.has_float.contains(&t.into()) {
                    trace!(
                        "    aliases and type refs to T which have float \
                         also have float"
                    );
                    self.insert(id)
                } else {
                    // Fixed the garbled "floaarrayt" typo in this message.
                    trace!("    aliases and type refs to T which do not have float \
                            also do not have float");
                    ConstrainResult::Same
                }
            }
            // Compounds contain a float if any base or field does;
            // bitfield units are checked bitfield-by-bitfield.
            TypeKind::Comp(ref info) => {
                let bases_have = info
                    .base_members()
                    .iter()
                    .any(|base| self.has_float.contains(&base.ty.into()));
                if bases_have {
                    trace!("    bases have float, so we also have");
                    return self.insert(id);
                }
                let fields_have = info.fields().iter().any(|f| match *f {
                    Field::DataMember(ref data) => {
                        self.has_float.contains(&data.ty().into())
                    }
                    Field::Bitfields(ref bfu) => bfu
                        .bitfields()
                        .iter()
                        .any(|b| self.has_float.contains(&b.ty().into())),
                });
                if fields_have {
                    trace!("    fields have float, so we also have");
                    return self.insert(id);
                }
                trace!("    comp doesn't have float");
                ConstrainResult::Same
            }
            // Instantiations inherit from any argument or the definition.
            TypeKind::TemplateInstantiation(ref template) => {
                let args_have = template
                    .template_arguments()
                    .iter()
                    .any(|arg| self.has_float.contains(&arg.into()));
                if args_have {
                    trace!(
                        "    template args have float, so \
                         instantiation also has float"
                    );
                    return self.insert(id);
                }
                let def_has = self
                    .has_float
                    .contains(&template.template_definition().into());
                if def_has {
                    trace!(
                        "    template definition has float, so \
                         instantiation also has"
                    );
                    return self.insert(id);
                }
                trace!("    template instantiation do not have float");
                ConstrainResult::Same
            }
        }
    }
    /// Enqueue every item whose answer may depend on `id`'s answer.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasFloat<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasFloat<'ctx>) -> Self {
analysis.has_float
}
}

View File

@@ -0,0 +1,242 @@
//! Determining which types have type parameters in arrays.
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::comp::Field;
use crate::ir::comp::FieldMethods;
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item whether it has array or not.
///
/// We use the monotone constraint function `has_type_parameter_in_array`,
/// defined as follows:
///
/// * If T is Array type with type parameter, T trivially has.
/// * If T is a type alias, a templated alias or an indirection to another type,
///   it has type parameter in array if the type T refers to has.
/// * If T is a compound type, it has array if any of base member or field
///   has type parameter in array.
/// * If T is an instantiation of an abstract template definition, T has
///   type parameter in array if any of the template arguments or template definition
///   has.
#[derive(Debug, Clone)]
pub(crate) struct HasTypeParameterInArray<'ctx> {
    // Context used to resolve `ItemId`s while constraining nodes.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set has array. Membership only ever grows (monotone).
    has_type_parameter_in_array: HashSet<ItemId>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `has_type_parameter_in_array` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has array or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasTypeParameterInArray<'_> {
    /// Does this IR edge kind propagate "type parameter in array" from
    /// referent to referrer?
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type has type
        // parameter in array or not; function signatures, methods, and
        // inner items do not.
        matches!(
            kind,
            EdgeKind::BaseMember |
                EdgeKind::Field |
                EdgeKind::TypeReference |
                EdgeKind::VarType |
                EdgeKind::TemplateArgument |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TemplateParameterDefinition
        )
    }
    /// Record that `id` has a type parameter in an array and signal that
    /// the fix-point computation must continue. Must only be called for ids
    /// not already in the set.
    fn insert<Id: Into<ItemId>>(&mut self, id: Id) -> ConstrainResult {
        let id = id.into();
        trace!("inserting {id:?} into the has_type_parameter_in_array set");
        let newly_inserted = self.has_type_parameter_in_array.insert(id);
        assert!(
            newly_inserted,
            "We shouldn't try and insert {id:?} twice because if it was \
             already in the set, `constrain` should have exited early."
        );
        ConstrainResult::Changed
    }
}
impl<'ctx> MonotoneFramework for HasTypeParameterInArray<'ctx> {
    // Nodes are IR items; the only extra data needed is the context.
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashSet<ItemId>;
    /// Start with an empty result set and the reversed dependency edges
    /// along which the property can propagate.
    fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> {
        let has_type_parameter_in_array = HashSet::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasTypeParameterInArray {
            ctx,
            has_type_parameter_in_array,
            dependencies,
        }
    }
    /// Seed the worklist with every allowlisted item.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }
    /// Re-evaluate whether `id` has a type parameter in an array. Returns
    /// `Changed` exactly when `id` is newly added to the set.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain: {id:?}");
        if self.has_type_parameter_in_array.contains(&id) {
            // Fixed the trace message: reaching this branch means the item
            // is already known to HAVE a type parameter in array (the old
            // message claimed the opposite).
            trace!("    already know it has array");
            return ConstrainResult::Same;
        }
        let item = self.ctx.resolve_item(id);
        let Some(ty) = item.as_type() else {
            trace!("    not a type; ignoring");
            return ConstrainResult::Same;
        };
        match *ty.kind() {
            // Handle the simple cases. These cannot have array in type parameter
            // without further information.
            TypeKind::Void |
            TypeKind::NullPtr |
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Vector(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::TypeParam |
            TypeKind::Opaque |
            TypeKind::Pointer(..) |
            TypeKind::UnresolvedTypeRef(..) |
            TypeKind::ObjCInterface(..) |
            TypeKind::ObjCId |
            TypeKind::ObjCSel => {
                trace!("    simple type that do not have array");
                ConstrainResult::Same
            }
            // An array has the property exactly when its canonical element
            // type is a named type parameter.
            TypeKind::Array(t, _) => {
                let inner_ty =
                    self.ctx.resolve_type(t).canonical_type(self.ctx);
                if let TypeKind::TypeParam = *inner_ty.kind() {
                    trace!("    Array with Named type has type parameter");
                    self.insert(id)
                } else {
                    // Fixed the trace message: this is the branch where the
                    // array does NOT have a type parameter (the old message
                    // said "does have").
                    trace!(
                        "    Array without Named type does not have type parameter"
                    );
                    ConstrainResult::Same
                }
            }
            // Aliases and type refs inherit the property from their
            // referent.
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) => {
                if self.has_type_parameter_in_array.contains(&t.into()) {
                    trace!(
                        "    aliases and type refs to T which have array \
                         also have array"
                    );
                    self.insert(id)
                } else {
                    trace!(
                        "    aliases and type refs to T which do not have array \
                            also do not have array"
                    );
                    ConstrainResult::Same
                }
            }
            // Compounds have the property if any base or non-bitfield data
            // member does.
            TypeKind::Comp(ref info) => {
                let bases_have = info.base_members().iter().any(|base| {
                    self.has_type_parameter_in_array.contains(&base.ty.into())
                });
                if bases_have {
                    trace!("    bases have array, so we also have");
                    return self.insert(id);
                }
                let fields_have = info.fields().iter().any(|f| match *f {
                    Field::DataMember(ref data) => self
                        .has_type_parameter_in_array
                        .contains(&data.ty().into()),
                    Field::Bitfields(..) => false,
                });
                if fields_have {
                    trace!("    fields have array, so we also have");
                    return self.insert(id);
                }
                trace!("    comp doesn't have array");
                ConstrainResult::Same
            }
            // Instantiations inherit from any argument or the definition.
            TypeKind::TemplateInstantiation(ref template) => {
                let args_have =
                    template.template_arguments().iter().any(|arg| {
                        self.has_type_parameter_in_array.contains(&arg.into())
                    });
                if args_have {
                    trace!(
                        "    template args have array, so \
                         instantiation also has array"
                    );
                    return self.insert(id);
                }
                let def_has = self
                    .has_type_parameter_in_array
                    .contains(&template.template_definition().into());
                if def_has {
                    trace!(
                        "    template definition has array, so \
                         instantiation also has"
                    );
                    return self.insert(id);
                }
                trace!("    template instantiation do not have array");
                ConstrainResult::Same
            }
        }
    }
    /// Enqueue every item whose answer may depend on `id`'s answer.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasTypeParameterInArray<'ctx>> for HashSet<ItemId> {
fn from(analysis: HasTypeParameterInArray<'ctx>) -> Self {
analysis.has_type_parameter_in_array
}
}

235
vendor/bindgen/ir/analysis/has_vtable.rs vendored Normal file
View File

@@ -0,0 +1,235 @@
//! Determining which types have vtables
use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::cmp;
use std::ops;
/// The result of the `HasVtableAnalysis` for an individual item.
///
/// The variants are ordered from "no vtable" to "vtable in a base" so that
/// the derived `Ord` matches the analysis lattice (`join` takes the max).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub(crate) enum HasVtableResult {
    /// The item does not have a vtable pointer.
    #[default]
    No,
    /// The item has a vtable and the actual vtable pointer is within this item.
    SelfHasVtable,
    /// The item has a vtable, but the actual vtable pointer is in a base
    /// member.
    BaseHasVtable,
}
impl HasVtableResult {
/// Take the least upper bound of `self` and `rhs`.
pub(crate) fn join(self, rhs: Self) -> Self {
cmp::max(self, rhs)
}
}
impl ops::BitOr for HasVtableResult {
    type Output = Self;
    /// `|` is the lattice join: the stronger of the two results wins.
    fn bitor(self, rhs: HasVtableResult) -> Self::Output {
        Self::join(self, rhs)
    }
}
impl ops::BitOrAssign for HasVtableResult {
    /// In-place lattice join, routed through the `BitOr` impl.
    fn bitor_assign(&mut self, rhs: HasVtableResult) {
        *self = *self | rhs;
    }
}
/// An analysis that finds for each IR item whether it has vtable or not
///
/// We use the monotone function `has vtable`, defined as follows:
///
/// * If T is a type alias, a templated alias, an indirection to another type,
///   or a reference of a type, T has vtable if the type T refers to has vtable.
/// * If T is a compound type, T has vtable if we saw a virtual function when
///   parsing it or any of its base member has vtable.
/// * If T is an instantiation of an abstract template definition, T has
///   vtable if template definition has vtable
#[derive(Debug, Clone)]
pub(crate) struct HasVtableAnalysis<'ctx> {
    // Context used to resolve `ItemId`s while constraining nodes.
    ctx: &'ctx BindgenContext,
    // The incremental result of this analysis's computation. Everything in this
    // set definitely has a vtable. Absence of an entry means
    // `HasVtableResult::No`; stored results only ever move up the lattice.
    have_vtable: HashMap<ItemId, HasVtableResult>,
    // Dependencies saying that if a key ItemId has been inserted into the
    // `have_vtable` set, then each of the ids in Vec<ItemId> need to be
    // considered again.
    //
    // This is a subset of the natural IR graph with reversed edges, where we
    // only include the edges from the IR graph that can affect whether a type
    // has a vtable or not.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
}
impl HasVtableAnalysis<'_> {
    /// Is this IR edge kind relevant to vtable propagation?
    fn consider_edge(kind: EdgeKind) -> bool {
        // Only aliases/typerefs, base classes, and template declarations
        // can carry vtable-ness from one item to another.
        match kind {
            EdgeKind::TypeReference |
            EdgeKind::BaseMember |
            EdgeKind::TemplateDeclaration => true,
            _ => false,
        }
    }
    /// Record `result` for `id`, keeping the strongest result seen so far.
    ///
    /// `HasVtableResult::No` is never stored; absence from the map encodes
    /// "no vtable", which keeps the map small.
    fn insert<Id: Into<ItemId>>(
        &mut self,
        id: Id,
        result: HasVtableResult,
    ) -> ConstrainResult {
        if result == HasVtableResult::No {
            return ConstrainResult::Same;
        }
        let id = id.into();
        match self.have_vtable.entry(id) {
            Entry::Vacant(entry) => {
                entry.insert(result);
                ConstrainResult::Changed
            }
            Entry::Occupied(mut entry) => {
                // Only an upgrade (a strictly greater lattice value)
                // counts as a change.
                if result > *entry.get() {
                    entry.insert(result);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
        }
    }
    /// Propagate `from`'s current result (if any) onto `to`.
    fn forward<Id1, Id2>(&mut self, from: Id1, to: Id2) -> ConstrainResult
    where
        Id1: Into<ItemId>,
        Id2: Into<ItemId>,
    {
        let from = from.into();
        let to = to.into();
        let inherited = self.have_vtable.get(&from).copied();
        match inherited {
            Some(result) => self.insert(to, result),
            None => ConstrainResult::Same,
        }
    }
}
impl<'ctx> MonotoneFramework for HasVtableAnalysis<'ctx> {
    // Nodes are IR items; the only extra data needed is the context.
    type Node = ItemId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<ItemId, HasVtableResult>;
    /// Start with an empty result map and the reversed dependency edges
    /// along which vtable-ness can propagate.
    fn new(ctx: &'ctx BindgenContext) -> HasVtableAnalysis<'ctx> {
        let have_vtable = HashMap::default();
        let dependencies = generate_dependencies(ctx, Self::consider_edge);
        HasVtableAnalysis {
            ctx,
            have_vtable,
            dependencies,
        }
    }
    /// Seed the worklist with every allowlisted item.
    fn initial_worklist(&self) -> Vec<ItemId> {
        self.ctx.allowlisted_items().iter().copied().collect()
    }
    /// Re-evaluate whether `id` has a vtable, returning whether the result
    /// map changed.
    fn constrain(&mut self, id: ItemId) -> ConstrainResult {
        trace!("constrain {id:?}");
        let item = self.ctx.resolve_item(id);
        let ty = match item.as_type() {
            // Non-type items never have a vtable.
            None => return ConstrainResult::Same,
            Some(ty) => ty,
        };
        // TODO #851: figure out a way to handle deriving from template type parameters.
        match *ty.kind() {
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::ResolvedTypeRef(t) |
            TypeKind::Reference(t) => {
                trace!(
                    "    aliases and references forward to their inner type"
                );
                self.forward(t, id)
            }
            TypeKind::Comp(ref info) => {
                trace!("    comp considers its own methods and bases");
                let mut result = HasVtableResult::No;
                if info.has_own_virtual_method() {
                    trace!("    comp has its own virtual method");
                    result |= HasVtableResult::SelfHasVtable;
                }
                let bases_has_vtable = info.base_members().iter().any(|base| {
                    // Check first, log second: the previous code emitted
                    // "comp has a base with a vtable" for *every* base,
                    // whether or not that base actually had one.
                    let base_has_vtable =
                        self.have_vtable.contains_key(&base.ty.into());
                    if base_has_vtable {
                        trace!("    comp has a base with a vtable: {base:?}");
                    }
                    base_has_vtable
                });
                if bases_has_vtable {
                    result |= HasVtableResult::BaseHasVtable;
                }
                self.insert(id, result)
            }
            // An instantiation has a vtable iff its definition does.
            TypeKind::TemplateInstantiation(ref inst) => {
                self.forward(inst.template_definition(), id)
            }
            _ => ConstrainResult::Same,
        }
    }
    /// Enqueue every item whose answer may depend on `id`'s answer.
    fn each_depending_on<F>(&self, id: ItemId, mut f: F)
    where
        F: FnMut(ItemId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for item in edges {
                trace!("enqueue {item:?} into worklist");
                f(*item);
            }
        }
    }
}
impl<'ctx> From<HasVtableAnalysis<'ctx>> for HashMap<ItemId, HasVtableResult> {
    /// Consume the finished analysis and hand back its result map.
    fn from(analysis: HasVtableAnalysis<'ctx>) -> Self {
        let result = analysis.have_vtable;
        // We let the lack of an entry mean "No" to save space.
        extra_assert!(result.values().all(|v| *v != HasVtableResult::No));
        result
    }
}
/// A convenience trait for the things for which we might wonder if they have a
/// vtable during codegen.
///
/// This is not for _computing_ whether the thing has a vtable, it is for
/// looking up the results of the `HasVtableAnalysis`'s computations for a
/// specific thing.
pub(crate) trait HasVtable {
    /// Return `true` if this thing has vtable, `false` otherwise.
    fn has_vtable(&self, ctx: &BindgenContext) -> bool;
    /// Return `true` if this thing has an actual vtable pointer in itself, as
    /// opposed to transitively in a base member.
    fn has_vtable_ptr(&self, ctx: &BindgenContext) -> bool;
}

395
vendor/bindgen/ir/analysis/mod.rs vendored Normal file
View File

@@ -0,0 +1,395 @@
//! Fix-point analyses on the IR using the "monotone framework".
//!
//! A lattice is a set with a partial ordering between elements, where there is
//! a single least upper bound and a single greatest least bound for every
//! subset. We are dealing with finite lattices, which means that it has a
//! finite number of elements, and it follows that there exists a single top and
//! a single bottom member of the lattice. For example, the power set of a
//! finite set forms a finite lattice where partial ordering is defined by set
//! inclusion, that is `a <= b` if `a` is a subset of `b`. Here is the finite
//! lattice constructed from the set {0,1,2}:
//!
//! ```text
//! .----- Top = {0,1,2} -----.
//! / | \
//! / | \
//! / | \
//! {0,1} -------. {0,2} .--------- {1,2}
//! | \ / \ / |
//! | / \ |
//! | / \ / \ |
//! {0} --------' {1} `---------- {2}
//! \ | /
//! \ | /
//! \ | /
//! `------ Bottom = {} ------'
//! ```
//!
//! A monotone function `f` is a function where if `x <= y`, then it holds that
//! `f(x) <= f(y)`. It should be clear that running a monotone function to a
//! fix-point on a finite lattice will always terminate: `f` can only "move"
//! along the lattice in a single direction, and therefore can only either find
//! a fix-point in the middle of the lattice or continue to the top or bottom
//! depending if it is ascending or descending the lattice respectively.
//!
//! For a deeper introduction to the general form of this kind of analysis, see
//! [Static Program Analysis by Anders Møller and Michael I. Schwartzbach][spa].
//!
//! [spa]: https://cs.au.dk/~amoeller/spa/spa.pdf
// Re-export individual analyses.
mod template_params;
pub(crate) use self::template_params::UsedTemplateParameters;
mod derive;
pub use self::derive::DeriveTrait;
pub(crate) use self::derive::{as_cannot_derive_set, CannotDerive};
mod has_vtable;
pub(crate) use self::has_vtable::{
HasVtable, HasVtableAnalysis, HasVtableResult,
};
mod has_destructor;
pub(crate) use self::has_destructor::HasDestructorAnalysis;
mod has_type_param_in_array;
pub(crate) use self::has_type_param_in_array::HasTypeParameterInArray;
mod has_float;
pub(crate) use self::has_float::HasFloat;
mod sizedness;
pub(crate) use self::sizedness::{
Sizedness, SizednessAnalysis, SizednessResult,
};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::HashMap;
use std::fmt;
use std::ops;
/// An analysis in the monotone framework.
///
/// Implementors of this trait must maintain the following two invariants:
///
/// 1. The concrete data must be a member of a finite-height lattice.
/// 2. The concrete `constrain` method must be monotone: that is,
/// if `x <= y`, then `constrain(x) <= constrain(y)`.
///
/// If these invariants do not hold, iteration to a fix-point might never
/// complete.
///
/// For a simple example analysis, see the `ReachableFrom` type in the `tests`
/// module below.
pub(crate) trait MonotoneFramework: Sized + fmt::Debug {
/// The type of node in our dependency graph.
///
/// This is just generic (and not `ItemId`) so that we can easily unit test
/// without constructing real `Item`s and their `ItemId`s.
type Node: Copy;
/// Any extra data that is needed during computation.
///
/// Again, this is just generic (and not `&BindgenContext`) so that we can
/// easily unit test without constructing real `BindgenContext`s full of
/// real `Item`s and real `ItemId`s.
type Extra: Sized;
/// The final output of this analysis. Once we have reached a fix-point, we
/// convert `self` into this type, and return it as the final result of the
/// analysis.
type Output: From<Self> + fmt::Debug;
/// Construct a new instance of this analysis.
fn new(extra: Self::Extra) -> Self;
/// Get the initial set of nodes from which to start the analysis. Unless
/// you are sure of some domain-specific knowledge, this should be the
/// complete set of nodes.
fn initial_worklist(&self) -> Vec<Self::Node>;
/// Update the analysis for the given node.
///
/// If this results in changing our internal state (ie, we discovered that
/// we have not reached a fix-point and iteration should continue), return
/// `ConstrainResult::Changed`. Otherwise, return `ConstrainResult::Same`.
/// When `constrain` returns `ConstrainResult::Same` for all nodes in the
/// set, we have reached a fix-point and the analysis is complete.
fn constrain(&mut self, node: Self::Node) -> ConstrainResult;
/// For each node `d` that depends on the given `node`'s current answer when
/// running `constrain(d)`, call `f(d)`. This informs us which new nodes to
/// queue up in the worklist when `constrain(node)` reports updated
/// information.
fn each_depending_on<F>(&self, node: Self::Node, f: F)
where
F: FnMut(Self::Node);
}
/// Whether an analysis's `constrain` function modified the incremental results
/// or not.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Default)]
pub(crate) enum ConstrainResult {
    /// The incremental results were updated, and the fix-point computation
    /// should continue.
    Changed,
    /// The incremental results were not updated.
    #[default]
    Same,
}
impl ops::BitOr for ConstrainResult {
    type Output = Self;
    fn bitor(self, rhs: ConstrainResult) -> Self::Output {
        // `Changed` is absorbing: the combination is `Same` only when both
        // operands are `Same`.
        match (self, rhs) {
            (ConstrainResult::Same, ConstrainResult::Same) => {
                ConstrainResult::Same
            }
            _ => ConstrainResult::Changed,
        }
    }
}
impl ops::BitOrAssign for ConstrainResult {
    fn bitor_assign(&mut self, rhs: ConstrainResult) {
        *self = *self | rhs;
    }
}
/// Run an analysis in the monotone framework.
///
/// Pops nodes off the worklist and calls `constrain` on each; whenever a
/// node's answer changes, every node depending on it is queued for
/// re-examination. When the worklist drains, a fix-point has been reached
/// and the analysis is converted into its final output.
pub(crate) fn analyze<Analysis>(extra: Analysis::Extra) -> Analysis::Output
where
    Analysis: MonotoneFramework,
{
    let mut state = Analysis::new(extra);
    let mut pending = state.initial_worklist();
    while let Some(current) = pending.pop() {
        match state.constrain(current) {
            ConstrainResult::Changed => {
                // This node's answer changed, so everything that reads it
                // must be constrained again.
                state.each_depending_on(current, |dependent| {
                    pending.push(dependent);
                });
            }
            ConstrainResult::Same => {}
        }
    }
    state.into()
}
/// Generate the dependency map for analysis
///
/// For every allowlisted item, record which other allowlisted items read its
/// answer by reversing the natural IR edges, keeping only the edge kinds
/// that `consider_edge` accepts. Every allowlisted item is guaranteed an
/// entry in the returned map, even if nothing depends on it.
pub(crate) fn generate_dependencies<F>(
    ctx: &BindgenContext,
    consider_edge: F,
) -> HashMap<ItemId, Vec<ItemId>>
where
    F: Fn(EdgeKind) -> bool,
{
    let mut dependencies = HashMap::default();
    for &item in ctx.allowlisted_items() {
        dependencies.entry(item).or_insert_with(Vec::new);
        // We reverse our natural IR graph edges to find dependencies
        // between nodes.
        item.trace(
            ctx,
            &mut |sub_item: ItemId, edge_kind| {
                let relevant = ctx.allowlisted_items().contains(&sub_item) &&
                    consider_edge(edge_kind);
                if relevant {
                    dependencies
                        .entry(sub_item)
                        .or_insert_with(Vec::new)
                        .push(item);
                }
            },
            &(),
        );
    }
    dependencies
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::HashSet;
    // A toy analysis that exercises the monotone framework: for every node,
    // compute the set of nodes reachable from it. The lattice maps each node
    // to a subset of all nodes, and the join function is set union.
    //
    // The test graph's directed edges are:
    //
    //     1 -> 3
    //     2 -> 2
    //     3 -> 4, 5
    //     4 -> 7
    //     5 -> 6, 7
    //     6 -> 8
    //     7 -> 3
    //     8 -> (none)
    //
    // Note the cycles: 2 -> 2, and 3 -> 4 -> 7 -> 3 / 3 -> 5 -> 7 -> 3.
    //
    // The expected mapping from each node to the set of nodes reachable
    // from it:
    //
    //     1: {3,4,5,6,7,8}
    //     2: {2}
    //     3: {3,4,5,6,7,8}
    //     4: {3,4,5,6,7,8}
    //     5: {3,4,5,6,7,8}
    //     6: {8}
    //     7: {3,4,5,6,7,8}
    //     8: {}
    #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
    struct Node(usize);
    #[derive(Clone, Debug, Default, PartialEq, Eq)]
    struct Graph(HashMap<Node, Vec<Node>>);
    impl Graph {
        fn make_test_graph() -> Graph {
            let mut graph = Graph::default();
            let mut add_edges = |from: usize, tos: &[usize]| {
                graph
                    .0
                    .insert(Node(from), tos.iter().copied().map(Node).collect());
            };
            add_edges(1, &[3]);
            add_edges(2, &[2]);
            add_edges(3, &[4, 5]);
            add_edges(4, &[7]);
            add_edges(5, &[6, 7]);
            add_edges(6, &[8]);
            add_edges(7, &[3]);
            add_edges(8, &[]);
            graph
        }
        fn reverse(&self) -> Graph {
            let mut reversed = Graph::default();
            for (source, targets) in &self.0 {
                reversed.0.entry(*source).or_insert_with(Vec::new);
                for target in targets {
                    reversed
                        .0
                        .entry(*target)
                        .or_insert_with(Vec::new)
                        .push(*source);
                }
            }
            reversed
        }
    }
    #[derive(Clone, Debug, PartialEq, Eq)]
    struct ReachableFrom<'a> {
        reachable: HashMap<Node, HashSet<Node>>,
        graph: &'a Graph,
        reversed: Graph,
    }
    impl<'a> MonotoneFramework for ReachableFrom<'a> {
        type Node = Node;
        type Extra = &'a Graph;
        type Output = HashMap<Node, HashSet<Node>>;
        fn new(graph: &'a Graph) -> Self {
            ReachableFrom {
                reachable: Default::default(),
                reversed: graph.reverse(),
                graph,
            }
        }
        fn initial_worklist(&self) -> Vec<Node> {
            self.graph.0.keys().copied().collect()
        }
        fn constrain(&mut self, node: Node) -> ConstrainResult {
            // reachable(x) is the union of x's direct successors with
            // everything reachable from those successors. This is a
            // deliberately naive, allocation-heavy set union -- it is
            // test-only code, so don't imitate it elsewhere!
            let size_before = self.reachable.entry(node).or_default().len();
            for successor in &self.graph.0[&node] {
                self.reachable.get_mut(&node).unwrap().insert(*successor);
                let via_successor =
                    self.reachable.entry(*successor).or_default().clone();
                self.reachable
                    .get_mut(&node)
                    .unwrap()
                    .extend(via_successor);
            }
            if self.reachable[&node].len() == size_before {
                ConstrainResult::Same
            } else {
                ConstrainResult::Changed
            }
        }
        fn each_depending_on<F>(&self, node: Node, mut f: F)
        where
            F: FnMut(Node),
        {
            for dependent in &self.reversed.0[&node] {
                f(*dependent);
            }
        }
    }
    impl<'a> From<ReachableFrom<'a>> for HashMap<Node, HashSet<Node>> {
        fn from(analysis: ReachableFrom<'a>) -> Self {
            analysis.reachable
        }
    }
    #[test]
    fn monotone() {
        let graph = Graph::make_test_graph();
        let reachable = analyze::<ReachableFrom>(&graph);
        println!("reachable = {reachable:#?}");
        fn nodes<A>(values: A) -> HashSet<Node>
        where
            A: AsRef<[usize]>,
        {
            values.as_ref().iter().copied().map(Node).collect()
        }
        let mut expected = HashMap::default();
        expected.insert(Node(1), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(2), nodes([2]));
        expected.insert(Node(3), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(4), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(5), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(6), nodes([8]));
        expected.insert(Node(7), nodes([3, 4, 5, 6, 7, 8]));
        expected.insert(Node(8), nodes([]));
        println!("expected = {expected:#?}");
        assert_eq!(reachable, expected);
    }
}

353
vendor/bindgen/ir/analysis/sizedness.rs vendored Normal file
View File

@@ -0,0 +1,353 @@
//! Determining the sizedness of types (as base classes and otherwise).
use super::{
generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework,
};
use crate::ir::context::{BindgenContext, TypeId};
use crate::ir::item::IsOpaque;
use crate::ir::traversal::EdgeKind;
use crate::ir::ty::TypeKind;
use crate::{Entry, HashMap};
use std::{cmp, ops};
/// The result of the `Sizedness` analysis for an individual item.
///
/// This is a chain lattice of the form:
///
/// ```ignore
///                NonZeroSized
///                     |
///               DependsOnTypeParam
///                     |
///                 ZeroSized
/// ```
///
/// We initially assume that all types are `ZeroSized` and then update our
/// understanding as we learn more about each type.
///
/// The variant order matters: the derived `Ord` must agree with the lattice
/// order, since `join` is defined as the maximum of the two operands.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Default)]
pub(crate) enum SizednessResult {
    /// The type is zero-sized.
    ///
    /// This means that if it is a C++ type, and is not being used as a base
    /// member, then we must add an `_address` byte to enforce the
    /// unique-address-per-distinct-object-instance rule.
    #[default]
    ZeroSized,
    /// Whether this type is zero-sized or not depends on whether a type
    /// parameter is zero-sized or not.
    ///
    /// For example, given these definitions:
    ///
    /// ```c++
    /// template<class T>
    /// class Flongo : public T {};
    ///
    /// class Empty {};
    ///
    /// class NonEmpty { int x; };
    /// ```
    ///
    /// Then `Flongo<Empty>` is zero-sized, and needs an `_address` byte
    /// inserted, while `Flongo<NonEmpty>` is *not* zero-sized, and should *not*
    /// have an `_address` byte inserted.
    ///
    /// We don't properly handle this situation correctly right now:
    /// <https://github.com/rust-lang/rust-bindgen/issues/586>
    DependsOnTypeParam,
    /// Has some size that is known to be greater than zero. That doesn't mean
    /// it has a static size, but it is not zero sized for sure. In other words,
    /// it might contain an incomplete array or some other dynamically sized
    /// type.
    NonZeroSized,
}
impl SizednessResult {
    /// Take the least upper bound of `self` and `rhs`.
    ///
    /// Since the lattice is a chain, the least upper bound is simply the
    /// larger of the two values.
    pub(crate) fn join(self, rhs: Self) -> Self {
        cmp::max(self, rhs)
    }
}
impl ops::BitOr for SizednessResult {
    type Output = Self;
    fn bitor(self, rhs: SizednessResult) -> Self::Output {
        self.join(rhs)
    }
}
impl ops::BitOrAssign for SizednessResult {
    fn bitor_assign(&mut self, rhs: SizednessResult) {
        *self = self.join(rhs);
    }
}
/// An analysis that computes the sizedness of all types.
///
/// * For types with known sizes -- for example pointers, scalars, etc... --
///   they are assigned `NonZeroSized`.
///
/// * For compound structure types with one or more fields, they are assigned
///   `NonZeroSized`.
///
/// * For compound structure types without any fields, the results of the bases
///   are `join`ed.
///
/// * For type parameters, `DependsOnTypeParam` is assigned.
#[derive(Debug)]
pub(crate) struct SizednessAnalysis<'ctx> {
    ctx: &'ctx BindgenContext,
    // Reversed dependency edges: for each type, the types whose sizedness
    // must be re-examined when this type's result changes.
    dependencies: HashMap<TypeId, Vec<TypeId>>,
    // Incremental results of the analysis. Missing entries are implicitly
    // considered `ZeroSized`.
    sized: HashMap<TypeId, SizednessResult>,
}
impl SizednessAnalysis<'_> {
    /// Is this edge kind relevant to sizedness?
    fn consider_edge(kind: EdgeKind) -> bool {
        // These are the only edges that can affect whether a type is
        // zero-sized or not.
        matches!(
            kind,
            EdgeKind::TemplateArgument |
                EdgeKind::TemplateParameterDefinition |
                EdgeKind::TemplateDeclaration |
                EdgeKind::TypeReference |
                EdgeKind::BaseMember |
                EdgeKind::Field
        )
    }
    /// Insert an incremental result, and return whether this updated our
    /// knowledge of types and we should continue the analysis.
    fn insert(
        &mut self,
        id: TypeId,
        result: SizednessResult,
    ) -> ConstrainResult {
        trace!("inserting {result:?} for {id:?}");
        // `ZeroSized` is the bottom of the lattice and also the implicit
        // value of a missing entry, so recording it never changes anything.
        if result == SizednessResult::ZeroSized {
            return ConstrainResult::Same;
        }
        match self.sized.entry(id) {
            Entry::Vacant(slot) => {
                slot.insert(result);
                ConstrainResult::Changed
            }
            Entry::Occupied(mut slot) => {
                // Only ever move *up* the lattice; anything else would
                // violate monotonicity.
                if *slot.get() < result {
                    slot.insert(result);
                    ConstrainResult::Changed
                } else {
                    ConstrainResult::Same
                }
            }
        }
    }
    /// Propagate `from`'s current result, if any, to `to`.
    fn forward(&mut self, from: TypeId, to: TypeId) -> ConstrainResult {
        if let Some(&known) = self.sized.get(&from) {
            self.insert(to, known)
        } else {
            ConstrainResult::Same
        }
    }
}
impl<'ctx> MonotoneFramework for SizednessAnalysis<'ctx> {
    type Node = TypeId;
    type Extra = &'ctx BindgenContext;
    type Output = HashMap<TypeId, SizednessResult>;
    fn new(ctx: &'ctx BindgenContext) -> SizednessAnalysis<'ctx> {
        // Build the reversed dependency map and restrict it to type items on
        // both sides; sizedness is only defined for types.
        let dependencies = generate_dependencies(ctx, Self::consider_edge)
            .into_iter()
            .filter_map(|(id, sub_ids)| {
                id.as_type_id(ctx).map(|id| {
                    (
                        id,
                        sub_ids
                            .into_iter()
                            .filter_map(|s| s.as_type_id(ctx))
                            .collect::<Vec<_>>(),
                    )
                })
            })
            .collect();
        // Start empty: a missing entry implicitly means `ZeroSized`, the
        // bottom of the lattice.
        let sized = HashMap::default();
        SizednessAnalysis {
            ctx,
            dependencies,
            sized,
        }
    }
    fn initial_worklist(&self) -> Vec<TypeId> {
        // Seed the worklist with every allowlisted item that is a type.
        self.ctx
            .allowlisted_items()
            .iter()
            .filter_map(|id| id.as_type_id(self.ctx))
            .collect()
    }
    fn constrain(&mut self, id: TypeId) -> ConstrainResult {
        trace!("constrain {id:?}");
        // `NonZeroSized` is the top of the lattice; once a type reaches it,
        // no further update is possible.
        if let Some(SizednessResult::NonZeroSized) = self.sized.get(&id) {
            trace!("    already know it is not zero-sized");
            return ConstrainResult::Same;
        }
        if id.has_vtable_ptr(self.ctx) {
            trace!("    has an explicit vtable pointer, therefore is not zero-sized");
            return self.insert(id, SizednessResult::NonZeroSized);
        }
        let ty = self.ctx.resolve_type(id);
        if id.is_opaque(self.ctx, &()) {
            // Opaque types are judged purely by their layout; no layout is
            // treated as zero-sized.
            trace!("    type is opaque; checking layout...");
            let result =
                ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| {
                    if l.size == 0 {
                        trace!("    ...layout has size == 0");
                        SizednessResult::ZeroSized
                    } else {
                        trace!("    ...layout has size > 0");
                        SizednessResult::NonZeroSized
                    }
                });
            return self.insert(id, result);
        }
        match *ty.kind() {
            TypeKind::Void => {
                trace!("    void is zero-sized");
                self.insert(id, SizednessResult::ZeroSized)
            }
            TypeKind::TypeParam => {
                trace!(
                    "    type params sizedness depends on what they're \
                     instantiated as"
                );
                self.insert(id, SizednessResult::DependsOnTypeParam)
            }
            TypeKind::Int(..) |
            TypeKind::Float(..) |
            TypeKind::Complex(..) |
            TypeKind::Function(..) |
            TypeKind::Enum(..) |
            TypeKind::Reference(..) |
            TypeKind::NullPtr |
            TypeKind::ObjCId |
            TypeKind::ObjCSel |
            TypeKind::Pointer(..) => {
                trace!("    {:?} is known not to be zero-sized", ty.kind());
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::ObjCInterface(..) => {
                trace!("    obj-c interfaces always have at least the `isa` pointer");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::TemplateAlias(t, _) |
            TypeKind::Alias(t) |
            TypeKind::BlockPointer(t) |
            TypeKind::ResolvedTypeRef(t) => {
                trace!("    aliases and type refs forward to their inner type");
                self.forward(t, id)
            }
            TypeKind::TemplateInstantiation(ref inst) => {
                trace!(
                    "    template instantiations are zero-sized if their \
                     definition is zero-sized"
                );
                self.forward(inst.template_definition(), id)
            }
            TypeKind::Array(_, 0) => {
                trace!("    arrays of zero elements are zero-sized");
                self.insert(id, SizednessResult::ZeroSized)
            }
            TypeKind::Array(..) => {
                trace!("    arrays of > 0 elements are not zero-sized");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::Vector(..) => {
                trace!("    vectors are not zero-sized");
                self.insert(id, SizednessResult::NonZeroSized)
            }
            TypeKind::Comp(ref info) => {
                trace!("    comp considers its own fields and bases");
                if !info.fields().is_empty() {
                    return self.insert(id, SizednessResult::NonZeroSized);
                }
                // No fields: join the sizedness of all base classes. Bases
                // with no recorded result yet count as `ZeroSized`.
                let result = info
                    .base_members()
                    .iter()
                    .filter_map(|base| self.sized.get(&base.ty))
                    .fold(SizednessResult::ZeroSized, |a, b| a.join(*b));
                self.insert(id, result)
            }
            TypeKind::Opaque => {
                unreachable!("covered by the .is_opaque() check above")
            }
            TypeKind::UnresolvedTypeRef(..) => {
                unreachable!("Should have been resolved after parsing!");
            }
        }
    }
    fn each_depending_on<F>(&self, id: TypeId, mut f: F)
    where
        F: FnMut(TypeId),
    {
        if let Some(edges) = self.dependencies.get(&id) {
            for ty in edges {
                trace!("enqueue {ty:?} into worklist");
                f(*ty);
            }
        }
    }
}
impl<'ctx> From<SizednessAnalysis<'ctx>> for HashMap<TypeId, SizednessResult> {
    fn from(analysis: SizednessAnalysis<'ctx>) -> Self {
        // We let the lack of an entry mean "ZeroSized" to save space, so the
        // map must never contain an explicit `ZeroSized` entry.
        extra_assert!(analysis
            .sized
            .values()
            .all(|v| *v != SizednessResult::ZeroSized));
        analysis.sized
    }
}
/// A convenience trait for querying whether some type or ID is sized.
///
/// This is not for _computing_ whether the thing is sized; it looks up the
/// already-computed results of the `Sizedness` analysis for a specific thing.
pub(crate) trait Sizedness {
    /// Get the sizedness of this type.
    fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult;
    /// Is the sizedness for this type `SizednessResult::ZeroSized`?
    fn is_zero_sized(&self, ctx: &BindgenContext) -> bool {
        matches!(self.sizedness(ctx), SizednessResult::ZeroSized)
    }
}

View File

@@ -0,0 +1,601 @@
//! Discover which template type parameters are actually used.
//!
//! ### Why do we care?
//!
//! C++ allows ignoring template parameters, while Rust does not. Usually we can
//! blindly stick a `PhantomData<T>` inside a generic Rust struct to make up for
//! this. That doesn't work for templated type aliases, however:
//!
//! ```C++
//! template <typename T>
//! using Fml = int;
//! ```
//!
//! If we generate the naive Rust code for this alias, we get:
//!
//! ```ignore
//! pub(crate) type Fml<T> = ::std::os::raw::int;
//! ```
//!
//! And this is rejected by `rustc` due to the unused type parameter.
//!
//! (Aside: in these simple cases, `libclang` will often just give us the
//! aliased type directly, and we will never even know we were dealing with
//! aliases, let alone templated aliases. It's the more convoluted scenarios
//! where we get to have some fun...)
//!
//! For such problematic template aliases, we could generate a tuple whose
//! second member is a `PhantomData<T>`. Or, if we wanted to go the extra mile,
//! we could even generate some smarter wrapper that implements `Deref`,
//! `DerefMut`, `From`, `Into`, `AsRef`, and `AsMut` to the actually aliased
//! type. However, this is still lackluster:
//!
//! 1. Even with a billion conversion-trait implementations, using the generated
//! bindings is rather un-ergonomic.
//! 2. With either of these solutions, we need to keep track of which aliases
//! we've transformed like this in order to generate correct uses of the
//! wrapped type.
//!
//! Given that we have to properly track which template parameters ended up used
//! for (2), we might as well leverage that information to make ergonomic
//! bindings that don't contain any unused type parameters at all, and
//! completely avoid the pain of (1).
//!
//! ### How do we determine which template parameters are used?
//!
//! Determining which template parameters are actually used is a trickier
//! problem than it might seem at a glance. On the one hand, trivial uses are
//! easy to detect:
//!
//! ```C++
//! template <typename T>
//! class Foo {
//! T trivial_use_of_t;
//! };
//! ```
//!
//! It gets harder when determining if one template parameter is used depends on
//! determining if another template parameter is used. In this example, whether
//! `U` is used depends on whether `T` is used.
//!
//! ```C++
//! template <typename T>
//! class DoesntUseT {
//! int x;
//! };
//!
//! template <typename U>
//! class Fml {
//! DoesntUseT<U> lololol;
//! };
//! ```
//!
//! We can express the set of used template parameters as a constraint solving
//! problem (where the set of template parameters used by a given IR item is the
//! union of its sub-item's used template parameters) and iterate to a
//! fixed-point.
//!
//! We use the `ir::analysis::MonotoneFramework` infrastructure for this
//! fix-point analysis, where our lattice is the mapping from each IR item to
//! the powerset of the template parameters that appear in the input C++ header,
//! our join function is set union. The set of template parameters appearing in
//! the program is finite, as is the number of IR items. We start at our
//! lattice's bottom element: every item mapping to an empty set of template
//! parameters. Our analysis only adds members to each item's set of used
//! template parameters, never removes them, so it is monotone. Because our
//! lattice is finite and our constraint function is monotone, iteration to a
//! fix-point will terminate.
//!
//! See `src/ir/analysis.rs` for more.
use super::{ConstrainResult, MonotoneFramework};
use crate::ir::context::{BindgenContext, ItemId};
use crate::ir::item::{Item, ItemSet};
use crate::ir::template::{TemplateInstantiation, TemplateParameters};
use crate::ir::traversal::{EdgeKind, Trace};
use crate::ir::ty::TypeKind;
use crate::{HashMap, HashSet};
/// An analysis that finds for each IR item its set of template parameters that
/// it uses.
///
/// We use the monotone constraint function `template_param_usage`, defined as
/// follows:
///
/// * If `T` is a named template type parameter, it trivially uses itself:
///
/// ```ignore
/// template_param_usage(T) = { T }
/// ```
///
/// * If `inst` is a template instantiation, `inst.args` are the template
/// instantiation's template arguments, `inst.def` is the template definition
/// being instantiated, and `inst.def.params` is the template definition's
/// template parameters, then the instantiation's usage is the union of each
/// of its arguments' usages *if* the corresponding template parameter is in
/// turn used by the template definition:
///
/// ```ignore
/// template_param_usage(inst) = union(
/// template_param_usage(inst.args[i])
/// for i in 0..length(inst.args.length)
/// if inst.def.params[i] in template_param_usage(inst.def)
/// )
/// ```
///
/// * Finally, for all other IR item kinds, we use our lattice's `join`
/// operation: set union with each successor of the given item's template
/// parameter usage:
///
/// ```ignore
/// template_param_usage(v) =
/// union(template_param_usage(w) for w in successors(v))
/// ```
///
/// Note that we ignore certain edges in the graph, such as edges from a
/// template declaration to its template parameters' definitions for this
/// analysis. If we didn't, then we would mistakenly determine that every
/// template parameter is always used.
///
/// The final wrinkle is handling of blocklisted types. Normally, we say that
/// the set of allowlisted items is the transitive closure of items explicitly
/// called out for allowlisting, *without* any items explicitly called out as
/// blocklisted. However, for the purposes of this analysis's correctness, we
/// simplify and instead run the analysis on the full transitive closure of
/// allowlisted items. We do, however, treat instantiations of blocklisted items
/// specially; see `constrain_instantiation_of_blocklisted_template` and its
/// documentation for details.
#[derive(Debug, Clone)]
pub(crate) struct UsedTemplateParameters<'ctx> {
    ctx: &'ctx BindgenContext,
    // The Option is only there for temporary moves out of the hash map. See the
    // comments in `UsedTemplateParameters::constrain` below.
    //
    // Invariant: every value is `Some` except while `constrain` is running on
    // that entry's key.
    used: HashMap<ItemId, Option<ItemSet>>,
    // Reversed edges: maps an item to the items whose used-parameter sets
    // depend on this item's set.
    dependencies: HashMap<ItemId, Vec<ItemId>>,
    // The set of allowlisted items, without any blocklisted items reachable
    // from the allowlisted items which would otherwise be considered
    // allowlisted as well.
    allowlisted_items: HashSet<ItemId>,
}
impl UsedTemplateParameters<'_> {
    /// Is this edge kind relevant to template-parameter usage?
    fn consider_edge(kind: EdgeKind) -> bool {
        match kind {
            // For each of these kinds of edges, if the referent uses a template
            // parameter, then it should be considered that the origin of the
            // edge also uses the template parameter.
            EdgeKind::TemplateArgument |
            EdgeKind::BaseMember |
            EdgeKind::Field |
            EdgeKind::Constructor |
            EdgeKind::Destructor |
            EdgeKind::VarType |
            EdgeKind::FunctionReturn |
            EdgeKind::FunctionParameter |
            EdgeKind::TypeReference => true,
            // An inner var or type using a template parameter is orthogonal
            // from whether we use it. See template-param-usage-{6,11}.hpp.
            EdgeKind::InnerVar | EdgeKind::InnerType => false,
            // We can't emit machine code for new monomorphizations of class
            // templates' methods (and don't detect explicit instantiations) so
            // we must ignore template parameters that are only used by
            // methods. This doesn't apply to a function type's return or
            // parameter types, however, because of type aliases of function
            // pointers that use template parameters, eg
            // tests/headers/struct_with_typedef_template_arg.hpp
            EdgeKind::Method => false,
            // If we considered these edges, we would end up mistakenly claiming
            // that every template parameter is always used.
            EdgeKind::TemplateDeclaration |
            EdgeKind::TemplateParameterDefinition => false,
            // Since we have to be careful about which edges we consider for
            // this analysis to be correct, we ignore generic edges. We also
            // avoid a `_` wild card to force authors of new edge kinds to
            // determine whether they need to be considered by this analysis.
            EdgeKind::Generic => false,
        }
    }
    /// Take this ID's usage set out of `self.used`, leaving `None` in its
    /// place. The caller (`constrain`) must put the set back when done,
    /// restoring the all-`Some` invariant.
    fn take_this_id_usage_set<Id: Into<ItemId>>(
        &mut self,
        this_id: Id,
    ) -> ItemSet {
        let this_id = this_id.into();
        self.used
            .get_mut(&this_id)
            .expect(
                "Should have a set of used template params for every item \
                 id",
            )
            .take()
            .expect(
                "Should maintain the invariant that all used template param \
                 sets are `Some` upon entry of `constrain`",
            )
    }
    /// We say that blocklisted items use all of their template parameters. The
    /// blocklisted type is most likely implemented explicitly by the user,
    /// since it won't be in the generated bindings, and we don't know exactly
    /// what they'll do with template parameters, but we can push the issue down
    /// the line to them.
    fn constrain_instantiation_of_blocklisted_template(
        &self,
        this_id: ItemId,
        used_by_this_id: &mut ItemSet,
        instantiation: &TemplateInstantiation,
    ) {
        trace!(
            "    instantiation of blocklisted template, uses all template \
             arguments"
        );
        let args = instantiation
            .template_arguments()
            .iter()
            .map(|a| {
                // Resolve through type refs and aliases so usage is read off
                // the underlying argument item.
                a.into_resolver()
                    .through_type_refs()
                    .through_type_aliases()
                    .resolve(self.ctx)
                    .id()
            })
            // `this_id`'s own set is currently taken out of the map (it is
            // `None`), and union with ourselves is a no-op anyway.
            .filter(|a| *a != this_id)
            .flat_map(|a| {
                self.used
                    .get(&a)
                    .expect("Should have a used entry for the template arg")
                    .as_ref()
                    .expect(
                        "Because a != this_id, and all used template \
                         param sets other than this_id's are `Some`, \
                         a's used template param set should be `Some`",
                    )
                    .iter()
            });
        used_by_this_id.extend(args);
    }
    /// A template instantiation's concrete template argument is only used if
    /// the template definition uses the corresponding template parameter.
    fn constrain_instantiation(
        &self,
        this_id: ItemId,
        used_by_this_id: &mut ItemSet,
        instantiation: &TemplateInstantiation,
    ) {
        trace!("    template instantiation");
        let decl = self.ctx.resolve_type(instantiation.template_definition());
        let args = instantiation.template_arguments();
        let params = decl.self_template_params(self.ctx);
        debug_assert!(this_id != instantiation.template_definition());
        let used_by_def = self.used
            .get(&instantiation.template_definition().into())
            .expect("Should have a used entry for instantiation's template definition")
            .as_ref()
            .expect("And it should be Some because only this_id's set is None, and an \
                     instantiation's template definition should never be the \
                     instantiation itself");
        // Pair each concrete argument with the definition's corresponding
        // formal parameter; `zip` stops at the shorter of the two lists.
        for (arg, param) in args.iter().zip(params.iter()) {
            trace!(
                "      instantiation's argument {arg:?} is used if definition's \
                 parameter {param:?} is used",
            );
            if used_by_def.contains(&param.into()) {
                trace!("        param is used by template definition");
                let arg = arg
                    .into_resolver()
                    .through_type_refs()
                    .through_type_aliases()
                    .resolve(self.ctx)
                    .id();
                // Our own set is currently taken out of the map; skip it.
                if arg == this_id {
                    continue;
                }
                let used_by_arg = self
                    .used
                    .get(&arg)
                    .expect("Should have a used entry for the template arg")
                    .as_ref()
                    .expect(
                        "Because arg != this_id, and all used template \
                         param sets other than this_id's are `Some`, \
                         arg's used template param set should be \
                         `Some`",
                    )
                    .iter();
                used_by_this_id.extend(used_by_arg);
            }
        }
    }
    /// The join operation on our lattice: the set union of all of this ID's
    /// successors.
    fn constrain_join(&self, used_by_this_id: &mut ItemSet, item: &Item) {
        trace!("    other item: join with successors' usage");
        item.trace(
            self.ctx,
            &mut |sub_id, edge_kind| {
                // Ignore ourselves, since union with ourself is a
                // no-op. Ignore edges that aren't relevant to the
                // analysis.
                if sub_id == item.id() || !Self::consider_edge(edge_kind) {
                    return;
                }
                let used_by_sub_id = self
                    .used
                    .get(&sub_id)
                    .expect("Should have a used set for the sub_id successor")
                    .as_ref()
                    .expect(
                        "Because sub_id != id, and all used template \
                         param sets other than id's are `Some`, \
                         sub_id's used template param set should be \
                         `Some`",
                    )
                    .iter();
                trace!(
                    "      union with {sub_id:?}'s usage: {:?}",
                    used_by_sub_id.clone().collect::<Vec<_>>()
                );
                used_by_this_id.extend(used_by_sub_id);
            },
            &(),
        );
    }
}
impl<'ctx> MonotoneFramework for UsedTemplateParameters<'ctx> {
type Node = ItemId;
type Extra = &'ctx BindgenContext;
type Output = HashMap<ItemId, ItemSet>;
fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> {
let mut used = HashMap::default();
let mut dependencies = HashMap::default();
let allowlisted_items: HashSet<_> =
ctx.allowlisted_items().iter().copied().collect();
let allowlisted_and_blocklisted_items: ItemSet = allowlisted_items
.iter()
.copied()
.flat_map(|i| {
let mut reachable = vec![i];
i.trace(
ctx,
&mut |s, _| {
reachable.push(s);
},
&(),
);
reachable
})
.collect();
for item in allowlisted_and_blocklisted_items {
dependencies.entry(item).or_insert_with(Vec::new);
used.entry(item).or_insert_with(|| Some(ItemSet::new()));
{
// We reverse our natural IR graph edges to find dependencies
// between nodes.
item.trace(
ctx,
&mut |sub_item: ItemId, _| {
used.entry(sub_item)
.or_insert_with(|| Some(ItemSet::new()));
dependencies
.entry(sub_item)
.or_insert_with(Vec::new)
.push(item);
},
&(),
);
}
// Additionally, whether a template instantiation's template
// arguments are used depends on whether the template declaration's
// generic template parameters are used.
let item_kind =
ctx.resolve_item(item).as_type().map(|ty| ty.kind());
if let Some(TypeKind::TemplateInstantiation(inst)) = item_kind {
let decl = ctx.resolve_type(inst.template_definition());
let args = inst.template_arguments();
// Although template definitions should always have
// template parameters, there is a single exception:
// opaque templates. Hence the unwrap_or.
let params = decl.self_template_params(ctx);
for (arg, param) in args.iter().zip(params.iter()) {
let arg = arg
.into_resolver()
.through_type_aliases()
.through_type_refs()
.resolve(ctx)
.id();
let param = param
.into_resolver()
.through_type_aliases()
.through_type_refs()
.resolve(ctx)
.id();
used.entry(arg).or_insert_with(|| Some(ItemSet::new()));
used.entry(param).or_insert_with(|| Some(ItemSet::new()));
dependencies
.entry(arg)
.or_insert_with(Vec::new)
.push(param);
}
}
}
if cfg!(feature = "__testing_only_extra_assertions") {
// Invariant: The `used` map has an entry for every allowlisted
// item, as well as all explicitly blocklisted items that are
// reachable from allowlisted items.
//
// Invariant: the `dependencies` map has an entry for every
// allowlisted item.
//
// (This is so that every item we call `constrain` on is guaranteed
// to have a set of template parameters, and we can allow
// blocklisted templates to use all of their parameters).
for item in &allowlisted_items {
extra_assert!(used.contains_key(item));
extra_assert!(dependencies.contains_key(item));
item.trace(
ctx,
&mut |sub_item, _| {
extra_assert!(used.contains_key(&sub_item));
extra_assert!(dependencies.contains_key(&sub_item));
},
&(),
);
}
}
UsedTemplateParameters {
ctx,
used,
dependencies,
allowlisted_items,
}
}
fn initial_worklist(&self) -> Vec<ItemId> {
// The transitive closure of all allowlisted items, including explicitly
// blocklisted items.
self.ctx
.allowlisted_items()
.iter()
.copied()
.flat_map(|i| {
let mut reachable = vec![i];
i.trace(
self.ctx,
&mut |s, _| {
reachable.push(s);
},
&(),
);
reachable
})
.collect()
}
/// One fix-point step: grow `id`'s used-template-parameter set based on its
/// referents, and report whether the set changed.
fn constrain(&mut self, id: ItemId) -> ConstrainResult {
    // Invariant: all hash map entries' values are `Some` upon entering and
    // exiting this method.
    extra_assert!(self.used.values().all(|v| v.is_some()));
    // Take the set for this ID out of the hash map while we mutate it based
    // on other hash map entries. We *must* put it back into the hash map at
    // the end of this method. This allows us to side-step HashMap's lack of
    // an analog to slice::split_at_mut.
    let mut used_by_this_id = self.take_this_id_usage_set(id);
    trace!("constrain {id:?}");
    trace!(" initially, used set is {used_by_this_id:?}");
    let original_len = used_by_this_id.len();
    let item = self.ctx.resolve_item(id);
    let ty_kind = item.as_type().map(|ty| ty.kind());
    match ty_kind {
        // Named template type parameters trivially use themselves.
        Some(&TypeKind::TypeParam) => {
            trace!(" named type, trivially uses itself");
            used_by_this_id.insert(id);
        }
        // Template instantiations only use their template arguments if the
        // template definition uses the corresponding template parameter.
        Some(TypeKind::TemplateInstantiation(inst)) => {
            if self
                .allowlisted_items
                .contains(&inst.template_definition().into())
            {
                self.constrain_instantiation(
                    id,
                    &mut used_by_this_id,
                    inst,
                );
            } else {
                // The definition itself isn't allowlisted; take the
                // dedicated blocklisted-template path instead.
                self.constrain_instantiation_of_blocklisted_template(
                    id,
                    &mut used_by_this_id,
                    inst,
                );
            }
        }
        // Otherwise, add the union of each of its referent item's template
        // parameter usage.
        _ => self.constrain_join(&mut used_by_this_id, item),
    }
    trace!(" finally, used set is {used_by_this_id:?}");
    let new_len = used_by_this_id.len();
    // The used set may only ever grow; this is what makes the analysis a
    // monotone function over the lattice of sets.
    assert!(
        new_len >= original_len,
        "This is the property that ensures this function is monotone -- \
        if it doesn't hold, the analysis might never terminate!"
    );
    // Put the set back in the hash map and restore our invariant.
    debug_assert!(self.used[&id].is_none());
    self.used.insert(id, Some(used_by_this_id));
    extra_assert!(self.used.values().all(|v| v.is_some()));
    if new_len == original_len {
        ConstrainResult::Same
    } else {
        ConstrainResult::Changed
    }
}
/// Invoke `f` for every item whose used set depends on `item`'s, so the
/// framework can re-enqueue them after `item` changes.
fn each_depending_on<F>(&self, item: ItemId, mut f: F)
where
    F: FnMut(ItemId),
{
    let Some(edges) = self.dependencies.get(&item) else {
        return;
    };
    for &item in edges {
        trace!("enqueue {item:?} into worklist");
        f(item);
    }
}
}
impl<'ctx> From<UsedTemplateParameters<'ctx>> for HashMap<ItemId, ItemSet> {
    /// Consume the finished analysis, unwrapping every entry (all values are
    /// `Some` once the analysis has run to completion).
    fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self {
        let mut result = Self::default();
        for (id, set) in used_templ_params.used {
            result.insert(id, set.unwrap());
        }
        result
    }
}

259
vendor/bindgen/ir/annotations.rs vendored Normal file
View File

@@ -0,0 +1,259 @@
//! Types and functions related to bindgen annotation comments.
//!
//! Users can add annotations in doc comments to types that they would like to
//! replace other types with, mark as opaque, etc. This module deals with all of
//! that stuff.
use std::str::FromStr;
use crate::clang;
/// What kind of visibility modifier should be used for a struct or field?
///
/// NOTE: the variant order matters for the derived `PartialOrd`/`Ord`
/// (`Private < PublicCrate < Public`).
#[derive(Copy, PartialEq, Eq, PartialOrd, Ord, Clone, Debug, Default)]
pub enum FieldVisibilityKind {
    /// Fields are marked as private, i.e., struct Foo {bar: bool}
    Private,
    /// Fields are marked as crate public, i.e., struct Foo {pub(crate) bar: bool}
    PublicCrate,
    /// Fields are marked as public, i.e., struct Foo {pub bar: bool}
    #[default]
    Public,
}
impl FromStr for FieldVisibilityKind {
    type Err = String;

    /// Parse the visibility names accepted on the CLI / in annotations:
    /// `private`, `crate`, or `public`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let kind = match s {
            "private" => Self::Private,
            "crate" => Self::PublicCrate,
            "public" => Self::Public,
            _ => return Err(format!("Invalid visibility kind: `{s}`")),
        };
        Ok(kind)
    }
}
impl std::fmt::Display for FieldVisibilityKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match self {
FieldVisibilityKind::Private => "private",
FieldVisibilityKind::PublicCrate => "crate",
FieldVisibilityKind::Public => "public",
};
s.fmt(f)
}
}
/// What kind of accessor should we provide for a field?
///
/// Values are produced by `parse_accessor` below from the
/// `rustbindgen accessor="..."` annotation.
#[derive(Copy, PartialEq, Eq, Clone, Debug)]
pub(crate) enum FieldAccessorKind {
    /// No accessor.
    None,
    /// Plain accessor.
    Regular,
    /// Unsafe accessor.
    Unsafe,
    /// Immutable accessor.
    Immutable,
}
/// Annotations for a given item, or a field.
///
/// You can see the kind of comments that are accepted in the [Doxygen documentation](https://www.doxygen.nl/manual/docblocks.html).
///
/// All fields start at their `Default` values and are filled in by
/// `Annotations::parse` from `<div rustbindgen ...>` tags found in doc
/// comments.
#[derive(Default, Clone, PartialEq, Eq, Debug)]
pub(crate) struct Annotations {
    /// Whether this item is marked as opaque. Only applies to types.
    opaque: bool,
    /// Whether this item should be hidden from the output. Only applies to
    /// types, or enum variants.
    hide: bool,
    /// Whether this type should be replaced by another. The name is a
    /// namespace-aware path.
    use_instead_of: Option<Vec<String>>,
    /// Manually disable deriving copy/clone on this type. Only applies to
    /// struct or union types.
    disallow_copy: bool,
    /// Manually disable deriving debug on this type.
    disallow_debug: bool,
    /// Manually disable deriving/implement default on this type.
    disallow_default: bool,
    /// Whether to add a `#[must_use]` annotation to this type.
    must_use_type: bool,
    /// Visibility of struct fields. You can set this on
    /// structs (it will apply to all the fields), or individual fields.
    visibility_kind: Option<FieldVisibilityKind>,
    /// The kind of accessor this field will have. Also can be applied to
    /// structs so all the fields inside share it by default.
    accessor_kind: Option<FieldAccessorKind>,
    /// Whether this enum variant should be constified.
    ///
    /// This is controlled by the `constant` attribute, this way:
    ///
    /// ```cpp
    /// enum Foo {
    ///     Bar = 0, /**< <div rustbindgen constant></div> */
    ///     Baz = 0,
    /// };
    /// ```
    ///
    /// In that case, bindgen will generate a constant for `Bar` instead of
    /// `Baz`.
    constify_enum_variant: bool,
    /// List of explicit derives for this type.
    derives: Vec<String>,
    /// List of explicit attributes for this type.
    attributes: Vec<String>,
}
/// Map the value of a `rustbindgen accessor="..."` attribute to the accessor
/// kind it requests. Any unrecognized value falls back to a regular accessor.
fn parse_accessor(s: &str) -> FieldAccessorKind {
    if s == "false" {
        FieldAccessorKind::None
    } else if s == "unsafe" {
        FieldAccessorKind::Unsafe
    } else if s == "immutable" {
        FieldAccessorKind::Immutable
    } else {
        FieldAccessorKind::Regular
    }
}
impl Annotations {
    /// Construct new annotations for the given cursor and its bindgen comments
    /// (if any).
    ///
    /// Returns `None` when no `rustbindgen` tag was found in the comment.
    pub(crate) fn new(cursor: &clang::Cursor) -> Option<Annotations> {
        let mut anno = Annotations::default();
        let mut matched_one = false;
        anno.parse(&cursor.comment(), &mut matched_one);
        if matched_one {
            Some(anno)
        } else {
            None
        }
    }

    /// Should this type be hidden?
    pub(crate) fn hide(&self) -> bool {
        self.hide
    }

    /// Should this type be opaque?
    pub(crate) fn opaque(&self) -> bool {
        self.opaque
    }

    /// For a given type, indicates the type it should replace.
    ///
    /// For example, in the following code:
    ///
    /// ```cpp
    ///
    /// /** <div rustbindgen replaces="Bar"></div> */
    /// struct Foo { int x; };
    ///
    /// struct Bar { char foo; };
    /// ```
    ///
    /// the generated code would look something like:
    ///
    /// ```
    /// /** <div rustbindgen replaces="Bar"></div> */
    /// struct Bar {
    ///     x: ::std::os::raw::c_int,
    /// };
    /// ```
    ///
    /// That is, code for `Foo` is used to generate `Bar`.
    pub(crate) fn use_instead_of(&self) -> Option<&[String]> {
        self.use_instead_of.as_deref()
    }

    /// The list of derives that have been specified in this annotation.
    pub(crate) fn derives(&self) -> &[String] {
        &self.derives
    }

    /// The list of attributes that have been specified in this annotation.
    pub(crate) fn attributes(&self) -> &[String] {
        &self.attributes
    }

    /// Should we avoid implementing the `Copy` trait?
    pub(crate) fn disallow_copy(&self) -> bool {
        self.disallow_copy
    }

    /// Should we avoid implementing the `Debug` trait?
    pub(crate) fn disallow_debug(&self) -> bool {
        self.disallow_debug
    }

    /// Should we avoid implementing the `Default` trait?
    pub(crate) fn disallow_default(&self) -> bool {
        self.disallow_default
    }

    /// Should this type get a `#[must_use]` annotation?
    pub(crate) fn must_use_type(&self) -> bool {
        self.must_use_type
    }

    /// What visibility should this type's fields have, if any was specified?
    pub(crate) fn visibility_kind(&self) -> Option<FieldVisibilityKind> {
        self.visibility_kind
    }

    /// What kind of accessors should we provide for this type's fields?
    pub(crate) fn accessor_kind(&self) -> Option<FieldAccessorKind> {
        self.accessor_kind
    }

    /// Recursively scan `comment` and its children for a
    /// `<div rustbindgen ...></div>` tag, recording every recognized attribute
    /// into `self`. `matched` is set to `true` once at least one tag is found.
    fn parse(&mut self, comment: &clang::Comment, matched: &mut bool) {
        use clang_sys::CXComment_HTMLStartTag;
        // Only a `div` whose *first* attribute is `rustbindgen` is treated as
        // an annotation tag.
        if comment.kind() == CXComment_HTMLStartTag &&
            comment.get_tag_name() == "div" &&
            comment
                .get_tag_attrs()
                .next()
                .is_some_and(|attr| attr.name == "rustbindgen")
        {
            *matched = true;
            for attr in comment.get_tag_attrs() {
                match attr.name.as_str() {
                    "opaque" => self.opaque = true,
                    "hide" => self.hide = true,
                    "nocopy" => self.disallow_copy = true,
                    "nodebug" => self.disallow_debug = true,
                    "nodefault" => self.disallow_default = true,
                    "mustusetype" => self.must_use_type = true,
                    "replaces" => {
                        self.use_instead_of = Some(
                            attr.value.split("::").map(Into::into).collect(),
                        );
                    }
                    "derive" => self.derives.push(attr.value),
                    "attribute" => self.attributes.push(attr.value),
                    "private" => {
                        // `private="false"` explicitly forces public fields.
                        self.visibility_kind = if attr.value == "false" {
                            Some(FieldVisibilityKind::Public)
                        } else {
                            Some(FieldVisibilityKind::Private)
                        };
                    }
                    "accessor" => {
                        self.accessor_kind = Some(parse_accessor(&attr.value));
                    }
                    "constant" => self.constify_enum_variant = true,
                    // Unknown attributes are silently ignored.
                    _ => {}
                }
            }
        }
        for child in comment.get_children() {
            self.parse(&child, matched);
        }
    }

    /// Returns whether we've parsed a "constant" attribute.
    pub(crate) fn constify_enum_variant(&self) -> bool {
        self.constify_enum_variant
    }
}

100
vendor/bindgen/ir/comment.rs vendored Normal file
View File

@@ -0,0 +1,100 @@
//! Utilities for manipulating C/C++ comments.
/// The type of a comment, as classified by `kind` below.
#[derive(Debug, PartialEq, Eq)]
enum Kind {
    /// A `///` comment, or something of the like.
    /// All lines in a comment should start with the same symbol.
    SingleLines,
    /// A `/**` comment, where each other line can start with `*` and the
    /// entire block ends with `*/`.
    MultiLine,
}
/// Preprocesses a C/C++ comment so that it is a valid Rust comment.
///
/// Comments that aren't recognizable doc comments are passed through
/// unchanged.
pub(crate) fn preprocess(comment: &str) -> String {
    let Some(comment_kind) = kind(comment) else {
        return comment.to_owned();
    };
    match comment_kind {
        Kind::SingleLines => preprocess_single_lines(comment),
        Kind::MultiLine => preprocess_multi_line(comment),
    }
}
/// Gets the kind of the doc comment, if it is one.
///
/// `/*`-style prefixes win over `//` (they are mutually exclusive anyway).
fn kind(comment: &str) -> Option<Kind> {
    match comment.as_bytes() {
        [b'/', b'*', ..] => Some(Kind::MultiLine),
        [b'/', b'/', ..] => Some(Kind::SingleLines),
        _ => None,
    }
}
/// Preprocesses multiple single line comments.
///
/// Handles lines starting with both `//` and `///`: each line is trimmed and
/// its leading slashes stripped, then the lines are re-joined.
fn preprocess_single_lines(comment: &str) -> String {
    debug_assert!(comment.starts_with("//"), "comment is not single line");
    comment
        .lines()
        .map(|line| line.trim().trim_start_matches('/'))
        .collect::<Vec<_>>()
        .join("\n")
}
/// Preprocesses a multi-line (`/** ... */`) comment into plain text lines.
fn preprocess_multi_line(comment: &str) -> String {
    // Peel off the comment delimiters: leading `/`s, then the trailing `/`
    // and `*`s of the closing `*/`.
    let body = comment
        .trim_start_matches('/')
        .trim_end_matches('/')
        .trim_end_matches('*');
    // Strip any potential `*` (or Doxygen `!`) characters preceding each line.
    let mut lines = Vec::new();
    for line in body.lines() {
        lines.push(line.trim().trim_start_matches('*').trim_start_matches('!'));
    }
    // Skip the first empty lines.
    while lines.first().is_some_and(|l| l.trim().is_empty()) {
        lines.remove(0);
    }
    // Remove the trailing line corresponding to the `*/`.
    if lines.last().is_some_and(|l| l.trim().is_empty()) {
        lines.pop();
    }
    lines.join("\n")
}
// Unit tests for the comment-preprocessing helpers above.
#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn picks_up_single_and_multi_line_doc_comments() {
        assert_eq!(kind("/// hello"), Some(Kind::SingleLines));
        assert_eq!(kind("/** world */"), Some(Kind::MultiLine));
    }

    #[test]
    fn processes_single_lines_correctly() {
        assert_eq!(preprocess("///"), "");
        assert_eq!(preprocess("/// hello"), " hello");
        assert_eq!(preprocess("// hello"), " hello");
        assert_eq!(preprocess("// hello"), " hello");
    }

    #[test]
    fn processes_multi_lines_correctly() {
        assert_eq!(preprocess("/**/"), "");
        assert_eq!(
            preprocess("/** hello \n * world \n * foo \n */"),
            " hello\n world\n foo"
        );
        assert_eq!(
            preprocess("/**\nhello\n*world\n*foo\n*/"),
            "hello\nworld\nfoo"
        );
    }
}

1921
vendor/bindgen/ir/comp.rs vendored Normal file

File diff suppressed because it is too large Load Diff

3107
vendor/bindgen/ir/context.rs vendored Normal file

File diff suppressed because it is too large Load Diff

130
vendor/bindgen/ir/derive.rs vendored Normal file
View File

@@ -0,0 +1,130 @@
//! Traits for determining whether we can derive traits for a thing or not.
//!
//! These traits tend to come in pairs:
//!
//! 1. A "trivial" version, whose implementations aren't allowed to recursively
//! look at other types or the results of fix point analyses.
//!
//! 2. A "normal" version, whose implementations simply query the results of a
//! fix point analysis.
//!
//! The former is used by the analyses when creating the results queried by the
//! second.
use super::context::BindgenContext;
use std::cmp;
use std::ops;
// NOTE(review): these appear to be the "normal" variants described in the
// module docs (they take the full `BindgenContext`, querying fix point
// analysis results); confirm against the analyses that produce them.

/// A trait that encapsulates the logic for whether or not we can derive `Debug`
/// for a given thing.
pub(crate) trait CanDeriveDebug {
    /// Return `true` if `Debug` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_debug(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive `Copy`
/// for a given thing.
pub(crate) trait CanDeriveCopy {
    /// Return `true` if `Copy` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_copy(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive
/// `Default` for a given thing.
pub(crate) trait CanDeriveDefault {
    /// Return `true` if `Default` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_default(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive `Hash`
/// for a given thing.
pub(crate) trait CanDeriveHash {
    /// Return `true` if `Hash` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_hash(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive
/// `PartialEq` for a given thing.
pub(crate) trait CanDerivePartialEq {
    /// Return `true` if `PartialEq` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive
/// `PartialOrd` for a given thing.
pub(crate) trait CanDerivePartialOrd {
    /// Return `true` if `PartialOrd` can be derived for this thing, `false`
    /// otherwise.
    fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive `Eq`
/// for a given thing.
pub(crate) trait CanDeriveEq {
    /// Return `true` if `Eq` can be derived for this thing, `false` otherwise.
    fn can_derive_eq(&self, ctx: &BindgenContext) -> bool;
}

/// A trait that encapsulates the logic for whether or not we can derive `Ord`
/// for a given thing.
pub(crate) trait CanDeriveOrd {
    /// Return `true` if `Ord` can be derived for this thing, `false` otherwise.
    fn can_derive_ord(&self, ctx: &BindgenContext) -> bool;
}
/// Whether it is possible or not to automatically derive trait for an item.
///
/// ```ignore
///        No
///         ^
///         |
///      Manually
///         ^
///         |
///        Yes
/// ```
///
/// Initially we assume that we can derive trait for all types and then
/// update our understanding as we learn more about each type.
///
/// NOTE: the variant order matters — `join` below relies on the derived
/// `Ord`, so `Yes < Manually < No` defines the lattice pictured above.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum CanDerive {
    /// Yes, we can derive automatically.
    #[default]
    Yes,
    /// The only thing that stops us from automatically deriving is that
    /// array with more than maximum number of elements is used.
    ///
    /// This means we probably can "manually" implement such trait.
    Manually,
    /// No, we cannot.
    No,
}
impl CanDerive {
    /// Take the least upper bound of `self` and `rhs`.
    ///
    /// Relies on the derived `Ord`, where `Yes < Manually < No`.
    pub(crate) fn join(self, rhs: Self) -> Self {
        cmp::max(self, rhs)
    }
}
impl ops::BitOr for CanDerive {
    type Output = Self;

    /// `a | b` is just the lattice join of the two operands.
    fn bitor(self, rhs: Self) -> Self::Output {
        Self::join(self, rhs)
    }
}
impl ops::BitOrAssign for CanDerive {
    /// `a |= b` folds `rhs` into `self` via the lattice join.
    fn bitor_assign(&mut self, rhs: Self) {
        *self = Self::join(*self, rhs);
    }
}

85
vendor/bindgen/ir/dot.rs vendored Normal file
View File

@@ -0,0 +1,85 @@
//! Generating Graphviz `dot` files from our IR.
use super::context::{BindgenContext, ItemId};
use super::traversal::Trace;
use std::fs::File;
use std::io::{self, Write};
use std::path::Path;
/// A trait for anything that can write attributes as `<table>` rows to a dot
/// file.
///
/// The rows are embedded in the HTML-like node label emitted by
/// `write_dot_file` below.
pub(crate) trait DotAttributes {
    /// Write this thing's attributes to the given output. Each attribute must
    /// be its own `<tr>...</tr>`.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: Write;
}
/// Write a graphviz dot file containing our IR.
///
/// Every item becomes a node (gray when not allowlisted); traversal edges are
/// labeled with their `EdgeKind`, and module-child edges are drawn dotted.
pub(crate) fn write_dot_file<P>(ctx: &BindgenContext, path: P) -> io::Result<()>
where
    P: AsRef<Path>,
{
    let file = File::create(path)?;
    let mut dot_file = io::BufWriter::new(file);
    writeln!(&mut dot_file, "digraph {{")?;
    // `trace` takes a plain callback, so `?` can't be used inside it; the
    // first write error is stashed here and checked after each traversal.
    let mut err: Option<io::Result<_>> = None;
    for (id, item) in ctx.items() {
        let is_allowlisted = ctx.allowlisted_items().contains(&id);
        writeln!(
            &mut dot_file,
            r#"{} [fontname="courier", color={}, label=< <table border="0" align="left">"#,
            id.as_usize(),
            if is_allowlisted { "black" } else { "gray" }
        )?;
        item.dot_attributes(ctx, &mut dot_file)?;
        writeln!(&mut dot_file, "</table> >];")?;
        item.trace(
            ctx,
            &mut |sub_id: ItemId, edge_kind| {
                // Once a write failed, skip the remaining edges.
                if err.is_some() {
                    return;
                }
                match writeln!(
                    &mut dot_file,
                    "{} -> {} [label={edge_kind:?}, color={}];",
                    id.as_usize(),
                    sub_id.as_usize(),
                    if is_allowlisted { "black" } else { "gray" }
                ) {
                    Ok(_) => {}
                    Err(e) => err = Some(Err(e)),
                }
            },
            &(),
        );
        if let Some(err) = err {
            return err;
        }
        // Modules additionally link to their children with dotted edges.
        if let Some(module) = item.as_module() {
            for child in module.children() {
                writeln!(
                    &mut dot_file,
                    "{} -> {} [style=dotted, color=gray]",
                    item.id().as_usize(),
                    child.as_usize()
                )?;
            }
        }
    }
    writeln!(&mut dot_file, "}}")?;
    Ok(())
}

321
vendor/bindgen/ir/enum_ty.rs vendored Normal file
View File

@@ -0,0 +1,321 @@
//! Intermediate representation for C/C++ enumerations.
use super::super::codegen::EnumVariation;
use super::context::{BindgenContext, TypeId};
use super::item::Item;
use super::ty::{Type, TypeKind};
use crate::clang;
use crate::ir::annotations::Annotations;
use crate::parse::ParseError;
use crate::regex_set::RegexSet;
/// An enum representing custom handling that can be given to a variant.
///
/// Produced either by parse callbacks or by `rustbindgen` annotations; see
/// `Enum::from_ty`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum EnumVariantCustomBehavior {
    /// This variant will be a module containing constants.
    ModuleConstify,
    /// This variant will be constified, that is, forced to generate a constant.
    Constify,
    /// This variant will be hidden entirely from the resulting enum.
    Hide,
}
/// A C/C++ enumeration.
#[derive(Debug)]
pub(crate) struct Enum {
    /// The representation used for this enum; it should be an `IntKind` type or
    /// an alias to one.
    ///
    /// It's `None` if the enum is a forward declaration and isn't defined
    /// anywhere else, see `tests/headers/func_ptr_in_struct.h`.
    repr: Option<TypeId>,
    /// The different variants, with explicit values.
    variants: Vec<EnumVariant>,
}
impl Enum {
    /// Construct a new `Enum` with the given representation and variants.
    pub(crate) fn new(
        repr: Option<TypeId>,
        variants: Vec<EnumVariant>,
    ) -> Self {
        Enum { repr, variants }
    }

    /// Get this enumeration's representation.
    pub(crate) fn repr(&self) -> Option<TypeId> {
        self.repr
    }

    /// Get this enumeration's variants.
    pub(crate) fn variants(&self) -> &[EnumVariant] {
        &self.variants
    }

    /// Construct an enumeration from the given Clang type.
    ///
    /// Walks the enum declaration's constants, applying parse callbacks and
    /// `rustbindgen` annotations to each variant along the way.
    pub(crate) fn from_ty(
        ty: &clang::Type,
        ctx: &mut BindgenContext,
    ) -> Result<Self, ParseError> {
        use clang_sys::*;
        debug!("Enum::from_ty {ty:?}");
        if ty.kind() != CXType_Enum {
            return Err(ParseError::Continue);
        }
        let declaration = ty.declaration().canonical();
        let repr = declaration
            .enum_type()
            .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok());
        let mut variants = vec![];
        // Resolve the underlying integer type so we know how to read each
        // variant's value below (boolean / signed / unsigned).
        let variant_ty =
            repr.and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx));
        let is_bool = variant_ty.is_some_and(Type::is_bool);
        // Assume signedness since the default type by the C standard is an int.
        let is_signed = variant_ty.map_or(true, |ty| match *ty.kind() {
            TypeKind::Int(ref int_kind) => int_kind.is_signed(),
            ref other => {
                panic!("Since when enums can be non-integers? {other:?}")
            }
        });
        // Anonymous enums have an empty spelling; pass `None` to callbacks.
        let type_name = ty.spelling();
        let type_name = if type_name.is_empty() {
            None
        } else {
            Some(type_name)
        };
        let type_name = type_name.as_deref();
        let definition = declaration.definition().unwrap_or(declaration);
        definition.visit(|cursor| {
            if cursor.kind() == CXCursor_EnumConstantDecl {
                let value = if is_bool {
                    cursor.enum_val_boolean().map(EnumVariantValue::Boolean)
                } else if is_signed {
                    cursor.enum_val_signed().map(EnumVariantValue::Signed)
                } else {
                    cursor.enum_val_unsigned().map(EnumVariantValue::Unsigned)
                };
                if let Some(val) = value {
                    let name = cursor.spelling();
                    let annotations = Annotations::new(&cursor);
                    // Parse callbacks take precedence over comment
                    // annotations for custom variant behavior...
                    let custom_behavior = ctx
                        .options()
                        .last_callback(|callbacks| {
                            callbacks
                                .enum_variant_behavior(type_name, &name, val)
                        })
                        .or_else(|| {
                            let annotations = annotations.as_ref()?;
                            if annotations.hide() {
                                Some(EnumVariantCustomBehavior::Hide)
                            } else if annotations.constify_enum_variant() {
                                Some(EnumVariantCustomBehavior::Constify)
                            } else {
                                None
                            }
                        });
                    // ...and likewise for renaming the variant.
                    let new_name = ctx
                        .options()
                        .last_callback(|callbacks| {
                            callbacks.enum_variant_name(type_name, &name, val)
                        })
                        .or_else(|| {
                            annotations
                                .as_ref()?
                                .use_instead_of()?
                                .last()
                                .cloned()
                        })
                        .unwrap_or_else(|| name.clone());
                    let comment = cursor.raw_comment();
                    variants.push(EnumVariant::new(
                        new_name,
                        name,
                        comment,
                        val,
                        custom_behavior,
                    ));
                }
            }
            CXChildVisit_Continue
        });
        Ok(Enum::new(repr, variants))
    }

    /// Whether this enum matches any regex in `enums`, either by its
    /// allowlisting path or, for anonymous enums, by one of its variant names.
    fn is_matching_enum(
        &self,
        ctx: &BindgenContext,
        enums: &RegexSet,
        item: &Item,
    ) -> bool {
        let path = item.path_for_allowlisting(ctx);
        let enum_ty = item.expect_type();
        // Note: the first path component is skipped when matching.
        if enums.matches(path[1..].join("::")) {
            return true;
        }
        // Test the variants if the enum is anonymous.
        if enum_ty.name().is_some() {
            return false;
        }
        self.variants().iter().any(|v| enums.matches(v.name()))
    }

    /// Returns the final representation of the enum.
    pub(crate) fn computed_enum_variation(
        &self,
        ctx: &BindgenContext,
        item: &Item,
    ) -> EnumVariation {
        // ModuleConsts has higher precedence before Rust in order to avoid
        // problems with overlapping match patterns.
        if self.is_matching_enum(
            ctx,
            &ctx.options().constified_enum_modules,
            item,
        ) {
            EnumVariation::ModuleConsts
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().bitfield_enums,
            item,
        ) {
            EnumVariation::NewType {
                is_bitfield: true,
                is_global: false,
            }
        } else if self.is_matching_enum(ctx, &ctx.options().newtype_enums, item)
        {
            EnumVariation::NewType {
                is_bitfield: false,
                is_global: false,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().newtype_global_enums,
            item,
        ) {
            EnumVariation::NewType {
                is_bitfield: false,
                is_global: true,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().rustified_enums,
            item,
        ) {
            EnumVariation::Rust {
                non_exhaustive: false,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().rustified_non_exhaustive_enums,
            item,
        ) {
            EnumVariation::Rust {
                non_exhaustive: true,
            }
        } else if self.is_matching_enum(
            ctx,
            &ctx.options().constified_enums,
            item,
        ) {
            EnumVariation::Consts
        } else {
            ctx.options().default_enum_style
        }
    }
}
/// A single enum variant, to be contained only in an enum.
#[derive(Debug)]
pub(crate) struct EnumVariant {
    /// The name of the variant, possibly renamed by callbacks or annotations.
    name: String,
    /// The original name of the variant (without user mangling), as matched
    /// against allowlisting regexes.
    name_for_allowlisting: String,
    /// An optional doc comment.
    comment: Option<String>,
    /// The integer value of the variant.
    val: EnumVariantValue,
    /// The custom behavior this variant may have, if any.
    custom_behavior: Option<EnumVariantCustomBehavior>,
}
/// A constant value assigned to an enumeration variant.
///
/// The variant used matches the boolness/signedness detected in
/// `Enum::from_ty`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum EnumVariantValue {
    /// A boolean constant.
    Boolean(bool),
    /// A signed constant.
    Signed(i64),
    /// An unsigned constant.
    Unsigned(u64),
}
impl EnumVariant {
    /// Construct a new enumeration variant from the given parts.
    pub(crate) fn new(
        name: String,
        name_for_allowlisting: String,
        comment: Option<String>,
        val: EnumVariantValue,
        custom_behavior: Option<EnumVariantCustomBehavior>,
    ) -> Self {
        Self {
            name,
            name_for_allowlisting,
            comment,
            val,
            custom_behavior,
        }
    }

    /// Get this variant's (possibly renamed) name.
    pub(crate) fn name(&self) -> &str {
        &self.name
    }

    /// Get this variant's original name, as used for allowlisting matches.
    pub(crate) fn name_for_allowlisting(&self) -> &str {
        &self.name_for_allowlisting
    }

    /// Get this variant's value.
    pub(crate) fn val(&self) -> EnumVariantValue {
        self.val
    }

    /// Get this variant's documentation.
    pub(crate) fn comment(&self) -> Option<&str> {
        self.comment.as_deref()
    }

    /// Returns whether this variant should be enforced to be a constant by
    /// code generation.
    pub(crate) fn force_constification(&self) -> bool {
        matches!(
            self.custom_behavior,
            Some(EnumVariantCustomBehavior::Constify)
        )
    }

    /// Returns whether the current variant should be hidden completely from
    /// the resulting rust enum.
    pub(crate) fn hidden(&self) -> bool {
        matches!(self.custom_behavior, Some(EnumVariantCustomBehavior::Hide))
    }
}

838
vendor/bindgen/ir/function.rs vendored Normal file
View File

@@ -0,0 +1,838 @@
//! Intermediate representation for C/C++ functions and methods.
use super::comp::MethodKind;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::item::Item;
use super::traversal::{EdgeKind, Trace, Tracer};
use super::ty::TypeKind;
use crate::callbacks::{ItemInfo, ItemKind};
use crate::clang::{self, ABIKind, Attribute};
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use clang_sys::CXCallingConv;
use quote::TokenStreamExt;
use std::io;
use std::str::FromStr;
// NOTE(review): 12 presumably mirrors the maximum function-pointer arity for
// which the standard library implements common traits (Debug, Clone, ...);
// confirm against the codegen that consults this limit.
const RUST_DERIVE_FUNPTR_LIMIT: usize = 12;
/// What kind of function are we looking at?
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum FunctionKind {
    /// A plain, free function.
    Function,
    /// A method of some kind.
    Method(MethodKind),
}
impl FunctionKind {
    /// Given a clang cursor, return the kind of function it represents, or
    /// `None` otherwise.
    pub(crate) fn from_cursor(cursor: &clang::Cursor) -> Option<FunctionKind> {
        // FIXME(emilio): Deduplicate logic with `ir::comp`.
        match cursor.kind() {
            clang_sys::CXCursor_FunctionDecl => Some(FunctionKind::Function),
            clang_sys::CXCursor_Constructor => {
                Some(FunctionKind::Method(MethodKind::Constructor))
            }
            clang_sys::CXCursor_Destructor => {
                // Destructors may be virtual (and possibly pure).
                let kind = if cursor.method_is_virtual() {
                    MethodKind::VirtualDestructor {
                        pure_virtual: cursor.method_is_pure_virtual(),
                    }
                } else {
                    MethodKind::Destructor
                };
                Some(FunctionKind::Method(kind))
            }
            clang_sys::CXCursor_CXXMethod => {
                let kind = if cursor.method_is_virtual() {
                    MethodKind::Virtual {
                        pure_virtual: cursor.method_is_pure_virtual(),
                    }
                } else if cursor.method_is_static() {
                    MethodKind::Static
                } else {
                    MethodKind::Normal
                };
                Some(FunctionKind::Method(kind))
            }
            _ => None,
        }
    }
}
/// The style of linkage
#[derive(Debug, Clone, Copy)]
pub(crate) enum Linkage {
    /// Externally visible and can be linked against
    External,
    /// Not exposed externally. 'static inline' functions will have this kind of linkage
    Internal,
}
/// A function declaration, with a signature, arguments, and argument names.
///
/// The argument names vector must be the same length as the ones in the
/// signature.
#[derive(Debug)]
pub(crate) struct Function {
    /// The name of this function.
    name: String,
    /// The mangled name, that is, the symbol.
    mangled_name: Option<String>,
    /// The link name. If specified, overwrite `mangled_name`.
    link_name: Option<String>,
    /// The ID pointing to the current function signature.
    signature: TypeId,
    /// The kind of function this is.
    kind: FunctionKind,
    /// The linkage of the function.
    linkage: Linkage,
}
impl Function {
    /// Construct a new function.
    pub(crate) fn new(
        name: String,
        mangled_name: Option<String>,
        link_name: Option<String>,
        signature: TypeId,
        kind: FunctionKind,
        linkage: Linkage,
    ) -> Self {
        Function {
            name,
            mangled_name,
            link_name,
            signature,
            kind,
            linkage,
        }
    }

    /// Get this function's name.
    pub(crate) fn name(&self) -> &str {
        &self.name
    }

    /// Get this function's mangled name (the symbol), if any.
    pub(crate) fn mangled_name(&self) -> Option<&str> {
        self.mangled_name.as_deref()
    }

    /// Get this function's link name. If specified, it overrides the mangled
    /// name.
    pub fn link_name(&self) -> Option<&str> {
        self.link_name.as_deref()
    }

    /// Get this function's signature type.
    pub(crate) fn signature(&self) -> TypeId {
        self.signature
    }

    /// Get this function's kind.
    pub(crate) fn kind(&self) -> FunctionKind {
        self.kind
    }

    /// Get this function's linkage.
    pub(crate) fn linkage(&self) -> Linkage {
        self.linkage
    }
}
impl DotAttributes for Function {
    /// Emit a single table row with the (escaped) mangled name, if there is
    /// one; functions without a mangled name contribute no rows.
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        let Some(mangled) = self.mangled_name.as_ref() else {
            return Ok(());
        };
        // Escape the symbol so it's safe to embed in the dot label.
        let mangled: String =
            mangled.chars().flat_map(char::escape_default).collect();
        writeln!(out, "<tr><td>mangled name</td><td>{mangled}</td></tr>")
    }
}
/// A valid rust ABI.
///
/// String forms are round-trippable through `FromStr` and `Display` below.
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
pub enum Abi {
    /// The default C ABI.
    C,
    /// The "stdcall" ABI.
    Stdcall,
    /// The "efiapi" ABI.
    EfiApi,
    /// The "fastcall" ABI.
    Fastcall,
    /// The "thiscall" ABI.
    ThisCall,
    /// The "vectorcall" ABI.
    Vectorcall,
    /// The "aapcs" ABI.
    Aapcs,
    /// The "win64" ABI.
    Win64,
    /// The "C-unwind" ABI.
    CUnwind,
    /// The "system" ABI.
    System,
}
impl FromStr for Abi {
    type Err = String;

    /// Parse an ABI name; the accepted strings mirror those emitted by the
    /// `Display` impl below.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let abi = match s {
            "C" => Self::C,
            "stdcall" => Self::Stdcall,
            "efiapi" => Self::EfiApi,
            "fastcall" => Self::Fastcall,
            "thiscall" => Self::ThisCall,
            "vectorcall" => Self::Vectorcall,
            "aapcs" => Self::Aapcs,
            "win64" => Self::Win64,
            "C-unwind" => Self::CUnwind,
            "system" => Self::System,
            _ => return Err(format!("Invalid or unknown ABI {s:?}")),
        };
        Ok(abi)
    }
}
impl std::fmt::Display for Abi {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let s = match *self {
Self::C => "C",
Self::Stdcall => "stdcall",
Self::EfiApi => "efiapi",
Self::Fastcall => "fastcall",
Self::ThisCall => "thiscall",
Self::Vectorcall => "vectorcall",
Self::Aapcs => "aapcs",
Self::Win64 => "win64",
Self::CUnwind => "C-unwind",
Abi::System => "system",
};
s.fmt(f)
}
}
impl quote::ToTokens for Abi {
    /// Append the ABI's name as a string literal token (presumably consumed
    /// by `extern "..."` codegen; confirm at the call sites).
    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
        let abi = self.to_string();
        tokens.append_all(quote! { #abi });
    }
}
/// An ABI extracted from a clang cursor.
#[derive(Debug, Copy, Clone)]
pub(crate) enum ClangAbi {
    /// An ABI known by Rust.
    Known(Abi),
    /// An unknown or invalid ABI, carrying the raw clang calling convention.
    Unknown(CXCallingConv),
}
impl ClangAbi {
    /// Returns `true` if this ABI is *not* known to Rust, i.e. it is the
    /// `Unknown` variant.
    fn is_unknown(self) -> bool {
        matches!(self, ClangAbi::Unknown(..))
    }
}
impl quote::ToTokens for ClangAbi {
    fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
        match *self {
            Self::Known(abi) => abi.to_tokens(tokens),
            // Unknown calling conventions cannot be emitted; presumably
            // callers filter these out via `is_unknown` first — confirm at
            // the codegen call sites.
            Self::Unknown(cc) => panic!(
                "Cannot turn unknown calling convention to tokens: {cc:?}"
            ),
        }
    }
}
/// A function signature.
#[derive(Debug)]
pub(crate) struct FunctionSig {
    /// The name of this function signature.
    name: String,
    /// The return type of the function.
    return_type: TypeId,
    /// The type of the arguments, optionally with the name of the argument when
    /// declared.
    argument_types: Vec<(Option<String>, TypeId)>,
    /// Whether this function is variadic.
    is_variadic: bool,
    /// Whether this function is divergent (never returns) — presumably
    /// detected at parse time; confirm at the construction site.
    is_divergent: bool,
    /// Whether this function's return value must be used.
    must_use: bool,
    /// The ABI of this function.
    abi: ClangAbi,
}
/// Map a clang calling convention to the matching Rust ABI, preserving
/// unrecognized conventions as `ClangAbi::Unknown` so callers can report them.
fn get_abi(cc: CXCallingConv) -> ClangAbi {
    use clang_sys::*;
    match cc {
        CXCallingConv_Default | CXCallingConv_C => ClangAbi::Known(Abi::C),
        CXCallingConv_X86StdCall => ClangAbi::Known(Abi::Stdcall),
        CXCallingConv_X86FastCall => ClangAbi::Known(Abi::Fastcall),
        CXCallingConv_X86ThisCall => ClangAbi::Known(Abi::ThisCall),
        CXCallingConv_X86VectorCall | CXCallingConv_AArch64VectorCall => {
            ClangAbi::Known(Abi::Vectorcall)
        }
        CXCallingConv_AAPCS => ClangAbi::Known(Abi::Aapcs),
        CXCallingConv_X86_64Win64 => ClangAbi::Known(Abi::Win64),
        other => ClangAbi::Unknown(other),
    }
}
/// Get the mangled name for the cursor's referent.
///
/// Returns `None` when mangling is disabled, when the cursor lives inside a
/// partially specialized template, or when clang reports no mangling.
pub(crate) fn cursor_mangling(
    ctx: &BindgenContext,
    cursor: &clang::Cursor,
) -> Option<String> {
    if !ctx.options().enable_mangling {
        return None;
    }
    // We early return here because libclang may crash in some case
    // if we pass in a variable inside a partial specialized template.
    // See rust-lang/rust-bindgen#67, and rust-lang/rust-bindgen#462.
    if cursor.is_in_non_fully_specialized_template() {
        return None;
    }
    let is_itanium_abi = ctx.abi_kind() == ABIKind::GenericItanium;
    let is_destructor = cursor.kind() == clang_sys::CXCursor_Destructor;
    // Prefer the C++ manglings when libclang provides them, scanning from the
    // back of the list.
    if let Ok(mut manglings) = cursor.cxx_manglings() {
        while let Some(m) = manglings.pop() {
            // Only generate the destructor group 1, see below.
            if is_itanium_abi && is_destructor && !m.ends_with("D1Ev") {
                continue;
            }
            return Some(m);
        }
    }
    let mut mangling = cursor.mangling();
    if mangling.is_empty() {
        return None;
    }
    if is_itanium_abi && is_destructor {
        // With old (3.8-) libclang versions, and the Itanium ABI, clang returns
        // the "destructor group 0" symbol, which means that it'll try to free
        // memory, which definitely isn't what we want.
        //
        // Explicitly force the destructor group 1 symbol.
        //
        // See http://refspecs.linuxbase.org/cxxabi-1.83.html#mangling-special
        // for the reference, and http://stackoverflow.com/a/6614369/1091587 for
        // a more friendly explanation.
        //
        // We don't need to do this for constructors since clang seems to always
        // have returned the C1 constructor.
        //
        // FIXME(emilio): Can a legit symbol in other ABIs end with this string?
        // I don't think so, but if it can this would become a linker error
        // anyway, not an invalid free at runtime.
        //
        // TODO(emilio, #611): Use cpp_demangle if this becomes nastier with
        // time.
        if mangling.ends_with("D0Ev") {
            let new_len = mangling.len() - 4;
            mangling.truncate(new_len);
            mangling.push_str("D1Ev");
        }
    }
    Some(mangling)
}
/// Collect the `(argument name, argument type)` pairs of a function by
/// combining information from both its clang type and its cursor.
fn args_from_ty_and_cursor(
    ty: &clang::Type,
    cursor: &clang::Cursor,
    ctx: &mut BindgenContext,
) -> Vec<(Option<String>, TypeId)> {
    let cursor_args = cursor.args().unwrap_or_default().into_iter();
    let type_args = ty.args().unwrap_or_default().into_iter();
    // Argument types can be found in either the cursor or the type, but argument names may only be
    // found on the cursor. We often have access to both a type and a cursor for each argument, but
    // in some cases we may only have one.
    //
    // Prefer using the type as the source of truth for the argument's type, but fall back to
    // inspecting the cursor (this happens for Objective C interfaces).
    //
    // Prefer using the cursor for the argument's type, but fall back to using the parent's cursor
    // (this happens for function pointer return types).
    //
    // The zip-with-None padding below walks both lists in lockstep until BOTH
    // are exhausted, so the longer list decides the argument count.
    cursor_args
        .map(Some)
        .chain(std::iter::repeat(None))
        .zip(type_args.map(Some).chain(std::iter::repeat(None)))
        .take_while(|(cur, ty)| cur.is_some() || ty.is_some())
        .map(|(arg_cur, arg_ty)| {
            // An empty spelling means the parameter is unnamed.
            let name = arg_cur.map(|a| a.spelling()).and_then(|name| {
                if name.is_empty() {
                    None
                } else {
                    Some(name)
                }
            });
            let cursor = arg_cur.unwrap_or(*cursor);
            let ty = arg_ty.unwrap_or_else(|| cursor.cur_type());
            (name, Item::from_ty_or_ref(ty, cursor, None, ctx))
        })
        .collect()
}
impl FunctionSig {
    /// Get the function name.
    pub(crate) fn name(&self) -> &str {
        &self.name
    }

    /// Construct a new function signature from the given Clang type.
    ///
    /// Returns `Err(ParseError::Continue)` for declarations we deliberately
    /// skip: function templates, C++ `operator` overloads (unless the
    /// `represent_cxx_operators` option is set), and constructors/destructors
    /// whose spelling contains template parameters.
    pub(crate) fn from_ty(
        ty: &clang::Type,
        cursor: &clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Result<Self, ParseError> {
        use clang_sys::*;
        debug!("FunctionSig::from_ty {ty:?} {cursor:?}");
        // Skip function templates
        let kind = cursor.kind();
        if kind == CXCursor_FunctionTemplate {
            return Err(ParseError::Continue);
        }
        let spelling = cursor.spelling();
        // Don't parse operatorxx functions in C++
        let is_operator = |spelling: &str| {
            spelling.starts_with("operator") &&
                !clang::is_valid_identifier(spelling)
        };
        if is_operator(&spelling) && !ctx.options().represent_cxx_operators {
            return Err(ParseError::Continue);
        }
        // Constructors of non-type template parameter classes for some reason
        // include the template parameter in their name. Just skip them, since
        // we don't handle well non-type template parameters anyway.
        if (kind == CXCursor_Constructor || kind == CXCursor_Destructor) &&
            spelling.contains('<')
        {
            return Err(ParseError::Continue);
        }
        // If the cursor itself is invalid, fall back to the cursor of the
        // type's declaration.
        let cursor = if cursor.is_valid() {
            *cursor
        } else {
            ty.declaration()
        };
        let mut args = match kind {
            CXCursor_FunctionDecl |
            CXCursor_Constructor |
            CXCursor_CXXMethod |
            CXCursor_ObjCInstanceMethodDecl |
            CXCursor_ObjCClassMethodDecl => {
                args_from_ty_and_cursor(ty, &cursor, ctx)
            }
            _ => {
                // For non-CXCursor_FunctionDecl, visiting the cursor's children
                // is the only reliable way to get parameter names.
                let mut args = vec![];
                cursor.visit(|c| {
                    if c.kind() == CXCursor_ParmDecl {
                        let ty =
                            Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
                        let name = c.spelling();
                        let name =
                            if name.is_empty() { None } else { Some(name) };
                        args.push((name, ty));
                    }
                    CXChildVisit_Continue
                });
                if args.is_empty() {
                    // FIXME(emilio): Sometimes libclang doesn't expose the
                    // right AST for functions tagged as stdcall and such...
                    //
                    // https://bugs.llvm.org/show_bug.cgi?id=45919
                    args_from_ty_and_cursor(ty, &cursor, ctx)
                } else {
                    args
                }
            }
        };
        // Attribute detection is opt-in; when disabled both flags default to
        // `false`.
        let (must_use, mut is_divergent) =
            if ctx.options().enable_function_attribute_detection {
                let [must_use, no_return, no_return_cpp] = cursor.has_attrs(&[
                    Attribute::MUST_USE,
                    Attribute::NO_RETURN,
                    Attribute::NO_RETURN_CPP,
                ]);
                (must_use, no_return || no_return_cpp)
            } else {
                Default::default()
            };
        // Check if the type contains __attribute__((noreturn)) outside of parentheses. This is
        // somewhat fragile, but it seems to be the only way to get at this information as of
        // libclang 9.
        let ty_spelling = ty.spelling();
        let has_attribute_noreturn = ty_spelling
            .match_indices("__attribute__((noreturn))")
            .any(|(i, _)| {
                // Depth 0 means the match is not nested inside any other
                // parentheses in the type spelling.
                let depth = ty_spelling[..i]
                    .bytes()
                    .filter_map(|ch| match ch {
                        b'(' => Some(1),
                        b')' => Some(-1),
                        _ => None,
                    })
                    .sum::<isize>();
                depth == 0
            });
        is_divergent = is_divergent || has_attribute_noreturn;
        let is_method = kind == CXCursor_CXXMethod;
        let is_constructor = kind == CXCursor_Constructor;
        let is_destructor = kind == CXCursor_Destructor;
        if (is_constructor || is_destructor || is_method) &&
            cursor.lexical_parent() != cursor.semantic_parent()
        {
            // Only parse constructors once.
            return Err(ParseError::Continue);
        }
        if is_method || is_constructor || is_destructor {
            let is_const = is_method && cursor.method_is_const();
            let is_virtual = is_method && cursor.method_is_virtual();
            let is_static = is_method && cursor.method_is_static();
            // Non-static members get an explicit `this` pointer prepended to
            // the argument list.
            if !is_static &&
                (!is_virtual ||
                    ctx.options().use_specific_virtual_function_receiver)
            {
                let parent = cursor.semantic_parent();
                let class = Item::parse(parent, None, ctx)
                    .expect("Expected to parse the class");
                // The `class` most likely is not finished parsing yet, so use
                // the unchecked variant.
                let class = class.as_type_id_unchecked();
                let class = if is_const {
                    let const_class_id = ctx.next_item_id();
                    ctx.build_const_wrapper(
                        const_class_id,
                        class,
                        None,
                        &parent.cur_type(),
                    )
                } else {
                    class
                };
                let ptr =
                    Item::builtin_type(TypeKind::Pointer(class), false, ctx);
                args.insert(0, (Some("this".into()), ptr));
            } else if is_virtual {
                // Virtual methods (without the specific-receiver option) take
                // an untyped `*void` receiver instead.
                let void = Item::builtin_type(TypeKind::Void, false, ctx);
                let ptr =
                    Item::builtin_type(TypeKind::Pointer(void), false, ctx);
                args.insert(0, (Some("this".into()), ptr));
            }
        }
        let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl ||
            kind == CXCursor_ObjCClassMethodDecl
        {
            ty.ret_type()
                .or_else(|| cursor.ret_type())
                .ok_or(ParseError::Continue)?
        } else {
            ty.ret_type().ok_or(ParseError::Continue)?
        };
        let ret = if is_constructor && ctx.is_target_wasm32() {
            // Constructors in Clang wasm32 target return a pointer to the object
            // being constructed.
            let void = Item::builtin_type(TypeKind::Void, false, ctx);
            Item::builtin_type(TypeKind::Pointer(void), false, ctx)
        } else {
            Item::from_ty_or_ref(ty_ret_type, cursor, None, ctx)
        };
        // Clang plays with us at "find the calling convention", see #549 and
        // co. This seems to be a better fix than that commit.
        let mut call_conv = ty.call_conv();
        if let Some(ty) = cursor.cur_type().canonical_type().pointee_type() {
            let cursor_call_conv = ty.call_conv();
            if cursor_call_conv != CXCallingConv_Invalid {
                call_conv = cursor_call_conv;
            }
        }
        let abi = get_abi(call_conv);
        if abi.is_unknown() {
            warn!("Unknown calling convention: {call_conv:?}");
        }
        Ok(Self {
            name: spelling,
            return_type: ret,
            argument_types: args,
            is_variadic: ty.is_variadic(),
            is_divergent,
            must_use,
            abi,
        })
    }

    /// Get this function signature's return type.
    pub(crate) fn return_type(&self) -> TypeId {
        self.return_type
    }

    /// Get this function signature's argument (name, type) pairs.
    pub(crate) fn argument_types(&self) -> &[(Option<String>, TypeId)] {
        &self.argument_types
    }

    /// Get this function signature's ABI.
    ///
    /// User-provided ABI override regexes are consulted first (matched against
    /// `name` when given, otherwise against the signature's own name). Returns
    /// an error for ABIs the enabled Rust feature set cannot express.
    pub(crate) fn abi(
        &self,
        ctx: &BindgenContext,
        name: Option<&str>,
    ) -> crate::codegen::error::Result<ClangAbi> {
        // FIXME (pvdrz): Try to do this check lazily instead. Maybe store the ABI inside `ctx`
        // instead?.
        let abi = if let Some(name) = name {
            if let Some((abi, _)) = ctx
                .options()
                .abi_overrides
                .iter()
                .find(|(_, regex_set)| regex_set.matches(name))
            {
                ClangAbi::Known(*abi)
            } else {
                self.abi
            }
        } else if let Some((abi, _)) = ctx
            .options()
            .abi_overrides
            .iter()
            .find(|(_, regex_set)| regex_set.matches(&self.name))
        {
            ClangAbi::Known(*abi)
        } else {
            self.abi
        };
        match abi {
            ClangAbi::Known(Abi::ThisCall)
                if !ctx.options().rust_features().thiscall_abi =>
            {
                Err(crate::codegen::error::Error::UnsupportedAbi("thiscall"))
            }
            ClangAbi::Known(Abi::Vectorcall)
                if !ctx.options().rust_features().vectorcall_abi =>
            {
                Err(crate::codegen::error::Error::UnsupportedAbi("vectorcall"))
            }
            ClangAbi::Known(Abi::CUnwind)
                if !ctx.options().rust_features().c_unwind_abi =>
            {
                Err(crate::codegen::error::Error::UnsupportedAbi("C-unwind"))
            }
            ClangAbi::Known(Abi::EfiApi)
                if !ctx.options().rust_features().abi_efiapi =>
            {
                Err(crate::codegen::error::Error::UnsupportedAbi("efiapi"))
            }
            ClangAbi::Known(Abi::Win64) if self.is_variadic() => {
                Err(crate::codegen::error::Error::UnsupportedAbi("Win64"))
            }
            abi => Ok(abi),
        }
    }

    /// Is this function signature variadic?
    pub(crate) fn is_variadic(&self) -> bool {
        // Clang reports some functions as variadic when they *might* be
        // variadic. We do the argument check because rust doesn't codegen well
        // variadic functions without an initial argument.
        self.is_variadic && !self.argument_types.is_empty()
    }

    /// Must this function's return value be used?
    pub(crate) fn must_use(&self) -> bool {
        self.must_use
    }

    /// Are function pointers with this signature able to derive Rust traits?
    /// Rust only supports deriving traits for function pointers with a limited
    /// number of parameters and a couple ABIs.
    ///
    /// For more details, see:
    ///
    /// * <https://github.com/rust-lang/rust-bindgen/issues/547>,
    /// * <https://github.com/rust-lang/rust/issues/38848>,
    /// * and <https://github.com/rust-lang/rust/issues/40158>
    pub(crate) fn function_pointers_can_derive(&self) -> bool {
        if self.argument_types.len() > RUST_DERIVE_FUNPTR_LIMIT {
            return false;
        }
        matches!(self.abi, ClangAbi::Known(Abi::C) | ClangAbi::Unknown(..))
    }

    /// Whether this function has attributes marking it as divergent.
    pub(crate) fn is_divergent(&self) -> bool {
        self.is_divergent
    }
}
impl ClangSubItemParser for Function {
    fn parse(
        cursor: clang::Cursor,
        context: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError> {
        use clang_sys::*;
        // Only cursors that map to a function-like kind are of interest here.
        let kind = match FunctionKind::from_cursor(&cursor) {
            None => return Err(ParseError::Continue),
            Some(k) => k,
        };
        debug!("Function::parse({cursor:?}, {:?})", cursor.cur_type());
        let visibility = cursor.visibility();
        if visibility != CXVisibility_Default {
            return Err(ParseError::Continue);
        }
        if cursor.access_specifier() == CX_CXXPrivate &&
            !context.options().generate_private_functions
        {
            return Err(ParseError::Continue);
        }
        // Only external and internal linkage can be represented; anything
        // else is skipped.
        let linkage = cursor.linkage();
        let linkage = match linkage {
            CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External,
            CXLinkage_Internal => Linkage::Internal,
            _ => return Err(ParseError::Continue),
        };
        // Inline functions (either declared inline, or with an inline
        // definition) are gated behind several options.
        if cursor.is_inlined_function() ||
            cursor.definition().is_some_and(|x| x.is_inlined_function())
        {
            if !context.options().generate_inline_functions &&
                !context.options().wrap_static_fns
            {
                return Err(ParseError::Continue);
            }
            if cursor.is_deleted_function() &&
                !context.options().generate_deleted_functions
            {
                return Err(ParseError::Continue);
            }
            // We cannot handle `inline` functions that are not `static`.
            if context.options().wrap_static_fns &&
                cursor.is_inlined_function() &&
                matches!(linkage, Linkage::External)
            {
                return Err(ParseError::Continue);
            }
        }
        // Grab the signature using Item::from_ty.
        let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?;
        let mut name = cursor.spelling();
        assert!(!name.is_empty(), "Empty function name?");
        if cursor.kind() == CXCursor_Destructor {
            // Remove the leading `~`. The alternative to this is special-casing
            // code-generation for destructor functions, which seems less than
            // ideal.
            if name.starts_with('~') {
                name.remove(0);
            }
            // Add a suffix to avoid colliding with constructors. This would be
            // technically fine (since we handle duplicated functions/methods),
            // but seems easy enough to handle it here.
            name.push_str("_destructor");
        }
        // User callbacks get a chance to rename the generated item.
        if let Some(nm) = context.options().last_callback(|callbacks| {
            callbacks.generated_name_override(ItemInfo {
                name: name.as_str(),
                kind: ItemKind::Function,
            })
        }) {
            name = nm;
        }
        assert!(!name.is_empty(), "Empty function name.");
        let mangled_name = cursor_mangling(context, &cursor);
        // Callbacks may also override the link name, independently of the
        // generated name above.
        let link_name = context.options().last_callback(|callbacks| {
            callbacks.generated_link_name_override(ItemInfo {
                name: name.as_str(),
                kind: ItemKind::Function,
            })
        });
        let function = Self::new(
            name.clone(),
            mangled_name,
            link_name,
            sig,
            kind,
            linkage,
        );
        Ok(ParseResult::New(function, Some(cursor)))
    }
}
impl Trace for FunctionSig {
    type Extra = ();

    /// Visit the return-type edge, then one parameter edge per argument.
    fn trace<T>(&self, _: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        tracer.visit_kind(self.return_type().into(), EdgeKind::FunctionReturn);
        self.argument_types().iter().for_each(|&(_, ty)| {
            tracer.visit_kind(ty.into(), EdgeKind::FunctionParameter);
        });
    }
}

128
vendor/bindgen/ir/int.rs vendored Normal file
View File

@@ -0,0 +1,128 @@
//! Intermediate representation for integral types.
/// Which integral type are we dealing with?
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum IntKind {
    /// A `bool`.
    Bool,
    /// A `signed char`.
    SChar,
    /// An `unsigned char`.
    UChar,
    /// A `wchar_t`.
    WChar,
    /// A platform-dependent `char` type, with the signedness support.
    Char {
        /// Whether the char is signed for the target platform.
        is_signed: bool,
    },
    /// A `short`.
    Short,
    /// An `unsigned short`.
    UShort,
    /// An `int`.
    Int,
    /// An `unsigned int`.
    UInt,
    /// A `long`.
    Long,
    /// An `unsigned long`.
    ULong,
    /// A `long long`.
    LongLong,
    /// An `unsigned long long`.
    ULongLong,
    /// An 8-bit signed integer.
    I8,
    /// An 8-bit unsigned integer.
    U8,
    /// A 16-bit signed integer.
    I16,
    /// A 16-bit unsigned integer.
    U16,
    /// The C++ type `char16_t`, which is its own type (unlike in C).
    Char16,
    /// A 32-bit signed integer.
    I32,
    /// A 32-bit unsigned integer.
    U32,
    /// A 64-bit signed integer.
    I64,
    /// A 64-bit unsigned integer.
    U64,
    /// An `int128_t`.
    I128,
    /// A `uint128_t`.
    U128,
    /// A custom integer type, used to allow custom macro types depending on
    /// range.
    Custom {
        /// The name of the type, which would be used without modification.
        name: &'static str,
        /// Whether the type is signed or not.
        is_signed: bool,
    },
}
impl IntKind {
    /// Is this integral type signed?
    pub(crate) fn is_signed(&self) -> bool {
        use self::IntKind::*;
        match *self {
            SChar | Short | Int | Long | LongLong | I8 | I16 | I32 | I64 |
            I128 => true,
            // TODO(emilio): wchar_t can in theory be signed, but we have no way
            // to know whether it is or not right now (unlike char, there's no
            // WChar_S / WChar_U).
            Bool | UChar | UShort | UInt | ULong | ULongLong | U8 | U16 |
            Char16 | WChar | U32 | U64 | U128 => false,
            // For `char` and custom kinds the signedness is carried in the
            // variant itself.
            Char { is_signed } | Custom { is_signed, .. } => is_signed,
        }
    }

    /// If this type has a known size, return it (in bytes). This is to
    /// alleviate libclang sometimes not giving us a layout (like in the case
    /// when an enum is defined inside a class with template parameters).
    pub(crate) fn known_size(&self) -> Option<usize> {
        use self::IntKind::*;
        let bytes = match *self {
            Bool | UChar | SChar | U8 | I8 | Char { .. } => 1,
            U16 | I16 | Char16 => 2,
            U32 | I32 => 4,
            U64 | I64 => 8,
            I128 | U128 => 16,
            // Everything else is platform-dependent.
            _ => return None,
        };
        Some(bytes)
    }

    /// Whether this type's signedness matches the value.
    pub(crate) fn signedness_matches(&self, val: i64) -> bool {
        // Negative values only fit into signed kinds.
        self.is_signed() || val >= 0
    }
}

1994
vendor/bindgen/ir/item.rs vendored Normal file

File diff suppressed because it is too large Load Diff

135
vendor/bindgen/ir/item_kind.rs vendored Normal file
View File

@@ -0,0 +1,135 @@
//! Different variants of an `Item` in our intermediate representation.
use super::context::BindgenContext;
use super::dot::DotAttributes;
use super::function::Function;
use super::module::Module;
use super::ty::Type;
use super::var::Var;
use std::io;
/// An item we parse and translate.
#[derive(Debug)]
pub(crate) enum ItemKind {
    /// A module, created implicitly once (the root module), or via C++
    /// namespaces.
    Module(Module),
    /// A type declared in any of the multiple ways it can be declared.
    Type(Type),
    /// A function or method declaration.
    Function(Function),
    /// A variable declaration, most likely a static.
    Var(Var),
}
impl ItemKind {
    /// Get a reference to this `ItemKind`'s underlying `Module`, or `None` if
    /// it is some other kind.
    pub(crate) fn as_module(&self) -> Option<&Module> {
        if let ItemKind::Module(ref module) = *self {
            Some(module)
        } else {
            None
        }
    }

    /// Transform our `ItemKind` into a string.
    pub(crate) fn kind_name(&self) -> &'static str {
        match self {
            ItemKind::Module(..) => "Module",
            ItemKind::Type(..) => "Type",
            ItemKind::Function(..) => "Function",
            ItemKind::Var(..) => "Var",
        }
    }

    /// Is this a module?
    pub(crate) fn is_module(&self) -> bool {
        matches!(self, ItemKind::Module(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Function`, or `None`
    /// if it is some other kind.
    pub(crate) fn as_function(&self) -> Option<&Function> {
        if let ItemKind::Function(ref func) = *self {
            Some(func)
        } else {
            None
        }
    }

    /// Is this a function?
    pub(crate) fn is_function(&self) -> bool {
        matches!(self, ItemKind::Function(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Function`, or panic if
    /// it is some other kind.
    pub(crate) fn expect_function(&self) -> &Function {
        self.as_function().expect("Not a function")
    }

    /// Get a reference to this `ItemKind`'s underlying `Type`, or `None` if
    /// it is some other kind.
    pub(crate) fn as_type(&self) -> Option<&Type> {
        if let ItemKind::Type(ref ty) = *self {
            Some(ty)
        } else {
            None
        }
    }

    /// Get a mutable reference to this `ItemKind`'s underlying `Type`, or
    /// `None` if it is some other kind.
    pub(crate) fn as_type_mut(&mut self) -> Option<&mut Type> {
        if let ItemKind::Type(ref mut ty) = *self {
            Some(ty)
        } else {
            None
        }
    }

    /// Is this a type?
    pub(crate) fn is_type(&self) -> bool {
        matches!(self, ItemKind::Type(..))
    }

    /// Get a reference to this `ItemKind`'s underlying `Type`, or panic if it
    /// is some other kind.
    pub(crate) fn expect_type(&self) -> &Type {
        self.as_type().expect("Not a type")
    }

    /// Get a reference to this `ItemKind`'s underlying `Var`, or `None` if it
    /// is some other kind.
    pub(crate) fn as_var(&self) -> Option<&Var> {
        if let ItemKind::Var(ref v) = *self {
            Some(v)
        } else {
            None
        }
    }

    /// Is this a variable?
    pub(crate) fn is_var(&self) -> bool {
        matches!(self, ItemKind::Var(..))
    }
}
impl DotAttributes for ItemKind {
    /// Write a graphviz table row naming the kind, then delegate to the
    /// wrapped item's own attributes.
    fn dot_attributes<W>(
        &self,
        ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", self.kind_name())?;
        match self {
            ItemKind::Module(module) => module.dot_attributes(ctx, out),
            ItemKind::Type(ty) => ty.dot_attributes(ctx, out),
            ItemKind::Function(func) => func.dot_attributes(ctx, out),
            ItemKind::Var(var) => var.dot_attributes(ctx, out),
        }
    }
}

126
vendor/bindgen/ir/layout.rs vendored Normal file
View File

@@ -0,0 +1,126 @@
//! Intermediate representation for the physical layout of some type.
use super::derive::CanDerive;
use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
use crate::clang;
use crate::ir::context::BindgenContext;
use std::cmp;
/// A type that represents the struct layout of a type. All measurements are in
/// bytes.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct Layout {
    /// The size (in bytes) of this layout.
    pub(crate) size: usize,
    /// The alignment (in bytes) of this layout.
    pub(crate) align: usize,
    /// Whether this layout's members are packed or not.
    pub(crate) packed: bool,
}
#[test]
fn test_layout_for_size() {
    use std::mem::size_of;
    let ptr_size = size_of::<*mut ()>();
    // The computed alignment is the largest power of two, capped at the
    // pointer size, that evenly divides the requested size.
    assert_eq!(
        Layout::for_size_internal(ptr_size, ptr_size),
        Layout::new(ptr_size, ptr_size)
    );
    assert_eq!(
        Layout::for_size_internal(ptr_size, 3 * ptr_size),
        Layout::new(3 * ptr_size, ptr_size)
    );
}
impl Layout {
    /// Gets the integer type name for a given known size.
    pub(crate) fn known_type_for_size(size: usize) -> Option<syn::Type> {
        let ty: syn::Type = match size {
            1 => syn::parse_quote! { u8 },
            2 => syn::parse_quote! { u16 },
            4 => syn::parse_quote! { u32 },
            8 => syn::parse_quote! { u64 },
            16 => syn::parse_quote! { u128 },
            _ => return None,
        };
        Some(ty)
    }

    /// Construct a new `Layout` with the given `size` and `align`. It is not
    /// packed.
    pub(crate) fn new(size: usize, align: usize) -> Self {
        Layout {
            size,
            align,
            packed: false,
        }
    }

    /// Find the largest power-of-two alignment, capped at `ptr_size`, that
    /// evenly divides `size`, and build a non-packed layout from it.
    fn for_size_internal(ptr_size: usize, size: usize) -> Self {
        let mut align = 1;
        while size % (align * 2) == 0 && align * 2 <= ptr_size {
            align *= 2;
        }
        Layout {
            size,
            align,
            packed: false,
        }
    }

    /// Creates a non-packed layout for a given size, trying to use the maximum
    /// alignment possible.
    pub(crate) fn for_size(ctx: &BindgenContext, size: usize) -> Self {
        Self::for_size_internal(ctx.target_pointer_size(), size)
    }

    /// Get this layout as an opaque type.
    pub(crate) fn opaque(&self) -> Opaque {
        Opaque(*self)
    }
}
/// When we are treating a type as opaque, it is just a blob with a `Layout`.
/// A thin newtype wrapper so the opaque-specific helpers live on their own
/// type.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct Opaque(pub(crate) Layout);
impl Opaque {
    /// Construct an opaque `Type` from the given clang type, preserving only
    /// its layout (size and alignment) and constness.
    pub(crate) fn from_clang_ty(
        ty: &clang::Type,
        ctx: &BindgenContext,
    ) -> Type {
        let layout = Layout::new(ty.size(ctx), ty.align(ctx));
        Type::new(None, Some(layout), TypeKind::Opaque, ty.is_const())
    }

    /// Return the known rust type we should use to create a correctly-aligned
    /// field with this layout.
    pub(crate) fn known_rust_type_for_array(&self) -> Option<syn::Type> {
        Layout::known_type_for_size(self.0.align)
    }

    /// Return the array size that an opaque type for this layout should have
    /// if we know the correct type for it, or `None` otherwise.
    pub(crate) fn array_size(&self) -> Option<usize> {
        // Only meaningful when a suitable element type exists; guard against
        // a zero alignment when dividing.
        self.known_rust_type_for_array()
            .map(|_| self.0.size / cmp::max(self.0.align, 1))
    }

    /// Return `true`-equivalent (`CanDerive::Yes`) if this opaque layout's
    /// array size fits within the maximum number of array elements that Rust
    /// allows deriving traits with, and `CanDerive::Manually` otherwise.
    pub(crate) fn array_size_within_derive_limit(&self) -> CanDerive {
        match self.array_size() {
            Some(size) if size <= RUST_DERIVE_IN_ARRAY_LIMIT => CanDerive::Yes,
            _ => CanDerive::Manually,
        }
    }
}

25
vendor/bindgen/ir/mod.rs vendored Normal file
View File

@@ -0,0 +1,25 @@
//! The ir module defines bindgen's intermediate representation.
//!
//! Parsing C/C++ generates the IR, while code generation outputs Rust code from
//! the IR.
#![deny(clippy::missing_docs_in_private_items)]
pub(crate) mod analysis;
pub(crate) mod annotations;
pub(crate) mod comment;
pub(crate) mod comp;
pub(crate) mod context;
pub(crate) mod derive;
pub(crate) mod dot;
pub(crate) mod enum_ty;
pub(crate) mod function;
pub(crate) mod int;
pub(crate) mod item;
pub(crate) mod item_kind;
pub(crate) mod layout;
pub(crate) mod module;
pub(crate) mod objc;
pub(crate) mod template;
pub(crate) mod traversal;
pub(crate) mod ty;
pub(crate) mod var;

96
vendor/bindgen/ir/module.rs vendored Normal file
View File

@@ -0,0 +1,96 @@
//! Intermediate representation for modules (AKA C++ namespaces).
use super::context::BindgenContext;
use super::dot::DotAttributes;
use super::item::ItemSet;
use crate::clang;
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use crate::parse_one;
use std::io;
/// Whether this module is inline or not. Modules come from C++ namespaces, so
/// `Inline` corresponds to an `inline namespace`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum ModuleKind {
    /// This module is not inline.
    Normal,
    /// This module is inline, as in `inline namespace foo {}`.
    Inline,
}
/// A module, as in, a C++ namespace.
#[derive(Clone, Debug)]
pub(crate) struct Module {
    /// The name of the module, or none if it's anonymous.
    name: Option<String>,
    /// The kind of module this is (normal or inline).
    kind: ModuleKind,
    /// The children of this module, just here for convenience.
    children: ItemSet,
}
impl Module {
    /// Construct a new `Module` with no children.
    pub(crate) fn new(name: Option<String>, kind: ModuleKind) -> Self {
        let children = ItemSet::new();
        Module {
            name,
            kind,
            children,
        }
    }

    /// Get this module's name, if it has one.
    pub(crate) fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }

    /// Get this module's children.
    pub(crate) fn children(&self) -> &ItemSet {
        &self.children
    }

    /// Get a mutable reference to this module's children.
    pub(crate) fn children_mut(&mut self) -> &mut ItemSet {
        &mut self.children
    }

    /// Whether this namespace is an `inline namespace`.
    pub(crate) fn is_inline(&self) -> bool {
        matches!(self.kind, ModuleKind::Inline)
    }
}
impl DotAttributes for Module {
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        // A single graphviz table row describing the module kind.
        writeln!(out, "<tr><td>ModuleKind</td><td>{:?}</td></tr>", self.kind)
    }
}
impl ClangSubItemParser for Module {
    fn parse(
        cursor: clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError> {
        use clang_sys::*;
        match cursor.kind() {
            CXCursor_Namespace => {
                // Resolve (or create) the module for this namespace, then
                // parse every child with that module as the current parent.
                let module_id = ctx.module(cursor);
                ctx.with_module(module_id, |ctx| {
                    cursor.visit_sorted(ctx, |ctx, child| {
                        parse_one(ctx, child, Some(module_id.into()));
                    });
                });
                Ok(ParseResult::AlreadyResolved(module_id.into()))
            }
            _ => Err(ParseError::Continue),
        }
    }
}

343
vendor/bindgen/ir/objc.rs vendored Normal file
View File

@@ -0,0 +1,343 @@
//! Objective C types
use super::context::{BindgenContext, ItemId};
use super::function::FunctionSig;
use super::item::Item;
use super::traversal::{Trace, Tracer};
use super::ty::TypeKind;
use crate::clang;
use clang_sys::CXChildVisit_Continue;
use clang_sys::CXCursor_ObjCCategoryDecl;
use clang_sys::CXCursor_ObjCClassMethodDecl;
use clang_sys::CXCursor_ObjCClassRef;
use clang_sys::CXCursor_ObjCInstanceMethodDecl;
use clang_sys::CXCursor_ObjCProtocolDecl;
use clang_sys::CXCursor_ObjCProtocolRef;
use clang_sys::CXCursor_ObjCSuperClassRef;
use clang_sys::CXCursor_TemplateTypeParameter;
use proc_macro2::{Ident, Span, TokenStream};
/// Objective-C interface as used in `TypeKind`
///
/// Also, protocols and categories are parsed as this type
#[derive(Debug)]
pub(crate) struct ObjCInterface {
    /// The name
    /// like, `NSObject`
    name: String,
    /// The category name, set when this was parsed from a category extension
    /// of `name` rather than from the interface itself.
    category: Option<String>,
    /// Whether this was parsed from a protocol declaration.
    is_protocol: bool,
    /// The list of template names almost always, `ObjectType` or `KeyType`
    pub(crate) template_names: Vec<String>,
    /// The list of protocols that this interface conforms to.
    pub(crate) conforms_to: Vec<ItemId>,
    /// The direct parent for this interface.
    pub(crate) parent_class: Option<ItemId>,
    /// List of the methods defined in this interface
    methods: Vec<ObjCMethod>,
    /// List of the class methods defined in this interface.
    class_methods: Vec<ObjCMethod>,
}
/// The objective c methods
#[derive(Debug)]
pub(crate) struct ObjCMethod {
    /// The original method selector name
    /// like, dataWithBytes:length:
    name: String,
    /// Method name as converted to rust
    /// like, `dataWithBytes_length`_
    rust_name: String,
    /// The signature (arguments and return type) of this method.
    signature: FunctionSig,
    /// Is class method?
    is_class_method: bool,
}
impl ObjCInterface {
    /// Create an empty interface with the given name.
    fn new(name: &str) -> ObjCInterface {
        ObjCInterface {
            name: name.to_owned(),
            category: None,
            is_protocol: false,
            template_names: Vec::new(),
            parent_class: None,
            conforms_to: Vec::new(),
            methods: Vec::new(),
            class_methods: Vec::new(),
        }
    }

    /// The name
    /// like, `NSObject`
    pub(crate) fn name(&self) -> &str {
        self.name.as_ref()
    }

    /// Formats the name for rust
    /// Can be like `NSObject`, but with categories might be like `NSObject_NSCoderMethods`
    /// and protocols are like `PNSObject`
    pub(crate) fn rust_name(&self) -> String {
        if let Some(ref cat) = self.category {
            format!("{}_{cat}", self.name())
        } else if self.is_protocol {
            format!("P{}", self.name())
        } else {
            format!("I{}", self.name().to_owned())
        }
    }

    /// Is this a template interface?
    pub(crate) fn is_template(&self) -> bool {
        !self.template_names.is_empty()
    }

    /// List of the methods defined in this interface
    pub(crate) fn methods(&self) -> &Vec<ObjCMethod> {
        &self.methods
    }

    /// Is this a protocol?
    pub(crate) fn is_protocol(&self) -> bool {
        self.is_protocol
    }

    /// Is this a category?
    pub(crate) fn is_category(&self) -> bool {
        self.category.is_some()
    }

    /// List of the class methods defined in this interface
    pub(crate) fn class_methods(&self) -> &Vec<ObjCMethod> {
        &self.class_methods
    }

    /// Parses the Objective C interface from the cursor
    pub(crate) fn from_ty(
        cursor: &clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Option<Self> {
        let name = cursor.spelling();
        let mut interface = Self::new(&name);
        if cursor.kind() == CXCursor_ObjCProtocolDecl {
            interface.is_protocol = true;
        }
        // A single pass over the children fills in every field of the
        // interface: category info, protocol conformances, methods, template
        // parameters and the superclass.
        cursor.visit(|c| {
            match c.kind() {
                CXCursor_ObjCClassRef => {
                    if cursor.kind() == CXCursor_ObjCCategoryDecl {
                        // We are actually a category extension, and we found the reference
                        // to the original interface, so name this interface appropriately
                        interface.name = c.spelling();
                        interface.category = Some(cursor.spelling());
                    }
                }
                CXCursor_ObjCProtocolRef => {
                    // Gather protocols this interface conforms to
                    let needle = format!("P{}", c.spelling());
                    let items_map = ctx.items();
                    debug!(
                        "Interface {} conforms to {needle}, find the item",
                        interface.name,
                    );
                    // Linear scan over the already-parsed items, looking for a
                    // protocol type whose name matches `needle`.
                    for (id, item) in items_map {
                        if let Some(ty) = item.as_type() {
                            if let TypeKind::ObjCInterface(ref protocol) =
                                *ty.kind()
                            {
                                if protocol.is_protocol {
                                    debug!(
                                        "Checking protocol {}, ty.name {:?}",
                                        protocol.name,
                                        ty.name()
                                    );
                                    if Some(needle.as_ref()) == ty.name() {
                                        debug!("Found conforming protocol {item:?}");
                                        interface.conforms_to.push(id);
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                CXCursor_ObjCInstanceMethodDecl |
                CXCursor_ObjCClassMethodDecl => {
                    let name = c.spelling();
                    let signature =
                        FunctionSig::from_ty(&c.cur_type(), &c, ctx)
                            .expect("Invalid function sig");
                    let is_class_method =
                        c.kind() == CXCursor_ObjCClassMethodDecl;
                    let method =
                        ObjCMethod::new(&name, signature, is_class_method);
                    interface.add_method(method);
                }
                CXCursor_TemplateTypeParameter => {
                    let name = c.spelling();
                    interface.template_names.push(name);
                }
                CXCursor_ObjCSuperClassRef => {
                    let item = Item::from_ty_or_ref(c.cur_type(), c, None, ctx);
                    interface.parent_class = Some(item.into());
                }
                _ => {}
            }
            CXChildVisit_Continue
        });
        Some(interface)
    }

    /// Store a parsed method in the instance- or class-method list, depending
    /// on its kind.
    fn add_method(&mut self, method: ObjCMethod) {
        if method.is_class_method {
            self.class_methods.push(method);
        } else {
            self.methods.push(method);
        }
    }
}
impl ObjCMethod {
    /// Build a method from its selector `name`, deriving the rust-facing name
    /// by joining the selector pieces with `_`.
    fn new(
        name: &str,
        signature: FunctionSig,
        is_class_method: bool,
    ) -> ObjCMethod {
        let split_name: Vec<&str> = name.split(':').collect();
        let rust_name = split_name.join("_");
        ObjCMethod {
            name: name.to_owned(),
            rust_name,
            signature,
            is_class_method,
        }
    }

    /// Method name as converted to rust
    /// like, `dataWithBytes_length`_
    pub(crate) fn rust_name(&self) -> &str {
        self.rust_name.as_ref()
    }

    /// Returns the methods signature as `FunctionSig`
    pub(crate) fn signature(&self) -> &FunctionSig {
        &self.signature
    }

    /// Is this a class method?
    pub(crate) fn is_class_method(&self) -> bool {
        self.is_class_method
    }

    /// Formats the method call
    ///
    /// Reassembles the original selector pieces with the given argument token
    /// streams into `piece: value` pairs.
    pub(crate) fn format_method_call(
        &self,
        args: &[TokenStream],
    ) -> TokenStream {
        // Each selector piece becomes an identifier; empty pieces (a trailing
        // `:`) become `None`.
        let split_name: Vec<Option<Ident>> = self
            .name
            .split(':')
            .enumerate()
            .map(|(idx, name)| {
                if name.is_empty() {
                    None
                } else if idx == 0 {
                    // Try to parse the method name as an identifier. Having a keyword is ok
                    // unless it is `crate`, `self`, `super` or `Self`, so we try to add the `_`
                    // suffix to it and parse it.
                    if ["crate", "self", "super", "Self"].contains(&name) {
                        Some(Ident::new(&format!("{name}_"), Span::call_site()))
                    } else {
                        Some(Ident::new(name, Span::call_site()))
                    }
                } else {
                    // Try to parse the current joining name as an identifier. This might fail if the name
                    // is a keyword, so we try to "r#" to it and parse again, this could also fail
                    // if the name is `crate`, `self`, `super` or `Self`, so we try to add the `_`
                    // suffix to it and parse again. If this also fails, we panic with the first
                    // error.
                    Some(
                        syn::parse_str::<Ident>(name)
                            .or_else(|err| {
                                syn::parse_str::<Ident>(&format!("r#{name}"))
                                    .map_err(|_| err)
                            })
                            .or_else(|err| {
                                syn::parse_str::<Ident>(&format!("{name}_"))
                                    .map_err(|_| err)
                            })
                            .expect("Invalid identifier"),
                    )
                }
            })
            .collect();
        // No arguments
        if args.is_empty() && split_name.len() == 1 {
            let name = &split_name[0];
            return quote! {
                #name
            };
        }
        // Check right amount of arguments
        assert_eq!(args.len(), split_name.len() - 1, "Incorrect method name or arguments for objc method, {args:?} vs {split_name:?}");
        // Get arguments without type signatures to pass to `msg_send!`
        let mut args_without_types = vec![];
        for arg in args {
            let arg = arg.to_string();
            // The token stream is rendered as "name : type"; the first token
            // is the argument name.
            let name_and_sig: Vec<&str> = arg.split(' ').collect();
            let name = name_and_sig[0];
            args_without_types.push(Ident::new(name, Span::call_site()));
        }
        let args = split_name.into_iter().zip(args_without_types).map(
            |(arg, arg_val)| {
                if let Some(arg) = arg {
                    quote! { #arg: #arg_val }
                } else {
                    quote! { #arg_val: #arg_val }
                }
            },
        );
        quote! {
            #( #args )*
        }
    }
}
impl Trace for ObjCInterface {
    type Extra = ();

    /// Emit outgoing IR edges: the signature of every instance method, then
    /// of every class method, then a generic edge per conformed-to protocol.
    fn trace<T>(&self, context: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        // Instance methods first, then class methods — same emission order
        // as two separate loops would produce.
        for m in self.methods.iter().chain(self.class_methods.iter()) {
            m.signature.trace(context, tracer, &());
        }
        for &protocol in &self.conforms_to {
            tracer.visit(protocol);
        }
    }
}

335
vendor/bindgen/ir/template.rs vendored Normal file
View File

@@ -0,0 +1,335 @@
//! Template declaration and instantiation related things.
//!
//! The nomenclature surrounding templates is often confusing, so here are a few
//! brief definitions:
//!
//! * "Template definition": a class/struct/alias/function definition that takes
//! generic template parameters. For example:
//!
//! ```c++
//! template<typename T>
//!     class List {
//! // ...
//! };
//! ```
//!
//! * "Template instantiation": an instantiation is a use of a template with
//! concrete template arguments. For example, `List<int>`.
//!
//! * "Template specialization": an alternative template definition providing a
//! custom definition for instantiations with the matching template
//! arguments. This C++ feature is unsupported by bindgen. For example:
//!
//! ```c++
//! template<>
//! class List<int> {
//! // Special layout for int lists...
//! };
//! ```
use super::context::{BindgenContext, ItemId, TypeId};
use super::item::{IsOpaque, Item, ItemAncestors};
use super::traversal::{EdgeKind, Trace, Tracer};
use crate::clang;
/// Template declaration (and such declaration's template parameters) related
/// methods.
///
/// This trait's methods distinguish between `None` and `Some([])` for
/// declarations that are not templates and template declarations with zero
/// parameters, in general.
///
/// Consider this example:
///
/// ```c++
/// template <typename T, typename U>
/// class Foo {
/// T use_of_t;
/// U use_of_u;
///
/// template <typename V>
/// using Bar = V*;
///
/// class Inner {
/// T x;
/// U y;
/// Bar<int> z;
/// };
///
/// template <typename W>
/// class Lol {
/// // No use of W, but here's a use of T.
/// T t;
/// };
///
/// template <typename X>
/// class Wtf {
/// // X is not used because W is not used.
/// Lol<X> lololol;
/// };
/// };
///
/// class Qux {
/// int y;
/// };
/// ```
///
/// The following table depicts the results of each trait method when invoked on
/// each of the declarations above:
///
/// |Decl. | self_template_params | num_self_template_params | all_template_parameters |
/// |------|----------------------|--------------------------|-------------------------|
/// |Foo | T, U | 2 | T, U |
/// |Bar | V | 1 | T, U, V |
/// |Inner | | 0 | T, U |
/// |Lol | W | 1 | T, U, W |
/// |Wtf | X | 1 | T, U, X |
/// |Qux | | 0 | |
///
/// | Decl. | used_template_params |
/// |-------|----------------------|
/// | Foo | T, U |
/// | Bar | V |
/// | Inner | |
/// | Lol | T |
/// | Wtf | T |
/// | Qux | |
pub(crate) trait TemplateParameters: Sized {
    /// The `ItemId`s of this template declaration's own free template
    /// parameters.
    ///
    /// Note that these are not necessarily all named types: C++ also allows
    /// constant-value template parameters and template-template parameters.
    /// Rust generics can only be types, so those must be treated as opaque
    /// and never instantiated.
    fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>;

    /// How many free template parameters this template declaration has.
    fn num_self_template_params(&self, ctx: &BindgenContext) -> usize {
        self.self_template_params(ctx).len()
    }

    /// The complete set of template parameters that can affect this
    /// declaration: the parameters of every ancestor declaration (outermost
    /// first) followed by this declaration's own.
    ///
    /// An item need not be a template declaration itself for this to be
    /// non-empty; members of a template declaration inherit the parent's
    /// parameters. E.g. referencing `Foo<int, char>::Inner` requires
    /// instantiating `Foo` even though `Inner` declares no parameters.
    fn all_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>
    where
        Self: ItemAncestors,
    {
        // `ancestors` yields innermost-first; walk it reversed so the
        // outermost declaration's parameters come first.
        let ancestors: Vec<_> = self.ancestors(ctx).collect();
        ancestors
            .into_iter()
            .rev()
            .flat_map(|id| id.self_template_params(ctx))
            .collect()
    }

    /// The subset of `all_template_params` that this item actually uses.
    /// This is not necessarily a superset of `self_template_params`.
    ///
    /// Only available during codegen, after usage analysis has run.
    fn used_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId>
    where
        Self: AsRef<ItemId>,
    {
        assert!(
            ctx.in_codegen_phase(),
            "template parameter usage is not computed until codegen"
        );
        let this = *self.as_ref();
        ctx.resolve_item(this)
            .all_template_params(ctx)
            .into_iter()
            .filter(|param| ctx.uses_template_parameter(this, *param))
            .collect()
    }
}
/// A trait for things which may or may not be a named template type parameter.
pub(crate) trait AsTemplateParam {
    /// Any extra information the implementor might need to make this decision.
    type Extra;
    /// Convert this thing to the item ID of a named template type parameter.
    fn as_template_param(
        &self,
        ctx: &BindgenContext,
        extra: &Self::Extra,
    ) -> Option<TypeId>;
    /// Is this a named template type parameter?
    ///
    /// Default implementation: true iff `as_template_param` returns `Some`.
    fn is_template_param(
        &self,
        ctx: &BindgenContext,
        extra: &Self::Extra,
    ) -> bool {
        self.as_template_param(ctx, extra).is_some()
    }
}
/// A concrete instantiation of a generic template, e.g. `List<int>` as an
/// instantiation of `List<T>`.
#[derive(Clone, Debug)]
pub(crate) struct TemplateInstantiation {
    /// The template definition which this is instantiating.
    definition: TypeId,
    /// The concrete template arguments, which will be substituted in the
    /// definition for the generic template parameters.
    args: Vec<TypeId>,
}
impl TemplateInstantiation {
    /// Construct a new template instantiation from the given parts.
    pub(crate) fn new<I>(definition: TypeId, args: I) -> TemplateInstantiation
    where
        I: IntoIterator<Item = TypeId>,
    {
        TemplateInstantiation {
            definition,
            args: args.into_iter().collect(),
        }
    }
    /// Get the template definition for this instantiation.
    pub(crate) fn template_definition(&self) -> TypeId {
        self.definition
    }
    /// Get the concrete template arguments used in this instantiation.
    pub(crate) fn template_arguments(&self) -> &[TypeId] {
        &self.args[..]
    }
    /// Parse a `TemplateInstantiation` from a clang `Type`.
    ///
    /// Returns `None` when no template definition can be found for the
    /// instantiation (warning unless the declaration is a builtin).
    pub(crate) fn from_ty(
        ty: &clang::Type,
        ctx: &mut BindgenContext,
    ) -> Option<TemplateInstantiation> {
        use clang_sys::*;
        // When the canonical type exposes more template arguments than the
        // sugared type (as seen with defaulted arguments — presumably; TODO
        // confirm), extend the sugared list with the canonical tail.
        let template_args = ty.template_args().map_or(vec![], |args| match ty
            .canonical_type()
            .template_args()
        {
            Some(canonical_args) => {
                let arg_count = args.len();
                args.chain(canonical_args.skip(arg_count))
                    .filter(|t| t.kind() != CXType_Invalid)
                    .map(|t| {
                        Item::from_ty_or_ref(t, t.declaration(), None, ctx)
                    })
                    .collect()
            }
            None => args
                .filter(|t| t.kind() != CXType_Invalid)
                .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx))
                .collect(),
        });
        let declaration = ty.declaration();
        // Find the definition being instantiated: alias templates are their
        // own definition; otherwise try the specialized declaration, and as
        // a last resort search the AST for a TemplateRef.
        let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl
        {
            Some(declaration)
        } else {
            declaration.specialized().or_else(|| {
                let mut template_ref = None;
                ty.declaration().visit(|child| {
                    if child.kind() == CXCursor_TemplateRef {
                        template_ref = Some(child);
                        return CXVisit_Break;
                    }
                    // Instantiations of template aliases might have the
                    // TemplateRef to the template alias definition arbitrarily
                    // deep, so we need to recurse here and not only visit
                    // direct children.
                    CXChildVisit_Recurse
                });
                template_ref.and_then(|cur| cur.referenced())
            })
        };
        let Some(definition) = definition else {
            if !ty.declaration().is_builtin() {
                warn!(
                    "Could not find template definition for template \
                     instantiation"
                );
            }
            return None;
        };
        let template_definition =
            Item::from_ty_or_ref(definition.cur_type(), definition, None, ctx);
        Some(TemplateInstantiation::new(
            template_definition,
            template_args,
        ))
    }
}
impl IsOpaque for TemplateInstantiation {
    type Extra = Item;

    /// An instantiation is opaque when its template definition is opaque, or
    /// when the rendered `Name<Args…>` path has been marked opaque by name.
    fn is_opaque(&self, ctx: &BindgenContext, item: &Item) -> bool {
        if self.template_definition().is_opaque(ctx, &()) {
            return true;
        }

        // TODO(#774): This doesn't properly handle opaque instantiations
        // where an argument is itself an instantiation, because
        // `canonical_name` does not insert the template arguments into the
        // name: nested template arguments render as "Foo" instead of
        // "Foo<int>". The fully correct fix is to make
        // `canonical_{name,path}` include template arguments properly.
        let rendered_args = self
            .template_arguments()
            .iter()
            .map(|arg| {
                ctx.resolve_item(*arg).path_for_allowlisting(ctx)[1..]
                    .join("::")
            })
            .collect::<Vec<_>>()
            .join(", ");

        let mut path = item.path_for_allowlisting(ctx).clone();
        let last = path.last_mut().unwrap();
        last.push('<');
        last.push_str(&rendered_args);
        last.push('>');

        ctx.opaque_by_name(&path)
    }
}
impl Trace for TemplateInstantiation {
    type Extra = ();

    /// Emit one `TemplateDeclaration` edge to the definition being
    /// instantiated, then one `TemplateArgument` edge per concrete argument.
    fn trace<T>(&self, _ctx: &BindgenContext, tracer: &mut T, _: &())
    where
        T: Tracer,
    {
        tracer.visit_kind(
            self.definition.into(),
            EdgeKind::TemplateDeclaration,
        );
        self.template_arguments().iter().for_each(|arg| {
            tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument);
        });
    }
}

478
vendor/bindgen/ir/traversal.rs vendored Normal file
View File

@@ -0,0 +1,478 @@
//! Traversal of the graph of IR items and types.
use super::context::{BindgenContext, ItemId};
use super::item::ItemSet;
use std::collections::{BTreeMap, VecDeque};
/// An outgoing edge in the IR graph is a reference from some item to another
/// item:
///
///     from --> to
///
/// The `from` is left implicit: it is the concrete `Trace` implementer which
/// yielded this outgoing edge.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct Edge {
    /// The item this edge points at.
    to: ItemId,
    /// What kind of reference this edge represents.
    kind: EdgeKind,
}
impl Edge {
/// Construct a new edge whose referent is `to` and is of the given `kind`.
pub(crate) fn new(to: ItemId, kind: EdgeKind) -> Edge {
Edge { to, kind }
}
}
impl From<Edge> for ItemId {
    /// An edge converts to its referent, so edge-yielding iterators can be
    /// consumed as plain `ItemId`s.
    fn from(val: Edge) -> Self {
        val.to
    }
}
/// The kind of edge reference. This is useful when we wish to only consider
/// certain kinds of edges for a particular traversal or analysis.
#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) enum EdgeKind {
    /// A generic, catch-all edge.
    Generic,
    /// An edge from a template declaration, to the definition of a named type
    /// parameter. For example, the edge from `Foo<T>` to `T` in the following
    /// snippet:
    ///
    /// ```C++
    /// template<typename T>
    /// class Foo { };
    /// ```
    TemplateParameterDefinition,
    /// An edge from a template instantiation to the template declaration that
    /// is being instantiated. For example, the edge from `Foo<int>` to
    /// `Foo<T>`:
    ///
    /// ```C++
    /// template<typename T>
    /// class Foo { };
    ///
    /// using Bar = Foo<int>;
    /// ```
    TemplateDeclaration,
    /// An edge from a template instantiation to its template argument. For
    /// example, `Foo<Bar>` to `Bar`:
    ///
    /// ```C++
    /// template<typename T>
    /// class Foo { };
    ///
    /// class Bar { };
    ///
    /// using FooBar = Foo<Bar>;
    /// ```
    TemplateArgument,
    /// An edge from a compound type to one of its base member types. For
    /// example, the edge from `Bar` to `Foo`:
    ///
    /// ```C++
    /// class Foo { };
    ///
    /// class Bar : public Foo { };
    /// ```
    BaseMember,
    /// An edge from a compound type to the types of one of its fields. For
    /// example, the edge from `Foo` to `int`:
    ///
    /// ```C++
    /// class Foo {
    ///     int x;
    /// };
    /// ```
    Field,
    /// An edge from a class or struct type to an inner type member. For
    /// example, the edge from `Foo` to `Foo::Bar` here:
    ///
    /// ```C++
    /// class Foo {
    ///     struct Bar { };
    /// };
    /// ```
    InnerType,
    /// An edge from a class or struct type to an inner static variable. For
    /// example, the edge from `Foo` to `Foo::BAR` here:
    ///
    /// ```C++
    /// class Foo {
    ///     static const char* BAR;
    /// };
    /// ```
    InnerVar,
    /// An edge from a class or struct type to one of its method functions. For
    /// example, the edge from `Foo` to `Foo::bar`:
    ///
    /// ```C++
    /// class Foo {
    ///     bool bar(int x, int y);
    /// };
    /// ```
    Method,
    /// An edge from a class or struct type to one of its constructor
    /// functions. For example, the edge from `Foo` to `Foo::Foo(int x, int y)`:
    ///
    /// ```C++
    /// class Foo {
    ///     int my_x;
    ///     int my_y;
    ///
    ///   public:
    ///     Foo(int x, int y);
    /// };
    /// ```
    Constructor,
    /// An edge from a class or struct type to its destructor function. For
    /// example, the edge from `Doggo` to `Doggo::~Doggo()`:
    ///
    /// ```C++
    /// struct Doggo {
    ///     char* wow;
    ///
    ///   public:
    ///     ~Doggo();
    /// };
    /// ```
    Destructor,
    /// An edge from a function declaration to its return type. For example, the
    /// edge from `foo` to `int`:
    ///
    /// ```C++
    /// int foo(char* string);
    /// ```
    FunctionReturn,
    /// An edge from a function declaration to one of its parameter types. For
    /// example, the edge from `foo` to `char*`:
    ///
    /// ```C++
    /// int foo(char* string);
    /// ```
    FunctionParameter,
    /// An edge from a static variable to its type. For example, the edge from
    /// `FOO` to `const char*`:
    ///
    /// ```C++
    /// static const char* FOO;
    /// ```
    VarType,
    /// An edge from a non-templated alias or typedef to the referenced type.
    TypeReference,
}
/// A predicate to allow visiting only sub-sets of the whole IR graph by
/// excluding certain edges from being followed by the traversal.
///
/// The predicate must return true if the traversal should follow this edge
/// and visit everything that is reachable through it.
///
/// This is a plain function pointer, so predicates are `Copy` and require no
/// allocation or generic parameter on the traversal.
pub(crate) type TraversalPredicate =
    for<'a> fn(&'a BindgenContext, Edge) -> bool;
/// A `TraversalPredicate` implementation that follows all edges, and therefore
/// traversals using this predicate will see the whole IR graph reachable from
/// the traversal's roots.
pub(crate) fn all_edges(_: &BindgenContext, _: Edge) -> bool {
    // Unconditionally follow every edge.
    true
}
/// A `TraversalPredicate` implementation that follows only
/// `EdgeKind::InnerType` edges, so traversals using it visit just the roots
/// and their inner types. This is used in no-recursive-allowlist mode, where
/// inner types such as anonymous structs/unions still need to be processed.
pub(crate) fn only_inner_type_edges(_: &BindgenContext, edge: Edge) -> bool {
    matches!(edge.kind, EdgeKind::InnerType)
}
/// A `TraversalPredicate` implementation that only follows edges to items that
/// are enabled for code generation. This lets us skip considering items for
/// which are not reachable from code generation.
pub(crate) fn codegen_edges(ctx: &BindgenContext, edge: Edge) -> bool {
    let cc = &ctx.options().codegen_config;
    match edge.kind {
        // Generic edges may point at any item kind, so the referent must be
        // resolved and asked directly.
        EdgeKind::Generic => {
            ctx.resolve_item(edge.to).is_enabled_for_codegen(ctx)
        }
        // We statically know the kind of item that non-generic edges can point
        // to, so we don't need to actually resolve the item and check
        // `Item::is_enabled_for_codegen`.
        EdgeKind::TemplateParameterDefinition |
        EdgeKind::TemplateArgument |
        EdgeKind::TemplateDeclaration |
        EdgeKind::BaseMember |
        EdgeKind::Field |
        EdgeKind::InnerType |
        EdgeKind::FunctionReturn |
        EdgeKind::FunctionParameter |
        EdgeKind::VarType |
        EdgeKind::TypeReference => cc.types(),
        EdgeKind::InnerVar => cc.vars(),
        EdgeKind::Method => cc.methods(),
        EdgeKind::Constructor => cc.constructors(),
        EdgeKind::Destructor => cc.destructors(),
    }
}
/// The storage for the set of items that have been seen (although their
/// outgoing edges might not have been fully traversed yet) in an active
/// traversal.
pub(crate) trait TraversalStorage<'ctx> {
    /// Construct a new instance of this `TraversalStorage`, for a new traversal.
    fn new(ctx: &'ctx BindgenContext) -> Self;
    /// Add the given item to the storage. If the item has never been seen
    /// before, return `true`. Otherwise, return `false`.
    ///
    /// The `from` item is the item from which we discovered this item, or is
    /// `None` if this item is a root.
    fn add(&mut self, from: Option<ItemId>, item: ItemId) -> bool;
}
impl<'ctx> TraversalStorage<'ctx> for ItemSet {
    fn new(_: &'ctx BindgenContext) -> Self {
        ItemSet::new()
    }
    // `insert` returns true iff the item was not already present, which is
    // exactly `add`'s "newly seen" contract. The `from` predecessor is not
    // tracked by this storage.
    fn add(&mut self, _: Option<ItemId>, item: ItemId) -> bool {
        self.insert(item)
    }
}
/// A `TraversalStorage` implementation that keeps track of how we first reached
/// each item. This is useful for providing debug assertions with meaningful
/// diagnostic messages about dangling items.
///
/// The map takes each seen item to the item it was first discovered from;
/// roots map to themselves.
#[derive(Debug)]
pub(crate) struct Paths<'ctx>(BTreeMap<ItemId, ItemId>, &'ctx BindgenContext);
impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> {
    fn new(ctx: &'ctx BindgenContext) -> Self {
        Paths(BTreeMap::new(), ctx)
    }
    fn add(&mut self, from: Option<ItemId>, item: ItemId) -> bool {
        // Roots record themselves as their own predecessor; that self-loop
        // terminates the predecessor walk below.
        let newly_discovered =
            self.0.insert(item, from.unwrap_or(item)).is_none();
        // Diagnostic aid: if this id doesn't resolve to an item, reconstruct
        // the discovery path that led to the dangling reference and panic
        // with it.
        if self.1.resolve_item_fallible(item).is_none() {
            let mut path = vec![];
            let mut current = item;
            loop {
                let predecessor = *self.0.get(&current).expect(
                    "We know we found this item id, so it must have a \
                     predecessor",
                );
                if predecessor == current {
                    break;
                }
                path.push(predecessor);
                current = predecessor;
            }
            path.reverse();
            panic!(
                "Found reference to dangling id = {item:?}\nvia path = {path:?}"
            );
        }
        newly_discovered
    }
}
/// The queue of seen-but-not-yet-traversed items.
///
/// Using a FIFO queue with a traversal will yield a breadth-first traversal,
/// while using a LIFO queue will result in a depth-first traversal of the IR
/// graph.
pub(crate) trait TraversalQueue: Default {
    /// Add a newly discovered item to the queue.
    fn push(&mut self, item: ItemId);
    /// Pop the next item to traverse, if any.
    fn next(&mut self) -> Option<ItemId>;
}
impl TraversalQueue for Vec<ItemId> {
    fn push(&mut self, item: ItemId) {
        // Inherent `Vec::push`, written explicitly so this doesn't read as a
        // recursive call into the trait method of the same name.
        Vec::push(self, item);
    }

    fn next(&mut self) -> Option<ItemId> {
        // LIFO: popping from the back yields a depth-first traversal.
        self.pop()
    }
}
impl TraversalQueue for VecDeque<ItemId> {
    fn push(&mut self, item: ItemId) {
        VecDeque::push_back(self, item);
    }

    fn next(&mut self) -> Option<ItemId> {
        // FIFO: popping from the front yields a breadth-first traversal.
        self.pop_front()
    }
}
/// Something that can receive edges from a `Trace` implementation.
pub(crate) trait Tracer {
    /// Note an edge between items. Called from within a `Trace` implementation.
    fn visit_kind(&mut self, item: ItemId, kind: EdgeKind);
    /// A synonym for `tracer.visit_kind(item, EdgeKind::Generic)`.
    fn visit(&mut self, item: ItemId) {
        self.visit_kind(item, EdgeKind::Generic);
    }
}
impl<F> Tracer for F
where
    F: FnMut(ItemId, EdgeKind),
{
    /// Any matching closure is a tracer: forward each edge straight to it.
    fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
        self(item, kind);
    }
}
/// Trace all of the outgoing edges to other items. Implementations should call
/// one of `tracer.visit(edge)` or `tracer.visit_kind(edge, EdgeKind::Whatever)`
/// for each of their outgoing edges.
pub(crate) trait Trace {
    /// If a particular type needs extra information beyond what it has in
    /// `self` and `context` to find its referenced items, its implementation
    /// can define this associated type, forcing callers to pass the needed
    /// information through.
    type Extra;
    /// Trace all of this item's outgoing edges to other items.
    fn trace<T>(
        &self,
        context: &BindgenContext,
        tracer: &mut T,
        extra: &Self::Extra,
    ) where
        T: Tracer;
}
/// A graph traversal of the transitive closure of references between items.
///
/// See `BindgenContext::allowlisted_items` for more information.
pub(crate) struct ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    ctx: &'ctx BindgenContext,
    /// The set of items we have seen thus far in this traversal.
    seen: Storage,
    /// The set of items that we have seen, but have yet to traverse.
    queue: Queue,
    /// The predicate that determines which edges this traversal will follow.
    predicate: TraversalPredicate,
    /// The item we are currently traversing; passed to the storage as the
    /// `from` predecessor of any newly discovered items.
    currently_traversing: Option<ItemId>,
}
impl<'ctx, Storage, Queue> ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    /// Begin a new traversal, starting from the given roots.
    ///
    /// Every root is marked as seen (with no predecessor) and queued before
    /// the traversal yields anything.
    pub(crate) fn new<R>(
        ctx: &'ctx BindgenContext,
        roots: R,
        predicate: TraversalPredicate,
    ) -> ItemTraversal<'ctx, Storage, Queue>
    where
        R: IntoIterator<Item = ItemId>,
    {
        let mut seen = Storage::new(ctx);
        let mut queue = Queue::default();

        roots.into_iter().for_each(|root| {
            seen.add(None, root);
            queue.push(root);
        });

        ItemTraversal {
            ctx,
            seen,
            queue,
            predicate,
            currently_traversing: None,
        }
    }
}
impl<'ctx, Storage, Queue> Tracer for ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    /// Consider an edge discovered while tracing the current item: skip it
    /// when the predicate rejects it, and queue the referent the first time
    /// it is seen.
    fn visit_kind(&mut self, item: ItemId, kind: EdgeKind) {
        if !(self.predicate)(self.ctx, Edge::new(item, kind)) {
            return;
        }
        // `add` reports whether this is the first sighting; only newly
        // discovered items get queued for traversal of their own edges.
        if self.seen.add(self.currently_traversing, item) {
            self.queue.push(item);
        }
    }
}
impl<'ctx, Storage, Queue> Iterator for ItemTraversal<'ctx, Storage, Queue>
where
    Storage: TraversalStorage<'ctx>,
    Queue: TraversalQueue,
{
    type Item = ItemId;
    fn next(&mut self) -> Option<Self::Item> {
        let id = self.queue.next()?;
        // Re-adding a queued item must report "already seen"; this add
        // exists to feed the sanity check below.
        let newly_discovered = self.seen.add(None, id);
        debug_assert!(
            !newly_discovered,
            "should have already seen anything we get out of our queue"
        );
        debug_assert!(
            self.ctx.resolve_item_fallible(id).is_some(),
            "should only get IDs of actual items in our context during traversal"
        );
        // Trace the item's outgoing edges with `self` as the tracer, so new
        // referents land in our queue; record which item discovered them.
        self.currently_traversing = Some(id);
        id.trace(self.ctx, self, &());
        self.currently_traversing = None;
        Some(id)
    }
}
/// An iterator to find any dangling items.
///
/// Uses the `Paths` storage (which panics with a discovery path on a dangling
/// id) and a FIFO queue, i.e. a breadth-first traversal.
///
/// See `BindgenContext::assert_no_dangling_item_traversal` for more
/// information.
pub(crate) type AssertNoDanglingItemsTraversal<'ctx> =
    ItemTraversal<'ctx, Paths<'ctx>, VecDeque<ItemId>>;

1256
vendor/bindgen/ir/ty.rs vendored Normal file

File diff suppressed because it is too large Load Diff

523
vendor/bindgen/ir/var.rs vendored Normal file
View File

@@ -0,0 +1,523 @@
//! Intermediate representation of variables.
use super::super::codegen::MacroTypeVariation;
use super::context::{BindgenContext, TypeId};
use super::dot::DotAttributes;
use super::function::cursor_mangling;
use super::int::IntKind;
use super::item::Item;
use super::ty::{FloatKind, TypeKind};
use crate::callbacks::{ItemInfo, ItemKind, MacroParsingBehavior};
use crate::clang;
use crate::clang::ClangToken;
use crate::parse::{ClangSubItemParser, ParseError, ParseResult};
use std::io;
use std::num::Wrapping;
/// The type for a constant variable.
#[derive(Debug)]
pub(crate) enum VarType {
    /// A boolean.
    Bool(bool),
    /// An integer.
    Int(i64),
    /// A floating point number.
    Float(f64),
    /// A character, stored as its byte value.
    Char(u8),
    /// A string, not necessarily well-formed utf-8, hence the raw bytes.
    String(Vec<u8>),
}
/// A `Var` is our intermediate representation of a variable.
#[derive(Debug)]
pub(crate) struct Var {
    /// The name of the variable.
    name: String,
    /// The mangled name of the variable.
    mangled_name: Option<String>,
    /// The link name of the variable, as overridden by the
    /// `generated_link_name_override` parse callback.
    link_name: Option<String>,
    /// The type of the variable.
    ty: TypeId,
    /// The value of the variable, that needs to be suitable for `ty`.
    val: Option<VarType>,
    /// Whether this variable is const.
    is_const: bool,
}
impl Var {
    /// Construct a new `Var`.
    ///
    /// Panics if `name` is empty; callers are expected to have filtered
    /// nameless declarations out already.
    pub(crate) fn new(
        name: String,
        mangled_name: Option<String>,
        link_name: Option<String>,
        ty: TypeId,
        val: Option<VarType>,
        is_const: bool,
    ) -> Var {
        assert!(!name.is_empty());
        Var {
            name,
            mangled_name,
            link_name,
            ty,
            val,
            is_const,
        }
    }
    /// Is this variable `const` qualified?
    pub(crate) fn is_const(&self) -> bool {
        self.is_const
    }
    /// The value of this constant variable, if any.
    pub(crate) fn val(&self) -> Option<&VarType> {
        self.val.as_ref()
    }
    /// Get this variable's type.
    pub(crate) fn ty(&self) -> TypeId {
        self.ty
    }
    /// Get this variable's name.
    pub(crate) fn name(&self) -> &str {
        &self.name
    }
    /// Get this variable's mangled name.
    pub(crate) fn mangled_name(&self) -> Option<&str> {
        self.mangled_name.as_deref()
    }
    /// Get this variable's link name.
    // NOTE(review): `pub` here while every sibling is `pub(crate)` — looks
    // unintentional (effectively crate-visible anyway since `Var` is
    // `pub(crate)`); confirm before tightening.
    pub fn link_name(&self) -> Option<&str> {
        self.link_name.as_deref()
    }
}
impl DotAttributes for Var {
    /// Write this variable's attributes as Graphviz HTML table rows: one row
    /// for constness (only when const) and one for the mangled name (only
    /// when present).
    fn dot_attributes<W>(
        &self,
        _ctx: &BindgenContext,
        out: &mut W,
    ) -> io::Result<()>
    where
        W: io::Write,
    {
        if self.is_const {
            writeln!(out, "<tr><td>const</td><td>true</td></tr>")?;
        }
        if let Some(mangled) = self.mangled_name.as_ref() {
            writeln!(out, "<tr><td>mangled name</td><td>{mangled}</td></tr>")?;
        }
        Ok(())
    }
}
/// Pick the `IntKind` for a macro constant with the given value, honoring the
/// `default_macro_constant_type` and `fit_macro_constants` options.
fn default_macro_constant_type(ctx: &BindgenContext, value: i64) -> IntKind {
    // Negative values, or an explicit "signed" default, take the signed path.
    if value < 0 ||
        ctx.options().default_macro_constant_type ==
            MacroTypeVariation::Signed
    {
        // Narrowest signed type that fits; widths below i32 are only used
        // when `fit_macro_constants` is enabled.
        if value < i64::from(i32::MIN) || value > i64::from(i32::MAX) {
            IntKind::I64
        } else if !ctx.options().fit_macro_constants ||
            value < i64::from(i16::MIN) ||
            value > i64::from(i16::MAX)
        {
            IntKind::I32
        } else if value < i64::from(i8::MIN) || value > i64::from(i8::MAX) {
            // Fits in i16 but not in i8.
            IntKind::I16
        } else {
            IntKind::I8
        }
    // Otherwise, the narrowest unsigned type that fits, with the same
    // `fit_macro_constants` gate for widths below u32.
    } else if value > i64::from(u32::MAX) {
        IntKind::U64
    } else if !ctx.options().fit_macro_constants || value > i64::from(u16::MAX)
    {
        IntKind::U32
    } else if value > i64::from(u8::MAX) {
        IntKind::U16
    } else {
        IntKind::U8
    }
}
/// Parses tokens from a `CXCursor_MacroDefinition` pointing into a
/// function-like macro, and calls the `func_macro` callback with the
/// `name(params…)` part and the replacement tokens.
fn handle_function_macro(
    cursor: &clang::Cursor,
    callbacks: &dyn crate::callbacks::ParseCallbacks,
) {
    let tokens: Vec<_> = cursor.tokens().iter().collect();

    // Find the ')' closing the parameter list; everything up to and
    // including it is the macro's left-hand side.
    let closes_params = |t: &ClangToken| {
        // Test cheap token kind before comparing exact spellings.
        t.kind == clang_sys::CXToken_Punctuation && t.spelling() == b")"
    };
    let Some(boundary) = tokens.iter().position(closes_params) else {
        return;
    };

    // Add 1, to convert index to length.
    let (lhs, rhs) = tokens.split_at(boundary + 1);
    let left = lhs
        .iter()
        .map(ClangToken::spelling)
        .collect::<Vec<_>>()
        .concat();
    if let Ok(left) = String::from_utf8(left) {
        let right: Vec<_> = rhs.iter().map(ClangToken::spelling).collect();
        callbacks.func_macro(&left, &right);
    }
}
impl ClangSubItemParser for Var {
    /// Parse a `Var` from a clang cursor: either a `#define` macro constant
    /// or a `VarDecl`. Returns `ParseError::Continue` for anything we skip.
    fn parse(
        cursor: clang::Cursor,
        ctx: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError> {
        use cexpr::expr::EvalResult;
        use cexpr::literal::CChar;
        use clang_sys::*;
        match cursor.kind() {
            CXCursor_MacroDefinition => {
                // Give every parse callback a chance to ignore the macro or
                // to consume it as a function-like macro.
                for callbacks in &ctx.options().parse_callbacks {
                    match callbacks.will_parse_macro(&cursor.spelling()) {
                        MacroParsingBehavior::Ignore => {
                            return Err(ParseError::Continue);
                        }
                        MacroParsingBehavior::Default => {}
                    }
                    if cursor.is_macro_function_like() {
                        handle_function_macro(&cursor, callbacks.as_ref());
                        // We handled the macro, skip macro processing below.
                        return Err(ParseError::Continue);
                    }
                }
                let value = parse_macro(ctx, &cursor);
                let Some((id, value)) = value else {
                    return Err(ParseError::Continue);
                };
                assert!(!id.is_empty(), "Empty macro name?");
                let previously_defined = ctx.parsed_macro(&id);
                // NB: It's important to "note" the macro even if the result is
                // not an integer, otherwise we might lose other kinds of
                // derived macros.
                ctx.note_parsed_macro(id.clone(), value.clone());
                if previously_defined {
                    let name = String::from_utf8(id).unwrap();
                    duplicated_macro_diagnostic(&name, cursor.location(), ctx);
                    return Err(ParseError::Continue);
                }
                // NOTE: Unwrapping, here and above, is safe, because the
                // identifier of a token comes straight from clang, and we
                // enforce utf8 there, so we should have already panicked at
                // this point.
                let name = String::from_utf8(id).unwrap();
                // Map the evaluated macro value to a builtin type plus the
                // `VarType` payload carrying the literal.
                let (type_kind, val) = match value {
                    EvalResult::Invalid => return Err(ParseError::Continue),
                    EvalResult::Float(f) => {
                        (TypeKind::Float(FloatKind::Double), VarType::Float(f))
                    }
                    EvalResult::Char(c) => {
                        let c = match c {
                            CChar::Char(c) => {
                                assert_eq!(c.len_utf8(), 1);
                                c as u8
                            }
                            CChar::Raw(c) => u8::try_from(c).unwrap(),
                        };
                        (TypeKind::Int(IntKind::U8), VarType::Char(c))
                    }
                    EvalResult::Str(val) => {
                        let char_ty = Item::builtin_type(
                            TypeKind::Int(IntKind::U8),
                            true,
                            ctx,
                        );
                        for callbacks in &ctx.options().parse_callbacks {
                            callbacks.str_macro(&name, &val);
                        }
                        (TypeKind::Pointer(char_ty), VarType::String(val))
                    }
                    EvalResult::Int(Wrapping(value)) => {
                        // Callbacks may pick the integer kind; otherwise fall
                        // back to the option-driven default.
                        let kind = ctx
                            .options()
                            .last_callback(|c| c.int_macro(&name, value))
                            .unwrap_or_else(|| {
                                default_macro_constant_type(ctx, value)
                            });
                        (TypeKind::Int(kind), VarType::Int(value))
                    }
                };
                let ty = Item::builtin_type(type_kind, true, ctx);
                Ok(ParseResult::New(
                    Var::new(name, None, None, ty, Some(val), true),
                    Some(cursor),
                ))
            }
            CXCursor_VarDecl => {
                let mut name = cursor.spelling();
                // Externally linked variables may have their generated name
                // overridden by a callback.
                if cursor.linkage() == CXLinkage_External {
                    if let Some(nm) = ctx.options().last_callback(|callbacks| {
                        callbacks.generated_name_override(ItemInfo {
                            name: name.as_str(),
                            kind: ItemKind::Var,
                        })
                    }) {
                        name = nm;
                    }
                }
                // No more changes to name
                let name = name;
                if name.is_empty() {
                    warn!("Empty constant name?");
                    return Err(ParseError::Continue);
                }
                let link_name = ctx.options().last_callback(|callbacks| {
                    callbacks.generated_link_name_override(ItemInfo {
                        name: name.as_str(),
                        kind: ItemKind::Var,
                    })
                });
                let ty = cursor.cur_type();
                // TODO(emilio): do we have to special-case constant arrays in
                // some other places?
                let is_const = ty.is_const() ||
                    ([CXType_ConstantArray, CXType_IncompleteArray]
                        .contains(&ty.kind()) &&
                        ty.elem_type()
                            .is_some_and(|element| element.is_const()));
                let ty = match Item::from_ty(&ty, cursor, None, ctx) {
                    Ok(ty) => ty,
                    Err(e) => {
                        assert!(
                            matches!(ty.kind(), CXType_Auto | CXType_Unexposed),
                            "Couldn't resolve constant type, and it \
                             wasn't an nondeductible auto type or unexposed \
                             type: {ty:?}"
                        );
                        return Err(e);
                    }
                };
                // Note: Ty might not be totally resolved yet, see
                // tests/headers/inner_const.hpp
                //
                // That's fine because in that case we know it's not a literal.
                let canonical_ty = ctx
                    .safe_resolve_type(ty)
                    .and_then(|t| t.safe_canonical_type(ctx));
                let is_integer = canonical_ty.is_some_and(|t| t.is_integer());
                let is_float = canonical_ty.is_some_and(|t| t.is_float());
                // TODO: We could handle `char` more gracefully.
                // TODO: Strings, though the lookup is a bit harder (we need
                // to look at the canonical type of the pointee too, and check
                // is char, u8, or i8 I guess).
                let value = if is_integer {
                    let TypeKind::Int(kind) = *canonical_ty.unwrap().kind()
                    else {
                        unreachable!()
                    };
                    // Prefer clang's evaluation; fall back to scraping the
                    // integer literal when the sign doesn't match.
                    let mut val = cursor.evaluate().and_then(|v| v.as_int());
                    if val.is_none() || !kind.signedness_matches(val.unwrap()) {
                        val = get_integer_literal_from_cursor(&cursor);
                    }
                    val.map(|val| {
                        if kind == IntKind::Bool {
                            VarType::Bool(val != 0)
                        } else {
                            VarType::Int(val)
                        }
                    })
                } else if is_float {
                    cursor
                        .evaluate()
                        .and_then(|v| v.as_double())
                        .map(VarType::Float)
                } else {
                    cursor
                        .evaluate()
                        .and_then(|v| v.as_literal_string())
                        .map(VarType::String)
                };
                let mangling = cursor_mangling(ctx, &cursor);
                let var =
                    Var::new(name, mangling, link_name, ty, value, is_const);
                Ok(ParseResult::New(var, Some(cursor)))
            }
            _ => {
                /* TODO */
                Err(ParseError::Continue)
            }
        }
    }
}
/// Fallback parsing path for `#define`s that the normal `cexpr`-based process
/// cannot handle, using a [`FallbackTranslationUnit`][clang::FallbackTranslationUnit].
///
/// The fallback translation unit is constructed from precompiled headers
/// (generated once for all input headers) plus an empty temporary `.c` file.
/// For every macro evaluation we only build a small `String` with the new
/// file contents and reparse. Both the precompiled headers and keeping the
/// translation unit alive across macro evaluations are optimizations that
/// have significantly improved performance.
fn parse_macro_clang_fallback(
    ctx: &mut BindgenContext,
    cursor: &clang::Cursor,
) -> Option<(Vec<u8>, cexpr::expr::EvalResult)> {
    if !ctx.options().clang_macro_fallback {
        return None;
    }

    let ftu = ctx.try_ensure_fallback_translation_unit()?;
    let name = cursor.spelling();
    // Wrap the macro in a dummy `main` so clang can evaluate it as an
    // expression statement.
    ftu.reparse(&format!("int main() {{ {name}; }}")).ok()?;

    // Walk back down the freshly parsed AST:
    //   root -> FunctionDecl `main` (last child of the root)
    //        -> CompoundStmt (its first child)
    //        -> ParenExpr (first expression inside the compound statement).
    let top_level = ftu.translation_unit().cursor().collect_children();
    let main_decl = top_level.last()?;
    let body = main_decl.collect_children();
    let compound_stmt = body.first()?;
    let exprs = compound_stmt.collect_children();
    let macro_expr = exprs.first()?;

    let value = macro_expr.evaluate()?.as_int()?;
    Some((name.into_bytes(), cexpr::expr::EvalResult::Int(Wrapping(value))))
}
/// Try and parse a macro using all the macros parsed until now.
fn parse_macro(
    ctx: &mut BindgenContext,
    cursor: &clang::Cursor,
) -> Option<(Vec<u8>, cexpr::expr::EvalResult)> {
    use cexpr::expr;

    let mut tokens = cursor.cexpr_tokens();

    // Let user callbacks rewrite the token stream before we try to parse it.
    for callback in &ctx.options().parse_callbacks {
        callback.modify_macro(&cursor.spelling(), &mut tokens);
    }

    // NOTE: keep `parser` in its own binding so its borrow of `ctx` ends
    // before the fallback path below needs `ctx` mutably.
    let parser = expr::IdentifierParser::new(ctx.parsed_macros());
    if let Ok((_, (id, value))) = parser.macro_definition(&tokens) {
        Some((id.into(), value))
    } else {
        // `cexpr` couldn't handle it; try evaluating it with clang instead.
        parse_macro_clang_fallback(ctx, cursor)
    }
}
/// Try to evaluate the tokens under `cursor` as a single integer literal
/// expression, returning its value on success.
fn parse_int_literal_tokens(cursor: &clang::Cursor) -> Option<i64> {
    use cexpr::expr;
    use cexpr::expr::EvalResult;

    // TODO(emilio): We can try to parse other kinds of literals.
    if let Ok((_, EvalResult::Int(Wrapping(value)))) =
        expr::expr(&cursor.cexpr_tokens())
    {
        Some(value)
    } else {
        None
    }
}
/// Search the children of `cursor` (recursing through unexposed expressions)
/// for an integer literal and return its value, stopping at the first match.
fn get_integer_literal_from_cursor(cursor: &clang::Cursor) -> Option<i64> {
    use clang_sys::*;

    let mut result = None;
    cursor.visit(|child| {
        let kind = child.kind();
        if kind == CXCursor_IntegerLiteral || kind == CXCursor_UnaryOperator {
            result = parse_int_literal_tokens(&child);
        } else if kind == CXCursor_UnexposedExpr {
            // Unwrap implicit casts and similar wrappers by recursing.
            result = get_integer_literal_from_cursor(&child);
        }

        // Stop traversing as soon as we have a value.
        match result {
            Some(_) => CXChildVisit_Break,
            None => CXChildVisit_Continue,
        }
    });
    result
}
/// Report that `macro_name` was `#define`d more than once in the input.
///
/// `_location` and `_ctx` are only used by the (currently disabled)
/// experimental diagnostic below, hence the underscore names.
fn duplicated_macro_diagnostic(
    macro_name: &str,
    _location: clang::SourceLocation,
    _ctx: &BindgenContext,
) {
    warn!("Duplicated macro definition: {macro_name}");

    #[cfg(feature = "experimental")]
    // FIXME (pvdrz & amanjeev): This diagnostic message shows way too often to be actually
    // useful. We have to change the logic where this function is called to be able to emit this
    // message only when the duplication is an actual issue.
    //
    // If I understood correctly, `bindgen` ignores all `#undef` directives. Meaning that this:
    // ```c
    // #define FOO 1
    // #undef FOO
    // #define FOO 2
    // ```
    //
    // Will trigger this message even though there's nothing wrong with it.
    #[allow(clippy::overly_complex_bool_expr)]
    // `false &&` keeps the rich diagnostic compiled but disabled; see the
    // FIXME above for why it is not emitted yet.
    if false && _ctx.options().emit_diagnostics {
        use crate::diagnostics::{get_line, Diagnostic, Level, Slice};
        use std::borrow::Cow;

        let mut slice = Slice::default();
        // Fall back to showing just the macro name if we can't read the
        // offending source line below.
        let mut source = Cow::from(macro_name);

        let (file, line, col, _) = _location.location();
        if let Some(filename) = file.name() {
            if let Ok(Some(code)) = get_line(&filename, line) {
                source = code.into();
            }
            slice.with_location(filename, line, col);
        }
        slice.with_source(source);

        Diagnostic::default()
            .with_title("Duplicated macro definition.", Level::Warning)
            .add_slice(slice)
            .add_annotation("This macro had a duplicate.", Level::Note)
            .display();
    }
}

1422
vendor/bindgen/lib.rs vendored Normal file

File diff suppressed because it is too large Load Diff

38
vendor/bindgen/log_stubs.rs vendored Normal file
View File

@@ -0,0 +1,38 @@
#![allow(unused)]

// No-op stand-ins mirroring the `log` crate's macro API. Arguments are still
// evaluated and format strings are still type-checked (via `format_args!`),
// but nothing is printed or recorded. Presumably substituted for the real
// `log` macros when logging support is compiled out — confirm in `lib.rs`.

#[clippy::format_args]
macro_rules! log {
    // With an explicit target: evaluate and discard the target, then delegate.
    (target: $target:expr, $lvl:expr, $($arg:tt)+) => {{
        let _ = $target;
        let _ = log!($lvl, $($arg)+);
    }};
    // Without a target: evaluate the level and the format arguments, discard both.
    ($lvl:expr, $($arg:tt)+) => {{
        let _ = $lvl;
        let _ = format_args!($($arg)+);
    }};
}

// Each per-level macro below forwards to `log!` with a dummy level string.
#[clippy::format_args]
macro_rules! error {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}

// NOTE: unlike its siblings, `warn!` accepts zero format arguments ($(...)* vs $(...)+).
#[clippy::format_args]
macro_rules! warn {
    (target: $target:expr, $($arg:tt)*) => { log!(target: $target, "", $($arg)*) };
    ($($arg:tt)*) => { log!("", $($arg)*) };
}

#[clippy::format_args]
macro_rules! info {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}

#[clippy::format_args]
macro_rules! debug {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}

#[clippy::format_args]
macro_rules! trace {
    (target: $target:expr, $($arg:tt)+) => { log!(target: $target, "", $($arg)+) };
    ($($arg:tt)+) => { log!("", $($arg)+) };
}

52
vendor/bindgen/options/as_args.rs vendored Normal file
View File

@@ -0,0 +1,52 @@
use std::path::PathBuf;
use crate::regex_set::RegexSet;
/// Trait used to turn [`crate::BindgenOptions`] fields into CLI args.
pub(super) trait AsArgs {
    /// If this value warrants it, push `flag` (and any payload derived from
    /// the value) onto `args`.
    fn as_args(&self, args: &mut Vec<String>, flag: &str);
}
/// Push `flag` into `args` whenever the value is `true`.
///
/// Careful: some options, like `--no-layout-tests`, are negations of their
/// fields, so the truth value may be inverted relative to the flag's name.
impl AsArgs for bool {
    fn as_args(&self, args: &mut Vec<String>, flag: &str) {
        if !*self {
            return;
        }
        args.push(flag.to_owned());
    }
}
/// For every item in the `RegexSet`, push `flag` followed by that item into
/// `args`.
impl AsArgs for RegexSet {
    fn as_args(&self, args: &mut Vec<String>, flag: &str) {
        for item in self.get_items() {
            args.push(flag.to_owned());
            args.push(item.clone().into());
        }
    }
}
/// When the `Option` holds a value, push `flag` followed by that value.
impl AsArgs for Option<String> {
    fn as_args(&self, args: &mut Vec<String>, flag: &str) {
        if let Some(string) = self {
            args.push(flag.to_owned());
            args.push(string.clone());
        }
    }
}
/// When the `Option` holds a path, push `flag` followed by the
/// [`std::path::Path::display`] representation of the path.
impl AsArgs for Option<PathBuf> {
    fn as_args(&self, args: &mut Vec<String>, flag: &str) {
        if let Some(path) = self {
            args.push(flag.to_owned());
            args.push(path.display().to_string());
        }
    }
}

1151
vendor/bindgen/options/cli.rs vendored Normal file

File diff suppressed because it is too large Load Diff

43
vendor/bindgen/options/helpers.rs vendored Normal file
View File

@@ -0,0 +1,43 @@
/// Helper macro that appends extra documentation to [`crate::Builder`] methods that support regular
/// expressions in their input.
///
/// Wraps a `pub fn` item, forwarding it unchanged while splicing a standard
/// paragraph about regex support into its doc comments.
macro_rules! regex_option {
    ($(#[$attrs:meta])* pub fn $($tokens:tt)*) => {
        $(#[$attrs])*
        ///
        /// Regular expressions are supported. Check the [regular expression
        /// arguments](./struct.Builder.html#regular-expression-arguments) section and the
        /// [regex](https://docs.rs/regex) crate documentation for further information.
        pub fn $($tokens)*
    };
}
/// Helper macro to set the default value of each option.
///
/// With no argument it expands to `Default::default()`; with an expression it
/// expands to that expression verbatim.
///
/// This macro is an internal implementation detail of the `options` macro and should not be used
/// directly.
macro_rules! default {
    () => {
        Default::default()
    };
    ($expr:expr) => {
        $expr
    };
}
/// Helper macro to set the conversion to CLI arguments for each option.
///
/// A string literal is shorthand for "call `AsArgs::as_args` with this flag";
/// any other expression is used as the conversion function directly.
///
/// This macro is an internal implementation detail of the `options` macro and should not be used
/// directly.
macro_rules! as_args {
    ($flag:literal) => {
        |field, args| AsArgs::as_args(field, args, $flag)
    };
    ($expr:expr) => {
        $expr
    };
}
/// Helper function to ignore an option when converting it into CLI arguments;
/// a no-op with the same shape as the closures produced by `as_args!`.
///
/// This function is only used inside `options` and should not be used in other contexts.
pub(super) fn ignore<T>(_: &T, _: &mut Vec<String>) {}

2286
vendor/bindgen/options/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

41
vendor/bindgen/parse.rs vendored Normal file
View File

@@ -0,0 +1,41 @@
//! Common traits and types related to parsing our IR from Clang cursors.
#![deny(clippy::missing_docs_in_private_items)]
use crate::clang;
use crate::ir::context::{BindgenContext, ItemId};
/// Not so much an error in the traditional sense, but a control flow message
/// when walking over Clang's AST with a cursor.
///
/// Returned in the `Err` position of [`ClangSubItemParser::parse`] results to
/// tell the AST walker how to proceed.
#[derive(Debug)]
pub(crate) enum ParseError {
    /// Recurse down the current AST node's children.
    Recurse,
    /// Continue on to the next sibling AST node, or back up to the parent's
    /// siblings if we've exhausted all of this node's siblings (and so on).
    Continue,
}
/// The result of parsing a Clang AST node.
///
/// Distinguishes freshly parsed items from ones that were already interned in
/// the context.
#[derive(Debug)]
pub(crate) enum ParseResult<T> {
    /// We've already resolved this item before, here is the extant `ItemId` for
    /// it.
    AlreadyResolved(ItemId),
    /// This is a newly parsed item. If the cursor is `Some`, it points to the
    /// AST node where the new `T` was declared.
    New(T, Option<clang::Cursor>),
}
/// An intermediate representation "sub-item" (i.e. one of the types contained
/// inside an `ItemKind` variant) that can be parsed from a Clang cursor.
pub(crate) trait ClangSubItemParser: Sized {
    /// Attempt to parse this type from the given cursor.
    ///
    /// The context is passed by (mutable) reference, which guarantees that
    /// parsed items are held by the context and allows returning
    /// already-existing types.
    fn parse(
        cursor: clang::Cursor,
        context: &mut BindgenContext,
    ) -> Result<ParseResult<Self>, ParseError>;
}

199
vendor/bindgen/regex_set.rs vendored Normal file
View File

@@ -0,0 +1,199 @@
//! A type that represents the union of a set of regular expressions.
#![deny(clippy::missing_docs_in_private_items)]
use regex::RegexSet as RxSet;
use std::cell::Cell;
/// A dynamic set of regular expressions.
#[derive(Clone, Debug, Default)]
pub(crate) struct RegexSet {
    /// The individual patterns, in insertion order.
    items: Vec<Box<str>>,
    /// Whether any of the items in the set was ever matched. The length of this
    /// vector is exactly the length of `items`.
    matched: Vec<Cell<bool>>,
    /// The compiled union of `items`; `None` until a `build*` method is
    /// called, and reset to `None` by `insert`.
    set: Option<RxSet>,
    /// Whether we should record matching items in the `matched` vector or not.
    record_matches: bool,
}
impl RegexSet {
    /// Is this set empty?
    pub(crate) fn is_empty(&self) -> bool {
        self.items.is_empty()
    }

    /// Insert a new regex into this set.
    ///
    /// Invalidates any previously compiled set, so a `build*` method must be
    /// called again before `matches` can succeed.
    pub(crate) fn insert<S>(&mut self, string: S)
    where
        S: AsRef<str>,
    {
        self.items.push(string.as_ref().to_owned().into_boxed_str());
        self.matched.push(Cell::new(false));
        self.set = None;
    }

    /// Returns slice of String from its field 'items'
    pub(crate) fn get_items(&self) -> &[Box<str>] {
        &self.items
    }

    /// Returns an iterator over regexes in the set which didn't match any
    /// strings yet.
    ///
    /// Only meaningful when the set was built with `record_matches`; otherwise
    /// the iterator is empty.
    pub(crate) fn unmatched_items(&self) -> impl Iterator<Item = &str> {
        self.items.iter().enumerate().filter_map(move |(i, item)| {
            if !self.record_matches || self.matched[i].get() {
                return None;
            }
            Some(item.as_ref())
        })
    }

    /// Construct a `RegexSet` from the set of entries we've accumulated.
    ///
    /// Must be called before calling `matches()`, or it will always return
    /// false.
    #[inline]
    #[allow(unused)]
    pub(crate) fn build(&mut self, record_matches: bool) {
        self.build_inner(record_matches, None);
    }

    // NOTE: this method was previously duplicated verbatim under the two
    // complementary gates `all(feature = "__cli", feature = "experimental")`
    // and `all(not(feature = "__cli"), feature = "experimental")`; their union
    // is exactly `feature = "experimental"`, so a single definition suffices.
    #[cfg(feature = "experimental")]
    /// Construct a `RegexSet` from the set of entries we've accumulated and emit diagnostics if the
    /// name of the regex set is passed to it.
    ///
    /// Must be called before calling `matches()`, or it will always return
    /// false.
    #[inline]
    pub(crate) fn build_with_diagnostics(
        &mut self,
        record_matches: bool,
        name: Option<&'static str>,
    ) {
        self.build_inner(record_matches, name);
    }

    /// Shared implementation of `build`/`build_with_diagnostics`: anchor every
    /// item as `^(item)$` and compile the union into a single `RxSet`.
    fn build_inner(
        &mut self,
        record_matches: bool,
        _name: Option<&'static str>,
    ) {
        let items = self.items.iter().map(|item| format!("^({item})$"));
        self.record_matches = record_matches;
        self.set = match RxSet::new(items) {
            Ok(x) => Some(x),
            Err(e) => {
                warn!("Invalid regex in {:?}: {e:?}", self.items);
                #[cfg(feature = "experimental")]
                if let Some(name) = _name {
                    invalid_regex_warning(self, e, name);
                }
                None
            }
        }
    }

    /// Does the given `string` match any of the regexes in this set?
    pub(crate) fn matches<S>(&self, string: S) -> bool
    where
        S: AsRef<str>,
    {
        let s = string.as_ref();
        let Some(ref set) = self.set else {
            return false;
        };

        if !self.record_matches {
            return set.is_match(s);
        }

        let matches = set.matches(s);
        if !matches.matched_any() {
            return false;
        }
        // Remember which individual patterns matched so `unmatched_items` can
        // report the ones that never did.
        for i in &matches {
            self.matched[i].set(true);
        }

        true
    }
}
/// Emit a detailed diagnostic for a regular expression that failed to
/// compile, including the offending pattern and the builder option (`name`)
/// it was passed through. Only compiled with the `experimental` feature.
#[cfg(feature = "experimental")]
fn invalid_regex_warning(
    set: &RegexSet,
    err: regex::Error,
    name: &'static str,
) {
    use crate::diagnostics::{Diagnostic, Level, Slice};

    let mut diagnostic = Diagnostic::default();
    match err {
        regex::Error::Syntax(string) => {
            // Syntax errors from the `regex` crate may arrive as a multi-line
            // block beginning with "regex parse error:", followed by indented
            // source lines and then `error:`/note lines. Split that back into
            // a source slice plus annotations for a nicer report.
            if string.starts_with("regex parse error:\n") {
                let mut source = String::new();

                // Leading space-indented lines are the pattern source (and
                // caret markers); everything after them is message text.
                let mut parsing_source = true;
                for line in string.lines().skip(1) {
                    if parsing_source {
                        if line.starts_with(' ') {
                            source.push_str(line);
                            source.push('\n');
                            continue;
                        }
                        parsing_source = false;
                    }
                    // `error: …` lines become Error annotations (with the
                    // prefix stripped); anything else is reported as Info.
                    let error = "error: ";
                    if line.starts_with(error) {
                        let (_, msg) = line.split_at(error.len());
                        diagnostic.add_annotation(msg.to_owned(), Level::Error);
                    } else {
                        diagnostic.add_annotation(line.to_owned(), Level::Info);
                    }
                }
                let mut slice = Slice::default();
                slice.with_source(source);
                diagnostic.add_slice(slice);

                diagnostic.with_title(
                    "Error while parsing a regular expression.",
                    Level::Warning,
                );
            } else {
                // Unrecognized format: show the message verbatim as the title.
                diagnostic.with_title(string, Level::Warning);
            }
        }
        err => {
            // Non-syntax errors only carry a message; use it as the title.
            let err = err.to_string();
            diagnostic.with_title(err, Level::Warning);
        }
    }

    diagnostic.add_annotation(
        format!("This regular expression was passed via `{name}`."),
        Level::Note,
    );

    // Bare "*" is a common mistake (glob syntax); point users at ".*".
    if set.items.iter().any(|item| item.as_ref() == "*") {
        diagnostic.add_annotation("Wildcard patterns \"*\" are no longer considered valid. Use \".*\" instead.", Level::Help);
    }
    diagnostic.display();
}

52
vendor/bindgen/time.rs vendored Normal file
View File

@@ -0,0 +1,52 @@
use std::io::{self, Write};
use std::time::{Duration, Instant};
/// RAII timer to measure how long phases take. The elapsed time is printed to
/// stderr when the timer is dropped, unless output has been disabled.
#[derive(Debug)]
pub struct Timer<'a> {
    /// Whether to print a message when the timer is dropped.
    output: bool,
    /// Name of the phase being measured.
    name: &'a str,
    /// The instant at which the timer was started.
    start: Instant,
}

impl<'a> Timer<'a> {
    /// Creates a `Timer` with the given name, and starts it. By default,
    /// will print to stderr when it is `drop`'d.
    pub fn new(name: &'a str) -> Self {
        Timer {
            output: true,
            name,
            start: Instant::now(),
        }
    }

    /// Sets whether or not the `Timer` will print a message
    /// when it is dropped.
    pub fn with_output(mut self, output: bool) -> Self {
        self.output = output;
        self
    }

    /// Returns the time elapsed since the timer's creation.
    pub fn elapsed(&self) -> Duration {
        self.start.elapsed()
    }

    /// Prints the elapsed time in milliseconds to stderr, if output is
    /// enabled.
    fn print_elapsed(&mut self) {
        if self.output {
            // Fractional milliseconds since the timer was started.
            let time = self.elapsed().as_secs_f64() * 1e3;
            let stderr = io::stderr();
            // Arbitrary output format, subject to change.
            //
            // Write errors are deliberately ignored: this runs from `drop`,
            // and panicking there (e.g. while already unwinding from another
            // panic) would abort the process over a lost timing line.
            let _ = writeln!(
                stderr.lock(),
                " time: {time:>9.3} ms.\t{}",
                self.name
            );
        }
    }
}

impl Drop for Timer<'_> {
    fn drop(&mut self) {
        self.print_elapsed();
    }
}