Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

55
vendor/wit-bindgen/src/examples.rs vendored Normal file

@@ -0,0 +1,55 @@
//! Examples of output of the [`generate!`] macro.
//!
//! This module is only included in docs.rs documentation and is not present in
//! the actual crate when compiling from crates.io. The purpose of this module
//! is to showcase what the output of the [`generate!`] macro looks like.
//!
//! [`generate!`]: crate::generate
/// An example of generated bindings for top-level imported functions and
/// interfaces into a world.
///
/// The code used to generate this module is:
///
/// ```rust
#[doc = include_str!("./examples/_0_world_imports.rs")]
/// ```
pub mod _0_world_imports;
/// An example of importing interfaces into a world.
///
/// The code used to generate this module is:
///
/// ```rust
#[doc = include_str!("./examples/_1_interface_imports.rs")]
/// ```
pub mod _1_interface_imports;
/// An example of importing resources into a world.
///
/// The code used to generate this module is:
///
/// ```rust
#[doc = include_str!("./examples/_2_imported_resources.rs")]
/// ```
pub mod _2_imported_resources;
/// An example of exporting items from a world and the traits that they
/// generate.
///
/// The code used to generate this module is:
///
/// ```rust
#[doc = include_str!("./examples/_3_world_exports.rs")]
/// ```
pub mod _3_world_exports;
/// An example of exporting resources from a world and the traits that they
/// generate.
///
/// The code used to generate this module is:
///
/// ```rust
#[doc = include_str!("./examples/_4_exported_resources.rs")]
/// ```
pub mod _4_exported_resources;

vendor/wit-bindgen/src/examples/_0_world_imports.rs vendored Normal file

@@ -0,0 +1,17 @@
crate::generate!({
inline: r#"
package example:world-imports;
world with-imports {
/// Fetch a greeting to present.
import greet: func() -> string;
/// Log a message to the host.
import log: func(msg: string);
import my-custom-host: interface {
tick: func();
}
}
"#,
});

vendor/wit-bindgen/src/examples/_1_interface_imports.rs vendored Normal file

@@ -0,0 +1,32 @@
crate::generate!({
inline: r#"
package example:interface-imports;
interface logging {
enum level {
debug,
info,
warn,
error,
}
log: func(level: level, msg: string);
}
world with-imports {
// Local interfaces can be imported.
import logging;
// Dependencies can also be referenced, and they're loaded from the
// `path` directive specified below.
import wasi:cli/environment@0.2.0;
}
"#,
path: "wasi-cli@0.2.0.wasm",
// specify that this interface dependency should be generated as well.
with: {
"wasi:cli/environment@0.2.0": generate,
}
});

vendor/wit-bindgen/src/examples/_2_imported_resources.rs vendored Normal file

@@ -0,0 +1,22 @@
crate::generate!({
inline: r#"
package example:imported-resources;
world import-some-resources {
enum level {
debug,
info,
warn,
error,
}
resource logger {
constructor(max-level: level);
get-max-level: func() -> level;
set-max-level: func(level: level);
log: func(level: level, msg: string);
}
}
"#,
});

vendor/wit-bindgen/src/examples/_3_world_exports.rs vendored Normal file

@@ -0,0 +1,47 @@
crate::generate!({
inline: r#"
package example:world-exports;
world with-exports {
import log: func(msg: string);
export run: func();
/// An example of exporting an interface inline naming it directly.
export environment: interface {
get: func(var: string) -> string;
set: func(var: string, val: string);
}
/// An example of exporting an interface defined in this file.
export units;
/// An example of exporting an interface defined in a dependency.
export wasi:random/insecure@0.2.0;
}
interface units {
use wasi:clocks/monotonic-clock@0.2.0.{duration};
/// Renders the number of bytes as a human readable string.
bytes-to-string: func(bytes: u64) -> string;
/// Renders the provided duration as a human readable string.
duration-to-string: func(dur: duration) -> string;
}
"#,
// provided here to get the export macro rendered in documentation, not
// required for external use.
pub_export_macro: true,
// provided to specify the path to `wasi:*` dependencies referenced above.
path: "wasi-cli@0.2.0.wasm",
// specify that these interface dependencies should be generated as well.
with: {
"wasi:random/insecure@0.2.0": generate,
"wasi:clocks/monotonic-clock@0.2.0": generate,
"wasi:io/poll@0.2.0": generate
}
});

vendor/wit-bindgen/src/examples/_4_exported_resources.rs vendored Normal file

@@ -0,0 +1,26 @@
crate::generate!({
inline: r#"
package example:exported-resources;
world import-some-resources {
export logging;
}
interface logging {
enum level {
debug,
info,
warn,
error,
}
resource logger {
constructor(max-level: level);
get-max-level: func() -> level;
set-max-level: func(level: level);
log: func(level: level, msg: string);
}
}
"#,
});

881
vendor/wit-bindgen/src/lib.rs vendored Normal file

@@ -0,0 +1,881 @@
//! Bindings generation support for Rust with the Component Model.
//!
//! This crate is a bindings generator for [WIT] and the [Component Model].
//! Users are likely interested in the [`generate!`] macro which actually
//! generates bindings. Otherwise this crate provides any runtime support
//! necessary for the macro-generated code.
//!
//! [WIT]: https://component-model.bytecodealliance.org/design/wit.html
//! [Component Model]: https://component-model.bytecodealliance.org/
#![no_std]
#[cfg(not(feature = "rustc-dep-of-std"))]
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
/// Generate bindings for an input WIT document.
///
/// This macro is the bread-and-butter of the `wit-bindgen` crate. The macro
/// here will parse [WIT] as input and generate Rust bindings to work with the
/// `world` that's specified in the [WIT]. For a primer on WIT see [this
/// documentation][WIT] and for a primer on worlds see [here][worlds].
///
/// [WIT]: https://component-model.bytecodealliance.org/design/wit.html
/// [worlds]: https://component-model.bytecodealliance.org/design/worlds.html
///
/// This macro takes as input a [WIT package] as well as a [`world`][worlds]
/// within that package. It will then generate a Rust function for all `import`s
/// into the world. If there are any `export`s then a Rust `trait` will be
/// generated for you to implement. The macro additionally takes a number of
/// configuration parameters documented below as well.
///
/// Basic invocation of the macro can look like:
///
/// ```
/// use wit_bindgen::generate;
/// # macro_rules! generate { ($($t:tt)*) => () }
///
/// generate!();
/// ```
///
/// This will parse a WIT package in the `wit` folder adjacent to your project's
/// `Cargo.toml` file. Within this WIT package there must be precisely one
/// `world` and that world will be the one that has bindings generated for it.
/// All other options remain at their default values (more on this below).
///
/// If your WIT package has more than one `world`, or if you want to select a
/// world from the dependencies, you can specify a world explicitly:
///
/// ```
/// use wit_bindgen::generate;
/// # macro_rules! generate { ($($t:tt)*) => () }
///
/// generate!("my-world");
/// generate!("wasi:cli/imports");
/// ```
///
/// This form of the macro takes a single string as an argument, a "world
/// specifier" which selects the world being generated. As a single string,
/// such as `"my-world"`, this selects the world named `my-world` in the
/// package being parsed from the `wit` folder. The longer form specification
/// `"wasi:cli/imports"` indicates that the `wasi:cli` package, located in the
/// `wit/deps` folder, has a world named `imports`, and bindings will be
/// generated for that world.
///
/// If your WIT package is located in a different directory than one called
/// `wit` then it can be specified with the `in` keyword:
///
/// ```
/// use wit_bindgen::generate;
/// # macro_rules! generate { ($($t:tt)*) => () }
///
/// generate!(in "./my/other/path/to/wit");
/// generate!("a-world" in "../path/to/wit");
/// ```
///
/// The full-form of the macro, however, takes a braced structure which is a
/// "bag of options":
///
/// ```
/// use wit_bindgen::generate;
/// # macro_rules! generate { ($($t:tt)*) => () }
///
/// generate!({
/// world: "my-world",
/// path: "../path/to/wit",
/// // ...
/// });
/// ```
///
/// For documentation on each option, see below.
///
/// ## Exploring generated bindings
///
/// Once bindings have been generated they can be explored via a number of means
/// to see what was generated:
///
/// * Using `cargo doc` should render all of the generated bindings in addition
/// to the original comments in the WIT format itself.
/// * If your IDE supports `rust-analyzer`, code completion should be available
/// to explore and see types.
/// * The `wit-bindgen` CLI tool, packaged as `wit-bindgen-cli` on crates.io,
/// can be executed the same as the `generate!` macro and the output can be
/// read.
/// * If you're seeing an error, `WIT_BINDGEN_DEBUG=1` can help debug what's
/// happening (more on this below) by emitting macro output to a file.
/// * This documentation can be consulted for various constructs as well.
///
/// Currently, browsing generated code may have some road bumps along the way.
/// If you run into issues or have ideas for how to improve the situation,
/// please [file an issue].
///
/// [file an issue]: https://github.com/bytecodealliance/wit-bindgen/issues/new
///
/// ## Namespacing
///
/// In WIT, worlds can import and export `interface`s, functions, and types. Each
/// `interface` can either be "anonymous" and only named within the context of a
/// `world` or it can have a "package ID" associated with it. Names in Rust take
/// into account all the names associated with a WIT `interface`. For example
/// the package ID `foo:bar/baz` would create a `mod foo` which contains a `mod
/// bar` which contains a `mod baz`.
///
/// WIT imports and exports are additionally separated into their own
/// namespaces. Imports are generated at the level of the `generate!` macro
/// invocation, while exports are generated under an `exports` namespace.
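///
/// As a rough, illustrative sketch (the interface paths and function name here
/// are hypothetical, not tied to any particular WIT input), an imported
/// interface `foo:bar/baz` and an exported interface `foo:bar/qux` would be
/// referenced roughly like so:
///
/// ```ignore
/// // Imports live in modules rooted where `generate!` was invoked ...
/// use foo::bar::baz::some_imported_function;
///
/// // ... while exports are nested under the generated `exports` module.
/// struct MyComponent;
///
/// impl exports::foo::bar::qux::Guest for MyComponent {
///     // ...
/// }
/// ```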
///
/// ## Imports
///
/// Imports into a `world` can be types, resources, functions, and interfaces.
/// Each of these is bound as a Rust type, function, or module. The intent is
/// that the WIT interfaces map to what is roughly idiomatic Rust for the given
/// interface.
///
/// ### Imports: Top-level functions and types
///
/// Imports at the top-level of a world are generated directly where the
/// `generate!` macro is invoked.
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r"
/// package a:b;
///
/// world the-world {
/// record fahrenheit {
/// degrees: f32,
/// }
///
/// import what-temperature-is-it: func() -> fahrenheit;
///
/// record celsius {
/// degrees: f32,
/// }
///
/// import convert-to-celsius: func(a: fahrenheit) -> celsius;
/// }
/// ",
/// });
///
/// fn test() {
/// let current_temp = what_temperature_is_it();
/// println!("current temp in fahrenheit is {}", current_temp.degrees);
/// let in_celsius: Celsius = convert_to_celsius(current_temp);
/// println!("current temp in celsius is {}", in_celsius.degrees);
/// }
/// ```
///
/// ### Imports: Interfaces
///
/// Interfaces are placed into submodules where the `generate!` macro is
/// invoked and are namespaced based on their identifiers.
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r"
/// package my:test;
///
/// interface logging {
/// enum level {
/// debug,
/// info,
/// error,
/// }
/// log: func(level: level, msg: string);
/// }
///
/// world the-world {
/// import logging;
/// import global-logger: interface {
/// use logging.{level};
///
/// set-current-level: func(level: level);
/// get-current-level: func() -> level;
/// }
/// }
/// ",
/// });
///
/// // `my` and `test` are from `package my:test;` and `logging` is the
/// // interface name.
/// use my::test::logging::Level;
///
/// fn test() {
/// let current_level = global_logger::get_current_level();
/// println!("current logging level is {current_level:?}");
/// global_logger::set_current_level(Level::Error);
///
/// my::test::logging::log(Level::Info, "Hello there!");
/// }
/// #
/// # fn main() {}
/// ```
///
/// ### Imports: Resources
///
/// Imported resources generate a type named after the name of the resource.
/// This type is then used both for borrows as `&T` as well as via ownership as
/// `T`. Resource methods are bound as methods on the type `T`.
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r#"
/// package my:test;
///
/// interface logger {
/// enum level {
/// debug,
/// info,
/// error,
/// }
///
/// resource logger {
/// constructor(destination: string);
/// log: func(level: level, msg: string);
/// }
/// }
///
/// // Note that while this world does not textually import the above
/// // `logger` interface, it is a transitive dependency via the `use`
/// // statement, so the "elaborated world" imports the logger.
/// world the-world {
/// use logger.{logger};
///
/// import get-global-logger: func() -> logger;
/// }
/// "#,
/// });
///
/// use my::test::logger::Level;
///
/// fn test() {
/// let logger = get_global_logger();
/// logger.log(Level::Debug, "This is a global message");
///
/// let logger2 = Logger::new("/tmp/other.log");
/// logger2.log(Level::Info, "This is not a global message");
/// }
/// #
/// # fn main() {}
/// ```
///
/// Note the lack of an explicit import of `Logger` in the above example. The
/// `use` statement imported the `logger` type, as an alias, from the `logger`
/// interface into `the-world`. This generated a Rust `type` alias so `Logger`
/// was available at the top level.
///
/// ## Exports: Basic Usage
///
/// A WIT world can not only `import` functionality but can additionally
/// `export` functionality as well. An `export` represents a contract that the
/// Rust program must implement to be able to work correctly. The `generate!`
/// macro's goal is to take care of all the low-level and ABI details for you,
/// so the end result is that `generate!`, for exports, will generate Rust
/// `trait`s that you must implement.
///
/// A minimal example of this is:
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r#"
/// package my:test;
///
/// world my-world {
/// export hello: func();
/// }
/// "#,
/// });
///
/// struct MyComponent;
///
/// impl Guest for MyComponent {
/// fn hello() {}
/// }
///
/// export!(MyComponent);
/// #
/// # fn main() {}
/// ```
///
/// Here the `Guest` trait was generated by the `generate!` macro and represents
/// the functions at the top-level of `my-world`, in this case the function
/// `hello`. A custom type, here called `MyComponent`, is created and the trait
/// is implemented for that type.
///
/// Additionally a macro is generated by `generate!` (macros generating macros)
/// called `export!`. The `export!` macro is given a component that implements
/// the export `trait`s and then it will itself generate all necessary
/// `#[unsafe(no_mangle)]` functions to implement the ABI required.
///
/// ## Exports: Multiple Interfaces
///
/// Each `interface` in WIT will generate a `trait` that must be implemented in
/// addition to the top-level `trait` for the world. All traits are named
/// `Guest` here and are namespaced appropriately in modules:
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r#"
/// package my:test;
///
/// interface a {
/// func-in-a: func();
/// second-func-in-a: func();
/// }
///
/// world my-world {
/// export a;
/// export b: interface {
/// func-in-b: func();
/// }
/// export c: func();
/// }
/// "#,
/// });
///
/// struct MyComponent;
///
/// impl Guest for MyComponent {
/// fn c() {}
/// }
///
/// impl exports::my::test::a::Guest for MyComponent {
/// fn func_in_a() {}
/// fn second_func_in_a() {}
/// }
///
/// impl exports::b::Guest for MyComponent {
/// fn func_in_b() {}
/// }
///
/// export!(MyComponent);
/// #
/// # fn main() {}
/// ```
///
/// Note that three `Guest` traits were generated, one for each of the three
/// groups: two interfaces and one `world`. Also note that traits (and types)
/// for exports are namespaced in an `exports` module.
///
/// Note that when the top-level `world` does not have any exported functions,
/// or if an interface does not have any functions, then no `trait` is
/// generated:
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r#"
/// package my:test;
///
/// interface a {
/// type my-type = u32;
/// }
///
/// world my-world {
/// export b: interface {
/// use a.{my-type};
///
/// foo: func() -> my-type;
/// }
/// }
/// "#,
/// });
///
/// struct MyComponent;
///
/// impl exports::b::Guest for MyComponent {
/// fn foo() -> u32 {
/// 42
/// }
/// }
///
/// export!(MyComponent);
/// #
/// # fn main() {}
/// ```
///
/// ## Exports: Resources
///
/// Exporting a resource is significantly different than importing a resource.
/// A component defining a resource can create new resources of that type at any
/// time, for example. Additionally resources can be "dereferenced" into their
/// underlying values within the component.
///
/// Owned resources have a custom type generated and borrowed resources are
/// generated with a type of the same name suffixed with `Borrow<'_>`, such as
/// `MyResource` and `MyResourceBorrow<'_>`.
///
/// Like `interface`s the methods and functions used with a `resource` are
/// packaged up into a `trait`.
///
/// Specifying a custom resource type is done with an associated type on the
/// corresponding trait for the resource's containing interface/world:
///
/// ```
/// use wit_bindgen::generate;
/// use std::cell::{RefCell, Cell};
///
/// generate!({
/// inline: r#"
/// package my:test;
///
/// interface logging {
/// enum level {
/// debug,
/// info,
/// error,
/// }
///
/// resource logger {
/// constructor(level: level);
/// log: func(level: level, msg: string);
/// level: func() -> level;
/// set-level: func(level: level);
/// }
/// }
///
/// world my-world {
/// export logging;
/// }
/// "#,
/// });
///
/// use exports::my::test::logging::{Guest, GuestLogger, Level};
///
/// struct MyComponent;
///
/// // Note that the `logging` interface has no methods of its own but a trait
/// // is required to be implemented here to specify the type of `Logger`.
/// impl Guest for MyComponent {
/// type Logger = MyLogger;
/// }
///
/// struct MyLogger {
/// level: Cell<Level>,
/// contents: RefCell<String>,
/// }
///
/// impl GuestLogger for MyLogger {
/// fn new(level: Level) -> MyLogger {
/// MyLogger {
/// level: Cell::new(level),
/// contents: RefCell::new(String::new()),
/// }
/// }
///
/// fn log(&self, level: Level, msg: String) {
/// if level as u32 <= self.level.get() as u32 {
/// self.contents.borrow_mut().push_str(&msg);
/// self.contents.borrow_mut().push_str("\n");
/// }
/// }
///
/// fn level(&self) -> Level {
/// self.level.get()
/// }
///
/// fn set_level(&self, level: Level) {
/// self.level.set(level);
/// }
/// }
///
/// export!(MyComponent);
/// #
/// # fn main() {}
/// ```
///
/// It's important to note that resource methods in Rust do not take `&mut
/// self`; they are instead required to be defined with `&self`. This requires
/// the use of interior mutability, such as the `Cell` and `RefCell` types from
/// the `std::cell` module used above.
///
/// ## Exports: The `export!` macro
///
/// Components are created by having exported WebAssembly functions with
/// specific names, and these functions are not created when `generate!` is
/// invoked. Instead these functions are created afterwards, once you've defined
/// your own type and implemented the various `trait`s for it. The
/// `#[unsafe(no_mangle)]` functions that will become the component are created
/// with the generated `export!` macro.
///
/// Each call to `generate!` will itself generate a macro called `export!`.
/// The macro's first argument is the name of a type that implements the traits
/// generated:
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// inline: r#"
/// package my:test;
///
/// world my-world {
/// # export hello: func();
/// // ...
/// }
/// "#,
/// });
///
/// struct MyComponent;
///
/// impl Guest for MyComponent {
/// # fn hello() {}
/// // ...
/// }
///
/// export!(MyComponent);
/// #
/// # fn main() {}
/// ```
///
/// This argument is a Rust type which implements the `Guest` traits generated
/// by `generate!`. Note that all `Guest` traits must be implemented for the
/// type provided or an error will be generated.
///
/// This macro additionally accepts a second argument. The macro itself needs to
/// be able to find the module where the `generate!` macro was originally
/// invoked. Currently that can't be done automatically, so a path to the module
/// where `generate!` was invoked can also be passed to the macro. By default,
/// the argument is set to `self`:
///
/// ```
/// use wit_bindgen::generate;
///
/// generate!({
/// // ...
/// # inline: r#"
/// # package my:test;
/// #
/// # world my-world {
/// # export hello: func();
/// # // ...
/// # }
/// # "#,
/// });
/// #
/// # struct MyComponent;
/// #
/// # impl Guest for MyComponent {
/// # fn hello() {}
/// # // ...
/// # }
/// #
/// export!(MyComponent with_types_in self);
/// #
/// # fn main() {}
/// ```
///
/// This indicates that the current module, referred to with `self`, is the one
/// which had the `generate!` macro expanded.
///
/// If, however, the `generate!` macro was run in a different module then that
/// must be configured:
///
/// ```
/// mod bindings {
/// wit_bindgen::generate!({
/// // ...
/// # inline: r#"
/// # package my:test;
/// #
/// # world my-world {
/// # export hello: func();
/// # // ...
/// # }
/// # "#,
/// });
/// }
/// #
/// # struct MyComponent;
/// #
/// # impl bindings::Guest for MyComponent {
/// # fn hello() {}
/// # // ...
/// # }
/// #
/// bindings::export!(MyComponent with_types_in bindings);
/// #
/// # fn main() {}
/// ```
///
/// ## Debugging output to `generate!`
///
/// While `wit-bindgen` is tested to the best of our ability there are
/// inevitably bugs and issues that arise. These can range from bad error
/// messages to misconfigured invocations to bugs in the macro itself. To assist
/// with debugging these situations the macro recognizes an environment
/// variable:
///
/// ```shell
/// export WIT_BINDGEN_DEBUG=1
/// ```
///
/// When set the macro will emit the result of expansion to a file and then
/// `include!` that file. Any error messages generated by `rustc` should then
/// point to the generated file and allow you to open it up, read it, and
/// inspect it. This can often provide better context to the error than rustc
/// provides by default with macros.
///
/// It is not recommended to set this environment variable by default as it will
/// cause excessive rebuilds of Cargo projects. It's recommended to only use it
/// as necessary to debug issues.
///
/// ## Options to `generate!`
///
/// The full list of options that can be passed to the `generate!` macro is as
/// follows. Note that there are no required options; they all have default
/// values.
///
/// ```
/// use wit_bindgen::generate;
/// # macro_rules! generate { ($($t:tt)*) => () }
///
/// generate!({
/// // The name of the world that bindings are being generated for. If this
/// // is not specified then it's required that the package selected
/// // below has a single `world` in it.
/// world: "my-world",
///
/// // Path to parse WIT and its dependencies from. Defaults to the `wit`
/// // folder adjacent to your `Cargo.toml`.
/// //
/// // This parameter also supports the form of a list, such as:
/// // ["../path/to/wit1", "../path/to/wit2"]
/// // This form is usually used in testing: the test suite may want to
/// // generate code from WIT files located in multiple paths within a single
/// // module without copying those files around. Currently these locations
/// // must be ordered, as later paths can't contain dependencies on earlier
/// // paths. This restriction may be lifted in the future.
/// path: "../path/to/wit",
///
/// // Enables passing "inline WIT". If specified this is the default
/// // package that a world is selected from. Any dependencies that this
/// // inline WIT refers to must be defined in the `path` option above.
/// //
/// // By default this is not specified.
/// inline: "
/// world my-world {
/// import wasi:cli/imports;
///
/// export my-run: func();
/// }
/// ",
///
/// // Additional traits to derive for all defined types. Note that not all
/// // types may be able to implement these traits, such as resources.
/// //
/// // By default this set is empty.
/// additional_derives: [PartialEq, Eq, Hash, Clone],
///
/// // When generating bindings for interfaces that are not defined in the
/// // same package as `world`, this option can be used to either generate
/// // those bindings or point to already generated bindings.
/// // For example, if your world refers to WASI types then the `wasi` crate
/// // already has generated bindings for all WASI types and structures. In this
/// // situation the key `with` here can be used to use those types
/// // elsewhere rather than regenerating types.
/// // If for example your world refers to some type and you want to use
/// // your own custom implementation of that type then you can specify
/// // that here as well. The remapped (custom) type is required to have the
/// // same internal structure as what wit-bindgen would generate (including
/// // alignment, etc.), since lifting/lowering uses its fields directly.
/// //
/// // If, however, your world refers to interfaces for which you don't have
/// // already generated bindings then you can use the special `generate` value
/// // to have those bindings generated.
/// //
/// // The `with` key here works for interfaces and individual types.
/// //
/// // When an interface or type is mapped to a path here (rather than to
/// // `generate`), no bindings will be generated for it at all; it's assumed
/// // the bindings are fully generated somewhere else. Any further references
/// // to types defined in those interfaces will use the upstream paths
/// // specified here instead.
/// //
/// // Any unused keys in this map are considered an error.
/// with: {
/// "wasi:io/poll": wasi::io::poll,
/// "some:package/my-interface": generate,
/// "some:package/my-interface/my-type": my_crate::types::MyType,
/// },
///
/// // Indicates that all interfaces not present in `with` should be assumed
/// // to be marked with `generate`.
/// generate_all,
///
/// // An optional list of function names to skip generating bindings for.
/// // This is only applicable to imports and the name specified is the name
/// // of the function.
/// skip: ["foo", "bar", "baz"],
///
/// // Configuration of how Rust types are generated.
/// //
/// // This option will change how WIT types are mapped to Rust types. There
/// // are a number of ways this can be done depending on the context. For
/// // example a Rust `&str` is suitable to pass to an imported function but
/// // an exported function receives a `String`. These both represent the
/// // WIT type `string`, however.
/// //
/// // Type generation becomes extra-significant when aggregates come into
/// // play (such as a WIT `record` or `variant`), especially when the
/// // aggregate is used both in an imported function and exported one.
/// //
/// // There are three modes of ownership, documented here, but only one
/// // can be specified.
/// //
/// // The default mode is "Owning" meaning that all Rust types will by
/// // default contain their owned containers. For example a `record` with
/// // a `string` will map to a Rust `struct` containing a `String`. This
/// // maximizes the chance that types can be shared between imports and
/// // exports but can come at a cost where calling an import may require
/// // more allocations than necessary.
/// ownership: Owning,
///
/// // Specifies an alternative name for the `export!` macro generated for
/// // any exports this world has.
/// //
/// // Defaults to "export"
/// export_macro_name: "export",
///
/// // Indicates whether the `export!` macro is `pub` or just `pub(crate)`.
/// //
/// // This defaults to `false`.
/// pub_export_macro: false,
///
/// // The second mode of ownership is "Borrowing". This mode then
/// // additionally has a boolean flag indicating whether duplicate types
/// // should be generated if necessary.
/// //
/// // This mode will prefer using borrowed values in Rust to represent WIT
/// // values where possible. For example if the argument to an imported
/// // function is a record-with-a-string then in Rust that will generate a
/// // `struct` with a lifetime parameter storing `&'a str`.
/// //
/// // The `duplicate_if_necessary` flag will cause duplicate types to be
/// // generated when a WIT type is used both in an import and export. In
/// // this situation one will be called `FooParam` and one will be called
/// // `FooResult` (where `foo` is the WIT name).
/// //
/// // It's generally recommended to not turn this on unless performance
/// // requires it. Even if so, please feel free to open an issue on the
/// // `wit-bindgen` repository to help improve the default "Owning" use
/// // case above if possible.
/// ownership: Borrowing { duplicate_if_necessary: false },
///
/// // The generated `export!` macro, if any, will by default look for
/// // generated types adjacent to where the `export!` macro is invoked
/// // through the `self` module. This option can be used to change the
/// // defaults to look somewhere else instead.
/// default_bindings_module: "path::to::bindings",
///
/// // This will suffix the custom section containing component type
/// // information with the specified string. This is not required by
/// // default but if the same world is generated in two different locations
/// // in the crate then one bindings generation location will need this
/// // suffix to avoid having the custom sections corrupt each other.
/// type_section_suffix: "suffix",
///
/// // Configures the path to the `wit-bindgen` crate itself. By default
/// // this is `wit_bindgen` assuming that your crate depends on the
/// // `wit-bindgen` crate itself.
/// runtime_path: "path::to::wit_bindgen",
///
/// // Configure where the `bitflags` crate is located. By default this
/// // is `wit_bindgen::bitflags` which already reexports `bitflags` for
/// // you.
/// bitflags_path: "path::to::bitflags",
///
/// // Indicates that instead of `&str` and `String` the `&[u8]` and
/// // `Vec<u8>` types should be used. Only intended for cases where
/// // compiled size is of the utmost concern as this can avoid pulling in
/// // UTF-8 validation.
/// raw_strings,
///
/// // Emits `#[cfg(feature = "std")]` around `impl Error for ... {}` blocks
/// // for generated types. This is a niche option that is only here to
/// // support the standard library itself depending on this crate one day.
/// std_feature,
///
/// // Disable a workaround to force wasm constructors to be run only once
/// // when exported functions are called.
/// disable_run_ctors_once_workaround: false,
///
/// // Whether to generate unused `record`, `enum`, `variant` types.
/// // By default, they will not be generated unless they are used as input
/// // or return value of a function.
/// generate_unused_types: false,
///
/// // A list of "features" which correspond to WIT features to activate
/// // when parsing WIT files. This enables `@unstable` annotations showing
/// // up and having bindings generated for them.
/// //
/// // By default this is an empty list.
/// features: ["foo", "bar", "baz"],
///
/// // Disables generation of a `#[used]` static to try harder to get the
/// // custom section describing WIT types linked into the binary when
/// // used in library-like situations. This is `false` by default with
/// // `#[used]` statics being emitted.
/// disable_custom_section_link_helpers: false,
///
/// // Write generated code to a .rs file, which allows the compiler to
/// // emit more useful diagnostics for errors in the generated code. This
/// // is primarily useful for `wit-bindgen` developers.
/// //
/// // This does the same thing as setting `WIT_BINDGEN_DEBUG=1`, except
/// // that it can be used on a more fine-grained basis (i.e. it only affects
/// // the specific `generate!` call where it is used).
/// debug: true,
///
/// // Generate async import and/or export bindings.
/// //
/// // The resulting bindings will use the component model
/// // [async ABI](https://github.com/WebAssembly/component-model/blob/main/design/mvp/Async.md).
/// //
/// // If this option is not provided then the WIT's source annotation will
/// // be used instead.
/// async: true, // all bindings are async
/// async: false, // all bindings are sync
/// // With an array per-function configuration can be specified. A leading
/// // '-' will disable async for that particular function.
/// async: [
/// "wasi:http/types@0.3.0-draft#[static]body.finish",
/// "import:wasi:http/handler@0.3.0-draft#handle",
/// "-export:wasi:http/handler@0.3.0-draft#handle",
/// "all",
/// ],
/// });
/// ```
///
/// [WIT package]: https://component-model.bytecodealliance.org/design/packages.html
#[cfg(feature = "macros")]
pub use wit_bindgen_rust_macro::generate;
#[cfg(docsrs)]
pub mod examples;
#[doc(hidden)]
pub mod rt;
#[cfg(feature = "async")]
pub use rt::async_support::{
backpressure_dec, backpressure_inc, backpressure_set, block_on, spawn, yield_async,
yield_blocking, AbiBuffer, FutureRead, FutureReader, FutureWrite, FutureWriteCancel,
FutureWriteError, FutureWriter, StreamRead, StreamReader, StreamResult, StreamWrite,
StreamWriter,
};


@@ -0,0 +1,634 @@
#![deny(missing_docs)]
// TODO: Switch to interior mutability (e.g. use Mutexes or thread-local
// RefCells) and remove this, since even in single-threaded mode `static mut`
// references can be a hazard due to recursive access.
#![allow(static_mut_refs)]
extern crate std;
use core::sync::atomic::{AtomicBool, Ordering};
use std::boxed::Box;
use std::collections::BTreeMap;
use std::ffi::c_void;
use std::future::Future;
use std::mem;
use std::pin::Pin;
use std::ptr;
use std::sync::Arc;
use std::task::{Context, Poll, Wake, Waker};
use std::vec::Vec;
use futures::channel::oneshot;
use futures::future::FutureExt;
use futures::stream::{FuturesUnordered, StreamExt};
macro_rules! rtdebug {
($($f:tt)*) => {
// Change this flag to enable debugging; right now we're not using a
// crate like `log` in order to reduce runtime deps. Intended to be used
// during development for now.
if false {
std::eprintln!($($f)*);
}
}
}
mod abi_buffer;
mod cabi;
mod error_context;
mod future_support;
mod stream_support;
mod subtask;
mod waitable;
mod waitable_set;
use self::waitable_set::WaitableSet;
pub use abi_buffer::*;
pub use error_context::*;
pub use future_support::*;
pub use stream_support::*;
#[doc(hidden)]
pub use subtask::Subtask;
pub use futures;
type BoxFuture = Pin<Box<dyn Future<Output = ()> + 'static>>;
/// Represents a task created by either a call to an async-lifted export or a
/// future run using `block_on` or `start_task`.
struct FutureState {
/// Remaining work to do (if any) before this task can be considered "done".
///
/// Note that we won't tell the host the task is done until this is drained
/// and `waitables` is empty.
tasks: FuturesUnordered<BoxFuture>,
/// The waitable set containing waitables created by this task, if any.
waitable_set: Option<WaitableSet>,
/// State of all waitables in `waitable_set`, and the ptr/callback they're
/// associated with.
//
// Note that this is a `BTreeMap` rather than a `HashMap` only because, as
// of this writing, initializing the default hasher for `HashMap` requires
// calling `wasi_snapshot_preview1:random_get`, which requires initializing
// the `wasi_snapshot_preview1` adapter when targeting `wasm32-wasip2` and
// later, and that's expensive enough that we'd prefer to avoid it for apps
// which otherwise make no use of the adapter.
waitables: BTreeMap<u32, (*mut c_void, unsafe extern "C" fn(*mut c_void, u32))>,
/// Raw structure used to pass to `cabi::wasip3_task_set`
wasip3_task: cabi::wasip3_task,
/// Rust-level state for the waker, notably a bool as to whether this has
/// been woken.
waker: Arc<FutureWaker>,
/// Clone of `waker` field, but represented as `std::task::Waker`.
waker_clone: Waker,
}
impl FutureState {
fn new(future: BoxFuture) -> FutureState {
let waker = Arc::new(FutureWaker::default());
FutureState {
waker_clone: waker.clone().into(),
waker,
tasks: [future].into_iter().collect(),
waitable_set: None,
waitables: BTreeMap::new(),
wasip3_task: cabi::wasip3_task {
// This pointer is filled in before calling `wasip3_task_set`.
ptr: ptr::null_mut(),
version: cabi::WASIP3_TASK_V1,
waitable_register,
waitable_unregister,
},
}
}
fn get_or_create_waitable_set(&mut self) -> &WaitableSet {
self.waitable_set.get_or_insert_with(WaitableSet::new)
}
fn add_waitable(&mut self, waitable: u32) {
self.get_or_create_waitable_set().join(waitable)
}
fn remove_waitable(&mut self, waitable: u32) {
WaitableSet::remove_waitable_from_all_sets(waitable)
}
fn remaining_work(&self) -> bool {
!self.waitables.is_empty()
}
/// Handles the `event{0,1,2}` event codes and returns a corresponding
/// return code along with a flag whether this future is "done" or not.
fn callback(&mut self, event0: u32, event1: u32, event2: u32) -> (u32, bool) {
match event0 {
EVENT_NONE => rtdebug!("EVENT_NONE"),
EVENT_SUBTASK => rtdebug!("EVENT_SUBTASK({event1:#x}, {event2:#x})"),
EVENT_STREAM_READ => rtdebug!("EVENT_STREAM_READ({event1:#x}, {event2:#x})"),
EVENT_STREAM_WRITE => rtdebug!("EVENT_STREAM_WRITE({event1:#x}, {event2:#x})"),
EVENT_FUTURE_READ => rtdebug!("EVENT_FUTURE_READ({event1:#x}, {event2:#x})"),
EVENT_FUTURE_WRITE => rtdebug!("EVENT_FUTURE_WRITE({event1:#x}, {event2:#x})"),
EVENT_CANCEL => {
rtdebug!("EVENT_CANCEL");
// Cancellation is mapped to destruction in Rust, so return a
// code/bool indicating we're done. The caller will then
// appropriately deallocate this `FutureState` which will
// transitively run all destructors.
return (CALLBACK_CODE_EXIT, true);
}
_ => unreachable!(),
}
if event0 != EVENT_NONE {
self.deliver_waitable_event(event1, event2)
}
self.poll()
}
/// Deliver the `code` event to the `waitable` store within our map. This
/// waitable should be present because it's part of the waitable set which
/// is kept in-sync with our map.
fn deliver_waitable_event(&mut self, waitable: u32, code: u32) {
self.remove_waitable(waitable);
let (ptr, callback) = self.waitables.remove(&waitable).unwrap();
unsafe {
callback(ptr, code);
}
}
/// Poll this task until it either completes or can't make immediate
/// progress.
///
/// Returns the code representing what happened along with a boolean as to
/// whether this execution is done.
fn poll(&mut self) -> (u32, bool) {
self.with_p3_task_set(|me| {
let mut context = Context::from_waker(&me.waker_clone);
loop {
// Reset the waker before polling to clear out any pending
// notification, if any.
me.waker.0.store(false, Ordering::Relaxed);
// Poll our future, handling `SPAWNED` around this.
let poll;
unsafe {
poll = me.tasks.poll_next_unpin(&mut context);
if !SPAWNED.is_empty() {
me.tasks.extend(SPAWNED.drain(..));
}
}
match poll {
// A future completed, yay! Keep going to see if more have
// completed.
Poll::Ready(Some(())) => (),
// The `FuturesUnordered` list is empty meaning that there's no
// more work left to do, so we're done.
Poll::Ready(None) => {
assert!(!me.remaining_work());
assert!(me.tasks.is_empty());
break (CALLBACK_CODE_EXIT, true);
}
// Some future within `FuturesUnordered` is not ready yet. If
// our `waker` was signaled then that means this is a yield
// operation, otherwise it means we're blocking on something.
Poll::Pending => {
assert!(!me.tasks.is_empty());
if me.waker.0.load(Ordering::Relaxed) {
break (CALLBACK_CODE_YIELD, false);
}
assert!(me.remaining_work());
let waitable = me.waitable_set.as_ref().unwrap().as_raw();
break (CALLBACK_CODE_WAIT | (waitable << 4), false);
}
}
}
})
}
fn with_p3_task_set<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
// Finish our `wasip3_task` by initializing its self-referential pointer,
// and then register it for the duration of this function with
// `wasip3_task_set`. The previous value of `wasip3_task_set` will get
// restored when this function returns.
struct ResetTask(*mut cabi::wasip3_task);
impl Drop for ResetTask {
fn drop(&mut self) {
unsafe {
cabi::wasip3_task_set(self.0);
}
}
}
let self_raw = self as *mut FutureState;
self.wasip3_task.ptr = self_raw.cast();
let prev = unsafe { cabi::wasip3_task_set(&mut self.wasip3_task) };
let _reset = ResetTask(prev);
f(self)
}
}
impl Drop for FutureState {
fn drop(&mut self) {
// If this state has active tasks then they need to be dropped which may
// execute arbitrary code. This arbitrary code might require the p3 APIs
// for managing waitables, notably around removing them. In this
// situation we ensure that the p3 task is set while futures are being
// destroyed.
if !self.tasks.is_empty() {
self.with_p3_task_set(|me| {
me.tasks = Default::default();
})
}
}
}
unsafe extern "C" fn waitable_register(
ptr: *mut c_void,
waitable: u32,
callback: unsafe extern "C" fn(*mut c_void, u32),
callback_ptr: *mut c_void,
) -> *mut c_void {
let ptr = ptr.cast::<FutureState>();
assert!(!ptr.is_null());
(*ptr).add_waitable(waitable);
match (*ptr).waitables.insert(waitable, (callback_ptr, callback)) {
Some((prev, _)) => prev,
None => ptr::null_mut(),
}
}
unsafe extern "C" fn waitable_unregister(ptr: *mut c_void, waitable: u32) -> *mut c_void {
let ptr = ptr.cast::<FutureState>();
assert!(!ptr.is_null());
(*ptr).remove_waitable(waitable);
match (*ptr).waitables.remove(&waitable) {
Some((prev, _)) => prev,
None => ptr::null_mut(),
}
}
#[derive(Default)]
struct FutureWaker(AtomicBool);
impl Wake for FutureWaker {
fn wake(self: Arc<Self>) {
Self::wake_by_ref(&self)
}
fn wake_by_ref(self: &Arc<Self>) {
self.0.store(true, Ordering::Relaxed)
}
}
/// Any newly-deferred work queued by calls to the `spawn` function while
/// polling the current task.
static mut SPAWNED: Vec<BoxFuture> = Vec::new();
const EVENT_NONE: u32 = 0;
const EVENT_SUBTASK: u32 = 1;
const EVENT_STREAM_READ: u32 = 2;
const EVENT_STREAM_WRITE: u32 = 3;
const EVENT_FUTURE_READ: u32 = 4;
const EVENT_FUTURE_WRITE: u32 = 5;
const EVENT_CANCEL: u32 = 6;
const CALLBACK_CODE_EXIT: u32 = 0;
const CALLBACK_CODE_YIELD: u32 = 1;
const CALLBACK_CODE_WAIT: u32 = 2;
const _CALLBACK_CODE_POLL: u32 = 3;
const STATUS_STARTING: u32 = 0;
const STATUS_STARTED: u32 = 1;
const STATUS_RETURNED: u32 = 2;
const STATUS_STARTED_CANCELLED: u32 = 3;
const STATUS_RETURNED_CANCELLED: u32 = 4;
const BLOCKED: u32 = 0xffff_ffff;
const COMPLETED: u32 = 0x0;
const DROPPED: u32 = 0x1;
const CANCELLED: u32 = 0x2;
/// Return code of stream/future operations.
#[derive(PartialEq, Debug, Copy, Clone)]
enum ReturnCode {
/// The operation is blocked and has not completed.
Blocked,
/// The operation completed with the specified number of items.
Completed(u32),
/// The other end is dropped, but before that the specified number of items
/// were transferred.
Dropped(u32),
/// The operation was cancelled, but before that the specified number of
/// items were transferred.
Cancelled(u32),
}
impl ReturnCode {
fn decode(val: u32) -> ReturnCode {
if val == BLOCKED {
return ReturnCode::Blocked;
}
let amt = val >> 4;
match val & 0xf {
COMPLETED => ReturnCode::Completed(amt),
DROPPED => ReturnCode::Dropped(amt),
CANCELLED => ReturnCode::Cancelled(amt),
_ => panic!("unknown return code {val:#x}"),
}
}
}
/// Starts execution of the `task` provided, an asynchronous computation.
///
/// This is used for async-lifted exports at their definition site. The
/// representation of the export is `task` and this function is called from the
/// entrypoint. The code returned here is the same as the callback associated
/// with this export, and the callback will be used if this task doesn't exit
/// immediately with its result.
#[doc(hidden)]
pub fn start_task(task: impl Future<Output = ()> + 'static) -> i32 {
// Allocate a new `FutureState` which will track all state necessary for
// our exported task.
let state = Box::into_raw(Box::new(FutureState::new(Box::pin(task))));
// Store our `FutureState` into our context-local-storage slot and then
// pretend we got EVENT_NONE to kick off everything.
//
// SAFETY: we should own `context.set` as we're the root level exported
// task, and then `callback` is only invoked when context-local storage is
// valid.
unsafe {
assert!(context_get().is_null());
context_set(state.cast());
callback(EVENT_NONE, 0, 0) as i32
}
}
/// Handle a progress notification from the host regarding either a call to an
/// async-lowered import or a stream/future read/write operation.
///
/// # Unsafety
///
/// This function assumes that `context_get()` returns a `FutureState`.
#[doc(hidden)]
pub unsafe fn callback(event0: u32, event1: u32, event2: u32) -> u32 {
// Acquire our context-local state, assert it's not-null, and then reset
// the state to null while we're running to help prevent any unintended
// usage.
let state = context_get().cast::<FutureState>();
assert!(!state.is_null());
unsafe {
context_set(ptr::null_mut());
}
// Use `state` to run the `callback` function in the context of our event
// codes we received. If the callback decides to exit then we're done with
// our future so deallocate it. Otherwise put our future back in
// context-local storage and forward the code.
unsafe {
let (rc, done) = (*state).callback(event0, event1, event2);
if done {
drop(Box::from_raw(state));
} else {
context_set(state.cast());
}
rtdebug!(" => (cb) {rc:#x}");
rc
}
}
/// Defer the specified future to be run after the current async-lifted export
/// task has returned a value.
///
/// The task will remain in a running state until all spawned futures have
/// completed.
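///
/// A minimal usage sketch (assuming the `async` feature and the crate-level
/// re-export `wit_bindgen::spawn`; `flush_logs` is a hypothetical async helper):
///
/// ```ignore
/// wit_bindgen::spawn(async {
///     // Runs after the current exported task has returned its value.
///     flush_logs().await;
/// });
/// ```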
pub fn spawn(future: impl Future<Output = ()> + 'static) {
unsafe { SPAWNED.push(Box::pin(future)) }
}
/// Run the specified future to completion, returning the result.
///
/// This uses `waitable-set.wait` to poll for progress on any in-progress calls
/// to async-lowered imports as necessary.
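///
/// A minimal usage sketch (assuming the `async` feature and the crate-level
/// re-export `wit_bindgen::block_on`; `fetch` is a hypothetical async-lowered
/// import):
///
/// ```ignore
/// let body: String = wit_bindgen::block_on(async {
///     fetch("https://example.com").await
/// });
/// ```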
// TODO: refactor so `'static` bounds aren't necessary
pub fn block_on<T: 'static>(future: impl Future<Output = T> + 'static) -> T {
let (tx, mut rx) = oneshot::channel();
let state = &mut FutureState::new(Box::pin(future.map(move |v| drop(tx.send(v)))) as BoxFuture);
let mut event = (EVENT_NONE, 0, 0);
loop {
match state.callback(event.0, event.1, event.2) {
(_, true) => break rx.try_recv().unwrap().unwrap(),
(CALLBACK_CODE_YIELD, false) => event = state.waitable_set.as_ref().unwrap().poll(),
_ => event = state.waitable_set.as_ref().unwrap().wait(),
}
}
}
/// Call the `yield` canonical built-in function.
///
/// This yields control to the host temporarily, allowing other tasks to make
/// progress. It's a good idea to call this inside a busy loop which does not
/// otherwise ever yield control to the host.
///
/// Note that this function is a blocking function, not an `async` function.
/// That means that this is not an async yield which allows other tasks in this
/// component to progress, but instead this will block the current function
/// until the host gets back around to returning from this yield. Asynchronous
/// functions should probably use [`yield_async`] instead.
///
/// # Return Value
///
/// This function returns a `bool` which indicates whether execution should
/// continue after this yield point. A return value of `true` means that the
/// task was not cancelled and execution should continue. A return value of
/// `false`, however, means that the task was cancelled while it was suspended
/// at this yield point. The caller should return back and exit from the task
/// ASAP in this situation.
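///
/// A minimal sketch of honoring that return value in a busy loop
/// (`do_chunk_of_work` is a hypothetical helper):
///
/// ```ignore
/// loop {
///     do_chunk_of_work();
///     if !wit_bindgen::yield_blocking() {
///         // The task was cancelled while suspended at this yield point, so
///         // wind down and return as soon as possible.
///         break;
///     }
/// }
/// ```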
pub fn yield_blocking() -> bool {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn yield_() -> bool {
unreachable!();
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[thread-yield]"]
fn yield_() -> bool;
}
// Note that the return value from the raw intrinsic is inverted: the
// canonical ABI returns "did this task get cancelled" while this function
// answers "should work keep going".
unsafe { !yield_() }
}
/// The asynchronous counterpart to [`yield_blocking`].
///
/// This function does not block the current task but instead gives the
/// Rust-level executor a chance to yield control back to the host temporarily.
/// This means that other Rust-level tasks may also be able to progress during
/// this yield operation.
///
/// # Return Value
///
/// Unlike [`yield_blocking`] this function does not return anything. If this
/// component task is cancelled while paused at this yield point then the future
/// will be dropped and a Rust-level destructor will take over and clean up the
/// task. It's not necessary to do anything with the return value of this
/// function other than ensuring that you `.await` the function call.
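///
/// A minimal sketch inside an `async` export (`work_items` and `process` are
/// hypothetical):
///
/// ```ignore
/// for item in work_items {
///     process(item);
///     // Give the host, and other Rust-level tasks, a chance to run.
///     wit_bindgen::yield_async().await;
/// }
/// ```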
pub async fn yield_async() {
#[derive(Default)]
struct Yield {
yielded: bool,
}
impl Future for Yield {
type Output = ();
fn poll(mut self: Pin<&mut Self>, context: &mut Context<'_>) -> Poll<()> {
if self.yielded {
Poll::Ready(())
} else {
self.yielded = true;
context.waker().wake_by_ref();
Poll::Pending
}
}
}
Yield::default().await;
}
/// Call the `backpressure.set` canonical built-in function.
///
/// When `enabled` is `true`, this tells the host to defer any new calls to this
/// component instance until further notice (i.e. until `backpressure.set` is
/// called again with `enabled` set to `false`).
#[deprecated = "use backpressure_{inc,dec} instead"]
pub fn backpressure_set(enabled: bool) {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn backpressure_set(_: i32) {
unreachable!();
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[backpressure-set]"]
fn backpressure_set(_: i32);
}
unsafe { backpressure_set(if enabled { 1 } else { 0 }) }
}
/// Call the `backpressure.inc` canonical built-in function.
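///
/// A minimal sketch pairing an increment with a later decrement, so that the
/// host defers new calls into this component instance in between (assuming the
/// `async` feature's crate-level re-exports):
///
/// ```ignore
/// wit_bindgen::backpressure_inc();
/// // ... do work during which new incoming calls should be deferred ...
/// wit_bindgen::backpressure_dec();
/// ```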
pub fn backpressure_inc() {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn backpressure_inc() {
unreachable!();
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[backpressure-inc]"]
fn backpressure_inc();
}
unsafe { backpressure_inc() }
}
/// Call the `backpressure.dec` canonical built-in function.
pub fn backpressure_dec() {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn backpressure_dec() {
unreachable!();
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[backpressure-dec]"]
fn backpressure_dec();
}
unsafe { backpressure_dec() }
}
fn context_get() -> *mut u8 {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn get() -> *mut u8 {
unreachable!()
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[context-get-0]"]
fn get() -> *mut u8;
}
unsafe { get() }
}
unsafe fn context_set(value: *mut u8) {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn set(_: *mut u8) {
unreachable!()
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[context-set-0]"]
fn set(value: *mut u8);
}
unsafe { set(value) }
}
#[doc(hidden)]
pub struct TaskCancelOnDrop {
_priv: (),
}
impl TaskCancelOnDrop {
#[doc(hidden)]
pub fn new() -> TaskCancelOnDrop {
TaskCancelOnDrop { _priv: () }
}
#[doc(hidden)]
pub fn forget(self) {
mem::forget(self);
}
}
impl Drop for TaskCancelOnDrop {
fn drop(&mut self) {
#[cfg(not(target_arch = "wasm32"))]
unsafe fn cancel() {
unreachable!()
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "[export]$root")]
extern "C" {
#[link_name = "[task-cancel]"]
fn cancel();
}
unsafe { cancel() }
}
}


@@ -0,0 +1,417 @@
use crate::rt::async_support::StreamVtable;
use crate::rt::Cleanup;
use std::alloc::Layout;
use std::mem::{self, MaybeUninit};
use std::ptr;
use std::vec::Vec;
/// A helper structure used with a stream to handle the canonical ABI
/// representation of lists and track partial writes.
///
/// This structure is returned whenever a write to a stream completes. This
/// keeps track of the original buffer used to perform a write (`Vec<T>`) and
/// additionally tracks any partial writes. Writes can then be resumed with
/// this buffer again or the partial write can be converted back to `Vec<T>` to
/// get access to the remaining values.
///
/// This value is created through the [`StreamWrite`](super::StreamWrite)
/// future's return value.
pub struct AbiBuffer<T: 'static> {
rust_storage: Vec<MaybeUninit<T>>,
vtable: &'static StreamVtable<T>,
alloc: Option<Cleanup>,
cursor: usize,
}
impl<T: 'static> AbiBuffer<T> {
pub(crate) fn new(mut vec: Vec<T>, vtable: &'static StreamVtable<T>) -> AbiBuffer<T> {
assert_eq!(vtable.lower.is_some(), vtable.lift.is_some());
// SAFETY: We're converting `Vec<T>` to `Vec<MaybeUninit<T>>`, which
// should be safe.
let rust_storage = unsafe {
let ptr = vec.as_mut_ptr();
let len = vec.len();
let cap = vec.capacity();
mem::forget(vec);
Vec::<MaybeUninit<T>>::from_raw_parts(ptr.cast(), len, cap)
};
// If `lower` is provided then the canonical ABI format is different
// from the native format, so all items are converted at this time.
//
// Note that this is probably pretty inefficient for "big" use cases
// but it's hoped that "big" use cases are using `u8` and therefore
// skip this entirely.
let alloc = vtable.lower.and_then(|lower| {
let layout = Layout::from_size_align(
vtable.layout.size() * rust_storage.len(),
vtable.layout.align(),
)
.unwrap();
let (mut ptr, cleanup) = Cleanup::new(layout);
let cleanup = cleanup?;
// SAFETY: All items in `rust_storage` are already initialized so
// it should be safe to read them and move ownership into the
// canonical ABI format.
unsafe {
for item in rust_storage.iter() {
let item = item.assume_init_read();
lower(item, ptr);
ptr = ptr.add(vtable.layout.size());
}
}
Some(cleanup)
});
AbiBuffer {
rust_storage,
alloc,
vtable,
cursor: 0,
}
}
/// Returns the canonical ABI pointer/length to pass off to a write
/// operation.
pub(crate) fn abi_ptr_and_len(&self) -> (*const u8, usize) {
// If there's no `lower` operation then it means that `T`'s layout is
// the same in the canonical ABI, so it can be used as-is. In this
// situation the list was left untouched above.
if self.vtable.lower.is_none() {
// SAFETY: this should be in-bounds, so it should be safe.
let ptr = unsafe { self.rust_storage.as_ptr().add(self.cursor).cast() };
let len = self.rust_storage.len() - self.cursor;
return (ptr, len.try_into().unwrap());
}
// Otherwise, when `lower` is present, `self.alloc` has
// the ABI pointer we should pass along.
let ptr = self
.alloc
.as_ref()
.map(|c| c.ptr.as_ptr())
.unwrap_or(ptr::null_mut());
(
// SAFETY: this should be in-bounds, so it should be safe.
unsafe { ptr.add(self.cursor * self.vtable.layout.size()) },
self.rust_storage.len() - self.cursor,
)
}
/// Converts this `AbiBuffer<T>` back into a `Vec<T>`
///
/// This consumes the buffer and yields back unwritten values as a
/// `Vec<T>`. The remaining items in `Vec<T>` have not yet been written and
/// all written items have been removed from the front of the list.
///
/// Note that the backing storage of the returned `Vec<T>` has not changed
/// from when this buffer was created.
///
/// Also note that this can be an expensive operation if a partial write
/// occurred as this will involve shifting items from the end of the vector
/// to the start of the vector.
pub fn into_vec(mut self) -> Vec<T> {
self.take_vec()
}
/// Returns the number of items remaining in this buffer.
pub fn remaining(&self) -> usize {
self.rust_storage.len() - self.cursor
}
/// Advances this buffer by `amt` items.
///
/// This signals that `amt` items are no longer going to be yielded from
/// `abi_ptr_and_len`. Additionally this will perform any deallocation
/// necessary for the starting `amt` items in this list.
pub(crate) fn advance(&mut self, amt: usize) {
assert!(amt + self.cursor <= self.rust_storage.len());
let Some(dealloc_lists) = self.vtable.dealloc_lists else {
self.cursor += amt;
return;
};
let (mut ptr, len) = self.abi_ptr_and_len();
assert!(amt <= len);
for _ in 0..amt {
// SAFETY: we're managing the pointer passed to `dealloc_lists` and
// it was initialized with a `lower`, and then the pointer
// arithmetic should all be in-bounds.
unsafe {
dealloc_lists(ptr.cast_mut());
ptr = ptr.add(self.vtable.layout.size());
}
}
self.cursor += amt;
}
fn take_vec(&mut self) -> Vec<T> {
// First, if necessary, convert remaining values within `self.alloc`
// back into `self.rust_storage`. This is necessary when a lift
// operation is available meaning that the representation of `T` is
// different in the canonical ABI.
//
// Note that when `lift` is provided then when this original
// `AbiBuffer` was created it moved ownership of all values from the
// original vector into the `alloc` value. This is the reverse
// operation, moving all the values back into the vector.
if let Some(lift) = self.vtable.lift {
let (mut ptr, mut len) = self.abi_ptr_and_len();
// SAFETY: this should be safe as `lift` is operating on values that
// were initialized with a previous `lower`, and the pointer
// arithmetic here should all be in-bounds.
unsafe {
for dst in self.rust_storage[self.cursor..].iter_mut() {
dst.write(lift(ptr.cast_mut()));
ptr = ptr.add(self.vtable.layout.size());
len -= 1;
}
assert_eq!(len, 0);
}
}
// Next extract the rust storage and zero out this struct's fields.
// This is also the location where a "shift" happens to remove items
// from the beginning of the returned vector as those have already been
// transferred somewhere else.
let mut storage = mem::take(&mut self.rust_storage);
storage.drain(..self.cursor);
self.cursor = 0;
self.alloc = None;
// SAFETY: we're casting `Vec<MaybeUninit<T>>` here to `Vec<T>`. The
// elements were either always initialized (`lift` is `None`) or we just
// re-initialized them above from `self.alloc`.
unsafe {
let ptr = storage.as_mut_ptr();
let len = storage.len();
let cap = storage.capacity();
mem::forget(storage);
Vec::<T>::from_raw_parts(ptr.cast(), len, cap)
}
}
}
impl<T> Drop for AbiBuffer<T> {
fn drop(&mut self) {
let _ = self.take_vec();
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::vec;
extern "C" fn cancel(_: u32) -> u32 {
todo!()
}
extern "C" fn drop(_: u32) {
todo!()
}
extern "C" fn new() -> u64 {
todo!()
}
extern "C" fn start_read(_: u32, _: *mut u8, _: usize) -> u32 {
todo!()
}
extern "C" fn start_write(_: u32, _: *const u8, _: usize) -> u32 {
todo!()
}
static BLANK: StreamVtable<u8> = StreamVtable {
cancel_read: cancel,
cancel_write: cancel,
drop_readable: drop,
drop_writable: drop,
dealloc_lists: None,
lift: None,
lower: None,
layout: unsafe { Layout::from_size_align_unchecked(1, 1) },
new,
start_read,
start_write,
};
#[test]
fn blank_advance_to_end() {
let mut buffer = AbiBuffer::new(vec![1, 2, 3, 4], &BLANK);
assert_eq!(buffer.remaining(), 4);
buffer.advance(1);
assert_eq!(buffer.remaining(), 3);
buffer.advance(2);
assert_eq!(buffer.remaining(), 1);
buffer.advance(1);
assert_eq!(buffer.remaining(), 0);
assert_eq!(buffer.into_vec(), []);
}
#[test]
fn blank_advance_partial() {
let buffer = AbiBuffer::new(vec![1, 2, 3, 4], &BLANK);
assert_eq!(buffer.into_vec(), [1, 2, 3, 4]);
let mut buffer = AbiBuffer::new(vec![1, 2, 3, 4], &BLANK);
buffer.advance(1);
assert_eq!(buffer.into_vec(), [2, 3, 4]);
let mut buffer = AbiBuffer::new(vec![1, 2, 3, 4], &BLANK);
buffer.advance(1);
buffer.advance(2);
assert_eq!(buffer.into_vec(), [4]);
}
#[test]
fn blank_ptr_eq() {
let mut buf = vec![1, 2, 3, 4];
let ptr = buf.as_mut_ptr();
let mut buffer = AbiBuffer::new(buf, &BLANK);
let (a, b) = buffer.abi_ptr_and_len();
assert_eq!(a, ptr);
assert_eq!(b, 4);
unsafe {
assert_eq!(std::slice::from_raw_parts(a, b), [1, 2, 3, 4]);
}
buffer.advance(1);
let (a, b) = buffer.abi_ptr_and_len();
assert_eq!(a, ptr.wrapping_add(1));
assert_eq!(b, 3);
unsafe {
assert_eq!(std::slice::from_raw_parts(a, b), [2, 3, 4]);
}
buffer.advance(2);
let (a, b) = buffer.abi_ptr_and_len();
assert_eq!(a, ptr.wrapping_add(3));
assert_eq!(b, 1);
unsafe {
assert_eq!(std::slice::from_raw_parts(a, b), [4]);
}
let ret = buffer.into_vec();
assert_eq!(ret, [4]);
assert_eq!(ret.as_ptr(), ptr);
}
#[derive(PartialEq, Eq, Debug)]
struct B(u8);
static OP: StreamVtable<B> = StreamVtable {
cancel_read: cancel,
cancel_write: cancel,
drop_readable: drop,
drop_writable: drop,
dealloc_lists: Some(|_ptr| {}),
lift: Some(|ptr| unsafe { B(*ptr - 1) }),
lower: Some(|b, ptr| unsafe {
*ptr = b.0 + 1;
}),
layout: unsafe { Layout::from_size_align_unchecked(1, 1) },
new,
start_read,
start_write,
};
#[test]
fn op_advance_to_end() {
let mut buffer = AbiBuffer::new(vec![B(1), B(2), B(3), B(4)], &OP);
assert_eq!(buffer.remaining(), 4);
buffer.advance(1);
assert_eq!(buffer.remaining(), 3);
buffer.advance(2);
assert_eq!(buffer.remaining(), 1);
buffer.advance(1);
assert_eq!(buffer.remaining(), 0);
assert_eq!(buffer.into_vec(), []);
}
#[test]
fn op_advance_partial() {
let buffer = AbiBuffer::new(vec![B(1), B(2), B(3), B(4)], &OP);
assert_eq!(buffer.into_vec(), [B(1), B(2), B(3), B(4)]);
let mut buffer = AbiBuffer::new(vec![B(1), B(2), B(3), B(4)], &OP);
buffer.advance(1);
assert_eq!(buffer.into_vec(), [B(2), B(3), B(4)]);
let mut buffer = AbiBuffer::new(vec![B(1), B(2), B(3), B(4)], &OP);
buffer.advance(1);
buffer.advance(2);
assert_eq!(buffer.into_vec(), [B(4)]);
}
#[test]
fn op_ptrs() {
let mut buf = vec![B(1), B(2), B(3), B(4)];
let ptr = buf.as_mut_ptr().cast::<u8>();
let mut buffer = AbiBuffer::new(buf, &OP);
let (a, b) = buffer.abi_ptr_and_len();
let base = a;
assert_ne!(a, ptr);
assert_eq!(b, 4);
unsafe {
assert_eq!(std::slice::from_raw_parts(a, b), [2, 3, 4, 5]);
}
buffer.advance(1);
let (a, b) = buffer.abi_ptr_and_len();
assert_ne!(a, ptr.wrapping_add(1));
assert_eq!(a, base.wrapping_add(1));
assert_eq!(b, 3);
unsafe {
assert_eq!(std::slice::from_raw_parts(a, b), [3, 4, 5]);
}
buffer.advance(2);
let (a, b) = buffer.abi_ptr_and_len();
assert_ne!(a, ptr.wrapping_add(3));
assert_eq!(a, base.wrapping_add(3));
assert_eq!(b, 1);
unsafe {
assert_eq!(std::slice::from_raw_parts(a, b), [5]);
}
let ret = buffer.into_vec();
assert_eq!(ret, [B(4)]);
assert_eq!(ret.as_ptr(), ptr.cast());
}
#[test]
fn dealloc_lists() {
static DEALLOCS: AtomicUsize = AtomicUsize::new(0);
static OP: StreamVtable<B> = StreamVtable {
cancel_read: cancel,
cancel_write: cancel,
drop_readable: drop,
drop_writable: drop,
dealloc_lists: Some(|ptr| {
let prev = DEALLOCS.fetch_add(1, Relaxed);
assert_eq!(unsafe { usize::from(*ptr) }, prev + 1);
}),
lift: Some(|ptr| unsafe { B(*ptr) }),
lower: Some(|b, ptr| unsafe {
*ptr = b.0;
}),
layout: unsafe { Layout::from_size_align_unchecked(1, 1) },
new,
start_read,
start_write,
};
assert_eq!(DEALLOCS.load(Relaxed), 0);
let buf = vec![B(1), B(2), B(3), B(4)];
let mut buffer = AbiBuffer::new(buf, &OP);
assert_eq!(DEALLOCS.load(Relaxed), 0);
buffer.abi_ptr_and_len();
assert_eq!(DEALLOCS.load(Relaxed), 0);
buffer.advance(1);
assert_eq!(DEALLOCS.load(Relaxed), 1);
buffer.abi_ptr_and_len();
assert_eq!(DEALLOCS.load(Relaxed), 1);
buffer.advance(2);
assert_eq!(DEALLOCS.load(Relaxed), 3);
buffer.abi_ptr_and_len();
assert_eq!(DEALLOCS.load(Relaxed), 3);
buffer.into_vec();
assert_eq!(DEALLOCS.load(Relaxed), 3);
}
}

View File

@@ -0,0 +1,112 @@
//! Definition of the "C ABI" of how imported functions interact with exported
//! tasks.
//!
//! Ok, this crate is written in Rust, so why in the world does this exist? This
//! comment is intended to explain the rationale, but the tl;dr is that we want
//! this to work:
//!
//! * Within a single component ...
//! * One rust crate uses `wit-bindgen 0.A.0` to generate an exported function.
//! * One rust crate uses `wit-bindgen 0.B.0` to bind an imported function.
//! * The two crates are connected in the application with
//! `std::future::Future`.
//!
//! Without this module this situation won't work because 0.A.0 has no
//! knowledge of 0.B.0 meaning that 0.B.0 has no means of inserting a `waitable`
//! into the `waitable-set` managed by 0.A.0's export.
//!
//! To solve this problem the long-term intention is that something will live
//! in `wasi-libc` itself, but in the meantime it's living "somewhere" within
//! `wit-bindgen 0.*.0`. Specifically all `wit-bindgen` versions will
//! reference, via C linkage, a single function which is used to manipulate a
//! single pointer in linear memory. This pointer is a `wasip3_task` structure
//! which has all the various fields to use it.
//!
//! The `wasip3_task_set` symbol is itself defined in C inside of the
//! `src/wit_bindgen_cabi.c` file at this time, specifically because it's
//! annotated with `__weak__` meaning that any definition of it suffices. This
//! isn't possible to define in stable Rust (specifically `__weak__`).
//!
//! Once `wasip3_task_set` is defined everything then operates via indirection,
//! aka based off the returned pointer. The intention is that exported functions
//! will set this (it's sort of like an executor) and then imported functions
//! will all use this as the source of registering waitables. In the end that
//! means that it's possible to share types with `std::future::Future` that
//! are backed at the ABI level with this "channel".
//!
//! In the future it's hoped that this can move into `wasi-libc` itself, or if
//! `wasi-libc` provides something else that would be prioritized over this.
//! For now this is basically an affordance for the fact that we're going to be
//! frequently releasing new major versions of `wit-bindgen` and we don't want to
//! force applications to all be using the exact same version of the bindings
//! generator and async bindings.
//!
//! Additionally for now this file is serving as documentation of this
//! interface.
use core::ffi::c_void;
#[cfg(target_family = "wasm")]
extern "C" {
/// Sets the global task pointer to `ptr` provided. Returns the previous
/// value.
///
/// This function acts as a dual getter and setter. To get the
/// current task pointer a dummy `ptr` (e.g. NULL) can be swapped in, and the
/// returned pointer should be swapped back in once you're done working with it.
/// When setting the current task pointer it's recommended to call this and then
/// call it again with the previous value when the task's work is done.
///
/// Executors need to ensure that the `ptr` passed in lives for
/// the entire lifetime of the component model task.
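///
/// A hedged usage sketch; the `task` value below is a hypothetical
/// executor-owned `wasip3_task`:
///
/// ```ignore
/// unsafe {
///     // Install our task pointer, remembering whatever was there before.
///     let prev = wasip3_task_set(&mut task as *mut wasip3_task);
///     // ... run the exported task; imports may register waitables here ...
///     // Restore the previous pointer once this task's work is done.
///     wasip3_task_set(prev);
/// }
/// ```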
pub fn wasip3_task_set(ptr: *mut wasip3_task) -> *mut wasip3_task;
}
#[cfg(not(target_family = "wasm"))]
pub unsafe extern "C" fn wasip3_task_set(ptr: *mut wasip3_task) -> *mut wasip3_task {
let _ = ptr;
unreachable!();
}
/// The first version of `wasip3_task` which implies the existence of the
/// fields `ptr`, `waitable_register`, and `waitable_unregister`.
pub const WASIP3_TASK_V1: u32 = 1;
/// Indirect "vtable" used to connect imported functions and exported tasks.
/// Executors (e.g. exported functions) define and manage this while imports
/// use it.
#[repr(C)]
pub struct wasip3_task {
/// Currently `WASIP3_TASK_V1`. Indicates what fields are present next
/// depending on the version here.
pub version: u32,
/// Private pointer owned by the `wasip3_task` itself, passed to callbacks
/// below as the first argument.
pub ptr: *mut c_void,
/// Register a new `waitable` for this exported task.
///
/// This exported task will add `waitable` to its `waitable-set`. When it
/// becomes ready then `callback` will be invoked with the ready code as
/// well as the `callback_ptr` provided.
///
/// If `waitable` was previously registered with this task then the
/// previous `callback_ptr` is returned. Otherwise `NULL` is returned.
///
/// It's the caller's responsibility to ensure that `callback_ptr` is valid
/// until `callback` is invoked, `waitable_unregister` is invoked, or
/// `waitable_register` is called again to overwrite the value.
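///
/// A hedged sketch of how an import-side binding might reach this field
/// through the documented swap pattern of `wasip3_task_set`; the `waitable`,
/// `on_ready`, and `state` values are hypothetical:
///
/// ```ignore
/// unsafe {
///     // Temporarily take the current task pointer to reach its vtable.
///     let task = wasip3_task_set(core::ptr::null_mut());
///     ((*task).waitable_register)((*task).ptr, waitable, on_ready, state);
///     // Put the task pointer back for the next caller.
///     wasip3_task_set(task);
/// }
/// ```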
pub waitable_register: unsafe extern "C" fn(
ptr: *mut c_void,
waitable: u32,
callback: unsafe extern "C" fn(callback_ptr: *mut c_void, code: u32),
callback_ptr: *mut c_void,
) -> *mut c_void,
/// Removes the `waitable` from this task's `waitable-set`.
///
/// Returns the `callback_ptr` passed to `waitable_register` if present, or
/// `NULL` if it's not present.
pub waitable_unregister: unsafe extern "C" fn(ptr: *mut c_void, waitable: u32) -> *mut c_void,
}

View File

@@ -0,0 +1,94 @@
//! Raw bindings to `error-context` in the canonical ABI.
use std::fmt::{self, Debug, Display};
use std::ptr;
use std::string::String;
/// Represents the Component Model `error-context` type.
#[derive(PartialEq, Eq)]
pub struct ErrorContext {
handle: u32,
}
impl ErrorContext {
/// Call the `error-context.new` canonical built-in function.
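///
/// A hedged sketch; note that the host ultimately controls the exact
/// contents of the debug message that can be read back:
///
/// ```ignore
/// let err = ErrorContext::new("something went wrong");
/// println!("{}", err.debug_message());
/// ```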
pub fn new(debug_message: &str) -> ErrorContext {
unsafe {
let handle = new(debug_message.as_ptr(), debug_message.len());
ErrorContext::from_handle(handle)
}
}
#[doc(hidden)]
pub fn from_handle(handle: u32) -> Self {
Self { handle }
}
#[doc(hidden)]
pub fn handle(&self) -> u32 {
self.handle
}
/// Extract the debug message from a given [`ErrorContext`]
pub fn debug_message(&self) -> String {
unsafe {
let mut ret = RetPtr {
ptr: ptr::null_mut(),
len: 0,
};
debug_message(self.handle, &mut ret);
String::from_raw_parts(ret.ptr, ret.len, ret.len)
}
}
}
impl Debug for ErrorContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ErrorContext")
.field("debug_message", &self.debug_message())
.finish()
}
}
impl Display for ErrorContext {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Display::fmt(&self.debug_message(), f)
}
}
impl std::error::Error for ErrorContext {}
impl Drop for ErrorContext {
fn drop(&mut self) {
#[cfg(target_arch = "wasm32")]
unsafe {
drop(self.handle)
}
}
}
#[repr(C)]
struct RetPtr {
ptr: *mut u8,
len: usize,
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn new(_: *const u8, _: usize) -> u32 {
unreachable!()
}
#[cfg(not(target_arch = "wasm32"))]
fn debug_message(_: u32, _: &mut RetPtr) {
unreachable!()
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[error-context-new-utf8]"]
fn new(_: *const u8, _: usize) -> u32;
#[link_name = "[error-context-drop]"]
fn drop(_: u32);
#[link_name = "[error-context-debug-message-utf8]"]
fn debug_message(_: u32, _: &mut RetPtr);
}

View File

@@ -0,0 +1,734 @@
//! Runtime support for `future<T>` in the component model.
//!
//! There are a number of tricky concerns to all balance when implementing
//! bindings to `future<T>`, specifically with how it interacts with Rust. This
//! will attempt to go over some of the high-level details of the implementation
//! here.
//!
//! ## Leak safety
//!
//! It's safe to leak any value at any time currently in Rust. In other words
//! Rust doesn't have linear types (yet). Typically this isn't really a problem
//! but the component model intrinsics we're working with here operate by being
//! given a pointer and then at some point in the future the pointer may be
//! read. This means that it's our responsibility to keep this pointer alive and
//! valid for the entire duration of an asynchronous operation.
//!
//! Chiefly this means that borrowed values are a no-no in this module. For
//! example if you were to send a `&[u8]` as an implementation of
//! `future<list<u8>>` that would not be sound. For example:
//!
//! * The future send operation is started, recording an address of `&[u8]`.
//! * The future is then leaked.
//! * According to rustc, later in code the original `&[u8]` is then no longer
//! borrowed.
//! * The original source of `&[u8]` could then be deallocated.
//! * Then the component model actually reads the pointer that it was given.
//!
//! This constraint effectively means that all types flowing in-and-out of
//! futures, streams, and async APIs are all "owned values", notably no
//! lifetimes. This requires, for example, that `future<list<u8>>` operates on
//! `Vec<u8>`.
//!
//! This is in stark contrast to bindings generated for `list<u8>` otherwise,
//! however, where for example a synchronous import with a `list<u8>` argument
//! would be bound with a `&[u8]` argument. Until Rust has some form of linear
//! types, however, it's not possible to loosen this restriction soundly because
//! it's generally not safe to leak an active I/O operation. This restriction is
//! similar to why it's so difficult to bind `io_uring` in safe Rust, which
//! operates similarly to the component model where pointers are submitted and
//! read in the future after the original call for submission returns.
//!
//! ## Lowering Owned Values
//!
//! According to the above everything with futures/streams operates on owned
//! values already, but this also affects precisely how lifting and lowering is
//! performed. In general any active asynchronous operation could be cancelled
//! at any time, meaning we have to deal with situations such as:
//!
//! * A `write` hasn't even started yet.
//! * A `write` was started and then cancelled.
//! * A `write` was started and then the other end dropped the channel.
//! * A `write` was started and then the other end received the value.
//!
//! In all of these situations regardless of the structure of `T` we can't leak
//! memory. The `future.write` intrinsic, however, takes no ownership of the
//! memory involved which means that we're still responsible for cleaning up
//! lists. It does take ownership, however, of `own<T>` handles and other
//! resources.
//!
//! The way that this is solved for futures/streams is to lean further into
//! processing owned values. Namely lowering a `T` takes `T`-by-value, not `&T`.
//! This means that lowering operates similarly to return values of exported
//! functions, not parameters to imported functions. By lowering an owned value
//! of `T` this preserves a nice property where the lowered value has exclusive
//! ownership of all of its pointers/resources/etc. Lowering `&T` may require a
//! "cleanup list" for example which we avoid here entirely.
//!
//! This then makes the second and third cases above, getting a value back after
//! lowering, much easier. Namely, re-acquisition of a value is a simple `lift`
//! operation, as if we received a value on the channel.
//!
//! ## Inefficiencies
//!
//! The above requirements generally mean that this is not a hyper-efficient
//! implementation. All writes and reads, for example, start out by allocating
//! memory on the heap to be owned by the asynchronous operation. Writing a
//! `list<u8>` to a future passes ownership of `Vec<u8>` but in theory doesn't
//! actually require relinquishing ownership of the vector. Furthermore
//! there's no way to re-acquire a `T` after it has been sent, but all of `T` is
//! still valid except for `own<U>` resources.
//!
//! That's all to say that this implementation can probably still be improved
//! upon, but doing so is thought to be pretty nontrivial at this time. It
//! should be noted though that there are other high-level inefficiencies with
//! WIT unrelated to this module. For example `list<T>` is not always
//! represented the same in Rust as it is in the canonical ABI. That means that
//! sending `list<T>` into a future might require copying the entire list and
//! changing its layout. Currently this is par-for-the-course with bindings.
//!
//! ## Linear (exactly once) Writes
//!
//! The component model requires that a writable end of a future must be written
//! to before closing, otherwise the drop operation traps. Ideally usage of
//! this API shouldn't result in traps so this is modeled in the Rust-level API
//! to prevent this trap from occurring. Rust does not support linear types
//! (types that must be used exactly once), instead it only has affine types
//! (types which must be used at most once), meaning that this requires some
//! runtime support.
//!
//! Specifically the `FutureWriter` structure stores two auxiliary Rust-specific
//! pieces of information:
//!
//! * A `should_write_default_value` boolean - if `true` on destruction then a
//! value has not yet been written and something must be written.
//! * A `default: fn() -> T` constructor to lazily create the default value to
//! be sent in this situation.
//!
//! This `default` field is provided by the user when the future is initially
//! created. Additionally during `Drop` a new Rust-level task will be spawned to
//! perform the write in the background. That'll keep the component-level task
//! alive until that write completes but otherwise shouldn't hinder anything
//! else.
use {
crate::rt::async_support::waitable::{WaitableOp, WaitableOperation},
crate::rt::async_support::ReturnCode,
crate::rt::Cleanup,
std::{
alloc::Layout,
fmt,
future::{Future, IntoFuture},
marker,
pin::Pin,
ptr,
sync::atomic::{AtomicU32, Ordering::Relaxed},
task::{Context, Poll},
},
};
/// Function table used for [`FutureWriter`] and [`FutureReader`]
///
/// Instances of this table are generated by `wit_bindgen::generate!`. This is
/// not a trait to enable different `FutureVtable<()>` instances to exist, for
/// example, through different calls to `wit_bindgen::generate!`.
///
/// It's not intended that any user implements this vtable, instead it's
/// intended to only be auto-generated.
#[doc(hidden)]
pub struct FutureVtable<T> {
/// The Canonical ABI layout of `T` in-memory.
pub layout: Layout,
/// A callback to consume a value of `T` and lower it to the canonical ABI
/// pointed to by `dst`.
///
/// The `dst` pointer should have `self.layout`. This is used to convert
/// in-memory representations in Rust to their canonical representations in
/// the component model.
pub lower: unsafe fn(value: T, dst: *mut u8),
/// A callback to deallocate any lists within the canonical ABI value `dst`
/// provided.
///
/// This is used when a value is successfully sent to another component. In
/// such a situation it may be possible that the canonical lowering of `T`
/// has lists that are still owned by this component and must be
/// deallocated. This is akin to a `post-return` callback for returns of
/// exported functions.
pub dealloc_lists: unsafe fn(dst: *mut u8),
/// A callback to lift a value of `T` from the canonical ABI representation
/// provided.
pub lift: unsafe fn(dst: *mut u8) -> T,
/// The raw `future.write` intrinsic.
pub start_write: unsafe extern "C" fn(future: u32, val: *const u8) -> u32,
/// The raw `future.read` intrinsic.
pub start_read: unsafe extern "C" fn(future: u32, val: *mut u8) -> u32,
/// The raw `future.cancel-write` intrinsic.
pub cancel_write: unsafe extern "C" fn(future: u32) -> u32,
/// The raw `future.cancel-read` intrinsic.
pub cancel_read: unsafe extern "C" fn(future: u32) -> u32,
/// The raw `future.drop-writable` intrinsic.
pub drop_writable: unsafe extern "C" fn(future: u32),
/// The raw `future.drop-readable` intrinsic.
pub drop_readable: unsafe extern "C" fn(future: u32),
/// The raw `future.new` intrinsic.
pub new: unsafe extern "C" fn() -> u64,
}
/// Helper function to create a new read/write pair for a component model
/// future.
///
/// # Unsafety
///
/// This function is unsafe as it requires the functions within `vtable` to
/// correctly uphold the contracts of the component model.
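///
/// A hedged sketch, where `VTABLE` stands in for a
/// `wit_bindgen::generate!`-produced `FutureVtable<T>` and `T: Default`:
///
/// ```ignore
/// let (writer, reader) = unsafe { future_new(Default::default, &VTABLE) };
/// // `writer.write(value)` sends a value; `reader.await` receives it.
/// ```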
pub unsafe fn future_new<T>(
default: fn() -> T,
vtable: &'static FutureVtable<T>,
) -> (FutureWriter<T>, FutureReader<T>) {
unsafe {
let handles = (vtable.new)();
let reader = handles as u32;
let writer = (handles >> 32) as u32;
rtdebug!("future.new() = [{writer}, {reader}]");
(
FutureWriter::new(writer, default, vtable),
FutureReader::new(reader, vtable),
)
}
}
/// Represents the writable end of a Component Model `future`.
///
/// A [`FutureWriter`] can be used to send a single value of `T` to the other
/// end of a `future`. In a sense this is similar to a oneshot channel in Rust.
pub struct FutureWriter<T: 'static> {
handle: u32,
vtable: &'static FutureVtable<T>,
/// Whether or not a value should be written during `drop`.
///
/// This is set to `false` when a value is successfully written or when a
/// write attempt witnesses the other end as having been dropped.
///
/// Note that this is set to `true` on construction to ensure that only
/// locations which actually witness a completed write (or a dropped reader)
/// set it to `false`.
should_write_default_value: bool,
/// Constructor for the default value to write during `drop`, should one
/// need to be written.
default: fn() -> T,
}
impl<T> FutureWriter<T> {
/// Helper function to wrap a handle/vtable into a `FutureWriter`.
///
/// # Unsafety
///
/// This function is unsafe as it requires the functions within `vtable` to
/// correctly uphold the contracts of the component model.
#[doc(hidden)]
pub unsafe fn new(handle: u32, default: fn() -> T, vtable: &'static FutureVtable<T>) -> Self {
Self {
handle,
default,
should_write_default_value: true,
vtable,
}
}
/// Write the specified `value` to this `future`.
///
/// This method is equivalent to an `async fn` which sends the `value` into
/// this future. The asynchronous operation acts as a rendezvous where the
/// operation does not complete until the other side has successfully
/// received the value.
///
/// # Return Value
///
/// The returned [`FutureWrite`] is a future that can be `.await`'d. The
/// return value of this future is:
///
/// * `Ok(())` - the `value` was sent and received. The `self` value was
/// consumed along the way and will no longer be accessible.
/// * `Err(FutureWriteError { value })` - an attempt was made to send
/// `value` but the other half of this [`FutureWriter`] was dropped before
/// the value was received. This consumes `self` because the channel is
/// now dropped, but `value` is returned in case the caller wants to reuse
/// it.
///
/// # Cancellation
///
/// The returned future can be cancelled normally via `drop` which means
/// that the `value` provided here, along with this `FutureWriter` itself,
/// will be lost. There is also [`FutureWrite::cancel`] which can be used to
/// possibly re-acquire `value` and `self` if the operation was cancelled.
/// In such a situation the operation can be retried at a future date.
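///
/// # Example
///
/// A hedged sketch (assuming `writer` and `value` are in scope):
///
/// ```ignore
/// match writer.write(value).await {
///     Ok(()) => { /* the reader received the value */ }
///     Err(FutureWriteError { value }) => {
///         // The reader was dropped; `value` is handed back for reuse.
///         drop(value);
///     }
/// }
/// ```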
pub fn write(self, value: T) -> FutureWrite<T> {
FutureWrite {
op: WaitableOperation::new((self, value)),
}
}
}
impl<T> fmt::Debug for FutureWriter<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FutureWriter")
.field("handle", &self.handle)
.finish()
}
}
impl<T> Drop for FutureWriter<T> {
fn drop(&mut self) {
// If a value has not yet been written into this writer then that must
// be done now. Perform a "clone" of `self` by moving our data into a
// subtask, but ensure that `should_write_default_value` is set to
// `false` to avoid infinite loops by accident. Once the task is spawned
// we're done and the subtask's destructor of the closed-over
// `FutureWriter` will be responsible for performing the
// `drop-writable` call below.
//
// Note, though, that if `should_write_default_value` is `false` then a
// write has already happened and we can go ahead and just synchronously
// drop this writer as we would any other handle.
if self.should_write_default_value {
let clone = FutureWriter {
handle: self.handle,
default: self.default,
should_write_default_value: false,
vtable: self.vtable,
};
crate::rt::async_support::spawn(async move {
let value = (clone.default)();
let _ = clone.write(value).await;
});
} else {
unsafe {
rtdebug!("future.drop-writable({})", self.handle);
(self.vtable.drop_writable)(self.handle);
}
}
}
}
/// Represents a write operation which may be cancelled prior to completion.
///
/// This is returned by [`FutureWriter::write`].
pub struct FutureWrite<T: 'static> {
op: WaitableOperation<FutureWriteOp<T>>,
}
struct FutureWriteOp<T>(marker::PhantomData<T>);
enum WriteComplete<T> {
Written,
Dropped(T),
Cancelled(T),
}
unsafe impl<T> WaitableOp for FutureWriteOp<T>
where
T: 'static,
{
type Start = (FutureWriter<T>, T);
type InProgress = (FutureWriter<T>, Option<Cleanup>);
type Result = (WriteComplete<T>, FutureWriter<T>);
type Cancel = FutureWriteCancel<T>;
fn start((writer, value): Self::Start) -> (u32, Self::InProgress) {
// TODO: it should be safe to store the lower-destination in
// `WaitableOperation` using `Pin` memory and such, but that would
// require some type-level trickery to get a correctly-sized value
// plumbed all the way to here. For now just dynamically allocate it and
// leave the optimization of leaving out this dynamic allocation to the
// future.
//
// In lieu of that a dedicated location on the heap is created for the
// lowering, and then `value`, as an owned value, is lowered into this
// pointer to initialize it.
let (ptr, cleanup) = Cleanup::new(writer.vtable.layout);
// SAFETY: `ptr` is allocated with `vtable.layout` and should be
// safe to use here.
let code = unsafe {
(writer.vtable.lower)(value, ptr);
(writer.vtable.start_write)(writer.handle, ptr)
};
rtdebug!("future.write({}, {ptr:?}) = {code:#x}", writer.handle);
(code, (writer, cleanup))
}
fn start_cancelled((writer, value): Self::Start) -> Self::Cancel {
FutureWriteCancel::Cancelled(value, writer)
}
fn in_progress_update(
(mut writer, cleanup): Self::InProgress,
code: u32,
) -> Result<Self::Result, Self::InProgress> {
let ptr = cleanup
.as_ref()
.map(|c| c.ptr.as_ptr())
.unwrap_or(ptr::null_mut());
match code {
super::BLOCKED => Err((writer, cleanup)),
// The other end has dropped its end.
//
// The value was not received by the other end so `ptr` still has
// all of its resources intact. Use `lift` to construct a new
// instance of `T` which takes ownership of pointers and resources
// and such. The allocation of `ptr` is then cleaned up naturally
// when `cleanup` goes out of scope.
super::DROPPED | super::CANCELLED => {
// SAFETY: we're the ones managing `ptr` so we know it's safe to
// pass here.
let value = unsafe { (writer.vtable.lift)(ptr) };
let status = if code == super::DROPPED {
// The reader has been witnessed as dropped, meaning that
// `writer` is going to be destroyed soon, as this return
// value propagates up the stack. There's no need to write
// the default value, so set this to `false`.
writer.should_write_default_value = false;
WriteComplete::Dropped(value)
} else {
WriteComplete::Cancelled(value)
};
Ok((status, writer))
}
// This write has completed.
//
// Here we need to clean up our allocations. The `ptr` exclusively
// owns all of the value being sent and we notably need to cleanup
// the transitive list allocations present in this pointer. Use
// `dealloc_lists` for that (effectively a post-return lookalike).
//
// Afterwards the `cleanup` itself is naturally dropped and cleaned
// up.
super::COMPLETED => {
// A value was written, so no need to write the default value.
writer.should_write_default_value = false;
// SAFETY: we're the ones managing `ptr` so we know it's safe to
// pass here.
unsafe {
(writer.vtable.dealloc_lists)(ptr);
}
Ok((WriteComplete::Written, writer))
}
other => unreachable!("unexpected code {other:?}"),
}
}
fn in_progress_waitable((writer, _): &Self::InProgress) -> u32 {
writer.handle
}
fn in_progress_cancel((writer, _): &Self::InProgress) -> u32 {
// SAFETY: we're managing `writer` and all the various operational bits,
// so this relies on `WaitableOperation` being safe.
let code = unsafe { (writer.vtable.cancel_write)(writer.handle) };
rtdebug!("future.cancel-write({}) = {code:#x}", writer.handle);
code
}
fn result_into_cancel((result, writer): Self::Result) -> Self::Cancel {
match result {
// The value was actually sent, meaning we can't yield back the
// future nor the value.
WriteComplete::Written => FutureWriteCancel::AlreadySent,
// The value was not sent because the other end either hung up or we
// successfully cancelled. In both cases return back the value here
// with the writer.
WriteComplete::Dropped(val) => FutureWriteCancel::Dropped(val),
WriteComplete::Cancelled(val) => FutureWriteCancel::Cancelled(val, writer),
}
}
}
impl<T: 'static> Future for FutureWrite<T> {
type Output = Result<(), FutureWriteError<T>>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.pin_project()
.poll_complete(cx)
.map(|(result, _writer)| match result {
WriteComplete::Written => Ok(()),
WriteComplete::Dropped(value) | WriteComplete::Cancelled(value) => {
Err(FutureWriteError { value })
}
})
}
}
impl<T: 'static> FutureWrite<T> {
fn pin_project(self: Pin<&mut Self>) -> Pin<&mut WaitableOperation<FutureWriteOp<T>>> {
// SAFETY: we've chosen that when `Self` is pinned that it translates to
// always pinning the inner field, so that's codified here.
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().op) }
}
/// Cancel this write if it hasn't already completed.
///
/// This method can be used to cancel a write-in-progress and re-acquire
/// the writer and the value being sent. Note that the write operation may
/// succeed racily or the other end may also drop racily, and these
/// outcomes are reflected in the returned value here.
///
/// # Panics
///
/// Panics if the operation has already been completed via `Future::poll`,
/// or if this method is called twice.
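///
/// A hedged sketch of handling each cancellation outcome (assuming `write`
/// is a pinned [`FutureWrite`]):
///
/// ```ignore
/// match write.as_mut().cancel() {
///     FutureWriteCancel::AlreadySent => { /* nothing to recover */ }
///     FutureWriteCancel::Dropped(value) => { /* reader gone; value returned */ }
///     FutureWriteCancel::Cancelled(value, writer) => {
///         // The write never happened; it can be retried later.
///         let _retry = writer.write(value);
///     }
/// }
/// ```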
pub fn cancel(self: Pin<&mut Self>) -> FutureWriteCancel<T> {
self.pin_project().cancel()
}
}
/// Error type yielded by [`FutureWrite`] when a value could not be delivered
/// to the reading end of a future.
pub struct FutureWriteError<T> {
/// The value that could not be sent.
pub value: T,
}
impl<T> fmt::Debug for FutureWriteError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FutureWriteError").finish_non_exhaustive()
}
}
impl<T> fmt::Display for FutureWriteError<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"read end dropped".fmt(f)
}
}
impl<T> std::error::Error for FutureWriteError<T> {}
/// Result of [`FutureWrite::cancel`].
#[derive(Debug)]
pub enum FutureWriteCancel<T: 'static> {
/// The cancel request raced with the receipt of the sent value, and the
/// value was actually sent. Neither the value nor the writer is made
/// available here as both are gone.
AlreadySent,
/// The other end was dropped before cancellation happened.
///
/// In this case the original value is returned back to the caller but the
/// writer itself is no longer accessible as it's no longer usable.
Dropped(T),
/// The pending write was successfully cancelled and the value being written
/// is returned along with the writer to resume again in the future if
/// necessary.
Cancelled(T, FutureWriter<T>),
}
/// Represents the readable end of a Component Model `future<T>`.
pub struct FutureReader<T: 'static> {
handle: AtomicU32,
vtable: &'static FutureVtable<T>,
}
impl<T> fmt::Debug for FutureReader<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FutureReader")
.field("handle", &self.handle)
.finish()
}
}
impl<T> FutureReader<T> {
#[doc(hidden)]
pub fn new(handle: u32, vtable: &'static FutureVtable<T>) -> Self {
Self {
handle: AtomicU32::new(handle),
vtable,
}
}
#[doc(hidden)]
pub fn take_handle(&self) -> u32 {
let ret = self.opt_handle().unwrap();
self.handle.store(u32::MAX, Relaxed);
ret
}
fn handle(&self) -> u32 {
self.opt_handle().unwrap()
}
fn opt_handle(&self) -> Option<u32> {
match self.handle.load(Relaxed) {
u32::MAX => None,
other => Some(other),
}
}
}
impl<T> IntoFuture for FutureReader<T> {
type Output = T;
type IntoFuture = FutureRead<T>;
/// Convert this object into a `Future` which will resolve when a value is
/// written to the writable end of this `future`.
fn into_future(self) -> Self::IntoFuture {
FutureRead {
op: WaitableOperation::new(self),
}
}
}
impl<T> Drop for FutureReader<T> {
fn drop(&mut self) {
let Some(handle) = self.opt_handle() else {
return;
};
unsafe {
rtdebug!("future.drop-readable({handle})");
(self.vtable.drop_readable)(handle);
}
}
}
/// Represents a read operation which may be cancelled prior to completion.
///
/// This represents a read operation on a [`FutureReader`] and is created via
/// `IntoFuture`.
pub struct FutureRead<T: 'static> {
op: WaitableOperation<FutureReadOp<T>>,
}
struct FutureReadOp<T>(marker::PhantomData<T>);
enum ReadComplete<T> {
Value(T),
Cancelled,
}
unsafe impl<T> WaitableOp for FutureReadOp<T>
where
T: 'static,
{
type Start = FutureReader<T>;
type InProgress = (FutureReader<T>, Option<Cleanup>);
type Result = (ReadComplete<T>, FutureReader<T>);
type Cancel = Result<T, FutureReader<T>>;
fn start(reader: Self::Start) -> (u32, Self::InProgress) {
let (ptr, cleanup) = Cleanup::new(reader.vtable.layout);
// SAFETY: `ptr` is allocated with `vtable.layout` and should be
// safe to use here. Its lifetime for the async operation is hinged on
// `WaitableOperation` being safe.
let code = unsafe { (reader.vtable.start_read)(reader.handle(), ptr) };
rtdebug!("future.read({}, {ptr:?}) = {code:#x}", reader.handle());
(code, (reader, cleanup))
}
fn start_cancelled(state: Self::Start) -> Self::Cancel {
Err(state)
}
fn in_progress_update(
(reader, cleanup): Self::InProgress,
code: u32,
) -> Result<Self::Result, Self::InProgress> {
match ReturnCode::decode(code) {
ReturnCode::Blocked => Err((reader, cleanup)),
// Let `cleanup` fall out of scope to clean up its allocation here,
// and otherwise the reader is plumbed through to possibly restart
// the read in the future.
ReturnCode::Cancelled(0) => Ok((ReadComplete::Cancelled, reader)),
// The read has completed, so lift the value from the stored memory and
// `cleanup` naturally falls out of scope after transferring ownership of
// everything to the returned `value`.
ReturnCode::Completed(0) => {
let ptr = cleanup
.as_ref()
.map(|c| c.ptr.as_ptr())
.unwrap_or(ptr::null_mut());
// SAFETY: we're the ones managing `ptr` so we know it's safe to
// pass here.
let value = unsafe { (reader.vtable.lift)(ptr) };
Ok((ReadComplete::Value(value), reader))
}
other => panic!("unexpected code {other:?}"),
}
}
fn in_progress_waitable((reader, _): &Self::InProgress) -> u32 {
reader.handle()
}
fn in_progress_cancel((reader, _): &Self::InProgress) -> u32 {
// SAFETY: we're managing `reader` and all the various operational bits,
// so this relies on `WaitableOperation` being safe.
let code = unsafe { (reader.vtable.cancel_read)(reader.handle()) };
rtdebug!("future.cancel-read({}) = {code:#x}", reader.handle());
code
}
fn result_into_cancel((value, reader): Self::Result) -> Self::Cancel {
match value {
// The value was actually read, so thread that through here.
ReadComplete::Value(value) => Ok(value),
// The read was successfully cancelled, so thread through the
// `reader` to possibly restart later on.
ReadComplete::Cancelled => Err(reader),
}
}
}
impl<T: 'static> Future for FutureRead<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.pin_project()
.poll_complete(cx)
.map(|(result, _reader)| match result {
ReadComplete::Value(val) => val,
// This is only possible if, after calling `FutureRead::cancel`,
// the future is polled again. The `cancel` method is documented
// as "don't do that" so this is left to panic.
ReadComplete::Cancelled => panic!("cannot poll after cancelling"),
})
}
}
impl<T> FutureRead<T> {
fn pin_project(self: Pin<&mut Self>) -> Pin<&mut WaitableOperation<FutureReadOp<T>>> {
// SAFETY: we've chosen that when `Self` is pinned that it translates to
// always pinning the inner field, so that's codified here.
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().op) }
}
/// Cancel this read if it hasn't already completed.
///
/// Return values include:
///
/// * `Ok(value)` - future completed before this cancellation request
/// was received.
/// * `Err(reader)` - read operation was cancelled and it can be retried in
/// the future if desired.
///
/// # Panics
///
/// Panics if the operation has already been completed via `Future::poll`,
/// or if this method is called twice. Additionally if this method completes
/// then calling `poll` again on `self` will panic.
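///
/// A hedged sketch (assuming `read` is a pinned [`FutureRead`]):
///
/// ```ignore
/// match read.as_mut().cancel() {
///     Ok(value) => { /* the write won the race; use `value` */ }
///     Err(reader) => { /* nothing was read; `reader.await` can be retried */ }
/// }
/// ```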
pub fn cancel(self: Pin<&mut Self>) -> Result<T, FutureReader<T>> {
self.pin_project().cancel()
}
}

View File

@@ -0,0 +1,604 @@
//! For a high-level overview of how this module is implemented see the
//! module documentation in `future_support.rs`.
use crate::rt::async_support::waitable::{WaitableOp, WaitableOperation};
use crate::rt::async_support::{AbiBuffer, ReturnCode, DROPPED};
use {
crate::rt::Cleanup,
std::{
alloc::Layout,
fmt,
future::Future,
marker,
pin::Pin,
ptr,
sync::atomic::{AtomicU32, Ordering::Relaxed},
task::{Context, Poll},
vec::Vec,
},
};
/// Operations that a stream requires throughout the implementation.
///
/// This is generated by `wit_bindgen::generate!` primarily.
#[doc(hidden)]
pub struct StreamVtable<T> {
/// The in-memory canonical ABI layout of a single value of `T`.
pub layout: Layout,
/// An optional callback where if provided will lower an owned `T` value
/// into the `dst` pointer.
///
/// If this is called the ownership of all of `T`'s lists and resources are
/// passed to `dst`, possibly by reallocating if `T`'s layout differs from
/// the canonical ABI layout.
///
/// If this is `None` then it means that `T` has the same layout in-memory
/// in Rust as it does in the canonical ABI. In such a situation the
/// lower/lift operation can be dropped.
pub lower: Option<unsafe fn(value: T, dst: *mut u8)>,
/// Callback used to deallocate any owned lists in `dst` after a value has
/// been successfully sent along a stream.
///
/// `None` means that `T` has no lists internally.
pub dealloc_lists: Option<unsafe fn(dst: *mut u8)>,
/// Dual of `lower`, and like `lower` if this is missing then it means that
/// `T` has the same in-memory representation in Rust and the canonical ABI.
pub lift: Option<unsafe fn(dst: *mut u8) -> T>,
/// The raw `stream.write` intrinsic.
pub start_write: unsafe extern "C" fn(stream: u32, val: *const u8, amt: usize) -> u32,
/// The raw `stream.read` intrinsic.
pub start_read: unsafe extern "C" fn(stream: u32, val: *mut u8, amt: usize) -> u32,
/// The raw `stream.cancel-write` intrinsic.
pub cancel_write: unsafe extern "C" fn(stream: u32) -> u32,
/// The raw `stream.cancel-read` intrinsic.
pub cancel_read: unsafe extern "C" fn(stream: u32) -> u32,
/// The raw `stream.drop-writable` intrinsic.
pub drop_writable: unsafe extern "C" fn(stream: u32),
/// The raw `stream.drop-readable` intrinsic.
pub drop_readable: unsafe extern "C" fn(stream: u32),
/// The raw `stream.new` intrinsic.
pub new: unsafe extern "C" fn() -> u64,
}
/// Helper function to create a new read/write pair for a component model
/// stream.
pub unsafe fn stream_new<T>(
vtable: &'static StreamVtable<T>,
) -> (StreamWriter<T>, StreamReader<T>) {
unsafe {
let handles = (vtable.new)();
let reader = handles as u32;
let writer = (handles >> 32) as u32;
rtdebug!("stream.new() = [{writer}, {reader}]");
(
StreamWriter::new(writer, vtable),
StreamReader::new(reader, vtable),
)
}
}
/// Represents the writable end of a Component Model `stream`.
pub struct StreamWriter<T: 'static> {
handle: u32,
vtable: &'static StreamVtable<T>,
done: bool,
}
impl<T> StreamWriter<T> {
#[doc(hidden)]
pub unsafe fn new(handle: u32, vtable: &'static StreamVtable<T>) -> Self {
Self {
handle,
vtable,
done: false,
}
}
/// Initiate a write of the `values` provided into this stream.
///
/// This method is akin to an `async fn` except that the returned
/// [`StreamWrite`] future can also be cancelled via [`StreamWrite::cancel`]
/// to re-acquire undelivered values.
///
/// This method will perform at most a single write of the `values`
/// provided. The returned future will resolve once the write has completed.
///
/// # Return Values
///
/// The returned [`StreamWrite`] future returns a tuple of `(result, buf)`.
/// The `result` can be `StreamResult::Complete(n)` meaning that `n` values
/// were sent from `values` into this writer. A result of
/// `StreamResult::Dropped` means that no values were sent because the other side
/// has hung up, so sending values will no longer be possible.
///
/// The `buf` returned is an [`AbiBuffer<T>`] which retains ownership of the
/// original `values` provided here. That can be used to re-acquire `values`
/// through the [`AbiBuffer::into_vec`] method. The `buf` maintains an
/// internal cursor of how many values have been written and if the write
/// should be resumed to write the entire buffer then the
/// [`StreamWriter::write_buf`] method can be used to resume writing at the
/// next value in the buffer.
///
/// # Cancellation
///
/// The returned [`StreamWrite`] future can be cancelled like any other Rust
/// future via `drop`, but this means that `values` will be lost within the
/// future. The [`StreamWrite::cancel`] method can be used to re-acquire the
/// in-progress write that is being done with `values`. This is effectively
/// a way of forcing the future to immediately resolve.
///
/// Note that if this future is cancelled via `drop` it does not mean that
/// no values were sent. It may be possible that values were still sent
/// despite being cancelled. Cancelling a write and determining what
/// happened must be done with [`StreamWrite::cancel`].
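///
/// # Example
///
/// A hedged sketch that keeps writing until everything is sent or the reader
/// goes away (this mirrors what [`StreamWriter::write_all`] does):
///
/// ```ignore
/// let (mut result, mut buf) = writer.write(values).await;
/// while let StreamResult::Complete(_) = result {
///     if buf.remaining() == 0 {
///         break;
///     }
///     (result, buf) = writer.write_buf(buf).await;
/// }
/// ```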
pub fn write(&mut self, values: Vec<T>) -> StreamWrite<'_, T> {
self.write_buf(AbiBuffer::new(values, self.vtable))
}
/// Same as [`StreamWriter::write`], except this takes [`AbiBuffer<T>`]
/// instead of `Vec<T>`.
pub fn write_buf(&mut self, values: AbiBuffer<T>) -> StreamWrite<'_, T> {
StreamWrite {
op: WaitableOperation::new((self, values)),
}
}
/// Writes all of the `values` provided into this stream.
///
/// This is a higher-level method than [`StreamWriter::write`] and does not
/// expose cancellation for example. This will successively attempt to write
/// all of `values` provided into this stream. Upon completion the same
/// vector will be returned; any elements remaining in it were
/// not sent because the stream was dropped.
pub async fn write_all(&mut self, values: Vec<T>) -> Vec<T> {
// Perform an initial write which converts `values` into `AbiBuffer`.
let (mut status, mut buf) = self.write(values).await;
// While the previous write completed and there's still remaining items
// in the buffer, perform another write.
while let StreamResult::Complete(_) = status {
if buf.remaining() == 0 {
break;
}
(status, buf) = self.write_buf(buf).await;
// FIXME(WebAssembly/component-model#490)
if status == StreamResult::Cancelled {
status = StreamResult::Complete(0);
}
}
// Return back any values that weren't written by shifting them to the
// front of the returned vector.
assert!(buf.remaining() == 0 || matches!(status, StreamResult::Dropped));
buf.into_vec()
}
/// Writes the singular `value` provided
///
/// This is a higher-level method than [`StreamWriter::write`] and does not
/// expose cancellation for example. This will attempt to send `value` on
/// this stream.
///
/// If the other end hangs up then the value is returned back as
/// `Some(value)`, otherwise `None` is returned indicating the value was
/// sent.
pub async fn write_one(&mut self, value: T) -> Option<T> {
// TODO: can probably be a bit more efficient about this and avoid
// moving `value` onto the heap in some situations, but that's left as
// an optimization for later.
self.write_all(std::vec![value]).await.pop()
}
}
impl<T> fmt::Debug for StreamWriter<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StreamWriter")
.field("handle", &self.handle)
.finish()
}
}
impl<T> Drop for StreamWriter<T> {
fn drop(&mut self) {
rtdebug!("stream.drop-writable({})", self.handle);
unsafe {
(self.vtable.drop_writable)(self.handle);
}
}
}
/// Represents a write operation which may be cancelled prior to completion.
pub struct StreamWrite<'a, T: 'static> {
op: WaitableOperation<StreamWriteOp<'a, T>>,
}
struct StreamWriteOp<'a, T: 'static>(marker::PhantomData<(&'a mut StreamWriter<T>, T)>);
/// Result of a [`StreamWriter::write`] or [`StreamReader::read`] operation,
/// yielded by the [`StreamWrite`] or [`StreamRead`] futures.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum StreamResult {
/// The provided number of values were successfully transferred.
///
/// For writes this is how many items were written, and for reads this is
/// how many items were read.
Complete(usize),
/// No values were written, the other end has dropped its handle.
Dropped,
/// No values were written, the operation was cancelled.
Cancelled,
}
unsafe impl<'a, T> WaitableOp for StreamWriteOp<'a, T>
where
T: 'static,
{
type Start = (&'a mut StreamWriter<T>, AbiBuffer<T>);
type InProgress = (&'a mut StreamWriter<T>, AbiBuffer<T>);
type Result = (StreamResult, AbiBuffer<T>);
type Cancel = (StreamResult, AbiBuffer<T>);
fn start((writer, buf): Self::Start) -> (u32, Self::InProgress) {
if writer.done {
return (DROPPED, (writer, buf));
}
let (ptr, len) = buf.abi_ptr_and_len();
// SAFETY: `ptr` and `len` come from `AbiBuffer`, which keeps the canonical
// ABI allocation alive for the duration of this operation.
let code = unsafe { (writer.vtable.start_write)(writer.handle, ptr, len) };
rtdebug!(
"stream.write({}, {ptr:?}, {len}) = {code:#x}",
writer.handle
);
(code, (writer, buf))
}
fn start_cancelled((_writer, buf): Self::Start) -> Self::Cancel {
(StreamResult::Cancelled, buf)
}
fn in_progress_update(
(writer, mut buf): Self::InProgress,
code: u32,
) -> Result<Self::Result, Self::InProgress> {
match ReturnCode::decode(code) {
ReturnCode::Blocked => Err((writer, buf)),
ReturnCode::Dropped(0) => Ok((StreamResult::Dropped, buf)),
ReturnCode::Cancelled(0) => Ok((StreamResult::Cancelled, buf)),
code @ (ReturnCode::Completed(amt)
| ReturnCode::Dropped(amt)
| ReturnCode::Cancelled(amt)) => {
let amt = amt.try_into().unwrap();
buf.advance(amt);
if let ReturnCode::Dropped(_) = code {
writer.done = true;
}
Ok((StreamResult::Complete(amt), buf))
}
}
}
fn in_progress_waitable((writer, _): &Self::InProgress) -> u32 {
writer.handle
}
fn in_progress_cancel((writer, _): &Self::InProgress) -> u32 {
// SAFETY: we're managing `writer` and all the various operational bits,
// so this relies on `WaitableOperation` being safe.
let code = unsafe { (writer.vtable.cancel_write)(writer.handle) };
rtdebug!("stream.cancel-write({}) = {code:#x}", writer.handle);
code
}
fn result_into_cancel(result: Self::Result) -> Self::Cancel {
result
}
}
impl<T: 'static> Future for StreamWrite<'_, T> {
type Output = (StreamResult, AbiBuffer<T>);
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.pin_project().poll_complete(cx)
}
}
impl<'a, T: 'static> StreamWrite<'a, T> {
fn pin_project(self: Pin<&mut Self>) -> Pin<&mut WaitableOperation<StreamWriteOp<'a, T>>> {
// SAFETY: we've chosen that when `Self` is pinned that it translates to
// always pinning the inner field, so that's codified here.
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().op) }
}
/// Cancel this write if it hasn't already completed.
///
/// This method can be used to cancel a write-in-progress and re-acquire
/// values being sent. Note that the result here may still indicate that
/// some values were written if the race to cancel the write was lost.
///
/// # Panics
///
/// Panics if the operation has already been completed via `Future::poll`,
/// or if this method is called twice.
pub fn cancel(self: Pin<&mut Self>) -> (StreamResult, AbiBuffer<T>) {
self.pin_project().cancel()
}
}
/// Represents the readable end of a Component Model `stream`.
pub struct StreamReader<T: 'static> {
handle: AtomicU32,
vtable: &'static StreamVtable<T>,
done: bool,
}
impl<T> fmt::Debug for StreamReader<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("StreamReader")
.field("handle", &self.handle)
.finish()
}
}
impl<T> StreamReader<T> {
#[doc(hidden)]
pub fn new(handle: u32, vtable: &'static StreamVtable<T>) -> Self {
Self {
handle: AtomicU32::new(handle),
vtable,
done: false,
}
}
#[doc(hidden)]
pub fn take_handle(&self) -> u32 {
let ret = self.opt_handle().unwrap();
self.handle.store(u32::MAX, Relaxed);
ret
}
fn handle(&self) -> u32 {
self.opt_handle().unwrap()
}
fn opt_handle(&self) -> Option<u32> {
match self.handle.load(Relaxed) {
u32::MAX => None,
other => Some(other),
}
}
/// Starts a new read operation on this stream into `buf`.
///
/// This method will read values into the spare capacity of the `buf`
/// provided. If `buf` has no spare capacity then this will be equivalent
/// to a zero-length read.
///
/// Upon completion the `buf` will be yielded back to the caller via the
/// completion of the [`StreamRead`] future.
///
/// # Cancellation
///
/// Cancelling the returned future can be done with `drop` like all Rust
/// futures, but it does not mean that no values were read. To accurately
/// determine if values were read the [`StreamRead::cancel`] method must be
/// used.
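///
/// # Example
///
/// A hedged sketch reading into a buffer's spare capacity:
///
/// ```ignore
/// let (result, buf) = reader.read(Vec::with_capacity(16)).await;
/// match result {
///     StreamResult::Complete(n) => { /* `buf` gained `n` new items */ }
///     StreamResult::Dropped => { /* the writer is gone */ }
///     StreamResult::Cancelled => { /* not expected unless the read was cancelled */ }
/// }
/// ```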
pub fn read(&mut self, buf: Vec<T>) -> StreamRead<'_, T> {
StreamRead {
op: WaitableOperation::new((self, buf)),
}
}
/// Reads a single item from this stream.
///
/// This is a higher-level method than [`StreamReader::read`] in that it
/// reads only a single item and does not expose control over cancellation.
pub async fn next(&mut self) -> Option<T> {
// TODO: should amortize this allocation and avoid doing it every time.
// Or somehow perhaps make this more optimal.
let (_result, mut buf) = self.read(Vec::with_capacity(1)).await;
buf.pop()
}
/// Reads all items from this stream and returns the list.
///
/// This method will read all remaining items from this stream into a list
/// and await the stream to be dropped.
pub async fn collect(mut self) -> Vec<T> {
let mut ret = Vec::new();
loop {
// If there's no more spare capacity then reserve room for one item
// which should trigger `Vec`'s built-in resizing logic, which will
// likely free up more capacity than just one slot.
if ret.len() == ret.capacity() {
ret.reserve(1);
}
let (status, buf) = self.read(ret).await;
ret = buf;
match status {
StreamResult::Complete(_) => {}
StreamResult::Dropped => break,
StreamResult::Cancelled => unreachable!(),
}
}
ret
}
}
impl<T> Drop for StreamReader<T> {
fn drop(&mut self) {
let Some(handle) = self.opt_handle() else {
return;
};
unsafe {
rtdebug!("stream.drop-readable({})", handle);
(self.vtable.drop_readable)(handle);
}
}
}
/// Represents a read operation which may be cancelled prior to completion.
pub struct StreamRead<'a, T: 'static> {
op: WaitableOperation<StreamReadOp<'a, T>>,
}
struct StreamReadOp<'a, T: 'static>(marker::PhantomData<(&'a mut StreamReader<T>, T)>);
unsafe impl<'a, T> WaitableOp for StreamReadOp<'a, T>
where
T: 'static,
{
type Start = (&'a mut StreamReader<T>, Vec<T>);
type InProgress = (&'a mut StreamReader<T>, Vec<T>, Option<Cleanup>);
type Result = (StreamResult, Vec<T>);
type Cancel = (StreamResult, Vec<T>);
fn start((reader, mut buf): Self::Start) -> (u32, Self::InProgress) {
if reader.done {
return (DROPPED, (reader, buf, None));
}
let cap = buf.spare_capacity_mut();
let ptr;
let cleanup;
// If `T` requires a lifting operation, then allocate a slab of memory
// which will store the canonical ABI read. Otherwise we can use the
// raw capacity in `buf` itself.
if reader.vtable.lift.is_some() {
let layout = Layout::from_size_align(
reader.vtable.layout.size() * cap.len(),
reader.vtable.layout.align(),
)
.unwrap();
(ptr, cleanup) = Cleanup::new(layout);
} else {
ptr = cap.as_mut_ptr().cast();
cleanup = None;
}
// SAFETY: `ptr` is either in `buf` or in `cleanup`, both of which will
// persist with this async operation itself.
let code = unsafe { (reader.vtable.start_read)(reader.handle(), ptr, cap.len()) };
rtdebug!(
"stream.read({}, {ptr:?}, {}) = {code:#x}",
reader.handle(),
cap.len()
);
(code, (reader, buf, cleanup))
}
fn start_cancelled((_, buf): Self::Start) -> Self::Cancel {
(StreamResult::Cancelled, buf)
}
fn in_progress_update(
(reader, mut buf, cleanup): Self::InProgress,
code: u32,
) -> Result<Self::Result, Self::InProgress> {
match ReturnCode::decode(code) {
ReturnCode::Blocked => Err((reader, buf, cleanup)),
// Note that the `cleanup`, if any, is discarded here.
ReturnCode::Dropped(0) => Ok((StreamResult::Dropped, buf)),
// When an in-progress read is successfully cancelled then the
// allocation that was being read into, if any, is just discarded.
//
// TODO: should maybe thread this around like `AbiBuffer` to cache
// the read allocation?
ReturnCode::Cancelled(0) => Ok((StreamResult::Cancelled, buf)),
code @ (ReturnCode::Completed(amt)
| ReturnCode::Dropped(amt)
| ReturnCode::Cancelled(amt)) => {
let amt = usize::try_from(amt).unwrap();
let cur_len = buf.len();
assert!(amt <= buf.capacity() - cur_len);
match reader.vtable.lift {
// With a `lift` operation this now requires reading `amt` items
// from `cleanup` and pushing them into `buf`.
Some(lift) => {
let mut ptr = cleanup
.as_ref()
.map(|c| c.ptr.as_ptr())
.unwrap_or(ptr::null_mut());
for _ in 0..amt {
unsafe {
buf.push(lift(ptr));
ptr = ptr.add(reader.vtable.layout.size());
}
}
}
// If no `lift` was necessary, then the results of this operation
// were read directly into `buf`, so just update its length now that
// values have been initialized.
None => unsafe { buf.set_len(cur_len + amt) },
}
// Intentionally dispose of `cleanup` here as, if it was used, all
// allocations have been read from it and appended to `buf`.
drop(cleanup);
if let ReturnCode::Dropped(_) = code {
reader.done = true;
}
Ok((StreamResult::Complete(amt), buf))
}
}
}
fn in_progress_waitable((reader, ..): &Self::InProgress) -> u32 {
reader.handle()
}
fn in_progress_cancel((reader, ..): &Self::InProgress) -> u32 {
// SAFETY: we're managing `reader` and all the various operational bits,
// so this relies on `WaitableOperation` being safe.
let code = unsafe { (reader.vtable.cancel_read)(reader.handle()) };
rtdebug!("stream.cancel-read({}) = {code:#x}", reader.handle());
code
}
fn result_into_cancel(result: Self::Result) -> Self::Cancel {
result
}
}
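// Illustrative sketch (not part of the generated bindings): for element types
// without a `lift` operation, `start` above reads directly into `buf`'s spare
// capacity and `in_progress_update` later bumps the length with `set_len`.
// The same pattern with plain `std` types, pretending the host wrote 4 bytes:
#[allow(dead_code)]
fn _example_read_into_spare_capacity() {
    let mut buf: Vec<u8> = Vec::with_capacity(8);
    for (i, slot) in buf.spare_capacity_mut().iter_mut().take(4).enumerate() {
        // Each `slot` is a `MaybeUninit<u8>`; writing initializes it.
        slot.write(i as u8);
    }
    // SAFETY: exactly 4 elements past the current length were initialized.
    unsafe { buf.set_len(4) };
    assert_eq!(buf, [0, 1, 2, 3]);
}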
impl<T: 'static> Future for StreamRead<'_, T> {
type Output = (StreamResult, Vec<T>);
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.pin_project().poll_complete(cx)
}
}
impl<'a, T> StreamRead<'a, T> {
fn pin_project(self: Pin<&mut Self>) -> Pin<&mut WaitableOperation<StreamReadOp<'a, T>>> {
// SAFETY: we've chosen that when `Self` is pinned that it translates to
// always pinning the inner field, so that's codified here.
unsafe { Pin::new_unchecked(&mut self.get_unchecked_mut().op) }
}
/// Cancel this read if it hasn't already completed.
///
/// This method will initiate a cancellation operation for this active
/// read. This may race with the read itself, so the operation may still
/// complete with some results.
///
/// The final result of cancellation is returned, along with the original
/// buffer.
///
/// # Panics
///
/// Panics if the operation has already been completed via `Future::poll`,
/// or if this method is called twice.
pub fn cancel(self: Pin<&mut Self>) -> (StreamResult, Vec<T>) {
self.pin_project().cancel()
}
}
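// Illustrative sketch (not part of the generated bindings): `pin_project`
// above is a hand-rolled structural pin projection. The same idea with a toy
// type, using `Pin::map_unchecked_mut` as the equivalent std helper:
#[allow(dead_code)]
fn _example_manual_pin_projection() {
    use std::pin::Pin;
    struct Outer {
        inner: String, // stands in for the pinned `op` field
    }
    impl Outer {
        fn inner_pin(self: Pin<&mut Self>) -> Pin<&mut String> {
            // SAFETY: `inner` is never moved out of `Outer` once pinned.
            unsafe { self.map_unchecked_mut(|me| &mut me.inner) }
        }
    }
    let mut outer = Box::pin(Outer { inner: String::from("op") });
    outer.as_mut().inner_pin().push_str(" state");
    assert_eq!(outer.inner, "op state");
}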

View File

@@ -0,0 +1,289 @@
//! Bindings used to manage subtasks, or invocations of imported functions.
//!
//! See `future_support` for more discussion, but the basic idea is the same:
//! everything is passed by ownership, primarily to deal with the possibility
//! of leaking futures. By always requiring ownership we can guarantee that
//! even when a future is leaked, all of the parameters it passed to the
//! canonical ABI are leaked along with it, which keeps the operation memory
//! safe.
use crate::rt::async_support::waitable::{WaitableOp, WaitableOperation};
use crate::rt::async_support::{
STATUS_RETURNED, STATUS_RETURNED_CANCELLED, STATUS_STARTED, STATUS_STARTED_CANCELLED,
STATUS_STARTING,
};
use crate::rt::Cleanup;
use std::alloc::Layout;
use std::future::Future;
use std::marker;
use std::num::NonZeroU32;
use std::ptr;
/// Raw operations used to invoke an imported asynchronous function.
///
/// This trait is implemented by generated bindings and is used to implement
/// asynchronous imports.
///
/// # Unsafety
///
/// All operations and constants must be self-consistent and match how this
/// module expects them to be used.
pub unsafe trait Subtask {
/// The in-memory layout of both parameters and results allocated with
/// parameters coming first.
const ABI_LAYOUT: Layout;
/// The offset, in bytes, from the start of `ABI_LAYOUT` to where the
/// results will be stored.
const RESULTS_OFFSET: usize;
/// The parameters to this task.
type Params;
/// The representation of lowered parameters for this task.
///
/// This accounts for how lowered imports may have up to 4 flat arguments or
/// may instead be passed indirectly through memory. Either way this
/// represents the actual ABI values passed to the import.
type ParamsLower: Copy;
/// The results of this task.
type Results;
/// The raw function import using `[async-lower]` and the canonical ABI.
unsafe fn call_import(params: Self::ParamsLower, results: *mut u8) -> u32;
/// Bindings-generated version of lowering `params`.
///
/// This may use the heap-allocated `dst`, which is an uninitialized
/// allocation of `Self::ABI_LAYOUT`. This returns any ABI parameters
/// necessary to actually invoke the imported function.
///
/// Note that the returned `Self::ParamsLower` may simply be `dst` itself if
/// there are more ABI parameters than the canonical ABI allows as flat
/// params.
unsafe fn params_lower(params: Self::Params, dst: *mut u8) -> Self::ParamsLower;
/// Bindings-generated version of deallocating any lists stored within
/// `lower`.
unsafe fn params_dealloc_lists(lower: Self::ParamsLower);
/// Bindings-generated version of deallocating the owned lists within
/// `lower` and additionally disposing of any owned resources.
unsafe fn params_dealloc_lists_and_own(lower: Self::ParamsLower);
/// Bindings-generated version of lifting the results stored at `src`.
unsafe fn results_lift(src: *mut u8) -> Self::Results;
/// Helper function to actually perform this asynchronous call with
/// `params`.
fn call(params: Self::Params) -> impl Future<Output = Self::Results>
where
Self: Sized,
{
async {
match WaitableOperation::<SubtaskOps<Self>>::new(Start { params }).await {
Ok(results) => results,
Err(_) => unreachable!(
"cancellation is not exposed API-wise, \
should not be possible"
),
}
}
}
}
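// Illustrative sketch (not generated code): the combined params+results
// allocation described by `ABI_LAYOUT`/`RESULTS_OFFSET` can be modeled with
// plain `Layout` arithmetic. The concrete field types below are made up.
#[allow(dead_code)]
fn _example_abi_layout() {
    use std::alloc::Layout;
    let params = Layout::new::<(u32, u64)>(); // hypothetical lowered params
    let results = Layout::new::<u64>(); // hypothetical lowered results
    let (combined, results_offset) = params.extend(results).unwrap();
    // `combined` plays the role of `ABI_LAYOUT` and `results_offset` that of
    // `RESULTS_OFFSET` for this made-up signature.
    assert!(results_offset >= params.size());
    assert_eq!(combined.align(), params.align().max(results.align()));
}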
struct SubtaskOps<T>(marker::PhantomData<T>);
struct Start<T: Subtask> {
params: T::Params,
}
unsafe impl<T: Subtask> WaitableOp for SubtaskOps<T> {
type Start = Start<T>;
type InProgress = InProgress<T>;
type Result = Result<T::Results, ()>;
type Cancel = Result<T::Results, ()>;
fn start(state: Self::Start) -> (u32, Self::InProgress) {
unsafe {
let (ptr_params, cleanup) = Cleanup::new(T::ABI_LAYOUT);
let ptr_results = ptr_params.add(T::RESULTS_OFFSET);
let params_lower = T::params_lower(state.params, ptr_params);
let packed = T::call_import(params_lower, ptr_results);
let code = packed & 0xf;
let subtask = NonZeroU32::new(packed >> 4).map(|handle| SubtaskHandle { handle });
rtdebug!("<import>({ptr_params:?}, {ptr_results:?}) = ({code:#x}, {subtask:#x?})");
(
code,
InProgress {
params_lower,
params_and_results: cleanup,
subtask,
started: false,
_marker: marker::PhantomData,
},
)
}
}
fn start_cancelled(_state: Self::Start) -> Self::Cancel {
Err(())
}
fn in_progress_update(
mut state: Self::InProgress,
code: u32,
) -> Result<Self::Result, Self::InProgress> {
match code {
// Nothing new to do in this state, we're still waiting for the task
// to start.
STATUS_STARTING => {
assert!(!state.started);
Err(state)
}
// Still not done yet, but we can record that this has started, which
// also deallocates the lists in the parameters.
STATUS_STARTED => {
state.flag_started();
Err(state)
}
STATUS_RETURNED => {
// Conditionally flag as started if we haven't otherwise
// explicitly transitioned through `STATUS_STARTED`.
if !state.started {
state.flag_started();
}
// Now that our results have been written we can read them.
//
// Note that by dropping `state` here we'll both deallocate the
// params/results storage area as well as the subtask handle
// itself.
unsafe { Ok(Ok(T::results_lift(state.ptr_results()))) }
}
// This subtask was dropped which forced cancellation. Said
// cancellation stopped the subtask before it reached the "started"
// state, meaning that we still own all of the parameters in their
// lowered form.
//
// In this situation we lift the parameters, even after we
// previously lowered them, back into `T::Params`. That notably
// re-acquires ownership and is suitable for disposing of all of
// the parameters via normal Rust-based destructors.
STATUS_STARTED_CANCELLED => {
assert!(!state.started);
unsafe {
T::params_dealloc_lists_and_own(state.params_lower);
}
Ok(Err(()))
}
// This subtask was dropped which forced cancellation. Said
// cancellation stopped the subtask before it reached the "returned"
// state, meaning that it started, received the arguments, but then
// did not complete.
//
// In this situation we may have already received `STATUS_STARTED`,
// but we also might not have. This means we conditionally need
// to flag this task as started which will deallocate all lists
// owned by the parameters.
//
// After that though we do not have ownership of the parameters any
// more (e.g. own resources are all gone) so there's nothing to
// return. Here we yield a result and dispose of the in-progress
// state.
STATUS_RETURNED_CANCELLED => {
if !state.started {
state.flag_started();
}
Ok(Err(()))
}
other => panic!("unknown code {other:#x}"),
}
}
fn in_progress_waitable(state: &Self::InProgress) -> u32 {
// This should only be called when a subtask handle exists. The one case
// where it doesn't is when `STATUS_RETURNED` comes back immediately and
// no waitable is created, hence the `unwrap()` here.
state.subtask.as_ref().unwrap().handle.get()
}
fn in_progress_cancel(state: &Self::InProgress) -> u32 {
unsafe { cancel(Self::in_progress_waitable(state)) }
}
fn result_into_cancel(result: Self::Result) -> Self::Cancel {
result
}
}
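// Illustrative sketch (not generated code): `start` above splits the packed
// `u32` returned by `call_import` into a 4-bit status code and a shifted
// subtask handle. With plain integers (values chosen arbitrarily):
#[allow(dead_code)]
fn _example_unpack_import_return() {
    let packed: u32 = (42 << 4) | 0x2; // hypothetical handle 42, status 0x2
    let code = packed & 0xf;
    let subtask = std::num::NonZeroU32::new(packed >> 4);
    assert_eq!(code, 0x2);
    assert_eq!(subtask.map(|h| h.get()), Some(42));
}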
#[derive(Debug)]
struct SubtaskHandle {
handle: NonZeroU32,
}
impl Drop for SubtaskHandle {
fn drop(&mut self) {
unsafe {
drop(self.handle.get());
}
}
}
struct InProgress<T: Subtask> {
params_and_results: Option<Cleanup>,
params_lower: T::ParamsLower,
started: bool,
subtask: Option<SubtaskHandle>,
_marker: marker::PhantomData<T>,
}
impl<T: Subtask> InProgress<T> {
fn flag_started(&mut self) {
assert!(!self.started);
self.started = true;
// SAFETY: the initial entrypoint of `call` requires that the vtable is
// setup correctly and we're obeying the invariants of the vtable,
// deallocating lists in an allocation that we exclusively own.
unsafe {
T::params_dealloc_lists(self.params_lower);
}
}
fn ptr_results(&self) -> *mut u8 {
// SAFETY: the `T` trait has unsafely promised us that the offset is
// in-bounds of the allocation layout.
unsafe {
self.params_and_results
.as_ref()
.map(|c| c.ptr.as_ptr())
.unwrap_or(ptr::null_mut())
.add(T::RESULTS_OFFSET)
}
}
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn drop(_: u32) {
unreachable!()
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn cancel(_: u32) -> u32 {
unreachable!()
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[subtask-cancel]"]
fn cancel(handle: u32) -> u32;
#[link_name = "[subtask-drop]"]
fn drop(handle: u32);
}

View File

@@ -0,0 +1,466 @@
//! Generic support for "any waitable" and performing asynchronous operations on
//! that waitable.
use super::cabi;
use std::ffi::c_void;
use std::future::Future;
use std::marker;
use std::mem;
use std::pin::Pin;
use std::ptr;
use std::task::{Context, Poll, Waker};
/// Generic future-based operation on any "waitable" in the component model.
///
/// This is used right now to power futures and streams for both read/write
/// halves. This structure is driven by `S`, an implementation of
/// [`WaitableOp`], which codifies the various state transitions and what to do
/// on each state transition.
pub struct WaitableOperation<S: WaitableOp> {
state: WaitableOperationState<S>,
/// Storage for the final result of this asynchronous operation, if it's
/// completed asynchronously.
completion_status: CompletionStatus,
}
/// Structure used to store the `u32` return code from the canonical ABI about
/// an asynchronous operation.
///
/// When an asynchronous operation is started and it does not immediately
/// complete then this structure is used to asynchronously fill in the return
/// code. A `Pin<&mut CompletionStatus>` is used to register a pointer with
/// `FutureState` to get filled in.
///
/// Note that this means that this type is participating in unsafe lifetime
/// management and has properties it needs to uphold as a result. Specifically
/// the `PhantomPinned` field here means that `Pin` actually has meaning for
/// this structure, notably that once `Pin<&mut CompletionStatus>` is created
/// then it's guaranteed the destructor will be run before the backing memory
/// is deallocated. That's used in `WaitableOperation` above to share an
/// internal pointer of this data structure with `FutureState` safely. The
/// destructor of `WaitableOperation` will deregister from `FutureState`, meaning
/// that if `FutureState` holds a pointer here then that pointer is still valid.
struct CompletionStatus {
/// Where the async operation's code is filled in, and `None` until that
/// happens.
code: Option<u32>,
waker: Option<Waker>,
/// This field ensures that `Pin<&mut CompletionStatus>` carries the "pin
/// guarantee": constructing a `Pin<&mut CompletionStatus>` is not a safe
/// operation and must go through `unsafe` code somewhere.
_pinned: marker::PhantomPinned,
}
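// Illustrative sketch (not part of the bindings): `PhantomPinned` is what
// opts `CompletionStatus` out of `Unpin`, so a pinned reference to it really
// does carry the drop guarantee relied on above. A std-only demonstration of
// mutating such a type only through an unsafe pin projection:
#[allow(dead_code)]
fn _example_phantom_pinned() {
    use std::marker::PhantomPinned;
    use std::pin::Pin;
    struct Demo {
        code: Option<u32>,
        _pinned: PhantomPinned,
    }
    let mut demo: Pin<Box<Demo>> = Box::pin(Demo {
        code: None,
        _pinned: PhantomPinned,
    });
    // SAFETY: the value is never moved out of its pinned allocation.
    unsafe { demo.as_mut().get_unchecked_mut().code = Some(0) };
    assert_eq!(demo.code, Some(0));
}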
/// Helper trait to be used with `WaitableOperation` to assist with machinery
/// necessary to track in-flight reads/writes on futures.
///
/// # Unsafety
///
/// This trait is `unsafe` as it has various guarantees that must be upheld by
/// implementors such as:
///
/// * `S::in_progress_waitable` must always return the same value for the state
/// given.
pub unsafe trait WaitableOp {
/// Initial state of this operation, used to kick off the actual component
/// model operation and transition to `InProgress`.
type Start;
/// Intermediate state of this operation when the component model is
/// involved but it hasn't resolved just yet.
type InProgress;
/// Result type of this operation.
type Result;
/// Result of when this operation is cancelled.
type Cancel;
/// Starts the async operation.
///
/// This method will actually call `{future,stream}.{read,write}` with
/// `state` provided. The return code of the intrinsic is returned here
/// along with the `InProgress` state.
fn start(state: Self::Start) -> (u32, Self::InProgress);
/// Optionally complete the async operation.
///
/// This method will transition from the `InProgress` state, with some
/// status code that was received, to either a completed result or a new
/// `InProgress` state. This is invoked when:
///
/// * a new status code has been received by an async export's `callback`
/// * cancellation returned a code to be processed here
fn in_progress_update(
state: Self::InProgress,
code: u32,
) -> Result<Self::Result, Self::InProgress>;
/// Conversion from the "start" state to the "cancel" result, needed when an
/// operation is cancelled before it's started.
fn start_cancelled(state: Self::Start) -> Self::Cancel;
/// Acquires the component-model `waitable` index that the `InProgress`
/// state is waiting on.
fn in_progress_waitable(state: &Self::InProgress) -> u32;
/// Initiates a request for cancellation of this operation. Returns the
/// status code returned by the `{future,stream}.cancel-{read,write}`
/// intrinsic.
///
/// Note that this must synchronously complete the operation somehow. It
/// cannot return a status code indicating that the operation is still
/// pending; instead the operation must be complete with the returned code.
/// That may mean this intrinsic blocks while figuring things out in the
/// component model ABI, for example.
fn in_progress_cancel(state: &Self::InProgress) -> u32;
/// Converts a "completion result" into a "cancel result". This is necessary
/// when an in-progress operation is cancelled so the in-progress result is
/// first acquired and then transitioned to a cancel request.
fn result_into_cancel(result: Self::Result) -> Self::Cancel;
}
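/// Internal state machine for a [`WaitableOperation`].
///
/// Operations begin in `Start`, move to `InProgress` once `WaitableOp::start`
/// has issued the underlying canonical ABI call, and finish in `Done` once a
/// result has been produced or the operation has been cancelled.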
enum WaitableOperationState<S: WaitableOp> {
Start(S::Start),
InProgress(S::InProgress),
Done,
}
impl<S> WaitableOperation<S>
where
S: WaitableOp,
{
/// Creates a new operation in the initial state.
pub fn new(state: S::Start) -> WaitableOperation<S> {
WaitableOperation {
state: WaitableOperationState::Start(state),
completion_status: CompletionStatus {
code: None,
waker: None,
_pinned: marker::PhantomPinned,
},
}
}
fn pin_project(
self: Pin<&mut Self>,
) -> (&mut WaitableOperationState<S>, Pin<&mut CompletionStatus>) {
// SAFETY: this is the one method used to project from `Pin<&mut Self>`
// to the fields, and the contract we're deciding on is that
// `state` is never pinned but the `CompletionStatus` is. That pinning is
// what lets a raw pointer to the internal `Option<u32>` be shared with
// the completion callback.
unsafe {
let me = self.get_unchecked_mut();
(&mut me.state, Pin::new_unchecked(&mut me.completion_status))
}
}
/// Registers a completion callback for `waitable` within the current task to:
///
/// * Fill in `completion_status` with the result of a completion event.
/// * Call `cx.waker().wake()`.
pub fn register_waker(self: Pin<&mut Self>, waitable: u32, cx: &mut Context) {
let (_, mut completion_status) = self.pin_project();
debug_assert!(completion_status.as_mut().code_mut().is_none());
*completion_status.as_mut().waker_mut() = Some(cx.waker().clone());
// SAFETY: There's quite a lot going on here. First is the usage of
// `task` below, and for that see `unregister_waker` below for why this
// pattern should be safe.
//
// Otherwise we're handing off a pointer to `completion_status` to the
// `task` itself. That should be safe as we're guaranteed, via
// `Pin<&mut Self>`, that before `&mut Self` is deallocated the
// destructor will be run which will perform de-registration via
// cancellation.
unsafe {
let task = cabi::wasip3_task_set(ptr::null_mut());
assert!(!task.is_null());
assert!((*task).version >= cabi::WASIP3_TASK_V1);
let ptr: *mut CompletionStatus = completion_status.get_unchecked_mut();
let prev = ((*task).waitable_register)((*task).ptr, waitable, cabi_wake, ptr.cast());
// We might be inserting a waker for the first time or overwriting
// the previous waker. Only assert the expected value here if the
// previous value was non-null.
if !prev.is_null() {
assert_eq!(ptr, prev.cast());
}
cabi::wasip3_task_set(task);
}
unsafe extern "C" fn cabi_wake(ptr: *mut c_void, code: u32) {
let ptr: &mut CompletionStatus = &mut *ptr.cast::<CompletionStatus>();
ptr.code = Some(code);
ptr.waker.take().unwrap().wake()
}
}
/// Deregisters the corresponding `register_waker` within the current task
/// for the `waitable` passed here.
///
/// This relinquishes control of the original `completion_status` pointer
/// passed to `register_waker` after this call has completed.
pub fn unregister_waker(self: Pin<&mut Self>, waitable: u32) {
// SAFETY: the contract of `wasip3_task_set` is that the returned
// pointer is valid for the lifetime of our entire task, so it's valid
// for this stack frame. Additionally we assert it's non-null to
// double-check it's initialized and additionally check the version for
// the fields that we access.
//
// Otherwise the `waitable_unregister` callback should be safe because:
//
// * We're fulfilling the contract where the first argument must be
// `(*task).ptr`
// * We own the `waitable` that we're passing in, so we're fulfilling
// the contract that arbitrary waitables for other units of work
// aren't being manipulated.
unsafe {
let task = cabi::wasip3_task_set(ptr::null_mut());
assert!(!task.is_null());
assert!((*task).version >= cabi::WASIP3_TASK_V1);
let prev = ((*task).waitable_unregister)((*task).ptr, waitable);
// Note that `prev` here may or may not be null. A racy completion
// notification may have come in and
// removed our waitable from the map even though we're in the
// `InProgress` state, meaning it may not be present.
//
// The main thing is that after this method is called the
// internal `completion_status` is guaranteed to no longer be in
// `task`.
//
// Note, though, that if present this must be our `CompletionStatus`
// pointer.
if !prev.is_null() {
let ptr: *mut CompletionStatus = self.pin_project().1.get_unchecked_mut();
assert_eq!(ptr, prev.cast());
}
cabi::wasip3_task_set(task);
}
}
/// Polls this operation to see if it has completed yet.
///
/// This is intended to be used within `Future::poll`.
pub fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<S::Result> {
use WaitableOperationState::*;
let (state, completion_status) = self.as_mut().pin_project();
// First up, determine the completion status, if any, that's available.
let optional_code = match state {
// If this operation hasn't actually started yet then now's the
// time to start it.
Start(_) => {
let Start(s) = mem::replace(state, Done) else {
unreachable!()
};
let (code, s) = S::start(s);
*state = InProgress(s);
Some(code)
}
// This operation was previously queued so we're just waiting on
// the completion to come in. Read the completion status and
// interpret it down below.
//
// Note that it's the responsibility of the completion callback at
// the ABI level that we install to fill in this pointer, e.g. it's
// part of the `register_waker` contract.
InProgress(_) => completion_status.code_mut().take(),
// This operation has already completed; it's a Rust-level API
// violation to call this function again.
Done => panic!("cannot re-poll after operation completes"),
};
self.poll_complete_with_code(Some(cx), optional_code)
}
/// After acquiring the current return of this operation in `optional_code`,
/// figures out what to do with it.
///
/// The `cx` argument is optional; when it's `None`, no waker is registered
/// if the operation turns out to still be pending.
fn poll_complete_with_code(
mut self: Pin<&mut Self>,
cx: Option<&mut Context>,
optional_code: Option<u32>,
) -> Poll<S::Result> {
use WaitableOperationState::*;
let (state, _completion_status) = self.as_mut().pin_project();
// If a status code is provided, then extract the in-progress state and
// see what it thinks about this code. If we're done, yay! If not then
// record the new in-progress state and fall through to registering a
// waker.
//
// If no status code is available then that means we were polled before
// the status came back, so just re-register the waker.
if let Some(code) = optional_code {
let InProgress(in_progress) = mem::replace(state, Done) else {
unreachable!()
};
match S::in_progress_update(in_progress, code) {
Ok(result) => return Poll::Ready(result),
Err(in_progress) => *state = InProgress(in_progress),
}
}
let in_progress = match state {
InProgress(s) => s,
_ => unreachable!(),
};
// The operation is still in progress.
//
// Register the `cx.waker()` to get notified when the underlying
// waitable receives its completion.
if let Some(cx) = cx {
let handle = S::in_progress_waitable(in_progress);
self.register_waker(handle, cx);
}
Poll::Pending
}
/// Cancels the in-flight operation, if it's still in-flight, and sees what
/// happened.
///
/// Defers to `S` how to communicate the current status through the
/// cancellation type.
///
/// # Panics
///
/// Panics if the operation has already been completed via `poll_complete`
/// above.
/// Panics if this method is called twice.
pub fn cancel(mut self: Pin<&mut Self>) -> S::Cancel {
use WaitableOperationState::*;
let (state, mut completion_status) = self.as_mut().pin_project();
let in_progress = match state {
// This operation was never actually started, so there's no need to
// cancel anything, just pull out the value and return it.
Start(_) => {
let Start(s) = mem::replace(state, Done) else {
unreachable!()
};
return S::start_cancelled(s);
}
// This operation is actively in progress, fall through to below.
InProgress(s) => s,
// This operation was already completed after a `poll_complete`
// above advanced to the `Done` state, or this was cancelled twice.
// In such situations this is a programmer error to call this
// method, so panic.
Done => panic!("cannot cancel operation after completing it"),
};
// Our operation is in-progress, let's take a look at the pending
// completion code, if any.
match completion_status.as_mut().code_mut().take() {
// A completion code, or status update, is available. This can
// happen for example if an export received a status update for
// this operation but then during the subsequent poll we decided
// that the future should be dropped instead, aka a race between
// two events. In this situation though to fully process the
// cancellation we need to see what's up, so check to see if the
// operation is done with this code.
//
// Note that in this branch it's known that this operation's waker
// is not registered with the exported task because the exported
// task already delivered us the completion code, which
// automatically deregisters it at this time.
Some(code) => {
match self.as_mut().poll_complete_with_code(None, Some(code)) {
// The operation completed without us needing to cancel it,
// so just convert that to the `Cancel` type. In this
// situation no cancellation is necessary, the async
// operation is now inert, and we can immediately return.
Poll::Ready(result) => return S::result_into_cancel(result),
// The operation, despite receiving an update via a code,
// has not yet completed. In this case we do indeed need to
// perform cancellation, so fall through to below.
Poll::Pending => {}
}
}
// A completion code is not yet available. In this situation we
// deregister our waker from the exported task's waitable set and
// callback handling since we'll be no longer waiting for events.
// Cancellation below happens synchronously.
//
// After we've unregistered fall through to below.
None => {
let waitable = S::in_progress_waitable(in_progress);
self.as_mut().unregister_waker(waitable);
}
}
// This operation is guaranteed actively in progress at this point.
// That means we really do in fact need to cancel it. Here the
// appropriate cancellation intrinsic for the component model is
// invoked which returns the final completion status for this
// operation.
//
// The completion code is forwarded to `poll_complete_with_code` which
// determines what happened as a result. Note that at this time
// cancellation is required to be a synchronous operation in Rust, even
// if it's async in the component model, since that's the only way for
// this to be sound: Rust doesn't currently have linear types or async
// destructors, for example, that would let us rely on an asynchronous
// cancellation actually being driven to completion.
let (InProgress(in_progress), _) = self.as_mut().pin_project() else {
unreachable!()
};
let code = S::in_progress_cancel(in_progress);
match self.poll_complete_with_code(None, Some(code)) {
Poll::Ready(result) => S::result_into_cancel(result),
Poll::Pending => unreachable!(),
}
}
}
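// Illustrative sketch (not part of the bindings): the `mem::replace(state,
// Done)` dance in `poll_complete` and `cancel` above is the standard trick
// for taking ownership of non-`Copy` state out of an enum that is only
// reachable through `&mut`. The same idea with a toy enum:
#[allow(dead_code)]
fn _example_take_state_by_replace() {
    enum State {
        Start(String),
        Done,
    }
    let mut state = State::Start(String::from("in-flight"));
    let taken = match std::mem::replace(&mut state, State::Done) {
        State::Start(s) => s,
        State::Done => unreachable!(),
    };
    assert_eq!(taken, "in-flight");
    assert!(matches!(state, State::Done));
}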
impl<S: WaitableOp> Future for WaitableOperation<S> {
type Output = S::Result;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<S::Result> {
self.poll_complete(cx)
}
}
impl<S: WaitableOp> Drop for WaitableOperation<S> {
fn drop(&mut self) {
// SAFETY: we're in the destructor here so the value `self` is about
// to go away and we can guarantee we're not moving out of it.
let mut pin = unsafe { Pin::new_unchecked(self) };
let (state, _) = pin.as_mut().pin_project();
// If this operation has already completed then skip cancellation,
// otherwise it's our job to cancel anything in-flight.
if let WaitableOperationState::Done = state {
return;
}
pin.cancel();
}
}
impl CompletionStatus {
fn code_mut(self: Pin<&mut Self>) -> &mut Option<u32> {
unsafe { &mut self.get_unchecked_mut().code }
}
fn waker_mut(self: Pin<&mut Self>) -> &mut Option<Waker> {
unsafe { &mut self.get_unchecked_mut().waker }
}
}

View File

@@ -0,0 +1,100 @@
//! Low-level FFI-like bindings around `waitable-set` in the canonical ABI.
use std::num::NonZeroU32;
pub struct WaitableSet(NonZeroU32);
impl WaitableSet {
pub fn new() -> WaitableSet {
let ret = WaitableSet(NonZeroU32::new(unsafe { new() }).unwrap());
rtdebug!("waitable-set.new() = {}", ret.0.get());
ret
}
pub fn join(&self, waitable: u32) {
rtdebug!("waitable-set.join({waitable}, {})", self.0.get());
unsafe { join(waitable, self.0.get()) }
}
pub fn remove_waitable_from_all_sets(waitable: u32) {
rtdebug!("waitable-set.join({waitable}, 0)");
unsafe { join(waitable, 0) }
}
pub fn wait(&self) -> (u32, u32, u32) {
unsafe {
let mut payload = [0; 2];
let event0 = wait(self.0.get(), &mut payload);
rtdebug!(
"waitable-set.wait({}) = ({event0}, {:#x}, {:#x})",
self.0.get(),
payload[0],
payload[1],
);
(event0, payload[0], payload[1])
}
}
pub fn poll(&self) -> (u32, u32, u32) {
unsafe {
let mut payload = [0; 2];
let event0 = poll(self.0.get(), &mut payload);
rtdebug!(
"waitable-set.poll({}) = ({event0}, {:#x}, {:#x})",
self.0.get(),
payload[0],
payload[1],
);
(event0, payload[0], payload[1])
}
}
pub fn as_raw(&self) -> u32 {
self.0.get()
}
}
impl Drop for WaitableSet {
fn drop(&mut self) {
unsafe {
rtdebug!("waitable-set.drop({})", self.0.get());
drop(self.0.get());
}
}
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn new() -> u32 {
unreachable!()
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn drop(_: u32) {
unreachable!()
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn join(_: u32, _: u32) {
unreachable!()
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn wait(_: u32, _: *mut [u32; 2]) -> u32 {
unreachable!();
}
#[cfg(not(target_arch = "wasm32"))]
unsafe fn poll(_: u32, _: *mut [u32; 2]) -> u32 {
unreachable!();
}
#[cfg(target_arch = "wasm32")]
#[link(wasm_import_module = "$root")]
extern "C" {
#[link_name = "[waitable-set-new]"]
fn new() -> u32;
#[link_name = "[waitable-set-drop]"]
fn drop(set: u32);
#[link_name = "[waitable-join]"]
fn join(waitable: u32, set: u32);
#[link_name = "[waitable-set-wait]"]
fn wait(_: u32, _: *mut [u32; 2]) -> u32;
#[link_name = "[waitable-set-poll]"]
fn poll(_: u32, _: *mut [u32; 2]) -> u32;
}

Binary file not shown.

230
vendor/wit-bindgen/src/rt/mod.rs vendored Normal file
View File

@@ -0,0 +1,230 @@
use core::alloc::Layout;
use core::ptr::{self, NonNull};
// Re-export `bitflags` so that we can reference it from macros.
#[cfg(feature = "bitflags")]
pub use bitflags;
#[cfg(not(feature = "bitflags"))]
pub mod bitflags {
#[macro_export]
macro_rules! bitflags {
(
$(#[$attr:meta])*
$vis:vis struct $name:ident : $repr:ty {
$(
$(#[$flag_attr:meta])*
const $flag:ident = $val:expr;
)*
}
) => {
$(#[$attr])*
$vis struct $name {
bits: $repr,
}
impl $name {
$(
$(#[$flag_attr])*
$vis const $flag: Self = Self { bits: $val };
)*
$vis fn empty() -> Self {
Self { bits: 0 }
}
$vis fn from_bits_retain(bits: $repr) -> Self {
Self { bits }
}
$vis fn bits(&self) -> $repr {
self.bits
}
}
impl core::ops::BitOr<$name> for $name {
type Output = Self;
fn bitor(self, rhs: $name) -> $name {
Self { bits: self.bits | rhs.bits }
}
}
impl core::ops::BitAnd<$name> for $name {
type Output = Self;
fn bitand(self, rhs: $name) -> $name {
Self { bits: self.bits & rhs.bits }
}
}
impl core::ops::BitXor<$name> for $name {
type Output = Self;
fn bitxor(self, rhs: $name) -> $name {
Self { bits: self.bits ^ rhs.bits }
}
}
};
}
pub use crate::bitflags;
}
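// Illustrative sketch (not generated code): when the `bitflags` feature is
// disabled, the fallback macro above provides a minimal subset of the real
// crate's API. A hypothetical invocation and the operations it supports:
#[cfg(not(feature = "bitflags"))]
#[allow(dead_code)]
fn _example_bitflags_fallback() {
    crate::bitflags! {
        #[derive(Clone, Copy, PartialEq, Eq, Debug)]
        pub struct Permissions: u32 {
            const READ = 0b01;
            const WRITE = 0b10;
        }
    }
    let rw = Permissions::READ | Permissions::WRITE;
    assert_eq!(rw.bits(), 0b11);
    assert_eq!(rw & Permissions::WRITE, Permissions::WRITE);
}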
/// For more information about this see `./ci/rebuild-libwit-bindgen-cabi.sh`.
#[cfg(not(target_env = "p2"))]
mod wit_bindgen_cabi_realloc;
/// This function is called from generated bindings and will be deleted by
/// the linker. The purpose of this function is to force a reference to the
/// symbol `cabi_realloc` to make its way through to the final linker
/// command line. That way `wasm-ld` will pick it up, see it needs to be
/// exported, and then export it.
///
/// For more information about this see `./ci/rebuild-libwit-bindgen-cabi.sh`.
pub fn maybe_link_cabi_realloc() {
#[cfg(all(target_family = "wasm", not(target_env = "p2")))]
{
extern "C" {
fn cabi_realloc(
old_ptr: *mut u8,
old_len: usize,
align: usize,
new_len: usize,
) -> *mut u8;
}
// Force the `cabi_realloc` symbol to be referenced from here. This
// is done with a `#[used]` Rust `static` to ensure that this
// reference makes it all the way to the linker before it's
// considered for garbage collection. When the linker sees it, it'll
// remove this `static` here (due to it not actually being needed)
// but the linker will have at that point seen the `cabi_realloc`
// symbol and it should get exported.
#[used]
static _NAME_DOES_NOT_MATTER: unsafe extern "C" fn(
*mut u8,
usize,
usize,
usize,
) -> *mut u8 = cabi_realloc;
}
}
/// NB: this function is called by a generated function in the
/// `cabi_realloc` module above. It's otherwise never explicitly called.
///
/// For more information about this see `./ci/rebuild-libwit-bindgen-cabi.sh`.
#[cfg(not(target_env = "p2"))]
pub unsafe fn cabi_realloc(
old_ptr: *mut u8,
old_len: usize,
align: usize,
new_len: usize,
) -> *mut u8 {
use alloc::alloc::{alloc as allocate, handle_alloc_error, realloc, Layout};
let layout;
let ptr = if old_len == 0 {
if new_len == 0 {
return align as *mut u8;
}
layout = Layout::from_size_align_unchecked(new_len, align);
allocate(layout)
} else {
debug_assert_ne!(new_len, 0, "non-zero old_len requires non-zero new_len!");
layout = Layout::from_size_align_unchecked(old_len, align);
realloc(old_ptr, layout, new_len)
};
if ptr.is_null() {
// Print a nice message in debug mode, but in release mode don't
// pull in so many dependencies related to printing so just emit an
// `unreachable` instruction.
if cfg!(debug_assertions) {
handle_alloc_error(layout);
} else {
#[cfg(target_arch = "wasm32")]
core::arch::wasm32::unreachable();
#[cfg(not(target_arch = "wasm32"))]
unreachable!();
}
}
return ptr;
}
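// Illustrative sketch (not generated code): the `old_len == 0 && new_len == 0`
// branch above returns `align` as a dangling-but-aligned pointer rather than
// allocating. That mirrors the convention used for empty collections, where a
// non-null, well-aligned pointer exists even though nothing was allocated:
#[allow(dead_code)]
fn _example_zero_size_alloc_convention() {
    let v: alloc::vec::Vec<u64> = alloc::vec::Vec::new();
    assert!(!v.as_ptr().is_null());
    assert_eq!(v.as_ptr() as usize % core::mem::align_of::<u64>(), 0);
}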
/// Provide a hook for generated export functions to run static constructors at
/// most once.
///
/// wit-bindgen-rust generates a call to this function at the start of all
/// component export functions. Importantly, it is not called as part of
/// `cabi_realloc`, which is a *core* export func and should not execute ctors.
#[cfg(target_arch = "wasm32")]
pub fn run_ctors_once() {
static mut RUN: bool = false;
unsafe {
if !RUN {
// This function is synthesized by `wasm-ld` to run all static
// constructors. wasm-ld will either provide an implementation
// of this symbol, or synthesize a wrapper around each
// exported function to (unconditionally) run ctors. By using
// this function, the linked module is opting into "manually"
// running ctors.
extern "C" {
fn __wasm_call_ctors();
}
__wasm_call_ctors();
RUN = true;
}
}
}
/// Support for using the Component Model Async ABI
#[cfg(feature = "async")]
pub mod async_support;
/// Cleanup helper used to deallocate blocks of canonical ABI data from
/// lowerings.
pub struct Cleanup {
ptr: NonNull<u8>,
layout: Layout,
}
// Usage of the returned pointer is always unsafe and must abide by these
// conventions, but this structure itself has no inherent reason to not be
// send/sync.
unsafe impl Send for Cleanup {}
unsafe impl Sync for Cleanup {}
impl Cleanup {
/// Allocates a chunk of memory with `layout` and returns an object to clean
/// it up.
///
/// The returned pointer is null exactly when `layout` has size zero, and the
/// optional `Cleanup` is present only for non-zero-size layouts. When dropped,
/// the `Cleanup` deallocates the returned pointer.
pub fn new(layout: Layout) -> (*mut u8, Option<Cleanup>) {
use alloc::alloc;
if layout.size() == 0 {
return (ptr::null_mut(), None);
}
let ptr = unsafe { alloc::alloc(layout) };
let ptr = match NonNull::new(ptr) {
Some(ptr) => ptr,
None => alloc::handle_alloc_error(layout),
};
(ptr.as_ptr(), Some(Cleanup { ptr, layout }))
}
/// Discards this cleanup to leak its memory or intentionally transfer
/// ownership to some other location.
pub fn forget(self) {
core::mem::forget(self);
}
}
impl Drop for Cleanup {
fn drop(&mut self) {
unsafe {
for i in 0..self.layout.size() {
*self.ptr.add(i).as_ptr() = 0xff;
}
alloc::alloc::dealloc(self.ptr.as_ptr(), self.layout);
}
}
}
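// Illustrative sketch (not generated code): typical use of `Cleanup` in the
// lowering helpers is "allocate, hand the raw pointer to the canonical ABI,
// and let the guard free the block when it falls out of scope".
#[allow(dead_code)]
fn _example_cleanup_usage() {
    let layout = Layout::from_size_align(16, 8).unwrap();
    let (ptr, cleanup) = Cleanup::new(layout);
    assert!(!ptr.is_null());
    // ... `ptr` would be handed to an intrinsic here ...
    drop(cleanup); // frees the 16 bytes
    // Zero-sized layouts allocate nothing and hand back no guard at all.
    let (ptr, cleanup) = Cleanup::new(Layout::new::<()>());
    assert!(ptr.is_null() && cleanup.is_none());
}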

View File

@@ -0,0 +1,20 @@
// This file is generated by ./ci/rebuild-libwit-bindgen-cabi.sh
#include <stdint.h>
#include <stdlib.h>
extern void *cabi_realloc_wit_bindgen_0_41_0(void *ptr, size_t old_size, size_t align, size_t new_size);
__attribute__((__weak__, __export_name__("cabi_realloc")))
void *cabi_realloc(void *ptr, size_t old_size, size_t align, size_t new_size) {
return cabi_realloc_wit_bindgen_0_41_0(ptr, old_size, align, new_size);
}
static void *WASIP3_TASK = NULL;
__attribute__((__weak__))
void *wasip3_task_set(void *ptr) {
void *ret = WASIP3_TASK;
WASIP3_TASK = ptr;
return ret;
}

Binary file not shown.

View File

@@ -0,0 +1,10 @@
// This file is generated by ./ci/rebuild-libwit-bindgen-cabi.sh
#include <stdint.h>
extern void *cabi_realloc_wit_bindgen_0_46_0(void *ptr, size_t old_size, size_t align, size_t new_size);
__attribute__((__weak__, __export_name__("cabi_realloc")))
void *cabi_realloc(void *ptr, size_t old_size, size_t align, size_t new_size) {
return cabi_realloc_wit_bindgen_0_46_0(ptr, old_size, align, new_size);
}

Binary file not shown.

View File

@@ -0,0 +1,11 @@
// This file is generated by ./ci/rebuild-libwit-bindgen-cabi.sh
#[unsafe(no_mangle)]
pub unsafe extern "C" fn cabi_realloc_wit_bindgen_0_46_0(
old_ptr: *mut u8,
old_len: usize,
align: usize,
new_len: usize,
) -> *mut u8 {
crate::rt::cabi_realloc(old_ptr, old_len, align, new_len)
}

View File

@@ -0,0 +1,12 @@
// This file is generated by ./ci/rebuild-libwit-bindgen-cabi.sh
#include <stdlib.h>
static void *WASIP3_TASK = NULL;
__attribute__((__weak__))
void *wasip3_task_set(void *ptr) {
void *ret = WASIP3_TASK;
WASIP3_TASK = ptr;
return ret;
}

Binary file not shown.