Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1
vendor/oboe/.cargo-checksum.json vendored Normal file
View File

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"454d13b9ba90157b817cd0fcc35e42220f42d61e6cd155bb34ed80b9243dcb64","README.md":"7f913d65d89e86e0786c76ba26d9d462d313869d7de222f0b1ed313f8781b0b3","src/audio_stream.rs":"9c31757a69af4c074fab7067ae6187ca02b97878f8c3d60dede7bd84f75d77c4","src/audio_stream_base.rs":"1e4b09a1f70f4b36adcd371a8777ca81b8af33c77816d52738b74cac1c999a7c","src/audio_stream_builder.rs":"6702cdaf9b78d0ecd1157d22442706517f0b2e8fe734a8566294d84e513da96a","src/audio_stream_callback.rs":"bb5f436c5d2a8e14af51c230704b4ea82e8d27b1f51817ee56b4d3a4462d3e83","src/definitions.rs":"0a0c299c299f27a1642a04fa4bbffb71236be884de7a7d17acbcd195ccb2275c","src/java_interface.rs":"8b56ab1dc12fa39b659ac5e88a0f72a19fba765dc6c274b9aa1c5847399f8023","src/java_interface/audio_features.rs":"a44caa1fb6bbd139c42fb7da92e23b2bbb5a86e302844cd04603f6a18570811b","src/java_interface/definitions.rs":"d89ae972badb5b18da774f6e54fa716140f35bfa332cd267814c7aa038c462d8","src/java_interface/devices_info.rs":"51463c6d3247fd63a5c629ded617223f370145649dc756d9fe3c64834c278be9","src/java_interface/stream_defaults.rs":"65b2d6a6fce39b8a973e89c9c656c543e1e02dddacf0c2faadab921be22d40dd","src/java_interface/utils.rs":"4cc088cfa62338deaa1d898dd8cc38d2c5b87b241d78b8ca02025fa82c826e94","src/lib.rs":"2dd0fabba486b7840465782966ce2e6021cc4b93a60f61d4ab3f87a4728f1311","src/private.rs":"d73b0bd8970e9223281012ed811d49723ba0a517f72cf93b7053c2907cd84104","src/type_guide.rs":"c26fed05169b3c9d37a44feb76c26110d30560ac3825a5d154af904b187c1e50","src/version.rs":"9d4041c1811d30a23b79b2569d66b6a42ccb5d7d4d2a1d71960cc5cab7958653"},"package":"e8b61bebd49e5d43f5f8cc7ee2891c16e0f41ec7954d36bcb6c14c5e0de867fb"}

94
vendor/oboe/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,94 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
name = "oboe"
version = "0.6.1"
authors = ["K. <kayo@illumium.org>"]
include = [
"/src/**/*.rs",
"/README.md",
]
description = "Safe interface for oboe an android library for low latency audio IO"
homepage = "https://github.com/katyo/oboe-rs"
readme = "README.md"
keywords = [
"oboe",
"android",
"audio",
"aaudio",
"opensles",
]
categories = [
"api-bindings",
"multimedia::audio",
]
license = "Apache-2.0"
repository = "https://github.com/katyo/oboe-rs"
[package.metadata.docs.rs]
features = [
"java-interface",
"doc-cfg",
]
targets = [
"aarch64-linux-android",
"armv7-linux-androideabi",
"i686-linux-android",
"x86_64-linux-android",
]
[profile.release]
opt-level = "z"
lto = true
codegen-units = 1
debug = 0
debug-assertions = false
rpath = false
panic = "unwind"
overflow-checks = false
incremental = false
strip = true
[dependencies.jni]
version = "0.21"
optional = true
[dependencies.ndk]
version = "0.8"
optional = true
default-features = false
[dependencies.ndk-context]
version = "0.1"
optional = true
[dependencies.num-derive]
version = "0.4"
[dependencies.num-traits]
version = "0.2"
[dependencies.oboe-sys]
version = "0.6"
[features]
doc-cfg = []
fetch-prebuilt = ["oboe-sys/fetch-prebuilt"]
generate-bindings = ["oboe-sys/generate-bindings"]
java-interface = [
"ndk",
"ndk-context",
"jni",
]
shared-link = ["oboe-sys/shared-link"]
shared-stdcxx = ["oboe-sys/shared-stdcxx"]

125
vendor/oboe/README.md vendored Normal file
View File

@@ -0,0 +1,125 @@
# Rust bindings for Oboe library
[![github](https://img.shields.io/badge/github-katyo/oboe--rs-8da0cb.svg?style=for-the-badge&logo=github)](https://github.com/katyo/oboe-rs)
[![Crates.io Package](https://img.shields.io/crates/v/oboe.svg?style=for-the-badge&color=fc8d62&logo=rust)](https://crates.io/crates/oboe)
[![Docs.rs API Docs](https://img.shields.io/badge/docs.rs-oboe-66c2a5?style=for-the-badge&logo=data:image/svg+xml;base64,PHN2ZyByb2xlPSJpbWciIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgdmlld0JveD0iMCAwIDUxMiA1MTIiPjxwYXRoIGZpbGw9IiNmNWY1ZjUiIGQ9Ik00ODguNiAyNTAuMkwzOTIgMjE0VjEwNS41YzAtMTUtOS4zLTI4LjQtMjMuNC0zMy43bC0xMDAtMzcuNWMtOC4xLTMuMS0xNy4xLTMuMS0yNS4zIDBsLTEwMCAzNy41Yy0xNC4xIDUuMy0yMy40IDE4LjctMjMuNCAzMy43VjIxNGwtOTYuNiAzNi4yQzkuMyAyNTUuNSAwIDI2OC45IDAgMjgzLjlWMzk0YzAgMTMuNiA3LjcgMjYuMSAxOS45IDMyLjJsMTAwIDUwYzEwLjEgNS4xIDIyLjEgNS4xIDMyLjIgMGwxMDMuOS01MiAxMDMuOSA1MmMxMC4xIDUuMSAyMi4xIDUuMSAzMi4yIDBsMTAwLTUwYzEyLjItNi4xIDE5LjktMTguNiAxOS45LTMyLjJWMjgzLjljMC0xNS05LjMtMjguNC0yMy40LTMzLjd6TTM1OCAyMTQuOGwtODUgMzEuOXYtNjguMmw4NS0zN3Y3My4zek0xNTQgMTA0LjFsMTAyLTM4LjIgMTAyIDM4LjJ2LjZsLTEwMiA0MS40LTEwMi00MS40di0uNnptODQgMjkxLjFsLTg1IDQyLjV2LTc5LjFsODUtMzguOHY3NS40em0wLTExMmwtMTAyIDQxLjQtMTAyLTQxLjR2LS42bDEwMi0zOC4yIDEwMiAzOC4ydi42em0yNDAgMTEybC04NSA0Mi41di03OS4xbDg1LTM4Ljh2NzUuNHptMC0xMTJsLTEwMiA0MS40LTEwMi00MS40di0uNmwxMDItMzguMiAxMDIgMzguMnYuNnoiPjwvcGF0aD48L3N2Zz4K)](https://docs.rs/oboe)
[![License: Apache-2.0](https://img.shields.io/badge/License-Apache--2.0-brightgreen.svg?style=for-the-badge)](https://opensource.org/licenses/Apache-2.0)
[![CI Status](https://img.shields.io/github/actions/workflow/status/katyo/oboe-rs/rust.yml?branch=master&style=for-the-badge&logo=github-actions&logoColor=white)](https://github.com/katyo/oboe-rs/actions?query=workflow%3ARust)
Safe Rust interface for [Oboe](https://github.com/google/oboe) High-Performance Audio library for Android.
It also provides an interface for some platform APIs relevant to audio IO.
__Oboe__ is a C++ library which makes it easy to build high-performance audio apps on Android. It was created primarily to allow developers to target a simplified API that works across multiple API levels back to API level 16 (Jelly Bean).
## Crate features
- __java-interface__ Add interface for some Android platform APIs.
- __generate-bindings__ Generate bindings at compile-time. By default the pregenerated bindings will be used.
- __compile-library__ Compile _oboe_ C++ library at compile-time using __cmake__. By default the precompiled library will be used.
- __shared-link__ Use shared linking. By default the static Oboe library will be used.
The crate already has pregenerated bindings and precompiled static libraries for the following Android targets:
- __armv7__
- __aarch64__
- __i686__
- __x86_64__
## Build issues
The **[clang-sys](https://crates.io/crates/clang-sys)** crate uses **[llvm-config](http://llvm.org/docs/CommandGuide/llvm-config.html)** for searching [libclang](https://clang.llvm.org/docs/Tooling.html) library and preparing _C_/_C++_ compiler configuration. In order to get proper setup you should add *llvm-config* to your executables search path.
When using tools that rely on libclang under the hood, such as __bindgen__, you must make sure your setup is correct. Otherwise you will get errors related to missing headers or definitions.
To build applications you need a recent version of __cargo-apk__, which supports the latest Android [SDK](https://developer.android.com/studio#command-tools) (28+) and [NDK](https://developer.android.com/ndk) (20+). Don't forget to set the ANDROID_SDK_ROOT environment variable with the path to the installed SDK.
For building host crates which require a C compiler you may also set the __HOST_CC__ environment variable with the path to your C compiler.
## Usage example
Playing sine wave in asynchronous (callback-driven) mode:
```rust
use oboe::{
    AudioOutputCallback,
    AudioOutputStream,
    AudioStreamBuilder,
    DataCallbackResult,
    PerformanceMode,
    SharingMode,
    Mono,
};
use std::f32::consts::PI;
// Structure for sound generator
pub struct SineWave {
frequency: f32,
gain: f32,
phase: f32,
delta: Option<f32>,
}
// Default constructor for sound generator
impl Default for SineWave {
fn default() -> Self {
Self {
frequency: 440.0,
gain: 0.5,
phase: 0.0,
delta: None,
}
}
}
// Audio output callback trait implementation
impl AudioOutputCallback for SineWave {
// Define type for frames which we would like to process
type FrameType = (f32, Mono);
// Implement sound data output callback
fn on_audio_ready(&mut self, stream: &mut dyn AudioOutputStream, frames: &mut [f32]) -> DataCallbackResult {
// Configure out wave generator
if self.delta.is_none() {
let sample_rate = stream.get_sample_rate() as f32;
self.delta = (self.frequency * 2.0 * PI / sample_rate).into();
println!("Prepare sine wave generator: samplerate={}, time delta={}", sample_rate, self.delta.unwrap());
}
let delta = self.delta.unwrap();
// Generate audio frames to fill the output buffer
for frame in frames {
*frame = self.gain * self.phase.sin();
self.phase += delta;
while self.phase > 2.0 * PI {
self.phase -= 2.0 * PI;
}
}
// Notify the oboe that stream is continued
DataCallbackResult::Continue
}
}
// ...
// Create playback stream
let mut sine = AudioStreamBuilder::default()
// select desired performance mode
.set_performance_mode(PerformanceMode::LowLatency)
// select desired sharing mode
.set_sharing_mode(SharingMode::Shared)
// select sound sample format
.set_format::<f32>()
// select channels configuration
.set_channel_count::<Mono>()
// set our generator as callback
.set_callback(SineWave::default())
// open the output stream
.open_stream()
.unwrap();
// Start playback
sine.start().unwrap();
// ...
```

786
vendor/oboe/src/audio_stream.rs vendored Normal file
View File

@@ -0,0 +1,786 @@
use num_traits::FromPrimitive;
use oboe_sys as ffi;
use std::{
ffi::c_void,
fmt::{self, Display},
marker::PhantomData,
mem::{transmute, MaybeUninit},
ops::{Deref, DerefMut},
};
use super::{
audio_stream_base_fmt, wrap_result, wrap_status, AudioApi, AudioStreamBase, FrameTimestamp,
Input, IsFrameType, Output, RawAudioInputStream, RawAudioOutputStream, RawAudioStream,
RawAudioStreamBase, Result, Status, StreamState, NANOS_PER_MILLISECOND,
};
/**
 * The default number of nanoseconds (2 seconds) to wait for when performing state change
 * operations on the stream, such as `start` and `stop`.
 *
 * See [AudioStream::start_with_timeout]
 */
pub const DEFAULT_TIMEOUT_NANOS: i64 = 2000 * NANOS_PER_MILLISECOND;
/**
 * Safe base trait for Oboe audio stream.
 *
 * Groups the operations that are safe to call from any context, including the
 * data callback (as opposed to the state-changing operations on [AudioStream]).
 */
pub trait AudioStreamSafe: AudioStreamBase {
    /**
     * Query the current state, e.g. `StreamState::Pausing`
     */
    fn get_state(&self) -> StreamState;
    /**
     * This can be used to adjust the latency of the buffer by changing
     * the threshold where blocking will occur.
     * By combining this with [`AudioStreamSafe::get_xrun_count`], the latency can be tuned
     * at run-time for each device.
     *
     * This cannot be set higher than [`AudioStreamBase::get_buffer_capacity_in_frames`].
     */
    fn set_buffer_size_in_frames(&mut self, _requested_frames: i32) -> Result<i32>;
    /**
     * An XRun is an Underrun or an Overrun.
     * During playing, an underrun will occur if the stream is not written in time
     * and the system runs out of valid data.
     * During recording, an overrun will occur if the stream is not read in time
     * and there is no place to put the incoming data so it is discarded.
     *
     * An underrun or overrun can cause an audible "pop" or "glitch".
     */
    fn get_xrun_count(&self) -> Result<i32>;
    /**
     * Returns true if XRun counts are supported on the stream
     */
    fn is_xrun_count_supported(&self) -> bool;
    /**
     * Query the number of frames that are read or written by the endpoint at one time.
     */
    fn get_frames_per_burst(&mut self) -> i32;
    /**
     * Get the number of bytes in each audio frame. This is calculated using the channel count
     * and the sample format. For example, a 2 channel floating point stream will have
     * 2 * 4 = 8 bytes per frame.
     */
    fn get_bytes_per_frame(&mut self) -> i32 {
        self.get_channel_count() as i32 * self.get_bytes_per_sample()
    }
    /**
     * Get the number of bytes per sample. This is calculated using the sample format. For example,
     * a stream using 16-bit integer samples will have 2 bytes per sample.
     *
     * Returns the number of bytes per sample.
     */
    fn get_bytes_per_sample(&mut self) -> i32;
    /**
     * Calculate the latency of a stream based on getTimestamp().
     *
     * Output latency is the time it takes for a given frame to travel from the
     * app to some type of digital-to-analog converter. If the DAC is external, for example
     * in a USB interface or a TV connected by HDMI, then there may be additional latency
     * that the Android device is unaware of.
     *
     * Input latency is the time it takes for a given frame to travel from an analog-to-digital
     * converter (ADC) to the app.
     *
     * Note that the latency of an OUTPUT stream will increase abruptly when you write data to it
     * and then decrease slowly over time as the data is consumed.
     *
     * The latency of an INPUT stream will decrease abruptly when you read data from it
     * and then increase slowly over time as more data arrives.
     *
     * The latency of an OUTPUT stream is generally higher than the INPUT latency
     * because an app generally tries to keep the OUTPUT buffer full and the INPUT buffer empty.
     */
    fn calculate_latency_millis(&mut self) -> Result<f64>;
    /**
     * Get the estimated time that the frame at `frame_position` entered or left the audio processing
     * pipeline.
     *
     * This can be used to coordinate events and interactions with the external environment, and to
     * estimate the latency of an audio stream. An example of usage can be found in the hello-oboe
     * sample (search for "calculate_current_output_latency_millis").
     *
     * The time is based on the implementation's best effort, using whatever knowledge is available
     * to the system, but cannot account for any delay unknown to the implementation.
     *
     * `clock_id` is the type of clock to use, e.g. CLOCK_MONOTONIC.
     *
     * Returns a FrameTimestamp containing the position and time at which a particular audio frame
     * entered or left the audio processing pipeline, or an error if the operation failed.
     */
    fn get_timestamp(&mut self, clock_id: i32) -> Result<FrameTimestamp>;
    /**
     * Get the underlying audio API which the stream uses.
     */
    fn get_audio_api(&self) -> AudioApi;
    /**
     * Returns true if the underlying audio API is AAudio.
     */
    fn uses_aaudio(&self) -> bool {
        self.get_audio_api() == AudioApi::AAudio
    }
    /**
     * Returns the number of frames of data currently in the buffer
     */
    fn get_available_frames(&mut self) -> Result<i32>;
}
/**
 * Base trait for Oboe audio stream.
 *
 * Adds the state-changing lifecycle operations (open/close/start/stop) on top of
 * the read-only queries of [AudioStreamSafe].
 */
pub trait AudioStream: AudioStreamSafe {
    /**
     * Open a stream based on the current settings.
     *
     * Note that we do not recommend re-opening a stream that has been closed.
     * TODO Should we prevent re-opening?
     */
    fn open(&mut self) -> Status {
        Ok(())
    }
    /**
     * Close the stream and deallocate any resources from the open() call.
     */
    fn close(&mut self) -> Status;
    /**
     * Start the stream. This will block until the stream has been started, an error occurs
     * or the default timeout ([`DEFAULT_TIMEOUT_NANOS`]) has been reached.
     */
    fn start(&mut self) -> Status {
        self.start_with_timeout(DEFAULT_TIMEOUT_NANOS)
    }
    /**
     * Start the stream. This will block until the stream has been started, an error occurs
     * or `timeout_nanoseconds` has been reached.
     */
    fn start_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status;
    /**
     * Stop the stream. This will block until the stream has been stopped, an error occurs
     * or the default timeout ([`DEFAULT_TIMEOUT_NANOS`]) has been reached.
     */
    fn stop(&mut self) -> Status {
        self.stop_with_timeout(DEFAULT_TIMEOUT_NANOS)
    }
    /**
     * Stop the stream. This will block until the stream has been stopped, an error occurs
     * or `timeout_nanoseconds` has been reached.
     */
    fn stop_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status;
    /**
     * Start the stream asynchronously. Returns immediately (does not block). Equivalent to calling
     * `start(0)`.
     */
    fn request_start(&mut self) -> Status;
    /**
     * Stop the stream asynchronously. Returns immediately (does not block). Equivalent to calling
     * `stop(0)`.
     */
    fn request_stop(&mut self) -> Status;
    /**
     * Wait until the stream's current state no longer matches the input state.
     * The input state is passed to avoid race conditions caused by the state
     * changing between calls.
     *
     * Note that generally applications do not need to call this. It is considered
     * an advanced technique and is mostly used for testing.
     *
     * ```ignore
     * const TIMEOUT_NANOS: i64 = 500 * NANOS_PER_MILLISECOND; // arbitrary 1/2 second
     * let mut current_state = stream.get_state();
     * loop {
     *     if let Ok(next_state) = stream.wait_for_state_change(current_state, TIMEOUT_NANOS) {
     *         if next_state != StreamState::Paused {
     *             current_state = next_state;
     *             continue;
     *         }
     *     }
     *     break;
     * }
     * ```
     *
     * If the state does not change within the timeout period then it will
     * return [`Error::Timeout`](crate::Error::Timeout). This is true even if timeout_nanoseconds is zero.
     */
    fn wait_for_state_change(
        &mut self,
        input_state: StreamState,
        timeout_nanoseconds: i64,
    ) -> Result<StreamState>;
    /**
     * Wait until the stream has a minimum amount of data available in its buffer.
     * This can be used with an EXCLUSIVE MMAP input stream to avoid reading data too close to
     * the DSP write position, which may cause glitches.
     */
    fn wait_for_available_frames(
        &mut self,
        num_frames: i32,
        timeout_nanoseconds: i64,
    ) -> Result<i32>;
}
/**
 * The stream which is used for async audio input
 */
pub trait AudioInputStreamSafe: AudioStreamSafe {
    /**
     * The number of audio frames read from the stream.
     * This monotonic counter will never get reset.
     */
    fn get_frames_read(&mut self) -> i64;
}
/**
 * The stream which is used for audio input
 *
 * Marker trait combining the full stream lifecycle with input-side queries.
 */
pub trait AudioInputStream: AudioStream + AudioInputStreamSafe {}
/**
 * The stream which can be used for audio input in synchronous mode
 */
pub trait AudioInputStreamSync: AudioInputStream {
    // The frame type (sample format + channel layout) this stream reads.
    type FrameType: IsFrameType;
    /**
     * Read data into the supplied buffer from the stream. This method will block until the read
     * is complete or it runs out of time.
     *
     * If `timeout_nanoseconds` is zero then this call will not wait.
     */
    fn read(
        &mut self,
        _buffer: &mut [<Self::FrameType as IsFrameType>::Type],
        _timeout_nanoseconds: i64,
    ) -> Result<i32>;
}
/**
 * The stream which is used for async audio output
 */
pub trait AudioOutputStreamSafe: AudioStreamSafe {
    /**
     * The number of audio frames written into the stream.
     * This monotonic counter will never get reset.
     */
    fn get_frames_written(&mut self) -> i64;
}
/**
 * The stream which has pause/flush capabilities
 */
pub trait AudioOutputStream: AudioStream + AudioOutputStreamSafe {
    /**
     * Pause the stream. This will block until the stream has been paused, an error occurs
     * or the default timeout ([`DEFAULT_TIMEOUT_NANOS`]) has been reached.
     */
    fn pause(&mut self) -> Status {
        self.pause_with_timeout(DEFAULT_TIMEOUT_NANOS)
    }
    /**
     * Pause the stream. This will block until the stream has been paused, an error occurs
     * or `timeout_nanoseconds` has been reached.
     */
    fn pause_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status;
    /**
     * Flush the stream. This will block until the stream has been flushed, an error occurs
     * or the default timeout ([`DEFAULT_TIMEOUT_NANOS`]) has been reached.
     */
    fn flush(&mut self) -> Status {
        self.flush_with_timeout(DEFAULT_TIMEOUT_NANOS)
    }
    /**
     * Flush the stream. This will block until the stream has been flushed, an error occurs
     * or `timeout_nanoseconds` has been reached.
     */
    fn flush_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status;
    /**
     * Pause the stream asynchronously. Returns immediately (does not block). Equivalent to calling
     * `pause(0)`.
     */
    fn request_pause(&mut self) -> Status;
    /**
     * Flush the stream asynchronously. Returns immediately (does not block). Equivalent to calling
     * `flush(0)`.
     */
    fn request_flush(&mut self) -> Status;
}
/**
 * The stream which can be used for audio output in synchronous mode
 */
pub trait AudioOutputStreamSync: AudioOutputStream {
    // The frame type (sample format + channel layout) this stream writes.
    type FrameType: IsFrameType;
    /**
     * Write data from the supplied buffer into the stream. This method will block until the write
     * is complete or it runs out of time.
     *
     * If `timeout_nanoseconds` is zero then this call will not wait.
     */
    fn write(
        &mut self,
        _buffer: &[<Self::FrameType as IsFrameType>::Type],
        _timeout_nanoseconds: i64,
    ) -> Result<i32>;
}
// Blanket implementation: any type exposing the raw FFI stream pointers gets the
// safe query API. Each method is a thin wrapper over one oboe-sys call.
impl<T: RawAudioStream + RawAudioStreamBase> AudioStreamSafe for T {
    fn set_buffer_size_in_frames(&mut self, requested_frames: i32) -> Result<i32> {
        wrap_result(unsafe {
            ffi::oboe_AudioStream_setBufferSizeInFrames(self._raw_stream_mut(), requested_frames)
        })
    }
    fn get_state(&self) -> StreamState {
        // NOTE(review): const-to-mut cast because the FFI entry point takes a mutable
        // pointer even for this logically read-only query; presumed not to mutate —
        // confirm against the oboe-sys bindings.
        FromPrimitive::from_i32(unsafe {
            ffi::oboe_AudioStream_getState(self._raw_stream() as *const _ as *mut _)
        })
        .unwrap()
    }
    fn get_xrun_count(&self) -> Result<i32> {
        // Same const-to-mut cast rationale as in `get_state` above.
        wrap_result(unsafe {
            ffi::oboe_AudioStream_getXRunCount(self._raw_stream() as *const _ as *mut _)
        })
    }
    fn is_xrun_count_supported(&self) -> bool {
        unsafe { ffi::oboe_AudioStream_isXRunCountSupported(self._raw_stream()) }
    }
    fn get_frames_per_burst(&mut self) -> i32 {
        unsafe { ffi::oboe_AudioStream_getFramesPerBurst(self._raw_stream_mut()) }
    }
    fn get_bytes_per_sample(&mut self) -> i32 {
        unsafe { ffi::oboe_AudioStream_getBytesPerSample(self._raw_stream_mut()) }
    }
    fn calculate_latency_millis(&mut self) -> Result<f64> {
        wrap_result(unsafe { ffi::oboe_AudioStream_calculateLatencyMillis(self._raw_stream_mut()) })
    }
    fn get_timestamp(&mut self, clock_id: i32 /* clockid_t */) -> Result<FrameTimestamp> {
        // NOTE(review): `transmute` reinterprets the FFI timestamp result struct as the
        // crate's result type; this assumes the two have identical layout — confirm
        // against the generated oboe-sys bindings.
        wrap_result(unsafe {
            transmute(ffi::oboe_AudioStream_getTimestamp(
                self._raw_stream_mut() as *mut _ as *mut c_void,
                clock_id,
            ))
        })
    }
    fn get_audio_api(&self) -> AudioApi {
        FromPrimitive::from_i32(unsafe { ffi::oboe_AudioStream_getAudioApi(self._raw_stream()) })
            .unwrap()
    }
    fn get_available_frames(&mut self) -> Result<i32> {
        wrap_result(unsafe { ffi::oboe_AudioStream_getAvailableFrames(self._raw_stream_mut()) })
    }
}
// Blanket implementation of the lifecycle operations over the raw FFI stream.
impl<T: RawAudioStream + RawAudioStreamBase> AudioStream for T {
    fn open(&mut self) -> Status {
        wrap_status(unsafe { ffi::oboe_AudioStream_open(self._raw_stream_mut()) })
    }
    fn close(&mut self) -> Status {
        // NOTE(review): `close1` is presumably the bindgen-renamed overload of the
        // C++ `close()` — confirm in the oboe-sys bindings.
        wrap_status(unsafe { ffi::oboe_AudioStream_close1(self._raw_stream_mut()) })
    }
    fn start_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status {
        wrap_status(unsafe {
            ffi::oboe_AudioStream_start(
                self._raw_stream_mut() as *mut _ as *mut c_void,
                timeout_nanoseconds,
            )
        })
    }
    fn stop_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status {
        wrap_status(unsafe {
            ffi::oboe_AudioStream_stop(
                self._raw_stream_mut() as *mut _ as *mut c_void,
                timeout_nanoseconds,
            )
        })
    }
    fn request_start(&mut self) -> Status {
        wrap_status(unsafe { ffi::oboe_AudioStream_requestStart(self._raw_stream_mut()) })
    }
    fn request_stop(&mut self) -> Status {
        wrap_status(unsafe { ffi::oboe_AudioStream_requestStop(self._raw_stream_mut()) })
    }
    fn wait_for_state_change(
        &mut self,
        input_state: StreamState,
        timeout_nanoseconds: i64,
    ) -> Result<StreamState> {
        // `next_state` is an out-parameter filled in by the FFI call; it is only
        // read (`assume_init`) after `wrap_status` reports success.
        let mut next_state = MaybeUninit::<StreamState>::uninit();
        wrap_status(unsafe {
            ffi::oboe_AudioStream_waitForStateChange(
                self._raw_stream_mut(),
                input_state as i32,
                next_state.as_mut_ptr() as *mut i32,
                timeout_nanoseconds,
            )
        })
        .map(|_| unsafe { next_state.assume_init() })
    }
    fn wait_for_available_frames(
        &mut self,
        num_frames: i32,
        timeout_nanoseconds: i64,
    ) -> Result<i32> {
        wrap_result(unsafe {
            ffi::oboe_AudioStream_waitForAvailableFrames(
                self._raw_stream_mut(),
                num_frames,
                timeout_nanoseconds,
            )
        })
    }
}
// Input-side queries for any raw input stream.
impl<T: RawAudioInputStream + RawAudioStream + RawAudioStreamBase> AudioInputStreamSafe for T {
    fn get_frames_read(&mut self) -> i64 {
        unsafe {
            ffi::oboe_AudioStream_getFramesRead(self._raw_stream_mut() as *mut _ as *mut c_void)
        }
    }
}
// Marker: every raw input stream is a full AudioInputStream.
impl<T: RawAudioInputStream + RawAudioStream + RawAudioStreamBase> AudioInputStream for T {}
// Output-side queries for any raw output stream.
impl<T: RawAudioOutputStream + RawAudioStream + RawAudioStreamBase> AudioOutputStreamSafe for T {
    fn get_frames_written(&mut self) -> i64 {
        unsafe {
            ffi::oboe_AudioStream_getFramesWritten(self._raw_stream_mut() as *mut _ as *mut c_void)
        }
    }
}
// Pause/flush operations for any raw output stream.
impl<T: RawAudioOutputStream + RawAudioStream + RawAudioStreamBase> AudioOutputStream for T {
    fn pause_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status {
        wrap_status(unsafe {
            ffi::oboe_AudioStream_pause(
                self._raw_stream_mut() as *mut _ as *mut c_void,
                timeout_nanoseconds,
            )
        })
    }
    fn flush_with_timeout(&mut self, timeout_nanoseconds: i64) -> Status {
        wrap_status(unsafe {
            ffi::oboe_AudioStream_flush(
                self._raw_stream_mut() as *mut _ as *mut c_void,
                timeout_nanoseconds,
            )
        })
    }
    fn request_pause(&mut self) -> Status {
        wrap_status(unsafe { ffi::oboe_AudioStream_requestPause(self._raw_stream_mut()) })
    }
    fn request_flush(&mut self) -> Status {
        wrap_status(unsafe { ffi::oboe_AudioStream_requestFlush(self._raw_stream_mut()) })
    }
}
pub(crate) fn audio_stream_fmt<T: AudioStreamSafe>(
stream: &T,
f: &mut fmt::Formatter<'_>,
) -> fmt::Result {
audio_stream_base_fmt(stream, f)?;
"Audio API: ".fmt(f)?;
fmt::Debug::fmt(&stream.get_audio_api(), f)?;
"\nCurrent state: ".fmt(f)?;
fmt::Debug::fmt(&stream.get_state(), f)?;
"\nXrun count: ".fmt(f)?;
match stream.get_xrun_count() {
Ok(count) => count.fmt(f)?,
Err(error) => fmt::Debug::fmt(&error, f)?,
}
'\n'.fmt(f)
}
/// Owning handle to an `oboe::AudioStream` held through the C++ shared pointer
/// wrapper (`oboe_AudioStreamShared`), so the stream outlives Rust-side users
/// while FFI callbacks may still reference it.
pub(crate) struct AudioStreamHandle(ffi::oboe_AudioStreamShared);
impl Clone for AudioStreamHandle {
    fn clone(&self) -> Self {
        // We are free to clone shared pointers: this duplicates the underlying
        // shared_ptr (not the stream itself) via the FFI helper.
        let mut new = Self::default();
        unsafe { ffi::oboe_AudioStreamShared_clone(&self.0, new.as_mut()) };
        new
    }
}
impl Drop for AudioStreamHandle {
    /// SAFETY: `self.0` must be valid pointers.
    fn drop(&mut self) {
        // The error callback could be holding a shared_ptr, so don't delete AudioStream
        // directly, but only its shared_ptr. The stream is freed when the last
        // shared_ptr owner releases it.
        unsafe { ffi::oboe_AudioStreamShared_delete(&mut self.0 as *mut _) };
    }
}
impl Default for AudioStreamHandle {
    fn default() -> Self {
        // NOTE(review): assumes an all-zero `oboe_AudioStreamShared` is a valid
        // empty/null shared-pointer representation — confirm against the C++ side.
        Self(unsafe { MaybeUninit::zeroed().assume_init() })
    }
}
// Borrow the raw shared-pointer wrapper for passing to FFI helpers.
impl AsRef<ffi::oboe_AudioStreamShared> for AudioStreamHandle {
    fn as_ref(&self) -> &ffi::oboe_AudioStreamShared {
        &self.0
    }
}
impl AsMut<ffi::oboe_AudioStreamShared> for AudioStreamHandle {
    fn as_mut(&mut self) -> &mut ffi::oboe_AudioStreamShared {
        &mut self.0
    }
}
// Dereference the shared pointer to the underlying `oboe_AudioStream`.
impl Deref for AudioStreamHandle {
    type Target = ffi::oboe_AudioStream;
    fn deref(&self) -> &Self::Target {
        // Const-to-mut cast: the FFI deref helper takes a mutable pointer even for
        // a shared borrow; presumed not to mutate.
        unsafe { &*ffi::oboe_AudioStreamShared_deref(&self.0 as *const _ as *mut _) }
    }
}
impl DerefMut for AudioStreamHandle {
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *ffi::oboe_AudioStreamShared_deref(&mut self.0) }
    }
}
/**
 * Reference to the audio stream for passing to callbacks
 */
#[repr(transparent)]
pub struct AudioStreamRef<'s, D> {
    raw: &'s mut ffi::oboe_AudioStream,
    // Direction marker type (`Input` or `Output`); carries no data.
    _phantom: PhantomData<D>,
}
impl<'s, D> fmt::Debug for AudioStreamRef<'s, D> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        audio_stream_fmt(self, f)
    }
}
impl<'s, D> AudioStreamRef<'s, D> {
    // Wrap a borrowed raw stream; the `'a: 's` bound ties the wrapper's lifetime
    // to the borrow of the underlying FFI stream.
    pub(crate) fn wrap_raw<'a: 's>(raw: &'a mut ffi::oboe_AudioStream) -> Self {
        Self {
            raw,
            _phantom: PhantomData,
        }
    }
}
// Raw-pointer plumbing that lets the blanket trait impls above apply to the
// borrowed callback wrapper.
impl<'s, D> RawAudioStreamBase for AudioStreamRef<'s, D> {
    fn _raw_base(&self) -> &ffi::oboe_AudioStreamBase {
        // Const-to-mut cast: the FFI getter takes a mutable pointer; presumed read-only.
        unsafe { &*ffi::oboe_AudioStream_getBase(self.raw as *const _ as *mut _) }
    }
    fn _raw_base_mut(&mut self) -> &mut ffi::oboe_AudioStreamBase {
        unsafe { &mut *ffi::oboe_AudioStream_getBase(self.raw) }
    }
}
impl<'s, D> RawAudioStream for AudioStreamRef<'s, D> {
    fn _raw_stream(&self) -> &ffi::oboe_AudioStream {
        self.raw
    }
    fn _raw_stream_mut(&mut self) -> &mut ffi::oboe_AudioStream {
        self.raw
    }
}
// Direction markers: only the matching direction gets the input/output API.
impl<'s> RawAudioInputStream for AudioStreamRef<'s, Input> {}
impl<'s> RawAudioOutputStream for AudioStreamRef<'s, Output> {}
/**
 * The audio stream for asynchronous (callback-driven) mode
 */
pub struct AudioStreamAsync<D, F> {
    raw: AudioStreamHandle,
    // `D` is the direction marker, `F` the callback type; neither is stored.
    _phantom: PhantomData<(D, F)>,
}
impl<D, F> fmt::Debug for AudioStreamAsync<D, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        audio_stream_fmt(self, f)
    }
}
impl<D, F> AudioStreamAsync<D, F> {
    // SAFETY: `raw` must be valid.
    pub(crate) fn wrap_handle(raw: AudioStreamHandle) -> Self {
        Self {
            raw,
            _phantom: PhantomData,
        }
    }
}
impl<D, F> Drop for AudioStreamAsync<D, F> {
    fn drop(&mut self) {
        // SAFETY: As long as the conditions on Self::wrap_raw are guaranteed on the creation of
        // self, this is safe.
        // Best-effort close; any error on close during drop is deliberately ignored.
        let _ = self.close();
    }
}
// Raw-pointer plumbing for the async stream (deref goes through the shared handle).
impl<D, T> RawAudioStreamBase for AudioStreamAsync<D, T> {
    fn _raw_base(&self) -> &ffi::oboe_AudioStreamBase {
        // Const-to-mut cast: the FFI getter takes a mutable pointer; presumed read-only.
        unsafe { &*ffi::oboe_AudioStream_getBase(&*self.raw as *const _ as *mut _) }
    }
    fn _raw_base_mut(&mut self) -> &mut ffi::oboe_AudioStreamBase {
        unsafe { &mut *ffi::oboe_AudioStream_getBase(&mut *self.raw as *mut _) }
    }
}
impl<D, F> RawAudioStream for AudioStreamAsync<D, F> {
    fn _raw_stream(&self) -> &ffi::oboe_AudioStream {
        &self.raw
    }
    fn _raw_stream_mut(&mut self) -> &mut ffi::oboe_AudioStream {
        &mut self.raw
    }
}
// Direction markers: only the matching direction gets the input/output API.
impl<F> RawAudioInputStream for AudioStreamAsync<Input, F> {}
impl<F> RawAudioOutputStream for AudioStreamAsync<Output, F> {}
/**
 * The audio stream for synchronous (blocking) mode
 */
pub struct AudioStreamSync<D, F> {
    raw: AudioStreamHandle,
    // `D` is the direction marker, `F` the frame type; neither is stored.
    _phantom: PhantomData<(D, F)>,
}
impl<D, F> fmt::Debug for AudioStreamSync<D, F> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        audio_stream_fmt(self, f)
    }
}
impl<D, F> AudioStreamSync<D, F> {
    // SAFETY: `raw` must be valid.
    pub(crate) fn wrap_handle(raw: AudioStreamHandle) -> Self {
        Self {
            raw,
            _phantom: PhantomData,
        }
    }
}
impl<D, F> Drop for AudioStreamSync<D, F> {
    fn drop(&mut self) {
        // SAFETY: As long as the conditions on Self::wrap_raw are guaranteed on the creation of
        // self, this is safe.
        // Best-effort close; any error on close during drop is deliberately ignored.
        let _ = self.close();
    }
}
// Raw-pointer plumbing for the sync stream (deref goes through the shared handle).
impl<D, T> RawAudioStreamBase for AudioStreamSync<D, T> {
    fn _raw_base(&self) -> &ffi::oboe_AudioStreamBase {
        // Const-to-mut cast: the FFI getter takes a mutable pointer; presumed read-only.
        unsafe { &*ffi::oboe_AudioStream_getBase(&*self.raw as *const _ as *mut _) }
    }
    fn _raw_base_mut(&mut self) -> &mut ffi::oboe_AudioStreamBase {
        unsafe { &mut *ffi::oboe_AudioStream_getBase(&mut *self.raw as *mut _) }
    }
}
impl<D, F> RawAudioStream for AudioStreamSync<D, F> {
    fn _raw_stream(&self) -> &ffi::oboe_AudioStream {
        &self.raw
    }
    fn _raw_stream_mut(&mut self) -> &mut ffi::oboe_AudioStream {
        &mut self.raw
    }
}
// Direction markers: only the matching direction gets the input/output API.
impl<F> RawAudioInputStream for AudioStreamSync<Input, F> {}
impl<F> RawAudioOutputStream for AudioStreamSync<Output, F> {}
impl<F: IsFrameType> AudioInputStreamSync for AudioStreamSync<Input, F> {
    type FrameType = F;
    fn read(
        &mut self,
        buffer: &mut [<Self::FrameType as IsFrameType>::Type],
        timeout_nanoseconds: i64,
    ) -> Result<i32> {
        // NOTE(review): `buffer.len()` is passed as the count argument — each slice
        // element appears to be one full frame; confirm the FFI expects frames, not bytes.
        wrap_result(unsafe {
            ffi::oboe_AudioStream_read(
                &mut *self.raw,
                buffer.as_mut_ptr() as *mut c_void,
                buffer.len() as i32,
                timeout_nanoseconds,
            )
        })
    }
}
impl<F: IsFrameType> AudioOutputStreamSync for AudioStreamSync<Output, F> {
    type FrameType = F;
    fn write(
        &mut self,
        buffer: &[<Self::FrameType as IsFrameType>::Type],
        timeout_nanoseconds: i64,
    ) -> Result<i32> {
        // NOTE(review): `buffer.len()` is passed as the count argument — each slice
        // element appears to be one full frame; confirm the FFI expects frames, not bytes.
        wrap_result(unsafe {
            ffi::oboe_AudioStream_write(
                &mut *self.raw,
                buffer.as_ptr() as *const c_void,
                buffer.len() as i32,
                timeout_nanoseconds,
            )
        })
    }
}

217
vendor/oboe/src/audio_stream_base.rs vendored Normal file
View File

@@ -0,0 +1,217 @@
//use oboe_sys as ffi;
use num_traits::FromPrimitive;
use std::fmt::{self, Display};
use super::{
AudioFormat, ChannelCount, ContentType, Direction, InputPreset, PerformanceMode,
RawAudioStreamBase, SampleRateConversionQuality, SessionId, SharingMode, Usage,
};
/**
 * Base trait containing parameters for audio streams and builders.
 */
pub trait AudioStreamBase {
    /**
     * Get actual number of channels
     */
    fn get_channel_count(&self) -> ChannelCount;
    /**
     * Get actual stream direction
     *
     * `Direction::Input` or `Direction::Output`.
     */
    fn get_direction(&self) -> Direction;
    /**
     * Get the actual sample rate for the stream
     */
    fn get_sample_rate(&self) -> i32;
    /**
     * Get the number of frames in each callback
     */
    fn get_frames_per_callback(&self) -> i32;
    /**
     * Get the audio sample format (e.g. F32 or I16)
     */
    fn get_format(&self) -> AudioFormat;
    /**
     * Query the maximum number of frames that can be filled without blocking.
     * If the stream has been closed the last known value will be returned.
     */
    fn get_buffer_size_in_frames(&self) -> i32;
    /**
     * Get the capacity in number of frames
     */
    fn get_buffer_capacity_in_frames(&self) -> i32;
    /**
     * Get the sharing mode of the stream
     */
    fn get_sharing_mode(&self) -> SharingMode;
    /**
     * Get the performance mode of the stream
     */
    fn get_performance_mode(&self) -> PerformanceMode;
    /**
     * Get the device identifier of the stream
     */
    fn get_device_id(&self) -> i32;
    /**
     * Get the usage for this stream
     */
    fn get_usage(&self) -> Usage;
    /**
     * Get the stream's content type
     */
    fn get_content_type(&self) -> ContentType;
    /**
     * Get the stream's input preset
     */
    fn get_input_preset(&self) -> InputPreset;
    /**
     * Get the stream's session ID allocation strategy (None or Allocate)
     */
    fn get_session_id(&self) -> SessionId;
    /**
     * Return true if can convert channel counts to achieve optimal results.
     */
    fn is_channel_conversion_allowed(&self) -> bool;
    /**
     * Return true if Oboe can convert data formats to achieve optimal results.
     */
    fn is_format_conversion_allowed(&self) -> bool;
    /**
     * Get whether and how Oboe can convert sample rates to achieve optimal results.
     */
    fn get_sample_rate_conversion_quality(&self) -> SampleRateConversionQuality;
}
// Blanket impl: any type that can expose the raw `ffi::oboe_AudioStreamBase`
// gets all the typed getters for free by reading its `m*` fields.
//
// NOTE(review): every `FromPrimitive::from_i32(...).unwrap()` below panics if
// the C++ side reports an enum value unknown to these bindings — acceptable
// only as long as the bindings match the vendored Oboe version.
impl<T: RawAudioStreamBase> AudioStreamBase for T {
    fn get_channel_count(&self) -> ChannelCount {
        FromPrimitive::from_i32(self._raw_base().mChannelCount).unwrap()
    }
    fn get_direction(&self) -> Direction {
        FromPrimitive::from_i32(self._raw_base().mDirection).unwrap()
    }
    fn get_sample_rate(&self) -> i32 {
        self._raw_base().mSampleRate
    }
    fn get_frames_per_callback(&self) -> i32 {
        self._raw_base().mFramesPerCallback
    }
    fn get_format(&self) -> AudioFormat {
        FromPrimitive::from_i32(self._raw_base().mFormat).unwrap()
    }
    fn get_buffer_size_in_frames(&self) -> i32 {
        self._raw_base().mBufferSizeInFrames
    }
    fn get_buffer_capacity_in_frames(&self) -> i32 {
        self._raw_base().mBufferCapacityInFrames
    }
    fn get_sharing_mode(&self) -> SharingMode {
        FromPrimitive::from_i32(self._raw_base().mSharingMode).unwrap()
    }
    fn get_performance_mode(&self) -> PerformanceMode {
        FromPrimitive::from_i32(self._raw_base().mPerformanceMode).unwrap()
    }
    fn get_device_id(&self) -> i32 {
        self._raw_base().mDeviceId
    }
    fn get_usage(&self) -> Usage {
        FromPrimitive::from_i32(self._raw_base().mUsage).unwrap()
    }
    fn get_content_type(&self) -> ContentType {
        FromPrimitive::from_i32(self._raw_base().mContentType).unwrap()
    }
    fn get_input_preset(&self) -> InputPreset {
        FromPrimitive::from_i32(self._raw_base().mInputPreset).unwrap()
    }
    fn get_session_id(&self) -> SessionId {
        FromPrimitive::from_i32(self._raw_base().mSessionId).unwrap()
    }
    fn is_channel_conversion_allowed(&self) -> bool {
        self._raw_base().mChannelConversionAllowed
    }
    fn is_format_conversion_allowed(&self) -> bool {
        self._raw_base().mFormatConversionAllowed
    }
    fn get_sample_rate_conversion_quality(&self) -> SampleRateConversionQuality {
        FromPrimitive::from_i32(self._raw_base().mSampleRateConversionQuality).unwrap()
    }
}
/// Shared `Debug`/`Display` body for builders and streams: renders every
/// `AudioStreamBase` parameter as one `"Name: value"` line.
///
/// Calling `.fmt(f)` directly (instead of `write!`) forwards the caller's
/// `Formatter` — including its flags — to each sub-value unchanged.
pub(crate) fn audio_stream_base_fmt<T: AudioStreamBase>(
    base: &T,
    f: &mut fmt::Formatter<'_>,
) -> fmt::Result {
    "DeviceId: ".fmt(f)?;
    base.get_device_id().fmt(f)?;
    "\nSessionId: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_session_id(), f)?;
    "\nDirection: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_direction(), f)?;
    // The input preset is only meaningful for capture streams.
    if base.get_direction() == Direction::Input {
        "\nInput preset: ".fmt(f)?;
        fmt::Debug::fmt(&base.get_input_preset(), f)?;
    }
    "\nBuffer capacity in frames: ".fmt(f)?;
    base.get_buffer_capacity_in_frames().fmt(f)?;
    "\nBuffer size in frames: ".fmt(f)?;
    base.get_buffer_size_in_frames().fmt(f)?;
    "\nFrames per callback: ".fmt(f)?;
    base.get_frames_per_callback().fmt(f)?;
    "\nSample rate: ".fmt(f)?;
    base.get_sample_rate().fmt(f)?;
    "\nSample rate conversion quality: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_sample_rate_conversion_quality(), f)?;
    "\nChannel count: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_channel_count(), f)?;
    if base.is_channel_conversion_allowed() {
        " (conversion allowed)".fmt(f)?;
    }
    "\nFormat: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_format(), f)?;
    if base.is_format_conversion_allowed() {
        " (conversion allowed)".fmt(f)?;
    }
    "\nSharing mode: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_sharing_mode(), f)?;
    "\nPerformance mode: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_performance_mode(), f)?;
    "\nUsage: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_usage(), f)?;
    "\nContent type: ".fmt(f)?;
    fmt::Debug::fmt(&base.get_content_type(), f)?;
    '\n'.fmt(f)
}

634
vendor/oboe/src/audio_stream_builder.rs vendored Normal file
View File

@@ -0,0 +1,634 @@
use num_traits::FromPrimitive;
use oboe_sys as ffi;
use std::{
fmt,
marker::PhantomData,
mem::{ManuallyDrop, MaybeUninit},
ops::{Deref, DerefMut},
};
use crate::{set_input_callback, set_output_callback};
use super::{
audio_stream_base_fmt, wrap_status, AudioApi, AudioInputCallback, AudioOutputCallback,
AudioStreamAsync, AudioStreamHandle, AudioStreamSync, ContentType, Input, InputPreset,
IsChannelCount, IsDirection, IsFormat, IsFrameType, Mono, Output, PerformanceMode,
RawAudioStreamBase, Result, SampleRateConversionQuality, SessionId, SharingMode, Stereo,
Unspecified, Usage,
};
// Owned wrapper around the C++ `oboe::AudioStreamBuilder`.
// `repr(transparent)` guarantees it has exactly the layout of the ffi type.
#[repr(transparent)]
pub(crate) struct AudioStreamBuilderHandle(ffi::oboe_AudioStreamBuilder);
impl AudioStreamBuilderHandle {
    /// Open a stream from the builder's current settings via the shared-pointer
    /// FFI entry point; on success the out-parameter `stream` holds the handle.
    pub(crate) fn open_stream(&mut self) -> Result<AudioStreamHandle> {
        let mut stream = AudioStreamHandle::default();
        wrap_status(unsafe {
            ffi::oboe_AudioStreamBuilder_openStreamShared(&mut **self, stream.as_mut())
        })
        .map(|_| stream)
    }
}
impl Default for AudioStreamBuilderHandle {
    /// Zero-initialize storage, then let the C++ constructor initialize it
    /// in place.
    ///
    /// NOTE(review): soundness relies on `oboe_AudioStreamBuilder_create`
    /// fully initializing the zeroed memory — confirm against `oboe-sys`.
    fn default() -> Self {
        let mut raw = MaybeUninit::zeroed();
        Self(unsafe {
            ffi::oboe_AudioStreamBuilder_create(raw.as_mut_ptr());
            raw.assume_init()
        })
    }
}
impl Drop for AudioStreamBuilderHandle {
    /// Run the C++ destructor for the wrapped builder.
    fn drop(&mut self) {
        unsafe { ffi::oboe_AudioStreamBuilder_delete(&mut **self) }
    }
}
// Deref/DerefMut give direct access to the wrapped ffi builder for FFI calls.
impl Deref for AudioStreamBuilderHandle {
    type Target = ffi::oboe_AudioStreamBuilder;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for AudioStreamBuilderHandle {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/**
 * Factory for an audio stream.
 *
 * The type parameters record the requested direction (`D`), channel count (`C`)
 * and sample format (`T`) at compile time; `ManuallyDrop` lets `destructs`
 * hand the raw handle off without running `Drop`.
 */
#[repr(transparent)]
pub struct AudioStreamBuilder<D, C, T> {
    raw: ManuallyDrop<AudioStreamBuilderHandle>,
    _phantom: PhantomData<(D, C, T)>,
}
impl<D, C, T> Drop for AudioStreamBuilder<D, C, T> {
    fn drop(&mut self) {
        // SAFETY: self.raw is only drop here, or taken in Self::destructs, which don't drop self.
        unsafe {
            ManuallyDrop::drop(&mut self.raw);
        }
    }
}
impl<D, C, T> fmt::Debug for AudioStreamBuilder<D, C, T> {
    // Delegate to the shared parameter dump.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        audio_stream_base_fmt(self, f)
    }
}
impl<D, C, T> RawAudioStreamBase for AudioStreamBuilder<D, C, T> {
    fn _raw_base(&self) -> &ffi::oboe_AudioStreamBase {
        // The const->mut cast is needed because the FFI getter takes a mutable
        // pointer even for read-only access.
        unsafe { &*ffi::oboe_AudioStreamBuilder_getBase(&**self.raw as *const _ as *mut _) }
    }
    fn _raw_base_mut(&mut self) -> &mut ffi::oboe_AudioStreamBase {
        unsafe { &mut *ffi::oboe_AudioStreamBuilder_getBase(&mut **self.raw) }
    }
}
impl Default for AudioStreamBuilder<Output, Unspecified, Unspecified> {
    /**
     * Create new audio stream builder
     *
     * Starts as an output builder with unspecified channel count and format,
     * mirroring the C++ defaults.
     */
    fn default() -> Self {
        Self {
            raw: Default::default(),
            _phantom: PhantomData,
        }
    }
}
impl<D, C, T> AudioStreamBuilder<D, C, T> {
    // Re-tag the builder with new type parameters without touching the
    // underlying handle (the raw handle carries all actual state).
    fn convert<D1, C1, T1>(self) -> AudioStreamBuilder<D1, C1, T1> {
        AudioStreamBuilder {
            raw: ManuallyDrop::new(self.destructs()),
            _phantom: PhantomData,
        }
    }
    /**
     * Request a specific number of channels
     *
     * Default is `Unspecified`. If the value is unspecified then
     * the application should query for the actual value after the stream is opened.
     */
    pub fn set_channel_count<X: IsChannelCount>(self) -> AudioStreamBuilder<D, X, T> {
        let mut builder = self.convert();
        builder._raw_base_mut().mChannelCount = X::CHANNEL_COUNT as i32;
        builder
    }
    /**
     * Request mono mode for a stream
     */
    pub fn set_mono(self) -> AudioStreamBuilder<D, Mono, T> {
        self.set_channel_count::<Mono>()
    }
    /**
     * Request stereo mode for a stream
     */
    pub fn set_stereo(self) -> AudioStreamBuilder<D, Stereo, T> {
        self.set_channel_count::<Stereo>()
    }
    /**
     * Request the direction for a stream
     *
     * The default is `Direction::Output`
     */
    pub fn set_direction<X: IsDirection>(self) -> AudioStreamBuilder<X, C, T> {
        let mut builder = self.convert();
        builder._raw_base_mut().mDirection = X::DIRECTION as i32;
        builder
    }
    /**
     * Request input direction for a stream
     */
    pub fn set_input(self) -> AudioStreamBuilder<Input, C, T> {
        self.set_direction::<Input>()
    }
    /**
     * Request output direction for a stream
     *
     * It is optional because the stream builder already configured as output by default.
     */
    pub fn set_output(self) -> AudioStreamBuilder<Output, C, T> {
        self.set_direction::<Output>()
    }
    /**
     * Request a specific sample rate in Hz.
     *
     * Default is kUnspecified. If the value is unspecified then
     * the application should query for the actual value after the stream is opened.
     *
     * Technically, this should be called the _frame rate_ or _frames per second_,
     * because it refers to the number of complete frames transferred per second.
     * But it is traditionally called _sample rate_. So we use that term.
     */
    pub fn set_sample_rate(mut self, sample_rate: i32) -> Self {
        self._raw_base_mut().mSampleRate = sample_rate;
        self
    }
    /**
     * Request a specific number of frames for the data callback.
     *
     * Default is kUnspecified. If the value is unspecified then
     * the actual number may vary from callback to callback.
     *
     * If an application can handle a varying number of frames then we recommend
     * leaving this unspecified. This allows the underlying API to optimize
     * the callbacks. But if your application is, for example, doing FFTs or other block
     * oriented operations, then call this function to get the sizes you need.
     */
    pub fn set_frames_per_callback(mut self, frames_per_callback: i32) -> Self {
        self._raw_base_mut().mFramesPerCallback = frames_per_callback;
        self
    }
    /**
     * Request a sample data format, for example `f32`.
     *
     * Default is unspecified. If the value is unspecified then
     * the application should query for the actual value after the stream is opened.
     */
    pub fn set_format<X: IsFormat>(self) -> AudioStreamBuilder<D, C, X> {
        let mut builder = self.convert();
        builder._raw_base_mut().mFormat = X::FORMAT as i32;
        builder
    }
    /// Request signed 16-bit integer samples.
    pub fn set_i16(self) -> AudioStreamBuilder<D, C, i16> {
        self.set_format::<i16>()
    }
    /// Request 32-bit floating point samples.
    pub fn set_f32(self) -> AudioStreamBuilder<D, C, f32> {
        self.set_format::<f32>()
    }
    /**
     * Set the requested buffer capacity in frames.
     * Buffer capacity in frames is the maximum possible buffer size in frames.
     *
     * The final stream capacity may differ. For __AAudio__ it should be at least this big.
     * For __OpenSL ES__, it could be smaller.
     *
     * Default is unspecified.
     */
    pub fn set_buffer_capacity_in_frames(mut self, buffer_capacity_in_frames: i32) -> Self {
        self._raw_base_mut().mBufferCapacityInFrames = buffer_capacity_in_frames;
        self
    }
    /**
     * Get the audio API which will be requested when opening the stream. No guarantees that this is
     * the API which will actually be used. Query the stream itself to find out the API which is
     * being used.
     *
     * If you do not specify the API, then __AAudio__ will be used if isAAudioRecommended()
     * returns true. Otherwise __OpenSL ES__ will be used.
     */
    pub fn get_audio_api(&self) -> AudioApi {
        FromPrimitive::from_i32(unsafe { ffi::oboe_AudioStreamBuilder_getAudioApi(&**self.raw) })
            .unwrap()
    }
    /**
     * If you leave this unspecified then Oboe will choose the best API
     * for the device and SDK version at runtime.
     *
     * This should almost always be left unspecified, except for debugging purposes.
     * Specifying __AAudio__ will force Oboe to use AAudio on 8.0, which is extremely risky.
     * Specifying __OpenSL ES__ should mainly be used to test legacy performance/functionality.
     *
     * If the caller requests AAudio and it is supported then AAudio will be used.
     */
    pub fn set_audio_api(mut self, audio_api: AudioApi) -> Self {
        unsafe { ffi::oboe_AudioStreamBuilder_setAudioApi(&mut **self.raw, audio_api as i32) }
        self
    }
    /**
     * Is the AAudio API supported on this device?
     *
     * AAudio was introduced in the Oreo 8.0 release.
     */
    pub fn is_aaudio_supported() -> bool {
        unsafe { ffi::oboe_AudioStreamBuilder_isAAudioSupported() }
    }
    /**
     * Is the AAudio API recommended for this device?
     *
     * AAudio may be supported but not recommended because of version specific issues.
     * AAudio is not recommended for Android 8.0 or earlier versions.
     */
    pub fn is_aaudio_recommended() -> bool {
        unsafe { ffi::oboe_AudioStreamBuilder_isAAudioRecommended() }
    }
    /**
     * Request a mode for sharing the device.
     * The requested sharing mode may not be available.
     * So the application should query for the actual mode after the stream is opened.
     */
    pub fn set_sharing_mode(mut self, sharing_mode: SharingMode) -> Self {
        self._raw_base_mut().mSharingMode = sharing_mode as i32;
        self
    }
    /**
     * Request a shared mode for the device
     */
    pub fn set_shared(self) -> Self {
        self.set_sharing_mode(SharingMode::Shared)
    }
    /**
     * Request an exclusive mode for the device
     */
    pub fn set_exclusive(self) -> Self {
        self.set_sharing_mode(SharingMode::Exclusive)
    }
    /**
     * Request a performance level for the stream.
     * This will determine the latency, the power consumption, and the level of
     * protection from glitches.
     */
    pub fn set_performance_mode(mut self, performance_mode: PerformanceMode) -> Self {
        self._raw_base_mut().mPerformanceMode = performance_mode as i32;
        self
    }
    /**
     * Set the intended use case for the stream.
     *
     * The system will use this information to optimize the behavior of the stream.
     * This could, for example, affect how volume and focus is handled for the stream.
     *
     * The default, if you do not call this function, is Usage::Media.
     *
     * Added in API level 28.
     */
    pub fn set_usage(mut self, usage: Usage) -> Self {
        self._raw_base_mut().mUsage = usage as i32;
        self
    }
    /**
     * Set the type of audio data that the stream will carry.
     *
     * The system will use this information to optimize the behavior of the stream.
     * This could, for example, affect whether a stream is paused when a notification occurs.
     *
     * The default, if you do not call this function, is `ContentType::Music`.
     *
     * Added in API level 28.
     */
    pub fn set_content_type(mut self, content_type: ContentType) -> Self {
        self._raw_base_mut().mContentType = content_type as i32;
        self
    }
    /**
     * Set the input (capture) preset for the stream.
     *
     * The system will use this information to optimize the behavior of the stream.
     * This could, for example, affect which microphones are used and how the
     * recorded data is processed.
     *
     * The default, if you do not call this function, is InputPreset::VoiceRecognition.
     * That is because VoiceRecognition is the preset with the lowest latency
     * on many platforms.
     *
     * Added in API level 28.
     */
    pub fn set_input_preset(mut self, input_preset: InputPreset) -> Self {
        self._raw_base_mut().mInputPreset = input_preset as i32;
        self
    }
    /**
     * Set the requested session ID.
     *
     * The session ID can be used to associate a stream with effects processors.
     * The effects are controlled using the Android AudioEffect Java API.
     *
     * The default, if you do not call this function, is `SessionId::None`.
     *
     * If set to `SessionId::Allocate` then a session ID will be allocated
     * when the stream is opened.
     *
     * The allocated session ID can be obtained by calling AudioStream::getSessionId()
     * and then used with this function when opening another stream.
     * This allows effects to be shared between streams.
     *
     * Session IDs from Oboe can be used with the Android Java APIs and vice versa.
     * So a session ID from an Oboe stream can be passed to Java
     * and effects applied using the Java AudioEffect API.
     *
     * Allocated session IDs will always be positive and nonzero.
     *
     * Added in API level 28.
     */
    pub fn set_session_id(mut self, session_id: SessionId) -> Self {
        self._raw_base_mut().mSessionId = session_id as i32;
        self
    }
    /**
     * Request a stream to a specific audio input/output device given an audio device ID.
     *
     * In most cases, the primary device will be the appropriate device to use, and the
     * device ID can be left unspecified.
     *
     * On Android, for example, the ID could be obtained from the Java AudioManager.
     * AudioManager.getDevices() returns an array of AudioDeviceInfo[], which contains
     * a getId() method (as well as other type information), that should be passed
     * to this method.
     *
     * When `java-interface` feature is used you can call [`AudioDeviceInfo::request`](crate::AudioDeviceInfo::request) for listing devices info.
     *
     * Note that when using OpenSL ES, this will be ignored and the created
     * stream will have device ID unspecified.
     */
    pub fn set_device_id(mut self, device_id: i32) -> Self {
        self._raw_base_mut().mDeviceId = device_id;
        self
    }
    /**
     * If true then Oboe might convert channel counts to achieve optimal results.
     * On some versions of Android for example, stereo streams could not use a FAST track.
     * So a mono stream might be used instead and duplicated to two channels.
     * On some devices, mono streams might be broken, so a stereo stream might be opened
     * and converted to mono.
     *
     * Default is true.
     */
    pub fn set_channel_conversion_allowed(mut self, allowed: bool) -> Self {
        self._raw_base_mut().mChannelConversionAllowed = allowed;
        self
    }
    /**
     * If true then Oboe might convert data formats to achieve optimal results.
     * On some versions of Android, for example, a float stream could not get a
     * low latency data path. So an I16 stream might be opened and converted to float.
     *
     * Default is true.
     */
    pub fn set_format_conversion_allowed(mut self, allowed: bool) -> Self {
        self._raw_base_mut().mFormatConversionAllowed = allowed;
        self
    }
    /**
     * Specify the quality of the sample rate converter in Oboe.
     *
     * If set to None then Oboe will not do sample rate conversion. But the underlying APIs might
     * still do sample rate conversion if you specify a sample rate.
     * That can prevent you from getting a low latency stream.
     *
     * If you do the conversion in Oboe then you might still get a low latency stream.
     *
     * Default is `SampleRateConversionQuality::None`
     */
    pub fn set_sample_rate_conversion_quality(
        mut self,
        quality: SampleRateConversionQuality,
    ) -> Self {
        self._raw_base_mut().mSampleRateConversionQuality = quality as i32;
        self
    }
    /**
     * Returns true if AAudio will be used based on the current settings.
     */
    pub fn will_use_aaudio(&self) -> bool {
        let audio_api = self.get_audio_api();
        (audio_api == AudioApi::AAudio && Self::is_aaudio_supported())
            || (audio_api == AudioApi::Unspecified && Self::is_aaudio_recommended())
    }
    /// Deconstructs self into its handle, without calling drop.
    fn destructs(mut self) -> AudioStreamBuilderHandle {
        // Safety: the std::mem::forget prevents `raw` from being dropped by Self::drop.
        let raw = unsafe { ManuallyDrop::take(&mut self.raw) };
        std::mem::forget(self);
        raw
    }
}
impl<D: IsDirection, C: IsChannelCount, T: IsFormat> AudioStreamBuilder<D, C, T> {
    /**
     * Create and open a synchronous (blocking) stream based on the current settings.
     */
    pub fn open_stream(self) -> Result<AudioStreamSync<D, (T, C)>> {
        // Take the handle out of `self` (no Drop runs), open the stream, then
        // drop the builder handle explicitly once the stream handle is wrapped.
        let mut raw = self.destructs();
        let stream = raw.open_stream().map(AudioStreamSync::wrap_handle);
        drop(raw);
        stream
    }
}
impl<C: IsChannelCount, T: IsFormat> AudioStreamBuilder<Input, C, T> {
    /**
     * Specifies an object to handle data or error related callbacks from the underlying API.
     *
     * Ownership of `stream_callback` moves into the FFI layer (see
     * `set_input_callback`); the returned async builder is the only way to
     * finish opening the stream.
     *
     * __Important: See AudioStreamCallback for restrictions on what may be called
     * from the callback methods.__
     *
     * When an error callback occurs, the associated stream will be stopped and closed in a separate thread.
     *
     * A note on why the streamCallback parameter is a raw pointer rather than a smart pointer:
     *
     * The caller should retain ownership of the object streamCallback points to. At first glance weak_ptr may seem like
     * a good candidate for streamCallback as this implies temporary ownership. However, a weak_ptr can only be created
     * from a shared_ptr. A shared_ptr incurs some performance overhead. The callback object is likely to be accessed
     * every few milliseconds when the stream requires new data so this overhead is something we want to avoid.
     *
     * This leaves a raw pointer as the logical type choice. The only caveat being that the caller must not destroy
     * the callback before the stream has been closed.
     */
    pub fn set_callback<F>(self, stream_callback: F) -> AudioStreamBuilderAsync<Input, F>
    where
        F: AudioInputCallback<FrameType = (T, C)>,
        (T, C): IsFrameType,
    {
        let mut raw = self.destructs();
        set_input_callback(&mut raw, stream_callback);
        AudioStreamBuilderAsync {
            raw: ManuallyDrop::new(raw),
            _phantom: PhantomData,
        }
    }
}
impl<C: IsChannelCount, T: IsFormat> AudioStreamBuilder<Output, C, T> {
    /**
     * Specifies an object to handle data or error related callbacks from the underlying API.
     *
     * Ownership of `stream_callback` moves into the FFI layer (see
     * `set_output_callback`); the returned async builder is the only way to
     * finish opening the stream.
     *
     * __Important: See AudioStreamCallback for restrictions on what may be called
     * from the callback methods.__
     *
     * When an error callback occurs, the associated stream will be stopped and closed in a separate thread.
     *
     * A note on why the streamCallback parameter is a raw pointer rather than a smart pointer:
     *
     * The caller should retain ownership of the object streamCallback points to. At first glance weak_ptr may seem like
     * a good candidate for streamCallback as this implies temporary ownership. However, a weak_ptr can only be created
     * from a shared_ptr. A shared_ptr incurs some performance overhead. The callback object is likely to be accessed
     * every few milliseconds when the stream requires new data so this overhead is something we want to avoid.
     *
     * This leaves a raw pointer as the logical type choice. The only caveat being that the caller must not destroy
     * the callback before the stream has been closed.
     */
    pub fn set_callback<F>(self, stream_callback: F) -> AudioStreamBuilderAsync<Output, F>
    where
        F: AudioOutputCallback<FrameType = (T, C)>,
        (T, C): IsFrameType,
    {
        let mut raw = self.destructs();
        set_output_callback(&mut raw, stream_callback);
        AudioStreamBuilderAsync {
            raw: ManuallyDrop::new(raw),
            _phantom: PhantomData,
        }
    }
}
/**
 * Factory for an audio stream.
 *
 * Produced by `AudioStreamBuilder::set_callback`; `D` is the direction tag and
 * `F` the registered callback type.
 */
pub struct AudioStreamBuilderAsync<D, F> {
    raw: ManuallyDrop<AudioStreamBuilderHandle>,
    _phantom: PhantomData<(D, F)>,
}
impl<D, F> Drop for AudioStreamBuilderAsync<D, F> {
    fn drop(&mut self) {
        // SAFETY: self.raw is only dropped here, or taken in Self::destructs, which don't drop self.
        unsafe {
            ManuallyDrop::drop(&mut self.raw);
        }
    }
}
impl<D, F> fmt::Debug for AudioStreamBuilderAsync<D, F> {
    // Delegate to the shared parameter dump.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        audio_stream_base_fmt(self, f)
    }
}
impl<D, F> RawAudioStreamBase for AudioStreamBuilderAsync<D, F> {
    fn _raw_base(&self) -> &ffi::oboe_AudioStreamBase {
        // The const->mut cast is needed because the FFI getter takes a mutable
        // pointer even for read-only access.
        unsafe { &*ffi::oboe_AudioStreamBuilder_getBase(&**self.raw as *const _ as *mut _) }
    }
    fn _raw_base_mut(&mut self) -> &mut ffi::oboe_AudioStreamBase {
        unsafe { &mut *ffi::oboe_AudioStreamBuilder_getBase(&mut **self.raw) }
    }
}
impl<D, F> AudioStreamBuilderAsync<D, F> {
    /// Deconstructs self into its handle without calling drop.
    fn destructs(mut self) -> AudioStreamBuilderHandle {
        // Safety: the std::mem::forget prevents `raw` from being dropped by Self::drop.
        let raw = unsafe { ManuallyDrop::take(&mut self.raw) };
        std::mem::forget(self);
        raw
    }
}
impl<F: AudioInputCallback + Send> AudioStreamBuilderAsync<Input, F> {
    /**
     * Create and open an asynchronous (callback-driven) input stream based on the current settings.
     */
    pub fn open_stream(self) -> Result<AudioStreamAsync<Input, F>> {
        // Take the handle (no Drop runs), open, then drop the builder handle
        // explicitly once the stream handle is wrapped.
        let mut raw = self.destructs();
        let stream = raw.open_stream().map(AudioStreamAsync::wrap_handle);
        drop(raw);
        stream
    }
}
impl<F: AudioOutputCallback + Send> AudioStreamBuilderAsync<Output, F> {
    /**
     * Create and open an asynchronous (callback-driven) output stream based on the current settings.
     */
    pub fn open_stream(self) -> Result<AudioStreamAsync<Output, F>> {
        let mut raw = self.destructs();
        let stream = raw.open_stream().map(AudioStreamAsync::wrap_handle);
        drop(raw);
        stream
    }
}

334
vendor/oboe/src/audio_stream_callback.rs vendored Normal file
View File

@@ -0,0 +1,334 @@
use std::{
ffi::c_void,
slice::{from_raw_parts, from_raw_parts_mut},
};
use oboe_sys as ffi;
use num_traits::FromPrimitive;
use super::{
AudioInputStreamSafe, AudioOutputStreamSafe, AudioStreamBuilderHandle, AudioStreamRef,
DataCallbackResult, Error, IsFrameType,
};
/**
 * This trait defines a callback interface for:
 *
 * 1) moving data to/from an audio stream using `on_audio_ready`
 * 2) being alerted when a stream has an error using `on_error_*` methods
 */
pub trait AudioInputCallback {
    /**
     * The sample type and number of channels for processing.
     *
     * Oboe supports only two sample types:
     *
     * - **i16** - signed 16-bit integer samples
     * - **f32** - 32-bit floating point samples
     *
     * Oboe supports only mono and stereo channel configurations.
     */
    type FrameType: IsFrameType;
    /**
     * This will be called when an error occurs on a stream or when the stream is disconnected.
     *
     * Note that this will be called on a different thread than the `on_audio_ready` thread.
     * This thread will be created by Oboe.
     *
     * The underlying stream will already be stopped by Oboe but not yet closed.
     * So the stream can be queried.
     *
     * Do not close or delete the stream in this method because it will be
     * closed after this method returns.
     */
    fn on_error_before_close(
        &mut self,
        _audio_stream: &mut dyn AudioInputStreamSafe,
        _error: Error,
    ) {
    }
    /**
     * This will be called when an error occurs on a stream or when the stream is disconnected.
     * The underlying AAudio or OpenSL ES stream will already be stopped AND closed by Oboe.
     * So the underlying stream cannot be referenced.
     * But you can still query most parameters.
     *
     * This callback could be used to reopen a new stream on another device.
     * You can safely delete the old AudioStream in this method.
     */
    fn on_error_after_close(
        &mut self,
        _audio_stream: &mut dyn AudioInputStreamSafe,
        _error: Error,
    ) {
    }
    /**
     * A buffer is ready for processing.
     *
     * For an input stream, this function should read and process `audio_data.len()`
     * frames of data from the `audio_data` buffer.
     *
     * The audio data is passed through the buffer. So do NOT call read() or
     * write() on the stream that is making the callback.
     *
     * Note that the number of frames can vary between calls unless
     * `AudioStreamBuilder::set_frames_per_callback` is called.
     *
     * Also note that this callback function should be considered a "real-time" function.
     * It must not do anything that could cause an unbounded delay because that can cause the
     * audio to glitch or pop.
     *
     * These are things the function should NOT do:
     *
     * - allocate memory
     * - any file operations such as opening, closing, reading or writing
     * - any network operations such as streaming
     * - use any mutexes or other blocking synchronization primitives
     * - sleep
     * - stop or close stream
     * - read or write on stream which invoked it
     *
     * The following are OK to call from the data callback:
     *
     * - stream.get_*()
     *
     * If you need to move data, eg. MIDI commands, in or out of the callback function then
     * we recommend the use of non-blocking techniques such as an atomic FIFO.
     */
    fn on_audio_ready(
        &mut self,
        audio_stream: &mut dyn AudioInputStreamSafe,
        audio_data: &[<Self::FrameType as IsFrameType>::Type],
    ) -> DataCallbackResult;
}
/**
 * This trait defines a callback interface for:
 *
 * 1) moving data to/from an audio stream using `on_audio_ready`
 * 2) being alerted when a stream has an error using `on_error_*` methods
 */
pub trait AudioOutputCallback {
    /**
     * The sample type and number of channels for processing.
     *
     * Oboe supports only two sample types:
     *
     * - **i16** - signed 16-bit integer samples
     * - **f32** - 32-bit floating point samples
     *
     * Oboe supports only mono and stereo channel configurations.
     */
    type FrameType: IsFrameType;
    /**
     * This will be called when an error occurs on a stream or when the stream is disconnected.
     *
     * Note that this will be called on a different thread than the `on_audio_ready` thread.
     * This thread will be created by Oboe.
     *
     * The underlying stream will already be stopped by Oboe but not yet closed.
     * So the stream can be queried.
     *
     * Do not close or delete the stream in this method because it will be
     * closed after this method returns.
     */
    fn on_error_before_close(
        &mut self,
        _audio_stream: &mut dyn AudioOutputStreamSafe,
        _error: Error,
    ) {
    }
    /**
     * This will be called when an error occurs on a stream or when the stream is disconnected.
     * The underlying AAudio or OpenSL ES stream will already be stopped AND closed by Oboe.
     * So the underlying stream cannot be referenced.
     * But you can still query most parameters.
     *
     * This callback could be used to reopen a new stream on another device.
     * You can safely delete the old AudioStream in this method.
     */
    fn on_error_after_close(
        &mut self,
        _audio_stream: &mut dyn AudioOutputStreamSafe,
        _error: Error,
    ) {
    }
    /**
     * A buffer is ready for processing.
     *
     * For an output stream, this function should render and write `audio_data.len()`
     * frames of data in the stream's current data format to the `audio_data` buffer.
     *
     * The audio data is passed through the buffer. So do NOT call read() or
     * write() on the stream that is making the callback.
     *
     * Note that the number of frames can vary between calls unless
     * `AudioStreamBuilder::set_frames_per_callback` is called.
     *
     * Also note that this callback function should be considered a "real-time" function.
     * It must not do anything that could cause an unbounded delay because that can cause the
     * audio to glitch or pop.
     *
     * These are things the function should NOT do:
     *
     * - allocate memory
     * - any file operations such as opening, closing, reading or writing
     * - any network operations such as streaming
     * - use any mutexes or other blocking synchronization primitives
     * - sleep
     * - stop or close stream
     * - read or write on stream which invoked it
     *
     * The following are OK to call from the data callback:
     *
     * - stream.get_*()
     *
     * If you need to move data, eg. MIDI commands, in or out of the callback function then
     * we recommend the use of non-blocking techniques such as an atomic FIFO.
     */
    fn on_audio_ready(
        &mut self,
        audio_stream: &mut dyn AudioOutputStreamSafe,
        audio_data: &mut [<Self::FrameType as IsFrameType>::Type],
    ) -> DataCallbackResult;
}
/// Registers `callback` with the builder: the callback is leaked to the C side
/// as a type-erased context pointer together with monomorphized trampolines.
/// Ownership transfers to the FFI layer; `drop_context::<T>` is registered to
/// reclaim it (presumably when the C++ side releases the context — confirm in
/// `oboe-sys`).
pub(crate) fn set_input_callback<T: AudioInputCallback>(
    builder: &mut AudioStreamBuilderHandle,
    callback: T,
) {
    let callback = Box::into_raw(Box::new(callback));
    // SAFETY: `callback` has the same type as the first argument of each function, and each
    // function follows the C ABI.
    unsafe {
        ffi::oboe_AudioStreamBuilder_setCallback(
            &mut **builder as *mut ffi::oboe_AudioStreamBuilder,
            callback.cast(),
            Some(drop_context::<T>),
            Some(on_audio_ready_input_wrapper::<T>),
            Some(on_error_before_close_input_wrapper::<T>),
            Some(on_error_after_close_input_wrapper::<T>),
        );
    }
}
/// Registers `callback` with the builder: the callback is leaked to the C side
/// as a type-erased context pointer together with monomorphized trampolines.
/// Ownership transfers to the FFI layer; `drop_context::<T>` is registered to
/// reclaim it (presumably when the C++ side releases the context — confirm in
/// `oboe-sys`).
pub(crate) fn set_output_callback<T: AudioOutputCallback>(
    builder: &mut AudioStreamBuilderHandle,
    callback: T,
) {
    // Leak the callback in a single step, mirroring `set_input_callback`
    // (previously a needless two-step binding).
    let callback = Box::into_raw(Box::new(callback));
    // SAFETY: `callback` has the same type as the first argument of each function, and each
    // function follows the C ABI.
    unsafe {
        ffi::oboe_AudioStreamBuilder_setCallback(
            &mut **builder as *mut ffi::oboe_AudioStreamBuilder,
            callback.cast(),
            Some(drop_context::<T>),
            Some(on_audio_ready_output_wrapper::<T>),
            Some(on_error_before_close_output_wrapper::<T>),
            Some(on_error_after_close_output_wrapper::<T>),
        );
    }
}
/// Reclaim and drop a `T` that was previously leaked to the C side with
/// `Box::into_raw`.
///
/// # Safety
/// `context` must be a pointer obtained from `Box::into_raw(Box::new(t))` for
/// a value of exactly type `T`, and must not be used again afterwards.
unsafe extern "C" fn drop_context<T>(context: *mut c_void) {
    // Re-boxing the raw pointer restores normal Rust ownership, so the value
    // is freed when the box goes out of scope here.
    drop(Box::<T>::from_raw(context.cast::<T>()));
}
// Trampolines from the C ABI back into the user's `AudioInputCallback`.
//
// SAFETY (caller contract): `context` must point to a live `T` registered via
// `set_input_callback`, and `audio_stream` must be a valid stream pointer.
// `FromPrimitive::from_i32(error).unwrap()` panics on an error code unknown
// to these bindings.
unsafe extern "C" fn on_error_before_close_input_wrapper<T: AudioInputCallback>(
    context: *mut c_void,
    audio_stream: *mut ffi::oboe_AudioStream,
    error: ffi::oboe_Result,
) {
    let mut audio_stream = AudioStreamRef::wrap_raw(&mut *audio_stream);
    let callback = &mut *(context as *mut T);
    callback.on_error_before_close(&mut audio_stream, FromPrimitive::from_i32(error).unwrap());
}
unsafe extern "C" fn on_error_after_close_input_wrapper<T: AudioInputCallback>(
    context: *mut c_void,
    audio_stream: *mut ffi::oboe_AudioStream,
    error: ffi::oboe_Result,
) {
    let mut audio_stream = AudioStreamRef::wrap_raw(&mut *audio_stream);
    let callback = &mut *(context as *mut T);
    callback.on_error_after_close(&mut audio_stream, FromPrimitive::from_i32(error).unwrap());
}
/**
 * C-ABI trampoline for `AudioInputCallback::on_audio_ready`.
 *
 * Reinterprets the raw capture buffer as `num_frames` values of the
 * callback's frame type and forwards it as an immutable slice.
 */
unsafe extern "C" fn on_audio_ready_input_wrapper<T: AudioInputCallback>(
    context: *mut c_void,
    audio_stream: *mut ffi::oboe_AudioStream,
    audio_data: *mut c_void,
    num_frames: i32,
) -> ffi::oboe_DataCallbackResult {
    let mut audio_stream = AudioStreamRef::wrap_raw(&mut *audio_stream);
    // Input direction: the callback only reads the buffer, hence `*const`
    // and an immutable slice.
    let audio_data = from_raw_parts(
        audio_data as *const <T::FrameType as IsFrameType>::Type,
        num_frames as usize,
    );
    let callback = &mut *(context as *mut T);
    callback.on_audio_ready(&mut audio_stream, audio_data) as i32
}
/**
 * C-ABI trampoline for `AudioOutputCallback::on_error_before_close`.
 *
 * `context` must point to the `T` installed via the builder; `audio_stream`
 * must be a valid native stream for the duration of the call.
 */
unsafe extern "C" fn on_error_before_close_output_wrapper<T: AudioOutputCallback>(
    context: *mut c_void,
    audio_stream: *mut ffi::oboe_AudioStream,
    error: ffi::oboe_Result,
) {
    let mut audio_stream = AudioStreamRef::wrap_raw(&mut *audio_stream);
    let callback = &mut *(context as *mut T);
    // NOTE(review): `unwrap` assumes the native side only reports codes that
    // exist in the Rust `Error` enum — an unknown code would panic here.
    callback.on_error_before_close(&mut audio_stream, FromPrimitive::from_i32(error).unwrap());
}
/**
 * C-ABI trampoline for `AudioOutputCallback::on_error_after_close`.
 * Same pointer contract as the before-close wrapper directly above.
 */
unsafe extern "C" fn on_error_after_close_output_wrapper<T: AudioOutputCallback>(
    context: *mut c_void,
    audio_stream: *mut ffi::oboe_AudioStream,
    error: ffi::oboe_Result,
) {
    let mut audio_stream = AudioStreamRef::wrap_raw(&mut *audio_stream);
    let callback = &mut *(context as *mut T);
    callback.on_error_after_close(&mut audio_stream, FromPrimitive::from_i32(error).unwrap());
}
/**
 * C-ABI trampoline for `AudioOutputCallback::on_audio_ready`.
 *
 * Reinterprets the raw playback buffer as `num_frames` values of the
 * callback's frame type and forwards it as a mutable slice for the callback
 * to fill.
 */
unsafe extern "C" fn on_audio_ready_output_wrapper<T: AudioOutputCallback>(
    context: *mut c_void,
    audio_stream: *mut ffi::oboe_AudioStream,
    audio_data: *mut c_void,
    num_frames: i32,
) -> ffi::oboe_DataCallbackResult {
    let mut audio_stream = AudioStreamRef::wrap_raw(&mut *audio_stream);
    // Output direction: the callback writes samples into the buffer, hence
    // the mutable slice.
    let audio_data = from_raw_parts_mut(
        audio_data as *mut <T::FrameType as IsFrameType>::Type,
        num_frames as usize,
    );
    let callback = &mut *(context as *mut T);
    callback.on_audio_ready(&mut audio_stream, audio_data) as i32
}

565
vendor/oboe/src/definitions.rs vendored Normal file
View File

@@ -0,0 +1,565 @@
use num_derive::{FromPrimitive, ToPrimitive};
use num_traits::FromPrimitive;
use oboe_sys as ffi;
use std::{error, fmt, result};
/**
 * The number of nanoseconds in a microsecond. 1,000.
 */
pub const NANOS_PER_MICROSECOND: i64 = 1000;
/**
 * The number of nanoseconds in a millisecond. 1,000,000.
 */
pub const NANOS_PER_MILLISECOND: i64 = NANOS_PER_MICROSECOND * 1000;
/**
 * The number of milliseconds in a second. 1,000.
 */
pub const MILLIS_PER_SECOND: i64 = 1000;
/**
 * The number of nanoseconds in a second. 1,000,000,000.
 */
pub const NANOS_PER_SECOND: i64 = NANOS_PER_MILLISECOND * MILLIS_PER_SECOND;
/**
 * The state of the audio stream.
 *
 * Discriminants mirror the native `oboe` stream-state constants;
 * `FromPrimitive` is derived so raw `i32` codes coming back over FFI can be
 * decoded.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum StreamState {
    Uninitialized = ffi::oboe_StreamState_Uninitialized,
    Unknown = ffi::oboe_StreamState_Unknown,
    Open = ffi::oboe_StreamState_Open,
    Starting = ffi::oboe_StreamState_Starting,
    Started = ffi::oboe_StreamState_Started,
    Pausing = ffi::oboe_StreamState_Pausing,
    Paused = ffi::oboe_StreamState_Paused,
    Flushing = ffi::oboe_StreamState_Flushing,
    Flushed = ffi::oboe_StreamState_Flushed,
    Stopping = ffi::oboe_StreamState_Stopping,
    Stopped = ffi::oboe_StreamState_Stopped,
    Closing = ffi::oboe_StreamState_Closing,
    Closed = ffi::oboe_StreamState_Closed,
    Disconnected = ffi::oboe_StreamState_Disconnected,
}
/**
 * The direction of the stream.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum Direction {
    /**
     * Used for playback.
     */
    Output = ffi::oboe_Direction_Output,
    /**
     * Used for recording.
     */
    Input = ffi::oboe_Direction_Input,
}
/**
 * The format of audio samples.
 *
 * Discriminants mirror the native `oboe` audio-format constants.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum AudioFormat {
    /**
     * Invalid format.
     */
    Invalid = ffi::oboe_AudioFormat_Invalid,
    /**
     * Unspecified format. Format will be decided by Oboe.
     */
    Unspecified = ffi::oboe_AudioFormat_Unspecified,
    /**
     * Signed 16-bit integers.
     */
    I16 = ffi::oboe_AudioFormat_I16,
    /**
     * Signed 24-bit integers.
     */
    I24 = ffi::oboe_AudioFormat_I24,
    /**
     * Signed 32-bit integers.
     */
    I32 = ffi::oboe_AudioFormat_I32,
    /**
     * Single precision floating points.
     */
    F32 = ffi::oboe_AudioFormat_Float,
}
/**
 * The result of an audio callback: whether the stream should keep calling
 * back or stop.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum DataCallbackResult {
    /**
     * Indicates to the caller that the callbacks should continue.
     */
    Continue = ffi::oboe_DataCallbackResult_Continue,
    /**
     * Indicates to the caller that the callbacks should stop immediately.
     */
    Stop = ffi::oboe_DataCallbackResult_Stop,
}
/**
 * The result of an operation with value
 */
pub type Result<T> = result::Result<T, Error>;
/**
 * The result of operation without value
 * (shorthand for `Result<()>`, used for status-only native calls).
 */
pub type Status = Result<()>;
pub(crate) fn wrap_status(result: i32) -> Status {
if result == ffi::oboe_Result_OK {
Ok(())
} else {
Err(FromPrimitive::from_i32(result).unwrap())
}
}
pub(crate) fn wrap_result<T>(result: ffi::oboe_ResultWithValue<T>) -> Result<T> {
if result.mError == ffi::oboe_Result_OK {
Ok(result.mValue)
} else {
Err(FromPrimitive::from_i32(result.mError).unwrap())
}
}
/**
 * The error of an operation.
 *
 * Discriminants mirror the native `oboe_Result_Error*` codes; `FromPrimitive`
 * is derived so raw codes can be decoded in `wrap_status`/`wrap_result`.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum Error {
    Disconnected = ffi::oboe_Result_ErrorDisconnected,
    IllegalArgument = ffi::oboe_Result_ErrorIllegalArgument,
    Internal = ffi::oboe_Result_ErrorInternal,
    InvalidState = ffi::oboe_Result_ErrorInvalidState,
    InvalidHandle = ffi::oboe_Result_ErrorInvalidHandle,
    Unimplemented = ffi::oboe_Result_ErrorUnimplemented,
    Unavailable = ffi::oboe_Result_ErrorUnavailable,
    NoFreeHandles = ffi::oboe_Result_ErrorNoFreeHandles,
    NoMemory = ffi::oboe_Result_ErrorNoMemory,
    Null = ffi::oboe_Result_ErrorNull,
    Timeout = ffi::oboe_Result_ErrorTimeout,
    WouldBlock = ffi::oboe_Result_ErrorWouldBlock,
    InvalidFormat = ffi::oboe_Result_ErrorInvalidFormat,
    OutOfRange = ffi::oboe_Result_ErrorOutOfRange,
    NoService = ffi::oboe_Result_ErrorNoService,
    InvalidRate = ffi::oboe_Result_ErrorInvalidRate,
    Closed = ffi::oboe_Result_ErrorClosed,
}
impl error::Error for Error {}
// `Display` simply reuses the derived `Debug` variant names.
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self, f)
    }
}
/**
 * The sharing mode of the audio stream.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum SharingMode {
    /**
     * This will be the only stream using a particular source or sink.
     * This mode will provide the lowest possible latency.
     * You should close EXCLUSIVE streams immediately when you are not using them.
     *
     * If you do not need the lowest possible latency then we recommend using Shared,
     * which is the default.
     */
    Exclusive = ffi::oboe_SharingMode_Exclusive,
    /**
     * Multiple applications can share the same device.
     * The data from output streams will be mixed by the audio service.
     * The data for input streams will be distributed by the audio service.
     *
     * This will have higher latency than the EXCLUSIVE mode.
     */
    Shared = ffi::oboe_SharingMode_Shared,
}
/**
 * The performance mode of the audio stream.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum PerformanceMode {
    /**
     * No particular performance needs. Default.
     */
    None = ffi::oboe_PerformanceMode_None,
    /**
     * Extending battery life is most important.
     */
    PowerSaving = ffi::oboe_PerformanceMode_PowerSaving,
    /**
     * Reducing latency is most important.
     */
    LowLatency = ffi::oboe_PerformanceMode_LowLatency,
}
/**
 * The underlying audio API used by the audio stream.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum AudioApi {
    /**
     * Try to use AAudio. If not available then use OpenSL ES.
     */
    Unspecified = ffi::oboe_AudioApi_Unspecified,
    /**
     * Use OpenSL ES.
     */
    OpenSLES = ffi::oboe_AudioApi_OpenSLES,
    /**
     * Try to use AAudio. Fail if unavailable.
     */
    AAudio = ffi::oboe_AudioApi_AAudio,
}
/**
 * Specifies the quality of the sample rate conversion performed by Oboe.
 * Higher quality will require more CPU load.
 * Higher quality conversion will probably be implemented using a sinc based resampler.
 *
 * Note: unlike the other enums in this module, the variants carry no explicit
 * FFI discriminants; they rely on declaration order starting at 0.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum SampleRateConversionQuality {
    /**
     * No conversion by Oboe. Underlying APIs may still do conversion.
     */
    None,
    /**
     * Fastest conversion but may not sound great.
     * This may be implemented using bilinear interpolation.
     */
    Fastest,
    /**
     * Intermediate quality/CPU trade-off between Fastest and Best.
     */
    Low,
    /**
     * Intermediate quality/CPU trade-off between Fastest and Best.
     */
    Medium,
    /**
     * Intermediate quality/CPU trade-off between Fastest and Best.
     */
    High,
    /**
     * Highest quality conversion, which may be expensive in terms of CPU.
     */
    Best,
}
/**
 * The Usage attribute expresses *why* you are playing a sound, what is this sound used for.
 * This information is used by certain platforms or routing policies
 * to make more refined volume or routing decisions.
 *
 * Note that these match the equivalent values in AudioAttributes in the Android Java API.
 *
 * This attribute only has an effect on Android API 28+.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum Usage {
    /**
     * Use this for streaming media, music performance, video, podcasts, etcetera.
     */
    Media = ffi::oboe_Usage_Media,
    /**
     * Use this for voice over IP, telephony, etcetera.
     */
    VoiceCommunication = ffi::oboe_Usage_VoiceCommunication,
    /**
     * Use this for sounds associated with telephony such as busy tones, DTMF, etcetera.
     */
    VoiceCommunicationSignalling = ffi::oboe_Usage_VoiceCommunicationSignalling,
    /**
     * Use this to demand the user's attention.
     */
    Alarm = ffi::oboe_Usage_Alarm,
    /**
     * Use this for notifying the user when a message has arrived or some
     * other background event has occurred.
     */
    Notification = ffi::oboe_Usage_Notification,
    /**
     * Use this when the phone rings.
     */
    NotificationRingtone = ffi::oboe_Usage_NotificationRingtone,
    /**
     * Use this to attract the user's attention when, for example, the battery is low.
     */
    NotificationEvent = ffi::oboe_Usage_NotificationEvent,
    /**
     * Use this for screen readers, etcetera.
     */
    AssistanceAccessibility = ffi::oboe_Usage_AssistanceAccessibility,
    /**
     * Use this for driving or navigation directions.
     */
    AssistanceNavigationGuidance = ffi::oboe_Usage_AssistanceNavigationGuidance,
    /**
     * Use this for user interface sounds, beeps, etcetera.
     */
    AssistanceSonification = ffi::oboe_Usage_AssistanceSonification,
    /**
     * Use this for game audio and sound effects.
     */
    Game = ffi::oboe_Usage_Game,
    /**
     * Use this for audio responses to user queries, audio instructions or help utterances.
     */
    Assistant = ffi::oboe_Usage_Assistant,
}
/**
 * The ContentType attribute describes *what* you are playing.
 * It expresses the general category of the content. This information is optional.
 * But in case it is known (for instance {@link Movie} for a
 * movie streaming service or {@link Speech} for
 * an audio book application) this information might be used by the audio framework to
 * enforce audio focus.
 *
 * Note that these match the equivalent values in AudioAttributes in the Android Java API.
 *
 * This attribute only has an effect on Android API 28+.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum ContentType {
    /**
     * Use this for spoken voice, audio books, etcetera.
     */
    Speech = ffi::oboe_ContentType_Speech,
    /**
     * Use this for pre-recorded or live music.
     */
    Music = ffi::oboe_ContentType_Music,
    /**
     * Use this for a movie or video soundtrack.
     */
    Movie = ffi::oboe_ContentType_Movie,
    /**
     * Use this for sounds designed to accompany a user action,
     * such as a click or beep sound made when the user presses a button.
     */
    Sonification = ffi::oboe_ContentType_Sonification,
}
/**
 * Defines the audio source.
 * An audio source defines both a default physical source of audio signal, and a recording
 * configuration.
 *
 * Note that these match the equivalent values in MediaRecorder.AudioSource in the Android Java API.
 *
 * This attribute only has an effect on Android API 28+.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum InputPreset {
    /**
     * Use this preset when other presets do not apply.
     */
    Generic = ffi::oboe_InputPreset_Generic,
    /**
     * Use this preset when recording video.
     */
    Camcorder = ffi::oboe_InputPreset_Camcorder,
    /**
     * Use this preset when doing speech recognition.
     */
    VoiceRecognition = ffi::oboe_InputPreset_VoiceRecognition,
    /**
     * Use this preset when doing telephony or voice messaging.
     */
    VoiceCommunication = ffi::oboe_InputPreset_VoiceCommunication,
    /**
     * Use this preset to obtain an input with no effects.
     * Note that this input will not have automatic gain control
     * so the recorded volume may be very low.
     */
    Unprocessed = ffi::oboe_InputPreset_Unprocessed,
    /**
     * Use this preset for capturing audio meant to be processed in real time
     * and played back for live performance (e.g. karaoke).
     * The capture path will minimize latency and coupling with playback path.
     */
    VoicePerformance = ffi::oboe_InputPreset_VoicePerformance,
}
/**
 * This attribute can be used to allocate a session ID to the audio stream.
 *
 * This attribute only has an effect on Android API 28+.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum SessionId {
    /**
     * Do not allocate a session ID.
     * Effects cannot be used with this stream.
     * Default.
     */
    None = ffi::oboe_SessionId_None,
    /**
     * Allocate a session ID that can be used to attach and control
     * effects using the Java AudioEffects API.
     * Note that the use of this flag may result in higher latency.
     *
     * Note that this matches the value of `AudioManager.AUDIO_SESSION_ID_GENERATE`.
     */
    Allocate = ffi::oboe_SessionId_Allocate,
}
/**
 * The channel count of the audio stream.
 * Use of this enum is convenient to avoid "magic"
 * numbers when specifying the channel count.
 *
 * For example, you can write
 * `builder.set_channel_count(ChannelCount::Stereo)`
 * rather than `builder.set_channel_count(2)`.
 */
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromPrimitive, ToPrimitive)]
#[repr(i32)]
pub enum ChannelCount {
    /**
     * Audio channel count definition, use Mono or Stereo
     */
    Unspecified = ffi::oboe_ChannelCount_Unspecified,
    /**
     * Use this for mono audio.
     */
    Mono = ffi::oboe_ChannelCount_Mono,
    /**
     * Use this for stereo audio.
     */
    Stereo = ffi::oboe_ChannelCount_Stereo,
}
/**
 * The default (optimal) audio streaming values.
 *
 * On API 16 to 26 OpenSL ES will be used.
 * When using OpenSL ES the optimal values for `sample_rate` and
 * `frames_per_burst` are not known by the native code.
 * On API 17+ these values should be obtained from the AudioManager using this code:
 *
 * ```java
 * // Note that this technique only works for built-in speakers and headphones.
 * AudioManager myAudioMgr = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
 * String sampleRateStr = myAudioMgr.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
 * int defaultSampleRate = Integer.parseInt(sampleRateStr);
 * String framesPerBurstStr = myAudioMgr.getProperty(AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
 * int defaultFramesPerBurst = Integer.parseInt(framesPerBurstStr);
 * ```
 *
 * It can then be passed down to Oboe through JNI.
 *
 * AAudio will get the optimal `frames_per_burst` from the HAL and will ignore this value.
 *
 * NOTE(review): the getters/setters below read/write mutable native statics
 * without synchronization — presumably they are meant to be called before
 * any stream is opened; confirm against callers.
 */
pub struct DefaultStreamValues(());
impl DefaultStreamValues {
    /**
     * The default sample rate to use when opening new audio streams
     */
    pub fn get_sample_rate() -> i32 {
        unsafe { ffi::oboe_DefaultStreamValues_SampleRate }
    }
    pub fn set_sample_rate(sample_rate: i32) {
        unsafe {
            ffi::oboe_DefaultStreamValues_SampleRate = sample_rate;
        }
    }
    /**
     * The default frames per burst to use when opening new audio streams
     */
    pub fn get_frames_per_burst() -> i32 {
        unsafe { ffi::oboe_DefaultStreamValues_FramesPerBurst }
    }
    pub fn set_frames_per_burst(frames_per_burst: i32) {
        unsafe {
            ffi::oboe_DefaultStreamValues_FramesPerBurst = frames_per_burst;
        }
    }
    /**
     * The default channel count to use when opening new audio streams
     */
    pub fn get_channel_count() -> i32 {
        unsafe { ffi::oboe_DefaultStreamValues_ChannelCount }
    }
    pub fn set_channel_count(channel_count: i32) {
        unsafe {
            ffi::oboe_DefaultStreamValues_ChannelCount = channel_count;
        }
    }
}
/**
 * The time at which the frame at `position` was presented
 *
 * `#[repr(C)]` so the struct can be filled in directly from the native side
 * — presumably matching oboe's frame-timestamp layout; confirm against the
 * FFI call sites.
 */
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct FrameTimestamp {
    /**
     * The position in number of frames
     */
    pub position: i64,
    /**
     * The timestamp in nanoseconds
     */
    pub timestamp: i64,
}

8
vendor/oboe/src/java_interface.rs vendored Normal file
View File

@@ -0,0 +1,8 @@
// Submodules of the optional Java/JNI interface.
mod audio_features;
mod definitions;
// `devices_info` and `stream_defaults` only add inherent impls to types
// re-exported elsewhere, so they have no items of their own to re-export.
mod devices_info;
mod stream_defaults;
mod utils;
pub use self::audio_features::*;
pub use self::definitions::*;

View File

@@ -0,0 +1,57 @@
use super::{
utils::{
get_context, get_package_manager, has_system_feature, with_attached, JNIEnv, JObject,
JResult,
},
PackageManager,
};
/**
 * The Android audio features
 *
 * Each variant corresponds to a `PackageManager.FEATURE_*` system-feature
 * string; the mapping lives in the `From<AudioFeature> for &'static str`
 * impl in this module.
 */
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "java-interface")))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AudioFeature {
    /// `android.hardware.audio.low_latency`
    LowLatency,
    /// `android.hardware.audio.output`
    Output,
    /// `android.hardware.audio.pro`
    Pro,
    /// `android.hardware.microphone`
    Microphone,
    /// `android.software.midi`
    Midi,
}
impl From<AudioFeature> for &'static str {
fn from(feature: AudioFeature) -> Self {
use AudioFeature::*;
match feature {
LowLatency => PackageManager::FEATURE_AUDIO_LOW_LATENCY,
Output => PackageManager::FEATURE_AUDIO_OUTPUT,
Pro => PackageManager::FEATURE_AUDIO_PRO,
Microphone => PackageManager::FEATURE_MICROPHONE,
Midi => PackageManager::FEATURE_MIDI,
}
}
}
impl AudioFeature {
    /**
     * Check availability of an audio feature using Android Java API
     *
     * Attaches to the JVM, asks the application context's `PackageManager`,
     * and stringifies any JNI error.
     */
    pub fn has(&self) -> Result<bool, String> {
        let context = get_context();
        with_attached(context, |env, activity| {
            try_check_system_feature(env, &activity, (*self).into())
        })
        .map_err(|error| error.to_string())
    }
}
/// JNI half of `AudioFeature::has`: `context.getPackageManager().hasSystemFeature(feature)`.
fn try_check_system_feature<'j>(
    env: &mut JNIEnv<'j>,
    activity: &JObject<'j>,
    feature: &str,
) -> JResult<bool> {
    let package_manager = get_package_manager(env, activity)?;
    has_system_feature(env, &package_manager, feature)
}

View File

@@ -0,0 +1,167 @@
use num_derive::FromPrimitive;
use crate::AudioFormat;
// Namespace for constants mirroring `android.content.Context`.
pub(crate) struct Context;
impl Context {
    pub const AUDIO_SERVICE: &'static str = "audio";
}
// Namespace for constants mirroring `android.content.pm.PackageManager`
// system-feature strings.
pub(crate) struct PackageManager;
impl PackageManager {
    pub const FEATURE_AUDIO_LOW_LATENCY: &'static str = "android.hardware.audio.low_latency";
    pub const FEATURE_AUDIO_OUTPUT: &'static str = "android.hardware.audio.output";
    pub const FEATURE_AUDIO_PRO: &'static str = "android.hardware.audio.pro";
    pub const FEATURE_MICROPHONE: &'static str = "android.hardware.microphone";
    pub const FEATURE_MIDI: &'static str = "android.software.midi";
}
// Namespace for constants mirroring `android.media.AudioManager`.
pub(crate) struct AudioManager;
impl AudioManager {
    pub const PROPERTY_OUTPUT_SAMPLE_RATE: &'static str =
        "android.media.property.OUTPUT_SAMPLE_RATE";
    pub const PROPERTY_OUTPUT_FRAMES_PER_BUFFER: &'static str =
        "android.media.property.OUTPUT_FRAMES_PER_BUFFER";
    // Flags for `AudioManager.getDevices`: inputs, outputs, or both.
    pub const GET_DEVICES_INPUTS: i32 = 1 << 0;
    pub const GET_DEVICES_OUTPUTS: i32 = 1 << 1;
    pub const GET_DEVICES_ALL: i32 = Self::GET_DEVICES_INPUTS | Self::GET_DEVICES_OUTPUTS;
}
/**
 * The Android audio device info
 *
 * A Rust-side snapshot of `android.media.AudioDeviceInfo`, filled in by
 * `AudioDeviceInfo::request`.
 */
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "java-interface")))]
#[derive(Debug, Clone)]
pub struct AudioDeviceInfo {
    /**
     * Device identifier
     */
    pub id: i32,
    /**
     * The type of device
     */
    pub device_type: AudioDeviceType,
    /**
     * The device can be used for playback and/or capture
     */
    pub direction: AudioDeviceDirection,
    /**
     * Device address
     */
    pub address: String,
    /**
     * Device product name
     */
    pub product_name: String,
    /**
     * Available channel configurations
     */
    pub channel_counts: Vec<i32>,
    /**
     * Supported sample rates
     */
    pub sample_rates: Vec<i32>,
    /**
     * Supported audio formats
     */
    pub formats: Vec<AudioFormat>,
}
/**
 * The type of audio device
 *
 * Discriminants are intended to match the `android.media.AudioDeviceInfo.TYPE_*`
 * constants, which is why they are decoded with `FromPrimitive` from the
 * value of `getType()`. `Unsupported` is the local fallback for codes this
 * enum does not know.
 */
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "java-interface")))]
#[derive(Debug, Clone, Copy, FromPrimitive)]
#[non_exhaustive]
#[repr(i32)]
pub enum AudioDeviceType {
    Unknown = 0,
    AuxLine = 19,
    BleBroadcast = 30,
    BleHeadset = 26,
    BleSpeaker = 27,
    BluetoothA2DP = 8,
    BluetoothSCO = 7,
    BuiltinEarpiece = 1,
    BuiltinMic = 15,
    BuiltinSpeaker = 2,
    BuiltinSpeakerSafe = 24,
    Bus = 21,
    Dock = 13,
    Fm = 14,
    FmTuner = 16,
    Hdmi = 9,
    HdmiArc = 10,
    HdmiEarc = 29,
    HearingAid = 23,
    Ip = 20,
    LineAnalog = 5,
    LineDigital = 6,
    RemoteSubmix = 25,
    Telephony = 18,
    TvTuner = 17,
    UsbAccessory = 12,
    UsbDevice = 11,
    UsbHeadset = 22,
    WiredHeadphones = 4,
    WiredHeadset = 3,
    Unsupported = -1,
}
/**
 * The direction of audio device
 *
 * Discriminants reuse the `AudioManager::GET_DEVICES_*` bit flags so a
 * direction can double as the `getDevices` filter argument.
 */
#[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "java-interface")))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(i32)]
pub enum AudioDeviceDirection {
    // Neither input nor output capability reported.
    Dumb = 0,
    Input = AudioManager::GET_DEVICES_INPUTS,
    Output = AudioManager::GET_DEVICES_OUTPUTS,
    InputOutput = AudioManager::GET_DEVICES_ALL,
}
impl AudioDeviceDirection {
pub fn new(is_input: bool, is_output: bool) -> Self {
use self::AudioDeviceDirection::*;
match (is_input, is_output) {
(true, true) => InputOutput,
(false, true) => Output,
(true, false) => Input,
_ => Dumb,
}
}
pub fn is_input(&self) -> bool {
0 < *self as i32 & AudioDeviceDirection::Input as i32
}
pub fn is_output(&self) -> bool {
0 < *self as i32 & AudioDeviceDirection::Output as i32
}
}
impl AudioFormat {
    // Encoding codes as reported by `AudioDeviceInfo.getEncodings()`.
    pub(crate) const ENCODING_PCM_16BIT: i32 = 2;
    //pub(crate) const ENCODING_PCM_8BIT: i32 = 3;
    pub(crate) const ENCODING_PCM_FLOAT: i32 = 4;
    /**
     * Maps an Android encoding code to the equivalent oboe sample format,
     * or `None` for encodings oboe does not support.
     */
    pub(crate) fn from_encoding(encoding: i32) -> Option<AudioFormat> {
        if encoding == Self::ENCODING_PCM_16BIT {
            Some(AudioFormat::I16)
        } else if encoding == Self::ENCODING_PCM_FLOAT {
            Some(AudioFormat::F32)
        } else {
            None
        }
    }
}

View File

@@ -0,0 +1,88 @@
use num_traits::FromPrimitive;
use crate::AudioFormat;
use super::{
utils::{
call_method_no_args_ret_bool, call_method_no_args_ret_char_sequence,
call_method_no_args_ret_int, call_method_no_args_ret_int_array,
call_method_no_args_ret_string, get_context, get_devices, get_system_service,
with_attached, JNIEnv, JObject, JResult,
},
AudioDeviceDirection, AudioDeviceInfo, AudioDeviceType, Context,
};
impl AudioDeviceInfo {
    /**
     * Request audio devices using Android Java API
     *
     * Requires API level 23+ (`AudioManager.getDevices`); on older SDKs a
     * JNI `MethodNotFound` error is fabricated as an "unsupported" sentinel
     * and surfaced as the `Err(String)`.
     */
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "java-interface")))]
    pub fn request(direction: AudioDeviceDirection) -> Result<Vec<AudioDeviceInfo>, String> {
        let context = get_context();
        with_attached(context, |env, context| {
            let sdk_version = env
                .get_static_field("android/os/Build$VERSION", "SDK_INT", "I")?
                .i()?;
            if sdk_version >= 23 {
                try_request_devices_info(env, &context, direction)
            } else {
                // Placeholder error: the empty name/sig only signal
                // "API unavailable on this SDK level".
                Err(jni::errors::Error::MethodNotFound {
                    name: "".into(),
                    sig: "".into(),
                })
            }
        })
        .map_err(|error| error.to_string())
    }
}
/// JNI half of `AudioDeviceInfo::request`: queries the `AudioManager` for
/// devices matching `direction` and converts each Java `AudioDeviceInfo`
/// object into the Rust struct.
fn try_request_devices_info<'j>(
    env: &mut JNIEnv<'j>,
    context: &JObject<'j>,
    direction: AudioDeviceDirection,
) -> JResult<Vec<AudioDeviceInfo>> {
    let audio_manager = get_system_service(env, context, Context::AUDIO_SERVICE)?;
    // The direction discriminant doubles as the `getDevices` flags argument.
    let devices = get_devices(env, &audio_manager, direction as i32)?;
    let length = env.get_array_length(&devices)?;
    (0..length)
        .map(|index| {
            let device = env.get_object_array_element(&devices, index)?;
            let id = call_method_no_args_ret_int(env, &device, "getId")?;
            let address = call_method_no_args_ret_string(env, &device, "getAddress")?;
            let address = String::from(env.get_string(&address)?);
            let product_name =
                call_method_no_args_ret_char_sequence(env, &device, "getProductName")?;
            let product_name = String::from(env.get_string(&product_name)?);
            // Unknown type codes fall back to `Unsupported` rather than failing.
            let device_type =
                FromPrimitive::from_i32(call_method_no_args_ret_int(env, &device, "getType")?)
                    .unwrap_or(AudioDeviceType::Unsupported);
            let direction = AudioDeviceDirection::new(
                call_method_no_args_ret_bool(env, &device, "isSource")?,
                call_method_no_args_ret_bool(env, &device, "isSink")?,
            );
            let channel_counts =
                call_method_no_args_ret_int_array(env, &device, "getChannelCounts")?;
            let sample_rates = call_method_no_args_ret_int_array(env, &device, "getSampleRates")?;
            // Encodings oboe cannot represent are silently dropped.
            let formats = call_method_no_args_ret_int_array(env, &device, "getEncodings")?
                .into_iter()
                .filter_map(AudioFormat::from_encoding)
                .collect::<Vec<_>>();
            Ok(AudioDeviceInfo {
                id,
                address,
                product_name,
                device_type,
                direction,
                channel_counts,
                sample_rates,
                formats,
            })
        })
        .collect::<Result<Vec<_>, _>>()
}

View File

@@ -0,0 +1,79 @@
use super::{
utils::{
get_context, get_property, get_system_service, with_attached, JNIEnv, JObject, JResult,
},
AudioManager, Context,
};
use crate::DefaultStreamValues;
impl DefaultStreamValues {
    /**
     * Try request defaults from AudioManager properties.
     *
     * Only useful on API 17..26: earlier SDKs lack the properties (reported
     * via a fabricated `MethodNotFound` sentinel error), and on 26+ the
     * native side no longer needs them, so nothing is set.
     */
    #[cfg_attr(feature = "doc-cfg", doc(cfg(feature = "java-interface")))]
    pub fn init() -> Result<(), String> {
        let activity = get_context();
        let values = with_attached(activity, |env, context| {
            let sdk_version = env
                .get_static_field("android/os/Build$VERSION", "SDK_INT", "I")?
                .i()?;
            if sdk_version < 17 {
                // Placeholder error: empty name/sig only signal "unsupported SDK".
                Err(jni::errors::Error::MethodNotFound {
                    name: "".into(),
                    sig: "".into(),
                })
            } else if sdk_version < 26 {
                try_request_default_stream_values(env, &context).map(Some)
            } else {
                // not necessary
                Ok(None)
            }
        });
        match values {
            Ok(Some((sample_rate, frames_per_burst))) => {
                // Each property is applied only if it was present and parseable.
                if let Some(value) = sample_rate {
                    Self::set_sample_rate(value);
                }
                if let Some(value) = frames_per_burst {
                    Self::set_frames_per_burst(value);
                }
                Ok(())
            }
            Ok(None) => Ok(()),
            Err(error) => Err(error.to_string()),
        }
    }
}
/// JNI half of `DefaultStreamValues::init`: reads the AudioManager's output
/// sample-rate and frames-per-buffer properties. Each value is `None` when
/// the property string is missing, non-UTF-8, or not parseable as an integer.
fn try_request_default_stream_values<'j>(
    env: &mut JNIEnv<'j>,
    context: &JObject<'j>,
) -> JResult<(Option<i32>, Option<i32>)> {
    let audio_manager = get_system_service(env, context, Context::AUDIO_SERVICE)?;
    let sample_rate = get_property(
        env,
        &audio_manager,
        AudioManager::PROPERTY_OUTPUT_SAMPLE_RATE,
    )?;
    let sample_rate = env.get_string(&sample_rate)?;
    let frames_per_burst = get_property(
        env,
        &audio_manager,
        AudioManager::PROPERTY_OUTPUT_FRAMES_PER_BUFFER,
    )?;
    let frames_per_burst = env.get_string(&frames_per_burst)?;
    Ok((
        (*sample_rate).to_str().ok().and_then(|s| s.parse().ok()),
        (*frames_per_burst)
            .to_str()
            .ok()
            .and_then(|s| s.parse().ok()),
    ))
}

181
vendor/oboe/src/java_interface/utils.rs vendored Normal file
View File

@@ -0,0 +1,181 @@
use jni::sys::jobject;
use ndk_context::AndroidContext;
use std::sync::Arc;
pub use jni::Executor;
pub use jni::{
errors::Result as JResult,
objects::{JIntArray, JObject, JObjectArray, JString},
JNIEnv, JavaVM,
};
/// Returns the process-wide Android context registered with `ndk-context`.
pub fn get_context() -> AndroidContext {
    ndk_context::android_context()
}
/// Runs `closure` with a `JNIEnv` attached to the current thread, handing it
/// the application context object as the second argument.
pub fn with_attached<F, R>(context: AndroidContext, closure: F) -> JResult<R>
where
    for<'j> F: FnOnce(&mut JNIEnv<'j>, JObject<'j>) -> JResult<R>,
{
    // SAFETY(review): relies on `ndk-context` supplying a valid JavaVM
    // pointer for this process — presumably guaranteed by the glue crate
    // that populated it; confirm.
    let vm = Arc::new(unsafe { JavaVM::from_raw(context.vm().cast())? });
    let context = context.context();
    // SAFETY(review): likewise assumes the stored context pointer is a valid
    // global reference for the lifetime of the closure.
    let context = unsafe { JObject::from_raw(context as jobject) };
    Executor::new(vm).with_attached(|env| closure(env, context))
}
/// Calls a no-argument Java method returning `int[]` and copies the array
/// into a `Vec<i32>`.
pub fn call_method_no_args_ret_int_array<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    method: &str,
) -> JResult<Vec<i32>> {
    let array: JIntArray = env.call_method(subject, method, "()[I", &[])?.l()?.into();
    let length = env.get_array_length(&array)?;
    // `get_int_array_region` copies exactly `buf.len()` elements, so the
    // buffer must already be `length` elements long. The previous
    // `Vec::with_capacity` produced a zero-length slice via `as_mut()`,
    // which copied nothing and always returned an empty Vec.
    let mut values = vec![0; length as usize];
    env.get_int_array_region(array, 0, &mut values)?;
    Ok(values)
}
/// Calls a no-argument Java method with signature `()I` and returns the int.
pub fn call_method_no_args_ret_int<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    method: &str,
) -> JResult<i32> {
    env.call_method(subject, method, "()I", &[])?.i()
}
/// Calls a no-argument Java method with signature `()Z` and returns the bool.
pub fn call_method_no_args_ret_bool<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    method: &str,
) -> JResult<bool> {
    env.call_method(subject, method, "()Z", &[])?.z()
}
/// Calls a no-argument Java method returning `java.lang.String`.
pub fn call_method_no_args_ret_string<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    method: &str,
) -> JResult<JString<'j>> {
    Ok(env
        .call_method(subject, method, "()Ljava/lang/String;", &[])?
        .l()?
        .into())
}
/// Calls a no-argument Java method returning `java.lang.CharSequence` and
/// converts the result to a `JString` via its `toString()` method.
pub fn call_method_no_args_ret_char_sequence<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    method: &str,
) -> JResult<JString<'j>> {
    let cseq = env
        .call_method(subject, method, "()Ljava/lang/CharSequence;", &[])?
        .l()?;
    Ok(env
        .call_method(&cseq, "toString", "()Ljava/lang/String;", &[])?
        .l()?
        .into())
}
/// Calls a Java method taking a single `String` argument and returning `boolean`.
pub fn call_method_string_arg_ret_bool<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    name: &str,
    arg: impl AsRef<str>,
) -> JResult<bool> {
    env.call_method(
        subject,
        name,
        "(Ljava/lang/String;)Z",
        &[(&env.new_string(arg)?).into()],
    )?
    .z()
}
/// Calls a Java method taking a single `String` argument and returning `String`.
pub fn call_method_string_arg_ret_string<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    name: &str,
    arg: impl AsRef<str>,
) -> JResult<JString<'j>> {
    Ok(env
        .call_method(
            subject,
            name,
            "(Ljava/lang/String;)Ljava/lang/String;",
            &[(&env.new_string(arg)?).into()],
        )?
        .l()?
        .into())
}
/// Calls a Java method taking a single `String` argument and returning `Object`.
///
/// Accepts any string-like argument (`impl AsRef<str>`), matching the other
/// `call_method_string_arg_*` helpers in this module; plain `&str` callers
/// are unaffected.
pub fn call_method_string_arg_ret_object<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    method: &str,
    arg: impl AsRef<str>,
) -> JResult<JObject<'j>> {
    env.call_method(
        subject,
        method,
        "(Ljava/lang/String;)Ljava/lang/Object;",
        &[(&env.new_string(arg)?).into()],
    )?
    .l()
}
/// `subject.getPackageManager()` — returns the context's `PackageManager`.
pub fn get_package_manager<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
) -> JResult<JObject<'j>> {
    env.call_method(
        subject,
        "getPackageManager",
        "()Landroid/content/pm/PackageManager;",
        &[],
    )?
    .l()
}
/// `packageManager.hasSystemFeature(name)`.
pub fn has_system_feature<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    name: &str,
) -> JResult<bool> {
    call_method_string_arg_ret_bool(env, subject, "hasSystemFeature", name)
}
/// `context.getSystemService(name)` — e.g. `Context::AUDIO_SERVICE`.
pub fn get_system_service<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    name: &str,
) -> JResult<JObject<'j>> {
    call_method_string_arg_ret_object(env, subject, "getSystemService", name)
}
/// `audioManager.getProperty(name)` — e.g. the output sample-rate property.
pub fn get_property<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    name: &str,
) -> JResult<JString<'j>> {
    call_method_string_arg_ret_string(env, subject, "getProperty", name)
}
/// `audioManager.getDevices(flags)` — `flags` are `GET_DEVICES_*` bits.
pub fn get_devices<'j>(
    env: &mut JNIEnv<'j>,
    subject: &JObject<'j>,
    flags: i32,
) -> JResult<JObjectArray<'j>> {
    env.call_method(
        subject,
        "getDevices",
        "(I)[Landroid/media/AudioDeviceInfo;",
        &[flags.into()],
    )?
    .l()
    .map(From::from)
}

26
vendor/oboe/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(feature = "doc-cfg", feature(doc_cfg))]
// Core stream API modules.
mod audio_stream;
mod audio_stream_base;
mod audio_stream_builder;
mod audio_stream_callback;
mod definitions;
mod private;
mod type_guide;
mod version;
// Optional JNI-based helpers, gated behind the `java-interface` feature.
#[cfg(feature = "java-interface")]
mod java_interface;
pub use self::audio_stream::*;
pub use self::audio_stream_base::*;
pub use self::audio_stream_builder::*;
pub use self::audio_stream_callback::*;
pub use self::definitions::*;
// Raw-handle traits stay crate-private; only the safe wrappers are public.
pub(crate) use self::private::*;
pub use self::type_guide::*;
pub use self::version::*;
#[cfg(feature = "java-interface")]
pub use self::java_interface::*;

17
vendor/oboe/src/private.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
use oboe_sys as ffi;
/// Access to the raw FFI `AudioStreamBase` behind a safe wrapper type.
pub trait RawAudioStreamBase {
    fn _raw_base(&self) -> &ffi::oboe_AudioStreamBase;
    fn _raw_base_mut(&mut self) -> &mut ffi::oboe_AudioStreamBase;
}
/// Access to the raw FFI `AudioStream` behind a safe wrapper type.
pub trait RawAudioStream {
    fn _raw_stream(&self) -> &ffi::oboe_AudioStream;
    fn _raw_stream_mut(&mut self) -> &mut ffi::oboe_AudioStream;
}
/// The raw marker for input stream
pub trait RawAudioInputStream {}
/// The raw marker for output stream
pub trait RawAudioOutputStream {}

115
vendor/oboe/src/type_guide.rs vendored Normal file
View File

@@ -0,0 +1,115 @@
use super::{AudioFormat, ChannelCount, Direction};
/**
 * Unspecified marker type for use everywhere
 */
pub struct Unspecified;
/**
 * The trait for direction marker types
 */
pub trait IsDirection {
    const DIRECTION: Direction;
}
/**
 * The input direction marker
 */
pub struct Input;
impl IsDirection for Input {
    const DIRECTION: Direction = Direction::Input;
}
/**
 * The output direction marker
 */
pub struct Output;
impl IsDirection for Output {
    const DIRECTION: Direction = Direction::Output;
}
/**
 * The trait for format marker types
 *
 * Implemented by the sample types themselves (`i16`, `i32`, `f32`) plus
 * `Unspecified`.
 */
pub trait IsFormat {
    const FORMAT: AudioFormat;
}
impl IsFormat for Unspecified {
    const FORMAT: AudioFormat = AudioFormat::Unspecified;
}
impl IsFormat for i16 {
    const FORMAT: AudioFormat = AudioFormat::I16;
}
impl IsFormat for i32 {
    const FORMAT: AudioFormat = AudioFormat::I32;
}
impl IsFormat for f32 {
    const FORMAT: AudioFormat = AudioFormat::F32;
}
/**
 * The trait for channel count marker types
 */
pub trait IsChannelCount {
    const CHANNEL_COUNT: ChannelCount;
}
impl IsChannelCount for Unspecified {
    const CHANNEL_COUNT: ChannelCount = ChannelCount::Unspecified;
}
/**
 * The single mono channel configuration marker
 */
pub struct Mono;
impl IsChannelCount for Mono {
    const CHANNEL_COUNT: ChannelCount = ChannelCount::Mono;
}
/**
 * The dual stereo channels configuration marker
 */
pub struct Stereo;
impl IsChannelCount for Stereo {
    const CHANNEL_COUNT: ChannelCount = ChannelCount::Stereo;
}
/**
 * A frame whose channel count is unspecified at compile time:
 * either a single mono sample or a stereo pair of format `T`.
 */
pub enum AltFrame<T: IsFormat> {
    Mono(T),
    Stereo(T, T),
}
/**
 * The trait for frame type marker types
 *
 * Implemented for `(format, channel-count)` pairs; `Type` is the concrete
 * element type of the slices handed to audio callbacks.
 */
pub trait IsFrameType {
    type Type;
    type Format: IsFormat;
    type ChannelCount: IsChannelCount;
}
impl<T: IsFormat> IsFrameType for (T, Unspecified) {
    // Channel count unknown at compile time, so each element is an `AltFrame`.
    type Type = AltFrame<T>;
    type Format = T;
    type ChannelCount = Unspecified;
}
impl<T: IsFormat> IsFrameType for (T, Mono) {
    type Type = T;
    type Format = T;
    type ChannelCount = Mono;
}
impl<T: IsFormat> IsFrameType for (T, Stereo) {
    type Type = (T, T);
    type Format = T;
    type ChannelCount = Stereo;
}

36
vendor/oboe/src/version.rs vendored Normal file
View File

@@ -0,0 +1,36 @@
use oboe_sys as ffi;
use std::str::from_utf8_unchecked;
/**
 * The version info
 *
 * Thin re-export of the native oboe version constants.
 */
pub struct Version;
impl Version {
    /**
     * The major version number
     */
    pub const MAJOR: u8 = ffi::oboe_Version_Major;
    /**
     * The minor version number
     */
    pub const MINOR: u8 = ffi::oboe_Version_Minor;
    /**
     * The patch version number
     */
    pub const PATCH: u16 = ffi::oboe_Version_Patch;
    /**
     * The version as 32-bit number
     */
    pub const NUMBER: u32 = ffi::oboe_Version_Number;
    /**
     * The version as text
     *
     * NOTE(review): assumes `oboe_Version_Text` is valid UTF-8; if the
     * binding includes a trailing NUL byte it will appear in the returned
     * `&str` — confirm against the generated bindings.
     */
    pub fn text() -> &'static str {
        unsafe { from_utf8_unchecked(ffi::oboe_Version_Text.as_ref()) }
    }
}