Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

292
vendor/cpal/src/error.rs vendored Normal file
View File

@@ -0,0 +1,292 @@
use std::error::Error;
use std::fmt::{Display, Formatter};
/// The requested host, although supported on this platform, is unavailable.
#[derive(Copy, Clone, Debug)]
pub struct HostUnavailable;

impl Display for HostUnavailable {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "the requested host is unavailable")
    }
}

impl Error for HostUnavailable {}
/// Some error has occurred that is specific to the backend from which it was produced.
///
/// This error acts as a catch-all in cases where:
///
/// - It is unclear exactly what error might be produced by the backend API.
/// - It does not make sense to add a variant to the enclosing error type.
/// - No error was expected to occur at all, but we return an error to avoid the possibility of a
///   `panic!` caused by some unforeseen or unknown reason.
///
/// **Note:** If you notice a `BackendSpecificError` that you believe could be better handled in a
/// cross-platform manner, please create an issue or submit a pull request with a patch that adds
/// the necessary error variant to the appropriate error enum.
#[derive(Clone, Debug)]
pub struct BackendSpecificError {
    /// Human-readable description supplied by the backend.
    pub description: String,
}

impl Display for BackendSpecificError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.write_str("A backend-specific error has occurred: ")?;
        f.write_str(&self.description)
    }
}

impl Error for BackendSpecificError {}
/// An error that might occur while attempting to enumerate the available devices on a system.
#[derive(Clone, Debug)]
pub enum DevicesError {
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for DevicesError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: destructure irrefutably and delegate to the
        // backend error's own `Display` output.
        let DevicesError::BackendSpecific { err } = self;
        Display::fmt(err, f)
    }
}

impl Error for DevicesError {}

impl From<BackendSpecificError> for DevicesError {
    fn from(err: BackendSpecificError) -> Self {
        DevicesError::BackendSpecific { err }
    }
}
/// An error that may occur while attempting to retrieve a device name.
#[derive(Clone, Debug)]
pub enum DeviceNameError {
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for DeviceNameError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            // Forward to the backend error's message verbatim.
            Self::BackendSpecific { err } => write!(f, "{}", err),
        }
    }
}

impl Error for DeviceNameError {}

impl From<BackendSpecificError> for DeviceNameError {
    fn from(err: BackendSpecificError) -> Self {
        DeviceNameError::BackendSpecific { err }
    }
}
/// Error that can happen when enumerating the list of supported formats.
#[derive(Debug)]
pub enum SupportedStreamConfigsError {
    /// The device no longer exists. This can happen if the device is disconnected while the
    /// program is running.
    DeviceNotAvailable,
    /// We called something the C-Layer did not understand
    InvalidArgument,
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for SupportedStreamConfigsError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Backend errors carry their own message; the other variants map to
        // fixed descriptions.
        let msg = match self {
            Self::BackendSpecific { err } => return err.fmt(f),
            Self::DeviceNotAvailable => "The requested device is no longer available. For example, it has been unplugged.",
            Self::InvalidArgument => "Invalid argument passed to the backend. For example, this happens when trying to read capture capabilities when the device does not support it.",
        };
        f.write_str(msg)
    }
}

impl Error for SupportedStreamConfigsError {}

impl From<BackendSpecificError> for SupportedStreamConfigsError {
    fn from(err: BackendSpecificError) -> Self {
        SupportedStreamConfigsError::BackendSpecific { err }
    }
}
/// May occur when attempting to request the default input or output stream format from a [`Device`](crate::Device).
#[derive(Debug)]
pub enum DefaultStreamConfigError {
    /// The device no longer exists. This can happen if the device is disconnected while the
    /// program is running.
    DeviceNotAvailable,
    /// Returned if e.g. the default input format was requested on an output-only audio device.
    StreamTypeNotSupported,
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for DefaultStreamConfigError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Consistency fix: use `Self::` for every arm, matching the style of
        // the other error enums in this module (previously mixed with the
        // fully-qualified enum path).
        match self {
            Self::BackendSpecific { err } => err.fmt(f),
            Self::DeviceNotAvailable => f.write_str(
                "The requested device is no longer available. For example, it has been unplugged.",
            ),
            Self::StreamTypeNotSupported => {
                f.write_str("The requested stream type is not supported by the device.")
            }
        }
    }
}

impl Error for DefaultStreamConfigError {}

impl From<BackendSpecificError> for DefaultStreamConfigError {
    fn from(err: BackendSpecificError) -> Self {
        Self::BackendSpecific { err }
    }
}
/// Error that can happen when creating a [`Stream`](crate::Stream).
#[derive(Debug)]
pub enum BuildStreamError {
    /// The device no longer exists. This can happen if the device is disconnected while the
    /// program is running.
    DeviceNotAvailable,
    /// The specified stream configuration is not supported.
    StreamConfigNotSupported,
    /// We called something the C-Layer did not understand
    ///
    /// On ALSA device functions called with a feature they do not support will yield this. E.g.
    /// Trying to use capture capabilities on an output only format yields this.
    InvalidArgument,
    /// Occurs if adding a new Stream ID would cause an integer overflow.
    StreamIdOverflow,
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for BuildStreamError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Consistency fix: use `Self::` for every arm, matching the other
        // error enums in this module (previously mixed with `BuildStreamError::`).
        match self {
            Self::BackendSpecific { err } => err.fmt(f),
            Self::DeviceNotAvailable => f.write_str(
                "The requested device is no longer available. For example, it has been unplugged.",
            ),
            Self::StreamConfigNotSupported => {
                f.write_str("The requested stream configuration is not supported by the device.")
            }
            Self::InvalidArgument => f.write_str(
                "The requested device does not support this capability (invalid argument)",
            ),
            Self::StreamIdOverflow => {
                f.write_str("Adding a new stream ID would cause an overflow")
            }
        }
    }
}

impl Error for BuildStreamError {}

impl From<BackendSpecificError> for BuildStreamError {
    fn from(err: BackendSpecificError) -> Self {
        Self::BackendSpecific { err }
    }
}
/// Errors that might occur when calling [`Stream::play()`](crate::traits::StreamTrait::play).
///
/// As of writing this, only macOS may immediately return an error while calling this method. This
/// is because both the alsa and wasapi backends only enqueue these commands and do not process
/// them immediately.
#[derive(Debug)]
pub enum PlayStreamError {
    /// The device associated with the stream is no longer available.
    DeviceNotAvailable,
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for PlayStreamError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Consistency fix: use `Self::` for every arm, matching the other
        // error enums in this module.
        match self {
            Self::BackendSpecific { err } => err.fmt(f),
            Self::DeviceNotAvailable => {
                f.write_str("the device associated with the stream is no longer available")
            }
        }
    }
}

impl Error for PlayStreamError {}

impl From<BackendSpecificError> for PlayStreamError {
    fn from(err: BackendSpecificError) -> Self {
        Self::BackendSpecific { err }
    }
}
/// Errors that might occur when calling [`Stream::pause()`](crate::traits::StreamTrait::pause).
///
/// As of writing this, only macOS may immediately return an error while calling this method. This
/// is because both the alsa and wasapi backends only enqueue these commands and do not process
/// them immediately.
#[derive(Debug)]
pub enum PauseStreamError {
    /// The device associated with the stream is no longer available.
    DeviceNotAvailable,
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for PauseStreamError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Consistency fix: use `Self::` for every arm, matching the other
        // error enums in this module.
        match self {
            Self::BackendSpecific { err } => err.fmt(f),
            Self::DeviceNotAvailable => {
                f.write_str("the device associated with the stream is no longer available")
            }
        }
    }
}

impl Error for PauseStreamError {}

impl From<BackendSpecificError> for PauseStreamError {
    fn from(err: BackendSpecificError) -> Self {
        Self::BackendSpecific { err }
    }
}
/// Errors that might occur while a stream is running.
#[derive(Debug)]
pub enum StreamError {
    /// The device no longer exists. This can happen if the device is disconnected while the
    /// program is running.
    DeviceNotAvailable,
    /// See the [`BackendSpecificError`] docs for more information about this error variant.
    BackendSpecific { err: BackendSpecificError },
}

impl Display for StreamError {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Consistency fix: use `Self::` for every arm, matching the other
        // error enums in this module.
        match self {
            Self::BackendSpecific { err } => err.fmt(f),
            Self::DeviceNotAvailable => f.write_str(
                "The requested device is no longer available. For example, it has been unplugged.",
            ),
        }
    }
}

impl Error for StreamError {}

impl From<BackendSpecificError> for StreamError {
    fn from(err: BackendSpecificError) -> Self {
        Self::BackendSpecific { err }
    }
}

70
vendor/cpal/src/host/alsa/enumerate.rs vendored Normal file
View File

@@ -0,0 +1,70 @@
use super::alsa;
use super::{Device, DeviceHandles};
use crate::{BackendSpecificError, DevicesError};
use std::sync::{Arc, Mutex};
/// ALSA's implementation for `Devices`.
pub struct Devices {
    /// Iterator over ALSA "pcm" device-name hints.
    hint_iter: alsa::device_name::HintIter,
}

impl Devices {
    /// Begins enumeration of all ALSA PCM device hints.
    pub fn new() -> Result<Self, DevicesError> {
        let hint_iter = alsa::device_name::HintIter::new_str(None, "pcm")?;
        Ok(Self { hint_iter })
    }
}
// SAFETY(review): asserts that the wrapped ALSA `HintIter` may be moved to and
// shared between threads. Presumably sound because advancing the iterator
// requires `&mut self` — TODO confirm against the alsa crate's guarantees.
unsafe impl Send for Devices {}
unsafe impl Sync for Devices {}
impl Iterator for Devices {
    type Item = Device;

    /// Yields the next openable PCM device, skipping unnamed hints, the
    /// `null` device, and devices whose handles fail to open.
    fn next(&mut self) -> Option<Device> {
        while let Some(hint) = self.hint_iter.next() {
            // Keep only named hints that are not the `null` device.
            let name = match hint.name {
                Some(n) if n != "null" => n,
                _ => continue,
            };
            if let Ok(handles) = DeviceHandles::open(&name) {
                return Some(Device {
                    name,
                    handles: Arc::new(Mutex::new(handles)),
                });
            }
        }
        None
    }
}
#[inline]
pub fn default_input_device() -> Option<Device> {
Some(Device {
name: "default".to_owned(),
handles: Arc::new(Mutex::new(Default::default())),
})
}
#[inline]
pub fn default_output_device() -> Option<Device> {
Some(Device {
name: "default".to_owned(),
handles: Arc::new(Mutex::new(Default::default())),
})
}
impl From<alsa::Error> for DevicesError {
fn from(err: alsa::Error) -> Self {
let err: BackendSpecificError = err.into();
err.into()
}
}

1163
vendor/cpal/src/host/alsa/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

232
vendor/cpal/src/host/asio/device.rs vendored Normal file
View File

@@ -0,0 +1,232 @@
pub type SupportedInputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
use super::sys;
use crate::BackendSpecificError;
use crate::DefaultStreamConfigError;
use crate::DeviceNameError;
use crate::DevicesError;
use crate::SampleFormat;
use crate::SampleRate;
use crate::SupportedBufferSize;
use crate::SupportedStreamConfig;
use crate::SupportedStreamConfigRange;
use crate::SupportedStreamConfigsError;
use std::hash::{Hash, Hasher};
use std::sync::atomic::AtomicI32;
use std::sync::{Arc, Mutex};
/// A ASIO Device
#[derive(Clone)]
pub struct Device {
    /// The driver represented by this device.
    pub driver: Arc<sys::Driver>,
    // Input and/or Output stream.
    // A driver can only have one of each.
    // They need to be created at the same time.
    pub asio_streams: Arc<Mutex<sys::AsioStreams>>,
    // Index of the ASIO buffer most recently silenced by the output callback.
    // Initialized to -1 (no buffer silenced yet); the output stream compares
    // this against the callback's `buffer_index` to decide whether to silence.
    pub current_buffer_index: Arc<AtomicI32>,
}
/// All available devices.
pub struct Devices {
    // Shared handle to the ASIO system, used to load each driver by name.
    asio: Arc<sys::Asio>,
    // Remaining installed driver names to visit during iteration.
    drivers: std::vec::IntoIter<String>,
}
// Two `Device`s are equal when they wrap the same named driver; `Hash` uses
// the same key, keeping it consistent with `Eq`.
impl PartialEq for Device {
    fn eq(&self, other: &Self) -> bool {
        self.driver.name() == other.driver.name()
    }
}

impl Eq for Device {}

impl Hash for Device {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.driver.name().hash(state);
    }
}
impl Device {
pub fn name(&self) -> Result<String, DeviceNameError> {
Ok(self.driver.name().to_string())
}
/// Gets the supported input configs.
/// TODO currently only supports the default.
/// Need to find all possible configs.
pub fn supported_input_configs(
&self,
) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
// Retrieve the default config for the total supported channels and supported sample
// format.
let f = match self.default_input_config() {
Err(_) => return Err(SupportedStreamConfigsError::DeviceNotAvailable),
Ok(f) => f,
};
// Collect a config for every combination of supported sample rate and number of channels.
let mut supported_configs = vec![];
for &rate in crate::COMMON_SAMPLE_RATES {
if !self
.driver
.can_sample_rate(rate.0.into())
.ok()
.unwrap_or(false)
{
continue;
}
for channels in 1..f.channels + 1 {
supported_configs.push(SupportedStreamConfigRange {
channels,
min_sample_rate: rate,
max_sample_rate: rate,
buffer_size: f.buffer_size,
sample_format: f.sample_format,
})
}
}
Ok(supported_configs.into_iter())
}
/// Gets the supported output configs.
/// TODO currently only supports the default.
/// Need to find all possible configs.
pub fn supported_output_configs(
&self,
) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
// Retrieve the default config for the total supported channels and supported sample
// format.
let f = match self.default_output_config() {
Err(_) => return Err(SupportedStreamConfigsError::DeviceNotAvailable),
Ok(f) => f,
};
// Collect a config for every combination of supported sample rate and number of channels.
let mut supported_configs = vec![];
for &rate in crate::COMMON_SAMPLE_RATES {
if !self
.driver
.can_sample_rate(rate.0.into())
.ok()
.unwrap_or(false)
{
continue;
}
for channels in 1..f.channels + 1 {
supported_configs.push(SupportedStreamConfigRange {
channels,
min_sample_rate: rate,
max_sample_rate: rate,
buffer_size: f.buffer_size,
sample_format: f.sample_format,
})
}
}
Ok(supported_configs.into_iter())
}
/// Returns the default input config
pub fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
let channels = self.driver.channels().map_err(default_config_err)?.ins as u16;
let sample_rate = SampleRate(self.driver.sample_rate().map_err(default_config_err)? as _);
let (min, max) = self.driver.buffersize_range().map_err(default_config_err)?;
let buffer_size = SupportedBufferSize::Range {
min: min as u32,
max: max as u32,
};
// Map th ASIO sample type to a CPAL sample type
let data_type = self.driver.input_data_type().map_err(default_config_err)?;
let sample_format = convert_data_type(&data_type)
.ok_or(DefaultStreamConfigError::StreamTypeNotSupported)?;
Ok(SupportedStreamConfig {
channels,
sample_rate,
buffer_size,
sample_format,
})
}
/// Returns the default output config
pub fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
let channels = self.driver.channels().map_err(default_config_err)?.outs as u16;
let sample_rate = SampleRate(self.driver.sample_rate().map_err(default_config_err)? as _);
let (min, max) = self.driver.buffersize_range().map_err(default_config_err)?;
let buffer_size = SupportedBufferSize::Range {
min: min as u32,
max: max as u32,
};
let data_type = self.driver.output_data_type().map_err(default_config_err)?;
let sample_format = convert_data_type(&data_type)
.ok_or(DefaultStreamConfigError::StreamTypeNotSupported)?;
Ok(SupportedStreamConfig {
channels,
sample_rate,
buffer_size,
sample_format,
})
}
}
impl Devices {
    /// Creates an iterator over every installed ASIO driver name.
    pub fn new(asio: Arc<sys::Asio>) -> Result<Self, DevicesError> {
        Ok(Devices {
            drivers: asio.driver_names().into_iter(),
            asio,
        })
    }
}
impl Iterator for Devices {
    type Item = Device;

    /// Loads each remaining driver by name, skipping any that fail to load,
    /// and wraps the first success as a `Device`.
    fn next(&mut self) -> Option<Device> {
        while let Some(name) = self.drivers.next() {
            if let Ok(driver) = self.asio.load_driver(&name) {
                return Some(Device {
                    driver: Arc::new(driver),
                    asio_streams: Arc::new(Mutex::new(sys::AsioStreams {
                        input: None,
                        output: None,
                    })),
                    current_buffer_index: Arc::new(AtomicI32::new(-1)),
                });
            }
        }
        None
    }
}
/// Maps an ASIO sample type to the corresponding CPAL sample format.
///
/// Returns `None` for sample types CPAL cannot represent.
pub(crate) fn convert_data_type(ty: &sys::AsioSampleType) -> Option<SampleFormat> {
    let fmt = match *ty {
        sys::AsioSampleType::ASIOSTInt16MSB => SampleFormat::I16,
        sys::AsioSampleType::ASIOSTInt16LSB => SampleFormat::I16,
        sys::AsioSampleType::ASIOSTFloat32MSB => SampleFormat::F32,
        sys::AsioSampleType::ASIOSTFloat32LSB => SampleFormat::F32,
        sys::AsioSampleType::ASIOSTInt32MSB => SampleFormat::I32,
        sys::AsioSampleType::ASIOSTInt32LSB => SampleFormat::I32,
        // The stream implementation already handles 64-bit float ASIO buffers
        // (`ASIOSTFloat64{LSB,MSB}` arms in stream.rs); map them here so
        // drivers whose native format is f64 are not rejected as unsupported.
        sys::AsioSampleType::ASIOSTFloat64MSB => SampleFormat::F64,
        sys::AsioSampleType::ASIOSTFloat64LSB => SampleFormat::F64,
        _ => return None,
    };
    Some(fmt)
}
/// Translates a low-level ASIO error into a `DefaultStreamConfigError`.
fn default_config_err(e: sys::AsioError) -> DefaultStreamConfigError {
    match e {
        // The driver/hardware is effectively gone.
        sys::AsioError::NoDrivers | sys::AsioError::HardwareMalfunction => {
            DefaultStreamConfigError::DeviceNotAvailable
        }
        // No usable sample rate means the requested stream type can't be served.
        sys::AsioError::NoRate => DefaultStreamConfigError::StreamTypeNotSupported,
        // Everything else is passed through as a backend-specific error.
        err => BackendSpecificError {
            description: err.to_string(),
        }
        .into(),
    }
}

138
vendor/cpal/src/host/asio/mod.rs vendored Normal file
View File

@@ -0,0 +1,138 @@
extern crate asio_sys as sys;
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat,
StreamConfig, StreamError, SupportedStreamConfig, SupportedStreamConfigsError,
};
pub use self::device::{Device, Devices, SupportedInputConfigs, SupportedOutputConfigs};
pub use self::stream::Stream;
use std::sync::Arc;
use std::time::Duration;
mod device;
mod stream;
/// The host for ASIO.
#[derive(Debug)]
pub struct Host {
    // Shared handle to the ASIO system; cloned into `Devices` for enumeration.
    asio: Arc<sys::Asio>,
}
impl Host {
    /// Initialises the ASIO system and returns the host.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host {
            asio: Arc::new(sys::Asio::new()),
        })
    }
}
impl HostTrait for Host {
type Devices = Devices;
type Device = Device;
fn is_available() -> bool {
true
//unimplemented!("check how to do this using asio-sys")
}
fn devices(&self) -> Result<Self::Devices, DevicesError> {
Devices::new(self.asio.clone())
}
fn default_input_device(&self) -> Option<Self::Device> {
// ASIO has no concept of a default device, so just use the first.
self.input_devices().ok().and_then(|mut ds| ds.next())
}
fn default_output_device(&self) -> Option<Self::Device> {
// ASIO has no concept of a default device, so just use the first.
self.output_devices().ok().and_then(|mut ds| ds.next())
}
}
// Pure delegation: every trait method forwards to the inherent `Device`
// implementation of the same name. The fully-qualified `Device::method(self)`
// form makes the delegation target explicit.
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;

    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }

    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }

    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }

    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }

    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }

    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        Device::build_input_stream_raw(
            self,
            config,
            sample_format,
            data_callback,
            error_callback,
            timeout,
        )
    }

    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        Device::build_output_stream_raw(
            self,
            config,
            sample_format,
            data_callback,
            error_callback,
            timeout,
        )
    }
}
// Delegates the trait methods to the inherent `Stream::play` / `Stream::pause`.
impl StreamTrait for Stream {
    fn play(&self) -> Result<(), PlayStreamError> {
        Stream::play(self)
    }

    fn pause(&self) -> Result<(), PauseStreamError> {
        Stream::pause(self)
    }
}

718
vendor/cpal/src/host/asio/stream.rs vendored Normal file
View File

@@ -0,0 +1,718 @@
extern crate asio_sys as sys;
extern crate num_traits;
use self::num_traits::PrimInt;
use super::Device;
use crate::{
BackendSpecificError, BufferSize, BuildStreamError, Data, InputCallbackInfo,
OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat, StreamConfig, StreamError,
};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::time::Duration;
/// An ASIO stream: a registered driver callback gated by a `playing` flag.
pub struct Stream {
    // Set by `play`/`pause`; the registered callback returns early while false.
    playing: Arc<AtomicBool>,
    // Ensure the `Driver` does not terminate until the last stream is dropped.
    driver: Arc<sys::Driver>,
    #[allow(dead_code)]
    asio_streams: Arc<Mutex<sys::AsioStreams>>,
    // Identifier returned by `Driver::add_callback` for this stream's callback.
    // Presumably used to deregister the callback on drop — TODO confirm.
    callback_id: sys::CallbackId,
}
impl Stream {
    /// Marks the stream as playing; the registered ASIO callback begins
    /// processing audio on its next invocation.
    pub fn play(&self) -> Result<(), PlayStreamError> {
        self.playing.store(true, Ordering::SeqCst);
        Ok(())
    }

    /// Marks the stream as paused; the registered ASIO callback returns early
    /// while this flag is unset. Never fails on this backend.
    pub fn pause(&self) -> Result<(), PauseStreamError> {
        self.playing.store(false, Ordering::SeqCst);
        Ok(())
    }
}
impl Device {
    /// Registers an ASIO input callback that copies captured, de-interleaved
    /// driver buffers into an interleaved CPAL buffer and hands it to
    /// `data_callback`. The returned [`Stream`] starts in the paused state.
    ///
    /// NOTE(review): `_error_callback` and `_timeout` are accepted for
    /// interface compatibility but never used — runtime errors are not
    /// reported and no timeout is applied.
    pub fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        mut data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        let stream_type = self.driver.input_data_type().map_err(build_stream_err)?;
        // Ensure that the desired sample type is supported.
        let expected_sample_format = super::device::convert_data_type(&stream_type)
            .ok_or(BuildStreamError::StreamConfigNotSupported)?;
        if sample_format != expected_sample_format {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        let num_channels = config.channels;
        let buffer_size = self.get_or_create_input_stream(config, sample_format)?;
        let cpal_num_samples = buffer_size * num_channels as usize;
        // Create the buffer depending on the size of the data type.
        let len_bytes = cpal_num_samples * sample_format.sample_size();
        let mut interleaved = vec![0u8; len_bytes];
        let stream_playing = Arc::new(AtomicBool::new(false));
        let playing = Arc::clone(&stream_playing);
        let asio_streams = self.asio_streams.clone();
        // Set the input callback.
        // This is most performance critical part of the ASIO bindings.
        let config = config.clone();
        let callback_id = self.driver.add_callback(move |callback_info| unsafe {
            // If not playing return early.
            if !playing.load(Ordering::SeqCst) {
                return;
            }
            // There is 0% chance of lock contention the host only locks when recreating streams.
            let stream_lock = asio_streams.lock().unwrap();
            let asio_stream = match stream_lock.input {
                Some(ref asio_stream) => asio_stream,
                None => return,
            };
            /// 1. Write from the ASIO buffer to the interleaved CPAL buffer.
            /// 2. Deliver the CPAL buffer to the user callback.
            unsafe fn process_input_callback<A, D, F>(
                data_callback: &mut D,
                interleaved: &mut [u8],
                asio_stream: &sys::AsioStream,
                asio_info: &sys::CallbackInfo,
                sample_rate: crate::SampleRate,
                format: SampleFormat,
                from_endianness: F,
            ) where
                A: Copy,
                D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
                F: Fn(A) -> A,
            {
                // 1. Write the ASIO channels to the CPAL buffer.
                let interleaved: &mut [A] = cast_slice_mut(interleaved);
                let n_frames = asio_stream.buffer_size as usize;
                let n_channels = interleaved.len() / n_frames;
                let buffer_index = asio_info.buffer_index as usize;
                for ch_ix in 0..n_channels {
                    let asio_channel = asio_channel_slice::<A>(asio_stream, buffer_index, ch_ix);
                    for (frame, s_asio) in interleaved.chunks_mut(n_channels).zip(asio_channel) {
                        frame[ch_ix] = from_endianness(*s_asio);
                    }
                }
                // 2. Deliver the interleaved buffer to the callback.
                let data = interleaved.as_mut_ptr() as *mut ();
                let len = interleaved.len();
                let data = Data::from_parts(data, len, format);
                let callback = system_time_to_stream_instant(asio_info.system_time);
                let delay = frames_to_duration(n_frames, sample_rate);
                let capture = callback
                    .sub(delay)
                    .expect("`capture` occurs before origin of alsa `StreamInstant`");
                let timestamp = crate::InputStreamTimestamp { callback, capture };
                let info = InputCallbackInfo { timestamp };
                data_callback(&data, &info);
            }
            // Dispatch on the (driver sample type, CPAL format) pair. Note that
            // the F32/F64 arms reinterpret samples as u32/u64 bit patterns so
            // that `from_le`/`from_be` can byte-swap without interpreting them.
            match (&stream_type, sample_format) {
                (&sys::AsioSampleType::ASIOSTInt16LSB, SampleFormat::I16) => {
                    process_input_callback::<i16, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::I16,
                        from_le,
                    );
                }
                (&sys::AsioSampleType::ASIOSTInt16MSB, SampleFormat::I16) => {
                    process_input_callback::<i16, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::I16,
                        from_be,
                    );
                }
                (&sys::AsioSampleType::ASIOSTFloat32LSB, SampleFormat::F32) => {
                    process_input_callback::<u32, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::F32,
                        from_le,
                    );
                }
                (&sys::AsioSampleType::ASIOSTFloat32MSB, SampleFormat::F32) => {
                    process_input_callback::<u32, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::F32,
                        from_be,
                    );
                }
                (&sys::AsioSampleType::ASIOSTInt32LSB, SampleFormat::I32) => {
                    process_input_callback::<i32, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::I32,
                        from_le,
                    );
                }
                (&sys::AsioSampleType::ASIOSTInt32MSB, SampleFormat::I32) => {
                    process_input_callback::<i32, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::I32,
                        from_be,
                    );
                }
                (&sys::AsioSampleType::ASIOSTFloat64LSB, SampleFormat::F64) => {
                    process_input_callback::<u64, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::F64,
                        from_le,
                    );
                }
                (&sys::AsioSampleType::ASIOSTFloat64MSB, SampleFormat::F64) => {
                    process_input_callback::<u64, _, _>(
                        &mut data_callback,
                        &mut interleaved,
                        asio_stream,
                        callback_info,
                        config.sample_rate,
                        SampleFormat::F64,
                        from_be,
                    );
                }
                // The format pair was validated against `expected_sample_format`
                // above, so any other combination is a bug.
                unsupported_format_pair => unreachable!(
                    "`build_input_stream_raw` should have returned with unsupported \
                     format {:?}",
                    unsupported_format_pair
                ),
            }
        });
        let driver = self.driver.clone();
        let asio_streams = self.asio_streams.clone();
        // Immediately start the device?
        self.driver.start().map_err(build_stream_err)?;
        Ok(Stream {
            playing: stream_playing,
            driver,
            asio_streams,
            callback_id,
        })
    }
pub fn build_output_stream_raw<D, E>(
&self,
config: &StreamConfig,
sample_format: SampleFormat,
mut data_callback: D,
_error_callback: E,
_timeout: Option<Duration>,
) -> Result<Stream, BuildStreamError>
where
D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
E: FnMut(StreamError) + Send + 'static,
{
let stream_type = self.driver.output_data_type().map_err(build_stream_err)?;
// Ensure that the desired sample type is supported.
let expected_sample_format = super::device::convert_data_type(&stream_type)
.ok_or(BuildStreamError::StreamConfigNotSupported)?;
if sample_format != expected_sample_format {
return Err(BuildStreamError::StreamConfigNotSupported);
}
let num_channels = config.channels;
let buffer_size = self.get_or_create_output_stream(config, sample_format)?;
let cpal_num_samples = buffer_size * num_channels as usize;
// Create buffers depending on data type.
let len_bytes = cpal_num_samples * sample_format.sample_size();
let mut interleaved = vec![0u8; len_bytes];
let current_buffer_index = self.current_buffer_index.clone();
let stream_playing = Arc::new(AtomicBool::new(false));
let playing = Arc::clone(&stream_playing);
let asio_streams = self.asio_streams.clone();
let config = config.clone();
let callback_id = self.driver.add_callback(move |callback_info| unsafe {
// If not playing, return early.
if !playing.load(Ordering::SeqCst) {
return;
}
// There is 0% chance of lock contention the host only locks when recreating streams.
let mut stream_lock = asio_streams.lock().unwrap();
let asio_stream = match stream_lock.output {
Some(ref mut asio_stream) => asio_stream,
None => return,
};
// Silence the ASIO buffer that is about to be used.
//
// This checks if any other callbacks have already silenced the buffer associated with
// the current `buffer_index`.
let silence =
current_buffer_index.load(Ordering::Acquire) != callback_info.buffer_index;
if silence {
current_buffer_index.store(callback_info.buffer_index, Ordering::Release);
}
            /// Render one output callback for concrete sample type `A` and hand the
            /// result to the ASIO device buffers.
            ///
            /// 1. Render the given callback to the given buffer of interleaved samples.
            /// 2. If required, silence the ASIO buffer.
            /// 3. Finally, write the interleaved data to the non-interleaved ASIO buffer,
            ///    performing endianness conversions as necessary.
            ///
            /// `mix_samples(old, new)` combines the sample already present in the ASIO
            /// buffer with the freshly rendered one; it is responsible for any
            /// endianness conversion of the existing sample.
            ///
            /// Safety: `interleaved` must be properly aligned and sized for `A`, and the
            /// buffers in `asio_stream` must be valid for `buffer_size` elements of `A`
            /// per channel.
            unsafe fn process_output_callback<A, D, F>(
                data_callback: &mut D,
                interleaved: &mut [u8],
                silence_asio_buffer: bool,
                asio_stream: &mut sys::AsioStream,
                asio_info: &sys::CallbackInfo,
                sample_rate: crate::SampleRate,
                format: SampleFormat,
                mix_samples: F,
            ) where
                A: Copy,
                D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
                F: Fn(A, A) -> A,
            {
                // 1. Render interleaved buffer from callback.
                let interleaved: &mut [A] = cast_slice_mut(interleaved);
                let data = interleaved.as_mut_ptr() as *mut ();
                let len = interleaved.len();
                let mut data = Data::from_parts(data, len, format);
                let callback = system_time_to_stream_instant(asio_info.system_time);
                let n_frames = asio_stream.buffer_size as usize;
                // Estimated playback time: callback time plus the buffer's duration.
                let delay = frames_to_duration(n_frames, sample_rate);
                let playback = callback
                    .add(delay)
                    .expect("`playback` occurs beyond representation supported by `StreamInstant`");
                let timestamp = crate::OutputStreamTimestamp { callback, playback };
                let info = OutputCallbackInfo { timestamp };
                data_callback(&mut data, &info);
                // 2. Silence ASIO channels if necessary.
                let n_channels = interleaved.len() / n_frames;
                let buffer_index = asio_info.buffer_index as usize;
                if silence_asio_buffer {
                    for ch_ix in 0..n_channels {
                        let asio_channel =
                            asio_channel_slice_mut::<A>(asio_stream, buffer_index, ch_ix);
                        // Zero the raw bytes of the channel buffer.
                        asio_channel.align_to_mut::<u8>().1.fill(0);
                    }
                }
                // 3. Write interleaved samples to ASIO channels, one channel at a time.
                for ch_ix in 0..n_channels {
                    let asio_channel =
                        asio_channel_slice_mut::<A>(asio_stream, buffer_index, ch_ix);
                    for (frame, s_asio) in interleaved.chunks(n_channels).zip(asio_channel) {
                        *s_asio = mix_samples(*s_asio, frame[ch_ix]);
                    }
                }
            }
match (sample_format, &stream_type) {
(SampleFormat::I16, &sys::AsioSampleType::ASIOSTInt16LSB) => {
process_output_callback::<i16, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::I16,
|old_sample, new_sample| {
from_le(old_sample).saturating_add(new_sample).to_le()
},
);
}
(SampleFormat::I16, &sys::AsioSampleType::ASIOSTInt16MSB) => {
process_output_callback::<i16, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::I16,
|old_sample, new_sample| {
from_be(old_sample).saturating_add(new_sample).to_be()
},
);
}
(SampleFormat::F32, &sys::AsioSampleType::ASIOSTFloat32LSB) => {
process_output_callback::<u32, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::F32,
|old_sample, new_sample| {
(f32::from_bits(from_le(old_sample)) + f32::from_bits(new_sample))
.to_bits()
.to_le()
},
);
}
(SampleFormat::F32, &sys::AsioSampleType::ASIOSTFloat32MSB) => {
process_output_callback::<u32, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::F32,
|old_sample, new_sample| {
(f32::from_bits(from_be(old_sample)) + f32::from_bits(new_sample))
.to_bits()
.to_be()
},
);
}
(SampleFormat::I32, &sys::AsioSampleType::ASIOSTInt32LSB) => {
process_output_callback::<i32, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::I32,
|old_sample, new_sample| {
from_le(old_sample).saturating_add(new_sample).to_le()
},
);
}
(SampleFormat::I32, &sys::AsioSampleType::ASIOSTInt32MSB) => {
process_output_callback::<i32, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::I32,
|old_sample, new_sample| {
from_be(old_sample).saturating_add(new_sample).to_be()
},
);
}
(SampleFormat::F64, &sys::AsioSampleType::ASIOSTFloat64LSB) => {
process_output_callback::<u64, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::F64,
|old_sample, new_sample| {
(f64::from_bits(from_le(old_sample)) + f64::from_bits(new_sample))
.to_bits()
.to_le()
},
);
}
(SampleFormat::F64, &sys::AsioSampleType::ASIOSTFloat64MSB) => {
process_output_callback::<u64, _, _>(
&mut data_callback,
&mut interleaved,
silence,
asio_stream,
callback_info,
config.sample_rate,
SampleFormat::F64,
|old_sample, new_sample| {
(f64::from_bits(from_be(old_sample)) + f64::from_bits(new_sample))
.to_bits()
.to_be()
},
);
}
unsupported_format_pair => unreachable!(
"`build_output_stream_raw` should have returned with unsupported \
format {:?}",
unsupported_format_pair
),
}
});
let driver = self.driver.clone();
let asio_streams = self.asio_streams.clone();
// Immediately start the device?
self.driver.start().map_err(build_stream_err)?;
Ok(Stream {
playing: stream_playing,
driver,
asio_streams,
callback_id,
})
}
    /// Create a new CPAL Input Stream.
    ///
    /// If there is no existing ASIO Input Stream it will be created.
    ///
    /// On success, the buffer size of the stream is returned.
    fn get_or_create_input_stream(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
    ) -> Result<usize, BuildStreamError> {
        // Validate the requested config against the device's capabilities
        // before touching the driver's stream state.
        match self.default_input_config() {
            Ok(f) => {
                let num_asio_channels = f.channels;
                check_config(&self.driver, config, sample_format, num_asio_channels)
            }
            Err(_) => Err(BuildStreamError::StreamConfigNotSupported),
        }?;
        let num_channels = config.channels as usize;
        let mut streams = self.asio_streams.lock().unwrap();
        let buffer_size = match config.buffer_size {
            BufferSize::Fixed(v) => Some(v as i32),
            BufferSize::Default => None,
        };
        // Either create a stream if there's none, or hand back the
        // size of the current one.
        match streams.input {
            Some(ref input) => Ok(input.buffer_size as usize),
            None => {
                // Preserve any existing output stream: the driver re-prepares
                // both sides together.
                let output = streams.output.take();
                self.driver
                    .prepare_input_stream(output, num_channels, buffer_size)
                    .map(|new_streams| {
                        let bs = match new_streams.input {
                            Some(ref inp) => inp.buffer_size as usize,
                            None => unreachable!(),
                        };
                        *streams = new_streams;
                        bs
                    })
                    .map_err(|ref e| {
                        // NOTE(review): error goes to stdout; `eprintln!` may be
                        // more appropriate — confirm upstream intent.
                        println!("Error preparing stream: {}", e);
                        BuildStreamError::DeviceNotAvailable
                    })
            }
        }
    }
    /// Create a new CPAL Output Stream.
    ///
    /// If there is no existing ASIO Output Stream it will be created.
    ///
    /// On success, the buffer size of the stream is returned.
    fn get_or_create_output_stream(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
    ) -> Result<usize, BuildStreamError> {
        // Validate the requested config against the device's capabilities
        // before touching the driver's stream state.
        match self.default_output_config() {
            Ok(f) => {
                let num_asio_channels = f.channels;
                check_config(&self.driver, config, sample_format, num_asio_channels)
            }
            Err(_) => Err(BuildStreamError::StreamConfigNotSupported),
        }?;
        let num_channels = config.channels as usize;
        let mut streams = self.asio_streams.lock().unwrap();
        let buffer_size = match config.buffer_size {
            BufferSize::Fixed(v) => Some(v as i32),
            BufferSize::Default => None,
        };
        // Either create a stream if there's none, or hand back the
        // size of the current one.
        match streams.output {
            Some(ref output) => Ok(output.buffer_size as usize),
            None => {
                // Preserve any existing input stream: the driver re-prepares
                // both sides together.
                let input = streams.input.take();
                self.driver
                    .prepare_output_stream(input, num_channels, buffer_size)
                    .map(|new_streams| {
                        let bs = match new_streams.output {
                            Some(ref out) => out.buffer_size as usize,
                            None => unreachable!(),
                        };
                        *streams = new_streams;
                        bs
                    })
                    .map_err(|ref e| {
                        // NOTE(review): error goes to stdout; `eprintln!` may be
                        // more appropriate — confirm upstream intent.
                        println!("Error preparing stream: {}", e);
                        BuildStreamError::DeviceNotAvailable
                    })
            }
        }
    }
}
impl Drop for Stream {
    fn drop(&mut self) {
        // Deregister this stream's callback from the shared ASIO driver so it
        // is no longer invoked after the stream is gone.
        self.driver.remove_callback(self.callback_id);
    }
}
/// Recombine a 64-bit ASIO timestamp (split into `hi`/`lo` 32-bit halves)
/// into a single `f64`: `hi * 2^32 + lo`.
fn asio_ns_to_double(val: sys::bindings::asio_import::ASIOTimeStamp) -> f64 {
    const TWO_POW_32: f64 = 4_294_967_296.0;
    val.hi as f64 * TWO_POW_32 + val.lo as f64
}
/// Convert an ASIO system timestamp to a `StreamInstant`.
///
/// The value is treated as nanoseconds and split into whole seconds plus
/// the sub-second nanosecond remainder.
/// NOTE(review): the upstream comment claimed `timeGetTime` returns
/// milliseconds, yet the value is consumed as nanoseconds here — confirm.
fn system_time_to_stream_instant(
    system_time: sys::bindings::asio_import::ASIOTimeStamp,
) -> crate::StreamInstant {
    const NANOS_PER_SEC: i64 = 1_000_000_000;
    let total_ns = asio_ns_to_double(system_time) as i64;
    let whole_secs = total_ns / NANOS_PER_SEC;
    // `%` truncates toward zero exactly like the subtraction it replaces.
    let subsec_ns = (total_ns % NANOS_PER_SEC) as u32;
    crate::StreamInstant::new(whole_secs, subsec_ns)
}
/// Convert the given duration in frames at the given sample rate to a `std::time::Duration`.
fn frames_to_duration(frames: usize, rate: crate::SampleRate) -> std::time::Duration {
    // Fractional seconds spanned by `frames` at `rate` frames per second.
    let seconds = frames as f64 / rate.0 as f64;
    let whole_secs = seconds as u64;
    let subsec_nanos = ((seconds - whole_secs as f64) * 1_000_000_000.0) as u32;
    std::time::Duration::new(whole_secs, subsec_nanos)
}
/// Check whether or not the desired config is supported by the stream.
///
/// Checks sample rate, data type and then finally the number of channels.
fn check_config(
    driver: &sys::Driver,
    config: &StreamConfig,
    sample_format: SampleFormat,
    num_asio_channels: u16,
) -> Result<(), BuildStreamError> {
    let StreamConfig {
        channels,
        sample_rate,
        buffer_size: _,
    } = config;
    // Try and set the sample rate to what the user selected.
    // Only change the driver's rate when it differs from the current one.
    let sample_rate = sample_rate.0.into();
    if sample_rate != driver.sample_rate().map_err(build_stream_err)? {
        if driver
            .can_sample_rate(sample_rate)
            .map_err(build_stream_err)?
        {
            driver
                .set_sample_rate(sample_rate)
                .map_err(build_stream_err)?;
        } else {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
    }
    // unsigned formats are not supported by asio
    // NOTE(review): F64 is rejected here even though the output callback path
    // above handles `ASIOSTFloat64LSB`/`MSB` — confirm whether F64 should be
    // accepted.
    match sample_format {
        SampleFormat::I16 | SampleFormat::I32 | SampleFormat::F32 => (),
        _ => return Err(BuildStreamError::StreamConfigNotSupported),
    }
    // The device must offer at least as many channels as requested.
    if *channels > num_asio_channels {
        return Err(BuildStreamError::StreamConfigNotSupported);
    }
    Ok(())
}
/// Cast a byte slice into a mutable slice of desired type.
///
/// Safety: it's up to the caller to ensure that the input slice has valid bit representations.
unsafe fn cast_slice_mut<T>(v: &mut [u8]) -> &mut [T] {
debug_assert!(v.len() % std::mem::size_of::<T>() == 0);
std::slice::from_raw_parts_mut(v.as_mut_ptr() as *mut T, v.len() / std::mem::size_of::<T>())
}
/// Helper function to convert from little endianness.
///
/// Wraps `PrimInt::from_le`: converts a value stored as little-endian to the
/// target's native endianness (a no-op on little-endian targets).
fn from_le<T: PrimInt>(t: T) -> T {
    T::from_le(t)
}
/// Helper function to convert from big endianness.
///
/// Wraps `PrimInt::from_be`: converts a value stored as big-endian to the
/// target's native endianness. (The previous comment incorrectly said
/// "little".)
fn from_be<T: PrimInt>(t: T) -> T {
    T::from_be(t)
}
/// Shorthand for retrieving the asio buffer slice associated with a channel.
///
/// Safety: `channel_index` must be in bounds for `buffer_infos`,
/// `buffer_index` must be a valid double-buffer index, and the underlying
/// ASIO pointer must be valid for `buffer_size` elements of `T`.
unsafe fn asio_channel_slice<T>(
    asio_stream: &sys::AsioStream,
    buffer_index: usize,
    channel_index: usize,
) -> &[T] {
    // `buffer_index` is already `usize`; the former `as usize` cast was redundant.
    let buff_ptr: *const T =
        asio_stream.buffer_infos[channel_index].buffers[buffer_index] as *const _;
    std::slice::from_raw_parts(buff_ptr, asio_stream.buffer_size as usize)
}
/// Shorthand for retrieving the asio buffer slice associated with a channel.
///
/// Safety: same requirements as [`asio_channel_slice`], plus exclusive access
/// to the underlying buffer for the returned lifetime.
unsafe fn asio_channel_slice_mut<T>(
    asio_stream: &mut sys::AsioStream,
    buffer_index: usize,
    channel_index: usize,
) -> &mut [T] {
    // `buffer_index` is already `usize`; the former `as usize` cast was redundant.
    let buff_ptr: *mut T =
        asio_stream.buffer_infos[channel_index].buffers[buffer_index] as *mut _;
    std::slice::from_raw_parts_mut(buff_ptr, asio_stream.buffer_size as usize)
}
/// Map a raw ASIO error onto the `BuildStreamError` variant callers expect.
fn build_stream_err(e: sys::AsioError) -> BuildStreamError {
    match e {
        // The device itself is gone or broken.
        sys::AsioError::NoDrivers | sys::AsioError::HardwareMalfunction => {
            BuildStreamError::DeviceNotAvailable
        }
        // The caller handed us something the driver cannot accept.
        sys::AsioError::InvalidInput | sys::AsioError::BadMode => BuildStreamError::InvalidArgument,
        // Anything else is surfaced as a backend-specific error.
        err => BackendSpecificError {
            description: format!("{}", err),
        }
        .into(),
    }
}

View File

@@ -0,0 +1,43 @@
use std::vec::IntoIter as VecIntoIter;
use crate::DevicesError;
use crate::SupportedStreamConfigRange;
use super::Device;
pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
// TODO: Support enumerating earpiece vs headset vs speaker etc?
/// Iterator over the devices available on iOS (currently only the single
/// default device).
pub struct Devices(VecIntoIter<Device>);
impl Devices {
    /// Enumerate the available devices; infallible on iOS.
    pub fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}
impl Default for Devices {
    fn default() -> Devices {
        // Only the single default device is exposed.
        Devices(vec![Device].into_iter())
    }
}
impl Iterator for Devices {
    type Item = Device;
    /// Yield the remaining enumerated devices.
    #[inline]
    fn next(&mut self) -> Option<Device> {
        self.0.next()
    }
}
/// The default input device; always present on iOS.
#[inline]
pub fn default_input_device() -> Option<Device> {
    Some(Device)
}
/// The default output device; always present on iOS.
#[inline]
pub fn default_output_device() -> Option<Device> {
    Some(Device)
}

View File

@@ -0,0 +1,434 @@
//!
//! coreaudio on iOS looks a bit different from macOS. A lot of configuration needs to use
//! the AVAudioSession objc API which doesn't exist on macOS.
//!
//! TODO:
//! - Use AVAudioSession to enumerate buffer size / sample rate / number of channels and set
//! buffer size.
//!
extern crate core_foundation_sys;
extern crate coreaudio;
use std::cell::RefCell;
use self::coreaudio::audio_unit::render_callback::data;
use self::coreaudio::audio_unit::{render_callback, AudioUnit, Element, Scope};
use self::coreaudio::sys::{
kAudioOutputUnitProperty_EnableIO, kAudioUnitProperty_StreamFormat, AudioBuffer,
AudioStreamBasicDescription,
};
use super::{asbd_from_config, frames_to_duration, host_time_to_stream_instant};
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError,
DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, SupportedBufferSize,
SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError,
};
use self::enumerate::{
default_input_device, default_output_device, Devices, SupportedInputConfigs,
SupportedOutputConfigs,
};
use std::slice;
use std::time::Duration;
pub mod enumerate;
// These days the default of iOS is now F32 and no longer I16
const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
/// The single default audio device exposed on iOS.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;
/// The iOS coreaudio host.
pub struct Host;
impl Host {
    /// Construct the iOS host; creation cannot fail.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;
    fn is_available() -> bool {
        // The iOS coreaudio host is assumed to always be available.
        true
    }
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }
    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }
    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
impl Device {
    /// iOS exposes no per-device names; always report "Default Device".
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok("Default Device".to_owned())
    }
    /// Supported input configs, derived from the default input ASBD.
    /// A single config at that ASBD's exact sample rate is reported.
    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size
        // probably need to actually be set to see if it works, but channels can be enumerated.
        let asbd: AudioStreamBasicDescription = default_input_asbd()?;
        let stream_config = stream_config_from_asbd(asbd);
        Ok(vec![SupportedStreamConfigRange {
            channels: stream_config.channels,
            min_sample_rate: stream_config.sample_rate,
            max_sample_rate: stream_config.sample_rate,
            buffer_size: stream_config.buffer_size.clone(),
            sample_format: SUPPORTED_SAMPLE_FORMAT,
        }]
        .into_iter())
    }
    /// Supported output configs: one entry per channel count from 1 up to the
    /// default ASBD's channel count, all at the default sample rate.
    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        // TODO: query AVAudioSession for parameters, some values like sample rate and buffer size
        // probably need to actually be set to see if it works, but channels can be enumerated.
        let asbd: AudioStreamBasicDescription = default_output_asbd()?;
        let stream_config = stream_config_from_asbd(asbd);
        let configs: Vec<_> = (1..=asbd.mChannelsPerFrame as u16)
            .map(|channels| SupportedStreamConfigRange {
                channels,
                min_sample_rate: stream_config.sample_rate,
                max_sample_rate: stream_config.sample_rate,
                buffer_size: stream_config.buffer_size.clone(),
                sample_format: SUPPORTED_SAMPLE_FORMAT,
            })
            .collect();
        Ok(configs.into_iter())
    }
    /// Default input config, read from the remote IO unit's input ASBD.
    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        let asbd: AudioStreamBasicDescription = default_input_asbd()?;
        let stream_config = stream_config_from_asbd(asbd);
        Ok(stream_config)
    }
    /// Default output config, read from the remote IO unit's output ASBD.
    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        let asbd: AudioStreamBasicDescription = default_output_asbd()?;
        let stream_config = stream_config_from_asbd(asbd);
        Ok(stream_config)
    }
}
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }
    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }
    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }
    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }
    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }
    /// Create an input (recording) stream on a remote IO audio unit.
    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        mut data_callback: D,
        mut error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // The scope and element for working with a device's input stream.
        let scope = Scope::Output;
        let element = Element::Input;
        let mut audio_unit = create_audio_unit()?;
        // Reconfigure the unit for recording: it must be uninitialized while
        // the EnableIO properties are changed.
        audio_unit.uninitialize()?;
        configure_for_recording(&mut audio_unit)?;
        audio_unit.initialize()?;
        // Set the stream in interleaved mode.
        let asbd = asbd_from_config(config, sample_format);
        audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?;
        // Set the buffersize
        // Fixed buffer sizes are not currently supported (see module-level TODO).
        match config.buffer_size {
            BufferSize::Fixed(_) => {
                return Err(BuildStreamError::StreamConfigNotSupported);
            }
            BufferSize::Default => (),
        }
        // Register the callback that is being called by coreaudio whenever it needs data to be
        // fed to the audio buffer.
        let bytes_per_channel = sample_format.sample_size();
        let sample_rate = config.sample_rate;
        type Args = render_callback::Args<data::Raw>;
        audio_unit.set_input_callback(move |args: Args| unsafe {
            let ptr = (*args.data.data).mBuffers.as_ptr() as *const AudioBuffer;
            let len = (*args.data.data).mNumberBuffers as usize;
            let buffers: &[AudioBuffer] = slice::from_raw_parts(ptr, len);
            // There is only 1 buffer when using interleaved channels
            let AudioBuffer {
                mNumberChannels: channels,
                mDataByteSize: data_byte_size,
                mData: data,
            } = buffers[0];
            let data = data as *mut ();
            let len = (data_byte_size as usize / bytes_per_channel) as usize;
            let data = Data::from_parts(data, len, sample_format);
            // TODO: Need a better way to get delay, for now we assume a double-buffer offset.
            let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) {
                Err(err) => {
                    // Surface timestamp conversion failures to the user and
                    // abort this callback.
                    error_callback(err.into());
                    return Err(());
                }
                Ok(cb) => cb,
            };
            let buffer_frames = len / channels as usize;
            let delay = frames_to_duration(buffer_frames, sample_rate);
            let capture = callback
                .sub(delay)
                .expect("`capture` occurs before origin of alsa `StreamInstant`");
            let timestamp = crate::InputStreamTimestamp { callback, capture };
            let info = InputCallbackInfo { timestamp };
            data_callback(&data, &info);
            Ok(())
        })?;
        // Streams are started immediately upon creation.
        audio_unit.start()?;
        Ok(Stream::new(StreamInner {
            playing: true,
            audio_unit,
        }))
    }
    /// Create an output stream.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        mut data_callback: D,
        mut error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // Fixed buffer sizes are not currently supported (see module-level TODO).
        match config.buffer_size {
            BufferSize::Fixed(_) => {
                return Err(BuildStreamError::StreamConfigNotSupported);
            }
            BufferSize::Default => (),
        };
        let mut audio_unit = create_audio_unit()?;
        // The scope and element for working with a device's output stream.
        let scope = Scope::Input;
        let element = Element::Output;
        // Set the stream in interleaved mode.
        let asbd = asbd_from_config(config, sample_format);
        audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?;
        // Register the callback that is being called by coreaudio whenever it needs data to be
        // fed to the audio buffer.
        let bytes_per_channel = sample_format.sample_size();
        let sample_rate = config.sample_rate;
        type Args = render_callback::Args<data::Raw>;
        audio_unit.set_render_callback(move |args: Args| unsafe {
            // If `run()` is currently running, then a callback will be available from this list.
            // Otherwise, we just fill the buffer with zeroes and return.
            let AudioBuffer {
                mNumberChannels: channels,
                mDataByteSize: data_byte_size,
                mData: data,
            } = (*args.data.data).mBuffers[0];
            let data = data as *mut ();
            let len = (data_byte_size as usize / bytes_per_channel) as usize;
            let mut data = Data::from_parts(data, len, sample_format);
            let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) {
                Err(err) => {
                    // Surface timestamp conversion failures to the user and
                    // abort this callback.
                    error_callback(err.into());
                    return Err(());
                }
                Ok(cb) => cb,
            };
            // TODO: Need a better way to get delay, for now we assume a double-buffer offset.
            let buffer_frames = len / channels as usize;
            let delay = frames_to_duration(buffer_frames, sample_rate);
            let playback = callback
                .add(delay)
                .expect("`playback` occurs beyond representation supported by `StreamInstant`");
            let timestamp = crate::OutputStreamTimestamp { callback, playback };
            let info = OutputCallbackInfo { timestamp };
            data_callback(&mut data, &info);
            Ok(())
        })?;
        // Streams are started immediately upon creation.
        audio_unit.start()?;
        Ok(Stream::new(StreamInner {
            playing: true,
            audio_unit,
        }))
    }
}
/// A CPAL stream backed by a remote IO audio unit.
pub struct Stream {
    // Interior mutability so `play`/`pause` can take `&self`.
    inner: RefCell<StreamInner>,
}
impl Stream {
    /// Wrap the inner state in a new `Stream` handle.
    fn new(inner: StreamInner) -> Self {
        Self {
            inner: RefCell::new(inner),
        }
    }
}
impl StreamTrait for Stream {
    /// Start the audio unit unless it is already running.
    fn play(&self) -> Result<(), PlayStreamError> {
        let mut inner = self.inner.borrow_mut();
        if inner.playing {
            return Ok(());
        }
        match inner.audio_unit.start() {
            Ok(_) => {
                inner.playing = true;
                Ok(())
            }
            Err(e) => {
                let err = BackendSpecificError {
                    description: e.to_string(),
                };
                Err(err.into())
            }
        }
    }
    /// Stop the audio unit unless it is already paused.
    fn pause(&self) -> Result<(), PauseStreamError> {
        let mut inner = self.inner.borrow_mut();
        if !inner.playing {
            return Ok(());
        }
        match inner.audio_unit.stop() {
            Ok(_) => {
                inner.playing = false;
                Ok(())
            }
            Err(e) => {
                let err = BackendSpecificError {
                    description: e.to_string(),
                };
                Err(err.into())
            }
        }
    }
}
/// Mutable stream state: the audio unit and whether it is currently running.
struct StreamInner {
    playing: bool,
    audio_unit: AudioUnit,
}
/// Create a remote IO audio unit, the device-facing unit type on iOS.
fn create_audio_unit() -> Result<AudioUnit, coreaudio::Error> {
    AudioUnit::new(coreaudio::audio_unit::IOType::RemoteIO)
}
/// Configure the remote IO unit for recording: enable IO on the input
/// element and disable IO on the output element.
fn configure_for_recording(audio_unit: &mut AudioUnit) -> Result<(), coreaudio::Error> {
    // Enable mic recording
    let enable_input = 1u32;
    audio_unit.set_property(
        kAudioOutputUnitProperty_EnableIO,
        Scope::Input,
        Element::Input,
        Some(&enable_input),
    )?;
    // Disable output
    let disable_output = 0u32;
    audio_unit.set_property(
        kAudioOutputUnitProperty_EnableIO,
        Scope::Output,
        Element::Output,
        Some(&disable_output),
    )?;
    Ok(())
}
/// Read the default output `AudioStreamBasicDescription` from a freshly
/// created remote IO unit.
fn default_output_asbd() -> Result<AudioStreamBasicDescription, coreaudio::Error> {
    let audio_unit = create_audio_unit()?;
    let id = kAudioUnitProperty_StreamFormat;
    let asbd: AudioStreamBasicDescription =
        audio_unit.get_property(id, Scope::Output, Element::Output)?;
    Ok(asbd)
}
/// Read the default input `AudioStreamBasicDescription` from a remote IO
/// unit that has first been reconfigured for recording.
fn default_input_asbd() -> Result<AudioStreamBasicDescription, coreaudio::Error> {
    let mut audio_unit = create_audio_unit()?;
    // The unit must be uninitialized while EnableIO properties change.
    audio_unit.uninitialize()?;
    configure_for_recording(&mut audio_unit)?;
    audio_unit.initialize()?;
    let id = kAudioUnitProperty_StreamFormat;
    let asbd: AudioStreamBasicDescription =
        audio_unit.get_property(id, Scope::Input, Element::Input)?;
    Ok(asbd)
}
/// Build a `SupportedStreamConfig` describing the given ASBD.
///
/// The buffer size is reported as the placeholder range `0..=0`, since the
/// real limits are not yet queried from AVAudioSession (see module TODO).
fn stream_config_from_asbd(asbd: AudioStreamBasicDescription) -> SupportedStreamConfig {
    SupportedStreamConfig {
        channels: asbd.mChannelsPerFrame as u16,
        sample_rate: SampleRate(asbd.mSampleRate as u32),
        // Construct the value in place; the previous local + `.clone()` of a
        // value that was immediately moved was redundant.
        buffer_size: SupportedBufferSize::Range { min: 0, max: 0 },
        sample_format: SUPPORTED_SAMPLE_FORMAT,
    }
}

View File

@@ -0,0 +1,152 @@
extern crate coreaudio;
use self::coreaudio::sys::{
kAudioHardwareNoError, kAudioHardwarePropertyDefaultInputDevice,
kAudioHardwarePropertyDefaultOutputDevice, kAudioHardwarePropertyDevices,
kAudioObjectPropertyElementMaster, kAudioObjectPropertyScopeGlobal, kAudioObjectSystemObject,
AudioDeviceID, AudioObjectGetPropertyData, AudioObjectGetPropertyDataSize,
AudioObjectPropertyAddress, OSStatus,
};
use super::Device;
use crate::{BackendSpecificError, DevicesError, SupportedStreamConfigRange};
use std::mem;
use std::ptr::null;
use std::vec::IntoIter as VecIntoIter;
/// Query CoreAudio for the full list of `AudioDeviceID`s, returning the raw
/// `OSStatus` on failure.
///
/// Safety: performs raw CoreAudio property queries and trusts CoreAudio to
/// fill the reserved buffer with `data_size` bytes of `AudioDeviceID`s.
unsafe fn audio_devices() -> Result<Vec<AudioDeviceID>, OSStatus> {
    let property_address = AudioObjectPropertyAddress {
        mSelector: kAudioHardwarePropertyDevices,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMaster,
    };
    // Bail out with the raw status code on any CoreAudio failure.
    macro_rules! try_status_or_return {
        ($status:expr) => {
            if $status != kAudioHardwareNoError as i32 {
                return Err($status);
            }
        };
    }
    // First ask how many bytes the device-ID array occupies.
    // NOTE(review): `data_size` is immutable but written through a
    // `*const -> *mut` cast — works in practice, but confirm soundness.
    let data_size = 0u32;
    let status = AudioObjectGetPropertyDataSize(
        kAudioObjectSystemObject,
        &property_address as *const _,
        0,
        null(),
        &data_size as *const _ as *mut _,
    );
    try_status_or_return!(status);
    let device_count = data_size / mem::size_of::<AudioDeviceID>() as u32;
    let mut audio_devices = vec![];
    audio_devices.reserve_exact(device_count as usize);
    // Fill the reserved (uninitialised) buffer, then fix up the Vec's length.
    let status = AudioObjectGetPropertyData(
        kAudioObjectSystemObject,
        &property_address as *const _,
        0,
        null(),
        &data_size as *const _ as *mut _,
        audio_devices.as_mut_ptr() as *mut _,
    );
    try_status_or_return!(status);
    audio_devices.set_len(device_count as usize);
    Ok(audio_devices)
}
/// Iterator over all CoreAudio device IDs, yielding one `Device` per ID.
pub struct Devices(VecIntoIter<AudioDeviceID>);
impl Devices {
pub fn new() -> Result<Self, DevicesError> {
let devices = unsafe {
match audio_devices() {
Ok(devices) => devices,
Err(os_status) => {
let description = format!("{}", os_status);
let err = BackendSpecificError { description };
return Err(err.into());
}
}
};
Ok(Devices(devices.into_iter()))
}
}
// NOTE(review): `Devices` only holds an iterator over `AudioDeviceID` values,
// so Send/Sync appear sound — confirm CoreAudio imposes no thread affinity on
// enumeration results.
unsafe impl Send for Devices {}
unsafe impl Sync for Devices {}
impl Iterator for Devices {
    type Item = Device;
    /// Wrap the next raw device ID, if any, in a `Device` handle.
    fn next(&mut self) -> Option<Device> {
        let audio_device_id = self.0.next()?;
        // Devices reached through enumeration are not marked as the default.
        Some(Device {
            audio_device_id,
            is_default: false,
        })
    }
}
/// Shared lookup for the system default device for the given hardware
/// property selector. Returns `None` if the CoreAudio query fails.
///
/// Extracted because the input/output variants were duplicated verbatim
/// except for the selector constant.
fn default_device(selector: u32) -> Option<Device> {
    let property_address = AudioObjectPropertyAddress {
        mSelector: selector,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMaster,
    };
    let audio_device_id: AudioDeviceID = 0;
    let data_size = mem::size_of::<AudioDeviceID>();
    // SAFETY: the out-pointers reference locals that live for the whole call.
    let status = unsafe {
        AudioObjectGetPropertyData(
            kAudioObjectSystemObject,
            &property_address as *const _,
            0,
            null(),
            &data_size as *const _ as *mut _,
            &audio_device_id as *const _ as *mut _,
        )
    };
    if status != kAudioHardwareNoError as i32 {
        return None;
    }
    Some(Device {
        audio_device_id,
        is_default: true,
    })
}
/// The device the system currently uses for input by default, if any.
pub fn default_input_device() -> Option<Device> {
    default_device(kAudioHardwarePropertyDefaultInputDevice)
}
/// The device the system currently uses for output by default, if any.
pub fn default_output_device() -> Option<Device> {
    default_device(kAudioHardwarePropertyDefaultOutputDevice)
}
// Supported config ranges are collected eagerly into Vecs, so plain Vec
// iterators suffice here.
pub type SupportedInputConfigs = VecIntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = VecIntoIter<SupportedStreamConfigRange>;

View File

@@ -0,0 +1,941 @@
extern crate core_foundation_sys;
extern crate coreaudio;
use super::{asbd_from_config, check_os_status, frames_to_duration, host_time_to_stream_instant};
use self::core_foundation_sys::string::{CFStringGetCString, CFStringGetCStringPtr, CFStringRef};
use self::coreaudio::audio_unit::render_callback::{self, data};
use self::coreaudio::audio_unit::{AudioUnit, Element, Scope};
use self::coreaudio::sys::{
kAudioDevicePropertyAvailableNominalSampleRates, kAudioDevicePropertyBufferFrameSize,
kAudioDevicePropertyBufferFrameSizeRange, kAudioDevicePropertyDeviceIsAlive,
kAudioDevicePropertyDeviceNameCFString, kAudioDevicePropertyNominalSampleRate,
kAudioDevicePropertyScopeOutput, kAudioDevicePropertyStreamConfiguration,
kAudioDevicePropertyStreamFormat, kAudioObjectPropertyElementMaster,
kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyScopeInput,
kAudioObjectPropertyScopeOutput, kAudioOutputUnitProperty_CurrentDevice,
kAudioOutputUnitProperty_EnableIO, kAudioUnitProperty_StreamFormat, kCFStringEncodingUTF8,
AudioBuffer, AudioBufferList, AudioDeviceID, AudioObjectGetPropertyData,
AudioObjectGetPropertyDataSize, AudioObjectID, AudioObjectPropertyAddress,
AudioObjectPropertyScope, AudioObjectSetPropertyData, AudioStreamBasicDescription,
AudioValueRange, OSStatus,
};
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BackendSpecificError, BufferSize, BuildStreamError, ChannelCount, Data,
DefaultStreamConfigError, DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo,
PauseStreamError, PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError,
SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange,
SupportedStreamConfigsError,
};
use std::ffi::CStr;
use std::fmt;
use std::mem;
use std::os::raw::c_char;
use std::ptr::null;
use std::rc::Rc;
use std::slice;
use std::sync::mpsc::{channel, RecvTimeoutError};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
pub use self::enumerate::{
default_input_device, default_output_device, Devices, SupportedInputConfigs,
SupportedOutputConfigs,
};
use property_listener::AudioObjectPropertyListener;
pub mod enumerate;
mod property_listener;
/// Coreaudio host, the default host on macOS.
///
/// Construct via [`Host::new`]; creation cannot fail.
#[derive(Debug)]
pub struct Host;
impl Host {
    /// Create the coreaudio host; always succeeds.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
// Delegates host queries to the CoreAudio enumeration helpers in `enumerate`.
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;
    fn is_available() -> bool {
        // Assume coreaudio is always available
        true
    }
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }
    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }
    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
// Every trait method delegates to the inherent `Device` implementation.
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;
    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }
    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        Device::build_input_stream_raw(
            self,
            config,
            sample_format,
            data_callback,
            error_callback,
            timeout,
        )
    }
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        Device::build_output_stream_raw(
            self,
            config,
            sample_format,
            data_callback,
            error_callback,
            timeout,
        )
    }
}
/// A coreaudio device, identified by its raw `AudioDeviceID`.
#[derive(Clone, PartialEq, Eq)]
pub struct Device {
    pub(crate) audio_device_id: AudioDeviceID,
    // Whether this handle was obtained via a default-device lookup.
    is_default: bool,
}
impl Device {
    /// Return the device's human-readable name via
    /// `kAudioDevicePropertyDeviceNameCFString`.
    fn name(&self) -> Result<String, DeviceNameError> {
        let property_address = AudioObjectPropertyAddress {
            mSelector: kAudioDevicePropertyDeviceNameCFString,
            mScope: kAudioDevicePropertyScopeOutput,
            mElement: kAudioObjectPropertyElementMaster,
        };
        let device_name: CFStringRef = null();
        let data_size = mem::size_of::<CFStringRef>();
        let c_str = unsafe {
            let status = AudioObjectGetPropertyData(
                self.audio_device_id,
                &property_address as *const _,
                0,
                null(),
                &data_size as *const _ as *mut _,
                &device_name as *const _ as *mut _,
            );
            check_os_status(status)?;
            // Fast path: a direct pointer into the CFString's backing store.
            let c_string: *const c_char = CFStringGetCStringPtr(device_name, kCFStringEncodingUTF8);
            if c_string.is_null() {
                // Slow path: copy the string into a stack buffer instead.
                // NOTE(review): the property is fetched a second time here,
                // which looks redundant — confirm whether it can be removed.
                let status = AudioObjectGetPropertyData(
                    self.audio_device_id,
                    &property_address as *const _,
                    0,
                    null(),
                    &data_size as *const _ as *mut _,
                    &device_name as *const _ as *mut _,
                );
                check_os_status(status)?;
                let mut buf: [i8; 255] = [0; 255];
                let result = CFStringGetCString(
                    device_name,
                    buf.as_mut_ptr(),
                    buf.len() as _,
                    kCFStringEncodingUTF8,
                );
                if result == 0 {
                    let description =
                        "core foundation failed to return device name string".to_string();
                    let err = BackendSpecificError { description };
                    return Err(err.into());
                }
                let name: &CStr = CStr::from_ptr(buf.as_ptr());
                return Ok(name.to_str().unwrap().to_owned());
            }
            CStr::from_ptr(c_string as *mut _)
        };
        Ok(c_str.to_string_lossy().into_owned())
    }
// Logic re-used between `supported_input_configs` and `supported_output_configs`.
#[allow(clippy::cast_ptr_alignment)]
fn supported_configs(
&self,
scope: AudioObjectPropertyScope,
) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
let mut property_address = AudioObjectPropertyAddress {
mSelector: kAudioDevicePropertyStreamConfiguration,
mScope: scope,
mElement: kAudioObjectPropertyElementMaster,
};
unsafe {
// Retrieve the devices audio buffer list.
let data_size = 0u32;
let status = AudioObjectGetPropertyDataSize(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
);
check_os_status(status)?;
let mut audio_buffer_list: Vec<u8> = vec![];
audio_buffer_list.reserve_exact(data_size as usize);
let status = AudioObjectGetPropertyData(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
audio_buffer_list.as_mut_ptr() as *mut _,
);
check_os_status(status)?;
let audio_buffer_list = audio_buffer_list.as_mut_ptr() as *mut AudioBufferList;
// If there's no buffers, skip.
if (*audio_buffer_list).mNumberBuffers == 0 {
return Ok(vec![].into_iter());
}
// Count the number of channels as the sum of all channels in all output buffers.
let n_buffers = (*audio_buffer_list).mNumberBuffers as usize;
let first: *const AudioBuffer = (*audio_buffer_list).mBuffers.as_ptr();
let buffers: &'static [AudioBuffer] = slice::from_raw_parts(first, n_buffers);
let mut n_channels = 0;
for buffer in buffers {
n_channels += buffer.mNumberChannels as usize;
}
// TODO: macOS should support U8, I16, I32, F32 and F64. This should allow for using
// I16 but just use F32 for now as it's the default anyway.
let sample_format = SampleFormat::F32;
// Get available sample rate ranges.
property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
let data_size = 0u32;
let status = AudioObjectGetPropertyDataSize(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
);
check_os_status(status)?;
let n_ranges = data_size as usize / mem::size_of::<AudioValueRange>();
let mut ranges: Vec<u8> = vec![];
ranges.reserve_exact(data_size as usize);
let status = AudioObjectGetPropertyData(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
ranges.as_mut_ptr() as *mut _,
);
check_os_status(status)?;
let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _;
let ranges: &'static [AudioValueRange] = slice::from_raw_parts(ranges, n_ranges);
let audio_unit = audio_unit_from_device(self, true)?;
let buffer_size = get_io_buffer_frame_size_range(&audio_unit)?;
// Collect the supported formats for the device.
let mut fmts = vec![];
for range in ranges {
let fmt = SupportedStreamConfigRange {
channels: n_channels as ChannelCount,
min_sample_rate: SampleRate(range.mMinimum as _),
max_sample_rate: SampleRate(range.mMaximum as _),
buffer_size,
sample_format,
};
fmts.push(fmt);
}
Ok(fmts.into_iter())
}
}
fn supported_input_configs(
&self,
) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
self.supported_configs(kAudioObjectPropertyScopeInput)
}
fn supported_output_configs(
&self,
) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
self.supported_configs(kAudioObjectPropertyScopeOutput)
}
fn default_config(
&self,
scope: AudioObjectPropertyScope,
) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
fn default_config_error_from_os_status(
status: OSStatus,
) -> Result<(), DefaultStreamConfigError> {
let err = match coreaudio::Error::from_os_status(status) {
Err(err) => err,
Ok(_) => return Ok(()),
};
match err {
coreaudio::Error::AudioUnit(
coreaudio::error::AudioUnitError::FormatNotSupported,
)
| coreaudio::Error::AudioCodec(_)
| coreaudio::Error::AudioFormat(_) => {
Err(DefaultStreamConfigError::StreamTypeNotSupported)
}
coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::NoConnection) => {
Err(DefaultStreamConfigError::DeviceNotAvailable)
}
err => {
let description = format!("{}", err);
let err = BackendSpecificError { description };
Err(err.into())
}
}
}
let property_address = AudioObjectPropertyAddress {
mSelector: kAudioDevicePropertyStreamFormat,
mScope: scope,
mElement: kAudioObjectPropertyElementMaster,
};
unsafe {
let asbd: AudioStreamBasicDescription = mem::zeroed();
let data_size = mem::size_of::<AudioStreamBasicDescription>() as u32;
let status = AudioObjectGetPropertyData(
self.audio_device_id,
&property_address as *const _,
0,
null(),
&data_size as *const _ as *mut _,
&asbd as *const _ as *mut _,
);
default_config_error_from_os_status(status)?;
let sample_format = {
let audio_format = coreaudio::audio_unit::AudioFormat::from_format_and_flag(
asbd.mFormatID,
Some(asbd.mFormatFlags),
);
let flags = match audio_format {
Some(coreaudio::audio_unit::AudioFormat::LinearPCM(flags)) => flags,
_ => return Err(DefaultStreamConfigError::StreamTypeNotSupported),
};
let maybe_sample_format =
coreaudio::audio_unit::SampleFormat::from_flags_and_bits_per_sample(
flags,
asbd.mBitsPerChannel,
);
match maybe_sample_format {
Some(coreaudio::audio_unit::SampleFormat::F32) => SampleFormat::F32,
Some(coreaudio::audio_unit::SampleFormat::I16) => SampleFormat::I16,
_ => return Err(DefaultStreamConfigError::StreamTypeNotSupported),
}
};
let audio_unit = audio_unit_from_device(self, true)?;
let buffer_size = get_io_buffer_frame_size_range(&audio_unit)?;
let config = SupportedStreamConfig {
sample_rate: SampleRate(asbd.mSampleRate as _),
channels: asbd.mChannelsPerFrame as _,
buffer_size,
sample_format,
};
Ok(config)
}
}
fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
self.default_config(kAudioObjectPropertyScopeInput)
}
fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
self.default_config(kAudioObjectPropertyScopeOutput)
}
}
impl fmt::Debug for Device {
    /// Formats the device for diagnostics, showing the raw device id together
    /// with the result of a live `name()` lookup.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_struct("Device");
        builder.field("audio_device_id", &self.audio_device_id);
        builder.field("name", &self.name());
        builder.finish()
    }
}
/// Shared, mutable state backing a `Stream`.
struct StreamInner {
    /// Whether the audio unit is currently started (toggled by play/pause).
    playing: bool,
    /// The coreaudio-rs audio unit driving this stream.
    audio_unit: AudioUnit,
    /// Manage the lifetime of the closure that handles device disconnection.
    _disconnect_listener: Option<AudioObjectPropertyListener>,
    // Track the device with which the audio unit was spawned.
    //
    // We must do this so that we can avoid changing the device sample rate if there is already
    // a stream associated with the device.
    #[allow(dead_code)]
    device_id: AudioDeviceID,
}
/// Register the on-disconnect callback.
/// This will both stop the stream and call the error callback with DeviceNotAvailable.
/// This function should only be called once per stream.
fn add_disconnect_listener<E>(
    stream: &Stream,
    error_callback: Arc<Mutex<E>>,
) -> Result<(), BuildStreamError>
where
    E: FnMut(StreamError) + Send + 'static,
{
    let handle = stream.clone();
    let mut inner = stream.inner.lock().unwrap();
    let address = AudioObjectPropertyAddress {
        mSelector: kAudioDevicePropertyDeviceIsAlive,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMaster,
    };
    // When the device's "is alive" property fires, stop the stream and report
    // `DeviceNotAvailable` through the user's error callback.
    let listener = AudioObjectPropertyListener::new(inner.device_id, address, move || {
        let _ = handle.pause();
        (error_callback.lock().unwrap())(StreamError::DeviceNotAvailable);
    })?;
    // Storing the listener on the stream keeps the callback alive until drop.
    inner._disconnect_listener = Some(listener);
    Ok(())
}
/// Construct a coreaudio `AudioUnit` bound to the given device.
///
/// Default output devices use the `DefaultOutput` IO type so CoreAudio tracks
/// the system default; every other case (including all input devices) talks
/// to the HAL directly via `HalOutput`.
fn audio_unit_from_device(device: &Device, input: bool) -> Result<AudioUnit, coreaudio::Error> {
    let io_type = if !input && device.is_default {
        coreaudio::audio_unit::IOType::DefaultOutput
    } else {
        coreaudio::audio_unit::IOType::HalOutput
    };
    let mut unit = AudioUnit::new(io_type)?;
    if input {
        // Flip the unit into capture mode: input bus on, output bus off.
        let on = 1u32;
        let off = 0u32;
        unit.set_property(
            kAudioOutputUnitProperty_EnableIO,
            Scope::Input,
            Element::Input,
            Some(&on),
        )?;
        unit.set_property(
            kAudioOutputUnitProperty_EnableIO,
            Scope::Output,
            Element::Output,
            Some(&off),
        )?;
    }
    // Bind the unit to this specific device.
    unit.set_property(
        kAudioOutputUnitProperty_CurrentDevice,
        Scope::Global,
        Element::Output,
        Some(&device.audio_device_id),
    )?;
    Ok(unit)
}
impl Device {
    #[allow(clippy::cast_ptr_alignment)]
    #[allow(clippy::while_immutable_condition)]
    #[allow(clippy::float_cmp)]
    /// Build an input (capture) stream on this device.
    ///
    /// Adjusts the device's nominal sample rate if needed, configures an
    /// input-enabled audio unit with the requested format and buffer size,
    /// installs `data_callback` as the coreaudio input callback, and starts
    /// the unit. `_timeout` is accepted for API parity but unused here.
    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        mut data_callback: D,
        error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // The scope and element for working with a device's input stream.
        let scope = Scope::Output;
        let element = Element::Input;
        // Potentially change the device sample rate to match the config.
        set_sample_rate(self.audio_device_id, config.sample_rate)?;
        let mut audio_unit = audio_unit_from_device(self, true)?;
        // Set the stream in interleaved mode.
        let asbd = asbd_from_config(config, sample_format);
        audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?;
        // Set the buffersize, rejecting fixed sizes outside the device range.
        match config.buffer_size {
            BufferSize::Fixed(v) => {
                let buffer_size_range = get_io_buffer_frame_size_range(&audio_unit)?;
                match buffer_size_range {
                    SupportedBufferSize::Range { min, max } => {
                        if v >= min && v <= max {
                            audio_unit.set_property(
                                kAudioDevicePropertyBufferFrameSize,
                                scope,
                                element,
                                Some(&v),
                            )?
                        } else {
                            return Err(BuildStreamError::StreamConfigNotSupported);
                        }
                    }
                    SupportedBufferSize::Unknown => (),
                }
            }
            BufferSize::Default => (),
        }
        // Shared with the disconnect listener below so both paths can report.
        let error_callback = Arc::new(Mutex::new(error_callback));
        let error_callback_disconnect = error_callback.clone();
        // Register the callback that is being called by coreaudio whenever it needs data to be
        // fed to the audio buffer.
        let bytes_per_channel = sample_format.sample_size();
        let sample_rate = config.sample_rate;
        type Args = render_callback::Args<data::Raw>;
        audio_unit.set_input_callback(move |args: Args| unsafe {
            let ptr = (*args.data.data).mBuffers.as_ptr();
            let len = (*args.data.data).mNumberBuffers as usize;
            let buffers: &[AudioBuffer] = slice::from_raw_parts(ptr, len);
            // TODO: Perhaps loop over all buffers instead?
            // Only the first buffer is exposed to the user callback.
            let AudioBuffer {
                mNumberChannels: channels,
                mDataByteSize: data_byte_size,
                mData: data,
            } = buffers[0];
            let data = data as *mut ();
            // `len` is a sample count (bytes / bytes-per-sample), not frames.
            let len = data_byte_size as usize / bytes_per_channel;
            let data = Data::from_parts(data, len, sample_format);
            // TODO: Need a better way to get delay, for now we assume a double-buffer offset.
            let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) {
                Err(err) => {
                    // Report the conversion failure and abort this cycle.
                    (error_callback.lock().unwrap())(err.into());
                    return Err(());
                }
                Ok(cb) => cb,
            };
            let buffer_frames = len / channels as usize;
            let delay = frames_to_duration(buffer_frames, sample_rate);
            let capture = callback
                .sub(delay)
                .expect("`capture` occurs before origin of alsa `StreamInstant`");
            let timestamp = crate::InputStreamTimestamp { callback, capture };
            let info = InputCallbackInfo { timestamp };
            data_callback(&data, &info);
            Ok(())
        })?;
        let stream = Stream::new(StreamInner {
            playing: true,
            _disconnect_listener: None,
            audio_unit,
            device_id: self.audio_device_id,
        });
        // If we didn't request the default device, stop the stream if the
        // device disconnects.
        if !self.is_default {
            add_disconnect_listener(&stream, error_callback_disconnect)?;
        }
        stream.inner.lock().unwrap().audio_unit.start()?;
        Ok(stream)
    }
    /// Build an output (playback) stream on this device.
    ///
    /// Mirrors `build_input_stream_raw` but without changing the device
    /// sample rate, and installs a render callback that asks `data_callback`
    /// to fill each output buffer. `_timeout` is accepted for API parity.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        mut data_callback: D,
        error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        let mut audio_unit = audio_unit_from_device(self, false)?;
        // The scope and element for working with a device's output stream.
        let scope = Scope::Input;
        let element = Element::Output;
        // Set the stream in interleaved mode.
        let asbd = asbd_from_config(config, sample_format);
        audio_unit.set_property(kAudioUnitProperty_StreamFormat, scope, element, Some(&asbd))?;
        // Set the buffersize, rejecting fixed sizes outside the device range.
        match config.buffer_size {
            BufferSize::Fixed(v) => {
                let buffer_size_range = get_io_buffer_frame_size_range(&audio_unit)?;
                match buffer_size_range {
                    SupportedBufferSize::Range { min, max } => {
                        if v >= min && v <= max {
                            audio_unit.set_property(
                                kAudioDevicePropertyBufferFrameSize,
                                scope,
                                element,
                                Some(&v),
                            )?
                        } else {
                            return Err(BuildStreamError::StreamConfigNotSupported);
                        }
                    }
                    SupportedBufferSize::Unknown => (),
                }
            }
            BufferSize::Default => (),
        }
        let error_callback = Arc::new(Mutex::new(error_callback));
        let error_callback_disconnect = error_callback.clone();
        // Register the callback that is being called by coreaudio whenever it needs data to be
        // fed to the audio buffer.
        let bytes_per_channel = sample_format.sample_size();
        let sample_rate = config.sample_rate;
        type Args = render_callback::Args<data::Raw>;
        audio_unit.set_render_callback(move |args: Args| unsafe {
            // If `run()` is currently running, then a callback will be available from this list.
            // Otherwise, we just fill the buffer with zeroes and return.
            let AudioBuffer {
                mNumberChannels: channels,
                mDataByteSize: data_byte_size,
                mData: data,
            } = (*args.data.data).mBuffers[0];
            let data = data as *mut ();
            // Sample count (bytes / bytes-per-sample), not frames.
            let len = data_byte_size as usize / bytes_per_channel;
            let mut data = Data::from_parts(data, len, sample_format);
            let callback = match host_time_to_stream_instant(args.time_stamp.mHostTime) {
                Err(err) => {
                    // Report the conversion failure and abort this cycle.
                    (error_callback.lock().unwrap())(err.into());
                    return Err(());
                }
                Ok(cb) => cb,
            };
            // TODO: Need a better way to get delay, for now we assume a double-buffer offset.
            let buffer_frames = len / channels as usize;
            let delay = frames_to_duration(buffer_frames, sample_rate);
            let playback = callback
                .add(delay)
                .expect("`playback` occurs beyond representation supported by `StreamInstant`");
            let timestamp = crate::OutputStreamTimestamp { callback, playback };
            let info = OutputCallbackInfo { timestamp };
            data_callback(&mut data, &info);
            Ok(())
        })?;
        let stream = Stream::new(StreamInner {
            playing: true,
            _disconnect_listener: None,
            audio_unit,
            device_id: self.audio_device_id,
        });
        // If we didn't request the default device, stop the stream if the
        // device disconnects.
        if !self.is_default {
            add_disconnect_listener(&stream, error_callback_disconnect)?;
        }
        stream.inner.lock().unwrap().audio_unit.start()?;
        Ok(stream)
    }
}
/// Attempt to set the device sample rate to the provided rate.
/// Return an error if the requested sample rate is not supported by the device.
///
/// Because the nominal sample rate is applied asynchronously, this installs a
/// temporary property listener and blocks (up to ~1 second) until the device
/// reports the requested rate.
fn set_sample_rate(
    audio_device_id: AudioObjectID,
    target_sample_rate: SampleRate,
) -> Result<(), BuildStreamError> {
    // Get the current sample rate.
    let mut property_address = AudioObjectPropertyAddress {
        mSelector: kAudioDevicePropertyNominalSampleRate,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMaster,
    };
    // Out-parameters written through the raw casts in the call below.
    let sample_rate: f64 = 0.0;
    let data_size = mem::size_of::<f64>() as u32;
    let status = unsafe {
        AudioObjectGetPropertyData(
            audio_device_id,
            &property_address as *const _,
            0,
            null(),
            &data_size as *const _ as *mut _,
            &sample_rate as *const _ as *mut _,
        )
    };
    coreaudio::Error::from_os_status(status)?;
    // If the requested sample rate is different to the device sample rate, update the device.
    if sample_rate as u32 != target_sample_rate.0 {
        // Get available sample rate ranges.
        property_address.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
        let data_size = 0u32;
        let status = unsafe {
            AudioObjectGetPropertyDataSize(
                audio_device_id,
                &property_address as *const _,
                0,
                null(),
                &data_size as *const _ as *mut _,
            )
        };
        coreaudio::Error::from_os_status(status)?;
        let n_ranges = data_size as usize / mem::size_of::<AudioValueRange>();
        // Raw byte storage for the `AudioValueRange` array; only read through
        // the casted pointer below.
        let mut ranges: Vec<u8> = vec![];
        ranges.reserve_exact(data_size as usize);
        let status = unsafe {
            AudioObjectGetPropertyData(
                audio_device_id,
                &property_address as *const _,
                0,
                null(),
                &data_size as *const _ as *mut _,
                ranges.as_mut_ptr() as *mut _,
            )
        };
        coreaudio::Error::from_os_status(status)?;
        let ranges: *mut AudioValueRange = ranges.as_mut_ptr() as *mut _;
        // NOTE(review): the `'static` lifetime is a lie — this slice borrows
        // the local `ranges` Vec and must not escape this function.
        let ranges: &'static [AudioValueRange] = unsafe { slice::from_raw_parts(ranges, n_ranges) };
        // Now that we have the available ranges, pick the one matching the desired rate.
        let sample_rate = target_sample_rate.0;
        let maybe_index = ranges
            .iter()
            .position(|r| r.mMinimum as u32 == sample_rate && r.mMaximum as u32 == sample_rate);
        let range_index = match maybe_index {
            None => return Err(BuildStreamError::StreamConfigNotSupported),
            Some(i) => i,
        };
        // The rate change is asynchronous: listen on the nominal-sample-rate
        // property and forward each reported rate over this channel.
        let (send, recv) = channel::<Result<f64, coreaudio::Error>>();
        let sample_rate_address = AudioObjectPropertyAddress {
            mSelector: kAudioDevicePropertyNominalSampleRate,
            mScope: kAudioObjectPropertyScopeGlobal,
            mElement: kAudioObjectPropertyElementMaster,
        };
        // Send sample rate updates back on a channel.
        let sample_rate_handler = move || {
            let mut rate: f64 = 0.0;
            let data_size = mem::size_of::<f64>();
            let result = unsafe {
                AudioObjectGetPropertyData(
                    audio_device_id,
                    &sample_rate_address as *const _,
                    0,
                    null(),
                    &data_size as *const _ as *mut _,
                    &mut rate as *const _ as *mut _,
                )
            };
            // `.ok()`: the receiver may already be gone after a timeout.
            send.send(coreaudio::Error::from_os_status(result).map(|_| rate))
                .ok();
        };
        let listener = AudioObjectPropertyListener::new(
            audio_device_id,
            sample_rate_address,
            sample_rate_handler,
        )?;
        // Finally, set the sample rate.
        property_address.mSelector = kAudioDevicePropertyNominalSampleRate;
        // NOTE(review): `data_size` was overwritten above with the byte size of
        // the whole ranges array, yet only one `AudioValueRange` is passed
        // here. This presumably works because the HAL reads just the leading
        // `Float64` (the range minimum) — confirm against the Core Audio docs.
        let status = unsafe {
            AudioObjectSetPropertyData(
                audio_device_id,
                &property_address as *const _,
                0,
                null(),
                data_size,
                &ranges[range_index] as *const _ as *const _,
            )
        };
        coreaudio::Error::from_os_status(status)?;
        // Wait for the reported_rate to change.
        //
        // This should not take longer than a few ms, but we timeout after 1 sec just in case.
        // We loop over potentially several events from the channel to ensure
        // that we catch the expected change in sample rate.
        let mut timeout = Duration::from_secs(1);
        let start = Instant::now();
        loop {
            match recv.recv_timeout(timeout) {
                Err(err) => {
                    let description = match err {
                        RecvTimeoutError::Disconnected => {
                            "sample rate listener channel disconnected unexpectedly"
                        }
                        RecvTimeoutError::Timeout => {
                            "timeout waiting for sample rate update for device"
                        }
                    }
                    .to_string();
                    return Err(BackendSpecificError { description }.into());
                }
                Ok(Ok(reported_sample_rate)) => {
                    // Done once the device reports the requested rate.
                    if reported_sample_rate == target_sample_rate.0 as f64 {
                        break;
                    }
                }
                Ok(Err(_)) => {
                    // TODO: should we consider collecting this error?
                }
            };
            // Shrink the remaining wait so the total timeout stays ~1 second.
            timeout = timeout
                .checked_sub(start.elapsed())
                .unwrap_or(Duration::ZERO);
        }
        listener.remove()?;
    }
    Ok(())
}
/// A CoreAudio stream handle, cheaply cloneable.
#[derive(Clone)]
pub struct Stream {
    // Shared state: all clones (including the disconnect listener's handle)
    // refer to the same `StreamInner`.
    inner: Arc<Mutex<StreamInner>>,
}
impl Stream {
    /// Wrap freshly-built stream state in a shareable, lockable handle.
    fn new(inner: StreamInner) -> Self {
        let inner = Arc::new(Mutex::new(inner));
        Self { inner }
    }
}
impl StreamTrait for Stream {
    /// Start the audio unit if it is not already running (idempotent).
    fn play(&self) -> Result<(), PlayStreamError> {
        let mut guard = self.inner.lock().unwrap();
        if guard.playing {
            return Ok(());
        }
        if let Err(e) = guard.audio_unit.start() {
            let description = format!("{}", e);
            return Err(BackendSpecificError { description }.into());
        }
        guard.playing = true;
        Ok(())
    }
    /// Stop the audio unit if it is currently running (idempotent).
    fn pause(&self) -> Result<(), PauseStreamError> {
        let mut guard = self.inner.lock().unwrap();
        if !guard.playing {
            return Ok(());
        }
        if let Err(e) = guard.audio_unit.stop() {
            let description = format!("{}", e);
            return Err(BackendSpecificError { description }.into());
        }
        guard.playing = false;
        Ok(())
    }
}
/// Query the device-global IO buffer frame-size range and translate it into
/// cpal's `SupportedBufferSize` representation.
fn get_io_buffer_frame_size_range(
    audio_unit: &AudioUnit,
) -> Result<SupportedBufferSize, coreaudio::Error> {
    let range: AudioValueRange = audio_unit.get_property(
        kAudioDevicePropertyBufferFrameSizeRange,
        Scope::Global,
        Element::Output,
    )?;
    let min = range.mMinimum as u32;
    let max = range.mMaximum as u32;
    Ok(SupportedBufferSize::Range { min, max })
}

View File

@@ -0,0 +1,85 @@
//! Helper code for registering audio object property listeners.
use super::coreaudio::sys::{
AudioObjectAddPropertyListener, AudioObjectID, AudioObjectPropertyAddress,
AudioObjectRemovePropertyListener, OSStatus,
};
use crate::BuildStreamError;
/// A double-indirection to be able to pass a closure (a fat pointer)
/// via a single c_void.
///
/// The outer `Box` (held by `AudioObjectPropertyListener`) gives the wrapper
/// a stable address that can be handed to Core Audio as the listener's
/// client data.
struct PropertyListenerCallbackWrapper(Box<dyn FnMut()>);
/// Maintain an audio object property listener.
/// The listener will be removed when this type is dropped.
pub struct AudioObjectPropertyListener {
    // Boxed so the pointer registered with Core Audio stays stable.
    callback: Box<PropertyListenerCallbackWrapper>,
    // The address the listener was registered for (needed again on removal).
    property_address: AudioObjectPropertyAddress,
    // The audio object the listener was attached to.
    audio_object_id: AudioObjectID,
    // Set once the listener has been unregistered, so `Drop` skips removal.
    removed: bool,
}
impl AudioObjectPropertyListener {
    /// Attach the provided callback as a audio object property listener.
    pub fn new<F: FnMut() + 'static>(
        audio_object_id: AudioObjectID,
        property_address: AudioObjectPropertyAddress,
        callback: F,
    ) -> Result<Self, BuildStreamError> {
        // Box the wrapper so it has a stable address: the raw pointer passed
        // to Core Audio below must stay valid (and identical) until removal.
        let callback = Box::new(PropertyListenerCallbackWrapper(Box::new(callback)));
        unsafe {
            coreaudio::Error::from_os_status(AudioObjectAddPropertyListener(
                audio_object_id,
                &property_address as *const _,
                Some(property_listener_handler_shim),
                &*callback as *const _ as *mut _,
            ))?;
        };
        Ok(Self {
            callback,
            audio_object_id,
            property_address,
            removed: false,
        })
    }
    /// Explicitly remove the property listener.
    /// Use this method if you need to explicitly handle failure to remove
    /// the property listener.
    pub fn remove(mut self) -> Result<(), BuildStreamError> {
        self.remove_inner()
    }
    /// Unregister the listener, passing the same shim + callback pointer that
    /// were registered so Core Audio can match the entry to remove.
    fn remove_inner(&mut self) -> Result<(), BuildStreamError> {
        unsafe {
            coreaudio::Error::from_os_status(AudioObjectRemovePropertyListener(
                self.audio_object_id,
                &self.property_address as *const _,
                Some(property_listener_handler_shim),
                &*self.callback as *const _ as *mut _,
            ))?;
        }
        // Only marked removed on success; a failed removal will be retried
        // (best-effort) by `Drop`.
        self.removed = true;
        Ok(())
    }
}
impl Drop for AudioObjectPropertyListener {
    /// Best-effort removal if `remove()` was never called explicitly; the
    /// result is discarded because `drop` cannot report errors.
    fn drop(&mut self) {
        if self.removed {
            return;
        }
        let _ = self.remove_inner();
    }
}
/// Callback used to call user-provided closure as a property listener.
///
/// `callback` is the `*mut PropertyListenerCallbackWrapper` registered via
/// `AudioObjectAddPropertyListener`; the object id, property count and
/// address parameters are ignored.
unsafe extern "C" fn property_listener_handler_shim(
    _: AudioObjectID,
    _: u32,
    _: *const AudioObjectPropertyAddress,
    callback: *mut ::std::os::raw::c_void,
) -> OSStatus {
    // SAFETY: the pointer originates from the boxed wrapper owned by an
    // `AudioObjectPropertyListener`, which unregisters this handler before
    // the wrapper is dropped.
    let wrapper = callback as *mut PropertyListenerCallbackWrapper;
    (*wrapper).0();
    // 0 == noErr: report success to Core Audio.
    0
}

121
vendor/cpal/src/host/coreaudio/mod.rs vendored Normal file
View File

@@ -0,0 +1,121 @@
extern crate coreaudio;
use self::coreaudio::sys::{
kAudioFormatFlagIsFloat, kAudioFormatFlagIsPacked, kAudioFormatLinearPCM,
AudioStreamBasicDescription, OSStatus,
};
use crate::DefaultStreamConfigError;
use crate::{BuildStreamError, SupportedStreamConfigsError};
use crate::{BackendSpecificError, SampleFormat, StreamConfig};
#[cfg(target_os = "ios")]
mod ios;
#[cfg(target_os = "macos")]
mod macos;
#[cfg(target_os = "ios")]
pub use self::ios::{
enumerate::{Devices, SupportedInputConfigs, SupportedOutputConfigs},
Device, Host, Stream,
};
#[cfg(target_os = "macos")]
pub use self::macos::{
enumerate::{Devices, SupportedInputConfigs, SupportedOutputConfigs},
Device, Host, Stream,
};
/// Common helper methods used by both macOS and iOS
fn check_os_status(os_status: OSStatus) -> Result<(), BackendSpecificError> {
match coreaudio::Error::from_os_status(os_status) {
Ok(()) => Ok(()),
Err(err) => {
let description = err.to_string();
Err(BackendSpecificError { description })
}
}
}
// Create a coreaudio AudioStreamBasicDescription from a CPAL Format.
//
// The description always describes interleaved linear PCM: one packet holds
// exactly one frame, and one frame holds one sample per channel.
fn asbd_from_config(
    config: &StreamConfig,
    sample_format: SampleFormat,
) -> AudioStreamBasicDescription {
    let channels = config.channels as usize;
    let bytes_per_channel = sample_format.sample_size();
    let bytes_per_frame = channels * bytes_per_channel;
    let frames_per_packet = 1;
    let bytes_per_packet = frames_per_packet * bytes_per_frame;
    // Only F32 carries the floating-point flag; all formats are packed.
    let format_flags = match sample_format {
        SampleFormat::F32 => kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked,
        _ => kAudioFormatFlagIsPacked,
    };
    AudioStreamBasicDescription {
        mBitsPerChannel: (bytes_per_channel * 8) as _,
        mBytesPerFrame: bytes_per_frame as _,
        mChannelsPerFrame: channels as _,
        mBytesPerPacket: bytes_per_packet as _,
        mFramesPerPacket: frames_per_packet as _,
        mFormatFlags: format_flags,
        mFormatID: kAudioFormatLinearPCM,
        mSampleRate: config.sample_rate.0 as _,
        ..Default::default()
    }
}
/// Convert a mach host-time tick count into a cpal `StreamInstant`.
///
/// The tick count is scaled by the mach timebase (`numer`/`denom`) to obtain
/// nanoseconds, which are then split into whole seconds and subsecond
/// nanoseconds.
///
/// # Errors
/// Returns a `BackendSpecificError` if `mach_timebase_info` fails.
fn host_time_to_stream_instant(
    m_host_time: u64,
) -> Result<crate::StreamInstant, BackendSpecificError> {
    let mut info: mach2::mach_time::mach_timebase_info = Default::default();
    // SAFETY: `info` is a valid, writable timebase-info struct for the call.
    let res = unsafe { mach2::mach_time::mach_timebase_info(&mut info) };
    check_os_status(res)?;
    // Widen to u128 before multiplying: `m_host_time * numer` can exceed
    // u64::MAX for large tick counts / timebase numerators, which would panic
    // in debug builds and silently wrap in release builds. The quotient fits
    // back into u64 for any realistic uptime.
    let nanos = (m_host_time as u128 * info.numer as u128 / info.denom as u128) as u64;
    let secs = nanos / 1_000_000_000;
    let subsec_nanos = nanos - secs * 1_000_000_000;
    Ok(crate::StreamInstant::new(secs as i64, subsec_nanos as u32))
}
// Convert the given duration in frames at the given sample rate to a `std::time::Duration`.
//
// The frame count is first expressed as fractional seconds, then split into
// whole seconds and the remaining nanoseconds.
fn frames_to_duration(frames: usize, rate: crate::SampleRate) -> std::time::Duration {
    let total_secs = frames as f64 / rate.0 as f64;
    let whole_secs = total_secs as u64;
    let subsec_nanos = ((total_secs - whole_secs as f64) * 1_000_000_000.0) as u32;
    std::time::Duration::new(whole_secs, subsec_nanos)
}
// TODO need stronger error identification
impl From<coreaudio::Error> for BuildStreamError {
fn from(err: coreaudio::Error) -> BuildStreamError {
match err {
coreaudio::Error::RenderCallbackBufferFormatDoesNotMatchAudioUnitStreamFormat
| coreaudio::Error::NoKnownSubtype
| coreaudio::Error::AudioUnit(coreaudio::error::AudioUnitError::FormatNotSupported)
| coreaudio::Error::AudioCodec(_)
| coreaudio::Error::AudioFormat(_) => BuildStreamError::StreamConfigNotSupported,
_ => BuildStreamError::DeviceNotAvailable,
}
}
}
impl From<coreaudio::Error> for SupportedStreamConfigsError {
    /// Wrap any coreaudio error as a backend-specific error string.
    fn from(err: coreaudio::Error) -> SupportedStreamConfigsError {
        // Check for possible DeviceNotAvailable variant
        let err = BackendSpecificError {
            description: err.to_string(),
        };
        SupportedStreamConfigsError::BackendSpecific { err }
    }
}
impl From<coreaudio::Error> for DefaultStreamConfigError {
    /// Wrap any coreaudio error as a backend-specific error string.
    fn from(err: coreaudio::Error) -> DefaultStreamConfigError {
        // Check for possible DeviceNotAvailable variant
        let err = BackendSpecificError {
            description: err.to_string(),
        };
        DefaultStreamConfigError::BackendSpecific { err }
    }
}

422
vendor/cpal/src/host/emscripten/mod.rs vendored Normal file
View File

@@ -0,0 +1,422 @@
use js_sys::Float32Array;
use std::time::Duration;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use wasm_bindgen_futures::{spawn_local, JsFuture};
use web_sys::AudioContext;
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BufferSize, BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat,
SampleRate, StreamConfig, StreamError, SupportedBufferSize, SupportedStreamConfig,
SupportedStreamConfigRange, SupportedStreamConfigsError,
};
// The emscripten backend currently works by instantiating an `AudioContext` object per `Stream`.
// Creating a stream creates a new `AudioContext`. Destroying a stream destroys it.
// Creating a `Host` performs no initialization of its own.
/// The default emscripten host type.
///
/// A unit type; construction via [`Host::new`] is infallible.
#[derive(Debug)]
pub struct Host;
/// Iterator over this backend's devices.
/// Content is false if the iterator is empty.
pub struct Devices(bool);
/// The single audio device exposed by this backend.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;
#[wasm_bindgen]
#[derive(Clone)]
pub struct Stream {
    // A reference to an `AudioContext` object.
    // Clones of the stream share this same underlying context handle.
    audio_ctxt: AudioContext,
}
// Index within the `streams` array of the events loop.
// NOTE(review): no use of this type is visible in this portion of the
// module — confirm it is still required.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct StreamId(usize);
/// Iterator type returned for supported input configurations.
pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
/// Iterator type returned for supported output configurations.
pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
// Bounds advertised by `supported_output_configs`.
const MIN_CHANNELS: u16 = 1;
const MAX_CHANNELS: u16 = 32;
const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000);
const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000);
// Rate pinned by `default_output_config`.
const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100);
const MIN_BUFFER_SIZE: u32 = 1;
const MAX_BUFFER_SIZE: u32 = u32::MAX;
// Frames per buffer used when the caller requests `BufferSize::Default`.
const DEFAULT_BUFFER_SIZE: usize = 2048;
// This backend only processes `f32` samples.
const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
impl Host {
pub fn new() -> Result<Self, crate::HostUnavailable> {
Ok(Host)
}
}
impl Devices {
    /// Construct the device iterator in its default state.
    /// Infallible in practice; the `Result` mirrors other backends.
    // NOTE(review): relies on a `Default` impl for `Devices` defined
    // elsewhere in this module.
    fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}
impl Device {
    /// The backend exposes a single device with a fixed display name.
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok(String::from("Default Device"))
    }
    /// Input capture is not supported by this backend.
    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        unimplemented!();
    }
    /// Advertise one config range per channel count; all share the same
    /// sample-rate bounds, buffer-size range and sample format.
    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        let buffer_size = SupportedBufferSize::Range {
            min: MIN_BUFFER_SIZE,
            max: MAX_BUFFER_SIZE,
        };
        let mut configs = Vec::with_capacity((MAX_CHANNELS - MIN_CHANNELS + 1) as usize);
        for channels in MIN_CHANNELS..=MAX_CHANNELS {
            configs.push(SupportedStreamConfigRange {
                channels,
                min_sample_rate: MIN_SAMPLE_RATE,
                max_sample_rate: MAX_SAMPLE_RATE,
                buffer_size: buffer_size.clone(),
                sample_format: SUPPORTED_SAMPLE_FORMAT,
            });
        }
        Ok(configs.into_iter())
    }
    /// Input capture is not supported by this backend.
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        unimplemented!();
    }
    /// Pick the best-ranked supported range and pin it to the default rate.
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        const EXPECT: &str = "expected at least one valid webaudio stream config";
        let best = self
            .supported_output_configs()
            .expect(EXPECT)
            .max_by(|a, b| a.cmp_default_heuristics(b))
            .unwrap();
        Ok(best.with_sample_rate(DEFAULT_SAMPLE_RATE))
    }
}
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;
    fn is_available() -> bool {
        // Assume this host is always available on emscripten.
        true
    }
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }
    // Both default-device lookups delegate to free functions defined
    // elsewhere in this module.
    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }
    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;
    // The following methods simply delegate to the inherent `Device` impl.
    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }
    /// Input streams are not supported by this backend.
    fn build_input_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        unimplemented!()
    }
    /// Build an output stream backed by a fresh `AudioContext`.
    ///
    /// A timer-driven loop (via `set_timeout`, defined elsewhere in this
    /// module) repeatedly invokes `data_callback` to fill buffers.
    /// `_error_callback` and `_timeout` are currently unused.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        if !valid_config(config, sample_format) {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        // Resolve the buffer size in frames; a fixed size of zero is invalid.
        let buffer_size_frames = match config.buffer_size {
            BufferSize::Fixed(v) => {
                if v == 0 {
                    return Err(BuildStreamError::StreamConfigNotSupported);
                } else {
                    v as usize
                }
            }
            BufferSize::Default => DEFAULT_BUFFER_SIZE,
        };
        // Create the stream.
        let audio_ctxt = AudioContext::new().expect("webaudio is not present on this system");
        let stream = Stream { audio_ctxt };
        // Use `set_timeout` to invoke a Rust callback repeatedly.
        //
        // The job of this callback is to fill the content of the audio buffers.
        //
        // See also: The call to `set_timeout` at the end of the `audio_callback_fn` which creates
        // the loop.
        set_timeout(
            10,
            stream.clone(),
            data_callback,
            config,
            sample_format,
            buffer_size_frames as u32,
        );
        Ok(stream)
    }
}
impl StreamTrait for Stream {
    /// Resume the underlying `AudioContext`; panics if the promise rejects.
    fn play(&self) -> Result<(), PlayStreamError> {
        let promise = self
            .audio_ctxt
            .resume()
            .expect("Could not resume the stream");
        let resumed = JsFuture::from(promise);
        // Check the promise result asynchronously on the local executor.
        spawn_local(async move {
            match resumed.await {
                Ok(value) => assert!(value.is_undefined()),
                Err(value) => panic!("AudioContext.resume() promise was rejected: {:?}", value),
            }
        });
        Ok(())
    }
    /// Suspend the underlying `AudioContext`; panics if the promise rejects.
    fn pause(&self) -> Result<(), PauseStreamError> {
        let promise = self
            .audio_ctxt
            .suspend()
            .expect("Could not suspend the stream");
        let suspended = JsFuture::from(promise);
        // Check the promise result asynchronously on the local executor.
        spawn_local(async move {
            match suspended.await {
                Ok(value) => assert!(value.is_undefined()),
                Err(value) => panic!("AudioContext.suspend() promise was rejected: {:?}", value),
            }
        });
        Ok(())
    }
}
/// Builds the `FnOnce` closure that `set_timeout` schedules to produce one
/// buffer's worth of output audio.
///
/// Each invocation:
/// 1. runs `data_callback` to fill an interleaved `f32` scratch buffer,
/// 2. de-interleaves that buffer into a fresh WebAudio `AudioBuffer`,
/// 3. plays the buffer through a one-shot buffer-source node, and
/// 4. re-schedules itself via `set_timeout`, forming the playback loop.
fn audio_callback_fn<D>(
    mut data_callback: D,
) -> impl FnOnce(Stream, StreamConfig, SampleFormat, u32)
where
    D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
{
    |stream, config, sample_format, buffer_size_frames| {
        let sample_rate = config.sample_rate.0;
        let buffer_size_samples = buffer_size_frames * config.channels as u32;
        let audio_ctxt = &stream.audio_ctxt;
        // TODO: We should be re-using a buffer.
        let mut temporary_buffer = vec![0f32; buffer_size_samples as usize];
        {
            let len = temporary_buffer.len();
            let data = temporary_buffer.as_mut_ptr() as *mut ();
            let mut data = unsafe { Data::from_parts(data, len, sample_format) };
            let now_secs: f64 = audio_ctxt.current_time();
            let callback = crate::StreamInstant::from_secs_f64(now_secs);
            // TODO: Use proper latency instead. Currently, unsupported on most browsers though, so
            // we estimate based on buffer size instead. Probably should use this, but it's only
            // supported by firefox (2020-04-28).
            // let latency_secs: f64 = audio_ctxt.outputLatency.try_into().unwrap();
            let buffer_duration = frames_to_duration(len, sample_rate as usize);
            let playback = callback
                .add(buffer_duration)
                .expect("`playback` occurs beyond representation supported by `StreamInstant`");
            let timestamp = crate::OutputStreamTimestamp { callback, playback };
            let info = OutputCallbackInfo { timestamp };
            data_callback(&mut data, &info);
        }
        let typed_array: Float32Array = temporary_buffer.as_slice().into();
        debug_assert_eq!(temporary_buffer.len() % config.channels as usize, 0);
        let src_buffer = Float32Array::new(typed_array.buffer().as_ref());
        let context = audio_ctxt;
        let buffer = context
            .create_buffer(
                config.channels as u32,
                buffer_size_frames as u32,
                sample_rate as f32,
            )
            .expect("Buffer could not be created");
        // De-interleave the scratch buffer into the per-channel AudioBuffer data.
        for channel in 0..config.channels {
            let mut buffer_content = buffer
                .get_channel_data(channel as u32)
                .expect("Should be impossible");
            for (i, buffer_content_item) in buffer_content.iter_mut().enumerate() {
                *buffer_content_item =
                    src_buffer.get_index(i as u32 * config.channels as u32 + channel as u32);
            }
        }
        let node = context
            .create_buffer_source()
            .expect("The buffer source node could not be created");
        node.set_buffer(Some(&buffer));
        context
            .destination()
            .connect_with_audio_node(&node)
            .expect("Could not connect the audio node to the destination");
        node.start().expect("Could not start the audio node");
        // TODO: handle latency better ; right now we just use setInterval with the amount of sound
        // data that is in each buffer ; this is obviously bad, and also the schedule is too tight
        // and there may be underflows
        set_timeout(
            1000 * buffer_size_frames as i32 / sample_rate as i32,
            // `stream` is owned by this `FnOnce` closure and unused afterwards,
            // so it can be moved directly (the original cloned it twice).
            stream,
            data_callback,
            &config,
            sample_format,
            buffer_size_frames as u32,
        );
    }
}
/// Schedules the closure produced by `audio_callback_fn` to run once after
/// `time` milliseconds via `window.setTimeout`, passing the stream, config,
/// sample format, and buffer size as the four JS arguments.
///
/// NOTE(review): `Closure::once_into_js` hands the closure's memory to the
/// JS garbage collector — confirm this is acceptable for a repeating timer.
fn set_timeout<D>(
    time: i32,
    stream: Stream,
    data_callback: D,
    config: &StreamConfig,
    sample_format: SampleFormat,
    buffer_size_frames: u32,
) where
    D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
{
    let window = web_sys::window().expect("Not in a window somehow?");
    window
        .set_timeout_with_callback_and_timeout_and_arguments_4(
            &Closure::once_into_js(audio_callback_fn(data_callback))
                .dyn_ref::<js_sys::Function>()
                .expect("The function was somehow not a function"),
            time,
            &stream.into(),
            &((*config).clone()).into(),
            &Closure::once_into_js(move || sample_format),
            &buffer_size_frames.into(),
        )
        .expect("The timeout could not be set");
}
impl Default for Devices {
    /// Builds the device iterator: it yields the singleton device only when
    /// the WebAudio API is available, and is otherwise immediately exhausted.
    fn default() -> Devices {
        let webaudio_available = is_webaudio_available();
        Devices(webaudio_available)
    }
}
impl Iterator for Devices {
    type Item = Device;

    /// Yields the singleton `Device` on the first call, then `None` forever.
    #[inline]
    fn next(&mut self) -> Option<Device> {
        // `take` returns the current flag and leaves `false` behind, so the
        // device is produced at most once.
        if std::mem::take(&mut self.0) {
            Some(Device)
        } else {
            None
        }
    }
}
#[inline]
fn default_input_device() -> Option<Device> {
    // Capture is not supported by the WebAudio backend; this panics via
    // `unimplemented!()` if reached.
    unimplemented!();
}
#[inline]
fn default_output_device() -> Option<Device> {
    // Yield the singleton device only when the WebAudio API is present.
    is_webaudio_available().then(|| Device)
}
// Detects whether the `AudioContext` global variable is available.
fn is_webaudio_available() -> bool {
    // Attempt to construct a context; success implies the API exists. The
    // temporary context is dropped immediately.
    match AudioContext::new() {
        Ok(_context) => true,
        Err(_) => false,
    }
}
// Whether or not the given stream configuration is valid for building a stream.
fn valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool {
    let channels_supported = (MIN_CHANNELS..=MAX_CHANNELS).contains(&conf.channels);
    let rate_supported = (MIN_SAMPLE_RATE..=MAX_SAMPLE_RATE).contains(&conf.sample_rate);
    let format_supported = sample_format == SUPPORTED_SAMPLE_FORMAT;
    channels_supported && rate_supported && format_supported
}
// Convert the given duration in frames at the given sample rate to a `std::time::Duration`.
fn frames_to_duration(frames: usize, rate: usize) -> std::time::Duration {
    // Split the fractional second count into whole seconds and the
    // remaining (truncated) nanoseconds.
    let total_secs = frames as f64 / rate as f64;
    let whole_secs = total_secs as u64;
    let subsec_nanos = ((total_secs - whole_secs as f64) * 1_000_000_000.0) as u32;
    std::time::Duration::new(whole_secs, subsec_nanos)
}

268
vendor/cpal/src/host/jack/device.rs vendored Normal file
View File

@@ -0,0 +1,268 @@
use crate::traits::DeviceTrait;
use crate::{
BackendSpecificError, BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError,
InputCallbackInfo, OutputCallbackInfo, SampleFormat, SampleRate, StreamConfig, StreamError,
SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange,
SupportedStreamConfigsError,
};
use std::hash::{Hash, Hasher};
use std::time::Duration;
use super::stream::Stream;
use super::JACK_SAMPLE_FORMAT;
/// Iterator over the input stream configurations a JACK device advertises.
pub type SupportedInputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
/// Iterator over the output stream configurations a JACK device advertises.
pub type SupportedOutputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
// Channel count reported by `default_config`.
const DEFAULT_NUM_CHANNELS: u16 = 2;
// Channel counts advertised via `supported_configs`.
const DEFAULT_SUPPORTED_CHANNELS: [u16; 10] = [1, 2, 4, 6, 8, 16, 24, 32, 48, 64];
/// If a device is for input or output.
/// Until we have duplex stream support JACK clients and CPAL devices for JACK will be either input or output.
#[derive(Clone, Debug)]
pub enum DeviceType {
    /// The device captures audio.
    InputDevice,
    /// The device plays audio.
    OutputDevice,
}
/// A JACK "device": the settings needed to create a JACK client for either
/// capture or playback when a stream is built.
#[derive(Clone, Debug)]
pub struct Device {
    /// Client name as assigned by JACK (may differ from the requested name).
    name: String,
    /// Sample rate reported by the JACK server when the device was created.
    sample_rate: SampleRate,
    /// The server's buffer size; min == max as both come from `buffer_size()`.
    buffer_size: SupportedBufferSize,
    /// Whether this device is for capture (input) or playback (output).
    device_type: DeviceType,
    /// Forwarded to `get_client_options` when (re)creating clients.
    start_server_automatically: bool,
    /// If true, streams built from this device connect their ports to the
    /// system ports automatically.
    connect_ports_automatically: bool,
}
impl Device {
    /// Creates a JACK device by opening a throw-away client.
    ///
    /// The dummy client only exists to query the server's sample rate and
    /// buffer size so they can be advertised as configs; it is dropped, and a
    /// fresh client is created when a stream is built (the `Client` must be
    /// moved to create the `AsyncClient`, so it cannot be kept here).
    fn new_device(
        name: String,
        connect_ports_automatically: bool,
        start_server_automatically: bool,
        device_type: DeviceType,
    ) -> Result<Self, String> {
        // ClientOptions are bit flags that you can set with the constants provided
        let client_options = super::get_client_options(start_server_automatically);
        super::get_client(&name, client_options).map(|client| Device {
            // JACK may rename the client on a name collision, so store the
            // name it actually assigned rather than the one requested.
            name: client.name().to_string(),
            sample_rate: SampleRate(client.sample_rate() as u32),
            buffer_size: SupportedBufferSize::Range {
                min: client.buffer_size(),
                max: client.buffer_size(),
            },
            device_type,
            start_server_automatically,
            connect_ports_automatically,
        })
    }

    /// Creates the default output device, naming its JACK client `"<name>_out"`.
    pub fn default_output_device(
        name: &str,
        connect_ports_automatically: bool,
        start_server_automatically: bool,
    ) -> Result<Self, String> {
        Device::new_device(
            format!("{}_out", name),
            connect_ports_automatically,
            start_server_automatically,
            DeviceType::OutputDevice,
        )
    }

    /// Creates the default input device, naming its JACK client `"<name>_in"`.
    pub fn default_input_device(
        name: &str,
        connect_ports_automatically: bool,
        start_server_automatically: bool,
    ) -> Result<Self, String> {
        Device::new_device(
            format!("{}_in", name),
            connect_ports_automatically,
            start_server_automatically,
            DeviceType::InputDevice,
        )
    }

    /// The single configuration a JACK device reports: the server's sample
    /// rate and buffer size with F32 samples.
    pub fn default_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        // The sample format for JACK audio ports is always "32-bit float mono audio"
        // in the current implementation. Custom formats are allowed within JACK but
        // are of niche interest; the format can be found programmatically by calling
        // jack::PortSpec::port_type() on a created port.
        Ok(SupportedStreamConfig {
            channels: DEFAULT_NUM_CHANNELS,
            sample_rate: self.sample_rate,
            buffer_size: self.buffer_size.clone(),
            sample_format: JACK_SAMPLE_FORMAT,
        })
    }

    /// One config range per supported channel count; the sample rate is
    /// pinned to the server's rate on both ends.
    pub fn supported_configs(&self) -> Vec<SupportedStreamConfigRange> {
        let base = match self.default_config() {
            Ok(config) => config,
            Err(_) => return vec![],
        };
        DEFAULT_SUPPORTED_CHANNELS
            .iter()
            .map(|&channels| SupportedStreamConfigRange {
                channels,
                min_sample_rate: base.sample_rate,
                max_sample_rate: base.sample_rate,
                buffer_size: base.buffer_size.clone(),
                sample_format: base.sample_format,
            })
            .collect()
    }

    /// Whether this device captures audio.
    pub fn is_input(&self) -> bool {
        match self.device_type {
            DeviceType::InputDevice => true,
            DeviceType::OutputDevice => false,
        }
    }

    /// Whether this device plays audio.
    pub fn is_output(&self) -> bool {
        match self.device_type {
            DeviceType::OutputDevice => true,
            DeviceType::InputDevice => false,
        }
    }
}
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;

    /// Returns the name JACK assigned to this device's client.
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok(self.name.clone())
    }

    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Ok(self.supported_configs().into_iter())
    }

    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Ok(self.supported_configs().into_iter())
    }

    /// Returns the default input config
    /// The sample format for JACK audio ports is always "32-bit float mono audio" unless using a custom type.
    /// The sample rate is set by the JACK server.
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        self.default_config()
    }

    /// Returns the default output config
    /// The sample format for JACK audio ports is always "32-bit float mono audio" unless using a custom type.
    /// The sample rate is set by the JACK server.
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        self.default_config()
    }

    /// Builds an input (capture) stream.
    ///
    /// Fails with `StreamConfigNotSupported` when called on an output device,
    /// when `conf.sample_rate` differs from the server's rate, or when a
    /// non-F32 sample format is requested; client-creation failures are
    /// wrapped in `BuildStreamError::BackendSpecific`.
    fn build_input_stream_raw<D, E>(
        &self,
        conf: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        if let DeviceType::OutputDevice = &self.device_type {
            // Trying to create an input stream from an output device
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        if conf.sample_rate != self.sample_rate || sample_format != JACK_SAMPLE_FORMAT {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        // The settings are acceptable; create the real client for the stream.
        let client_options = super::get_client_options(self.start_server_automatically);
        let client = super::get_client(&self.name, client_options).map_err(|description| {
            BuildStreamError::BackendSpecific {
                err: BackendSpecificError { description },
            }
        })?;
        let mut stream = Stream::new_input(client, conf.channels, data_callback, error_callback);
        if self.connect_ports_automatically {
            stream.connect_to_system_inputs();
        }
        Ok(stream)
    }

    /// Builds an output (playback) stream.
    ///
    /// Same validation and error mapping as `build_input_stream_raw`, with
    /// the device-type check inverted.
    fn build_output_stream_raw<D, E>(
        &self,
        conf: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        if let DeviceType::InputDevice = &self.device_type {
            // Trying to create an output stream from an input device
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        if conf.sample_rate != self.sample_rate || sample_format != JACK_SAMPLE_FORMAT {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        // The settings are acceptable; create the real client for the stream.
        let client_options = super::get_client_options(self.start_server_automatically);
        let client = super::get_client(&self.name, client_options).map_err(|description| {
            BuildStreamError::BackendSpecific {
                err: BackendSpecificError { description },
            }
        })?;
        let mut stream = Stream::new_output(client, conf.channels, data_callback, error_callback);
        if self.connect_ports_automatically {
            stream.connect_to_system_outputs();
        }
        Ok(stream)
    }
}
impl PartialEq for Device {
    /// Two devices are equal when their JACK client names match.
    fn eq(&self, other: &Self) -> bool {
        // Compare the stored names directly instead of via `Device::name()`,
        // which clones both strings and unwraps a `Result` that can never
        // fail in this implementation.
        self.name == other.name
    }
}

impl Eq for Device {}
impl Hash for Device {
    /// Hashes the same key that `PartialEq` compares: the JACK client name.
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the field directly; going through `Device::name()` would
        // clone the string and unwrap an infallible `Result`.
        self.name.hash(state);
    }
}

180
vendor/cpal/src/host/jack/mod.rs vendored Normal file
View File

@@ -0,0 +1,180 @@
extern crate jack;
use crate::traits::HostTrait;
use crate::{DevicesError, SampleFormat, SupportedStreamConfigRange};
mod device;
pub use self::device::Device;
pub use self::stream::Stream;
mod stream;
// JACK audio ports carry 32-bit float samples in this implementation.
const JACK_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
/// Iterator over supported input stream configuration ranges.
pub type SupportedInputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
/// Iterator over supported output stream configuration ranges.
pub type SupportedOutputConfigs = std::vec::IntoIter<SupportedStreamConfigRange>;
/// Iterator over the devices this host has created.
pub type Devices = std::vec::IntoIter<Device>;
/// The JACK Host type
///
/// Devices are created eagerly when the host is constructed (see
/// `initialize_default_devices`) and handed out by the `HostTrait` methods.
#[derive(Debug)]
pub struct Host {
    /// The name that the client will have in JACK.
    /// Until we have duplex streams two clients will be created adding "out" or "in" to the name
    /// since names have to be unique.
    name: String,
    /// If ports are to be connected to the system (soundcard) ports automatically (default is true).
    connect_ports_automatically: bool,
    /// If the JACK server should be started automatically if it isn't already when creating a Client (default is false).
    start_server_automatically: bool,
    /// A list of the devices that have been created from this Host.
    devices_created: Vec<Device>,
}
impl Host {
    /// Creates a host with default settings and eagerly creates the default
    /// input and output devices.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        let mut host = Host {
            name: String::from("cpal_client"),
            connect_ports_automatically: true,
            start_server_automatically: false,
            devices_created: Vec::new(),
        };
        // Devices don't exist for JACK; they have to be created up front.
        host.initialize_default_devices();
        Ok(host)
    }

    /// Set whether the ports should automatically be connected to system
    /// (default is true)
    pub fn set_connect_automatically(&mut self, do_connect: bool) {
        self.connect_ports_automatically = do_connect;
    }

    /// Set whether a JACK server should be automatically started if it isn't already.
    /// (default is false)
    pub fn set_start_server_automatically(&mut self, do_start_server: bool) {
        self.start_server_automatically = do_start_server;
    }

    /// Updates the client name, then returns the default input device.
    /// NOTE(review): this only updates `self.name`; the returned device was
    /// created earlier under the previous name — confirm this is intended.
    pub fn input_device_with_name(&mut self, name: &str) -> Option<Device> {
        self.name = name.to_owned();
        self.default_input_device()
    }

    /// Updates the client name, then returns the default output device.
    /// NOTE(review): see `input_device_with_name` about the name change.
    pub fn output_device_with_name(&mut self, name: &str) -> Option<Device> {
        self.name = name.to_owned();
        self.default_output_device()
    }

    /// Creates the default input and output devices, printing (stdout) any
    /// creation failure instead of propagating it.
    fn initialize_default_devices(&mut self) {
        let input = Device::default_input_device(
            &self.name,
            self.connect_ports_automatically,
            self.start_server_automatically,
        );
        match input {
            Ok(device) => self.devices_created.push(device),
            Err(err) => println!("{}", err),
        }
        let output = Device::default_output_device(
            &self.name,
            self.connect_ports_automatically,
            self.start_server_automatically,
        );
        match output {
            Ok(device) => self.devices_created.push(device),
            Err(err) => println!("{}", err),
        }
    }
}
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;

    /// JACK is available if
    /// - the jack feature flag is set
    /// - libjack is installed (wouldn't compile without it)
    /// - the JACK server can be started
    ///
    /// If the code compiles the necessary jack libraries are installed.
    /// There is no way to know if the user has set up a correct JACK configuration e.g. with qjackctl.
    /// Users can choose to automatically start the server if it isn't already started when creating a client
    /// so checking if the server is running could give a false negative in some use cases.
    /// For these reasons this function should always return true.
    fn is_available() -> bool {
        true
    }

    /// Returns an iterator over clones of the devices created by this host.
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Ok(self.devices_created.clone().into_iter())
    }

    /// Returns a clone of the first input device created by this host, if any.
    fn default_input_device(&self) -> Option<Self::Device> {
        self.devices_created.iter().find(|d| d.is_input()).cloned()
    }

    /// Returns a clone of the first output device created by this host, if any.
    fn default_output_device(&self) -> Option<Self::Device> {
        self.devices_created.iter().find(|d| d.is_output()).cloned()
    }
}
/// Builds the JACK client options: `NO_START_SERVER` is set exactly when
/// automatic server start is NOT requested.
fn get_client_options(start_server_automatically: bool) -> jack::ClientOptions {
    let mut client_options = jack::ClientOptions::empty();
    if !start_server_automatically {
        client_options.insert(jack::ClientOptions::NO_START_SERVER);
    }
    client_options
}
/// Opens a JACK client and translates any problematic `ClientStatus` flag
/// into a human-readable error string.
fn get_client(name: &str, client_options: jack::ClientOptions) -> Result<jack::Client, String> {
    let (client, status) = match jack::Client::new(name, client_options) {
        Ok(ok) => ok,
        Err(e) => return Err(format!("Failed to open client because of error: {:?}", e)),
    };
    // The ClientStatus can tell us many things; the checks run in the same
    // order as before, and the first flag that is set wins.
    let status_checks: [(jack::ClientStatus, &str); 7] = [
        (
            jack::ClientStatus::SERVER_ERROR,
            "There was an error communicating with the JACK server!",
        ),
        (
            jack::ClientStatus::SERVER_FAILED,
            "Could not connect to the JACK server!",
        ),
        (
            jack::ClientStatus::VERSION_ERROR,
            "Error connecting to JACK server: Client's protocol version does not match!",
        ),
        (
            jack::ClientStatus::INIT_FAILURE,
            "Error connecting to JACK server: Unable to initialize client!",
        ),
        (
            jack::ClientStatus::SHM_FAILURE,
            "Error connecting to JACK server: Unable to access shared memory!",
        ),
        (
            jack::ClientStatus::NO_SUCH_CLIENT,
            "Error connecting to JACK server: Requested client does not exist!",
        ),
        (
            jack::ClientStatus::INVALID_OPTION,
            "Error connecting to JACK server: The operation contained an invalid or unsupported option!",
        ),
    ];
    for &(flag, message) in status_checks.iter() {
        if status.intersects(flag) {
            return Err(message.to_string());
        }
    }
    Ok(client)
}

463
vendor/cpal/src/host/jack/stream.rs vendored Normal file
View File

@@ -0,0 +1,463 @@
use crate::traits::StreamTrait;
use crate::ChannelCount;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use crate::{
BackendSpecificError, Data, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
PlayStreamError, SampleRate, StreamError,
};
use super::JACK_SAMPLE_FORMAT;
/// Shared, lockable handle to the user's error callback so that both the
/// process handler and the notification handler can report errors.
type ErrorCallbackPtr = Arc<Mutex<dyn FnMut(StreamError) + Send + 'static>>;
/// A running JACK stream: an activated async client plus the shared state
/// used to pause/resume it and to auto-connect its ports.
pub struct Stream {
    // TODO: It might be faster to send a message when playing/pausing than to check this every iteration
    playing: Arc<AtomicBool>,
    // The activated client driving the process/notification handlers.
    async_client: jack::AsyncClient<JackNotificationHandler, LocalProcessHandler>,
    // Port names are stored in order to connect them to other ports in jack automatically
    input_port_names: Vec<String>,
    output_port_names: Vec<String>,
}
impl Stream {
    // TODO: Return error messages
    /// Creates an input (capture) stream: registers `in_0..in_N` ports on
    /// `client` and activates it with the user's callbacks.
    ///
    /// Port-registration failures are reported through `error_callback`; the
    /// stream is still constructed with whichever ports succeeded.
    pub fn new_input<D, E>(
        client: jack::Client,
        channels: ChannelCount,
        data_callback: D,
        mut error_callback: E,
    ) -> Stream
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        let mut ports = vec![];
        let mut port_names: Vec<String> = vec![];
        // Create ports
        for i in 0..channels {
            let port_try = client.register_port(&format!("in_{}", i), jack::AudioIn::default());
            match port_try {
                Ok(port) => {
                    // Get the port name in order to later connect it automatically
                    if let Ok(port_name) = port.name() {
                        port_names.push(port_name);
                    }
                    // Store the port into a Vec to move to the ProcessHandler
                    ports.push(port);
                }
                Err(e) => {
                    // If port creation failed, send the error back via the error_callback
                    error_callback(
                        BackendSpecificError {
                            description: e.to_string(),
                        }
                        .into(),
                    );
                }
            }
        }
        let playing = Arc::new(AtomicBool::new(true));
        let error_callback_ptr = Arc::new(Mutex::new(error_callback)) as ErrorCallbackPtr;
        let input_process_handler = LocalProcessHandler::new(
            vec![],
            ports,
            SampleRate(client.sample_rate() as u32),
            client.buffer_size() as usize,
            Some(Box::new(data_callback)),
            None,
            playing.clone(),
            Arc::clone(&error_callback_ptr),
        );
        let notification_handler = JackNotificationHandler::new(error_callback_ptr);
        // NOTE(review): activation failure panics (see TODO above) — confirm
        // whether this should be surfaced to the caller instead.
        let async_client = client
            .activate_async(notification_handler, input_process_handler)
            .unwrap();
        Stream {
            playing,
            async_client,
            input_port_names: port_names,
            output_port_names: vec![],
        }
    }
    /// Creates an output (playback) stream: registers `out_0..out_N` ports on
    /// `client` and activates it with the user's callbacks.
    ///
    /// Port-registration failures are reported through `error_callback`; the
    /// stream is still constructed with whichever ports succeeded.
    pub fn new_output<D, E>(
        client: jack::Client,
        channels: ChannelCount,
        data_callback: D,
        mut error_callback: E,
    ) -> Stream
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        let mut ports = vec![];
        let mut port_names: Vec<String> = vec![];
        // Create ports
        for i in 0..channels {
            let port_try = client.register_port(&format!("out_{}", i), jack::AudioOut::default());
            match port_try {
                Ok(port) => {
                    // Get the port name in order to later connect it automatically
                    if let Ok(port_name) = port.name() {
                        port_names.push(port_name);
                    }
                    // Store the port into a Vec to move to the ProcessHandler
                    ports.push(port);
                }
                Err(e) => {
                    // If port creation failed, send the error back via the error_callback
                    error_callback(
                        BackendSpecificError {
                            description: e.to_string(),
                        }
                        .into(),
                    );
                }
            }
        }
        let playing = Arc::new(AtomicBool::new(true));
        let error_callback_ptr = Arc::new(Mutex::new(error_callback)) as ErrorCallbackPtr;
        let output_process_handler = LocalProcessHandler::new(
            ports,
            vec![],
            SampleRate(client.sample_rate() as u32),
            client.buffer_size() as usize,
            None,
            Some(Box::new(data_callback)),
            playing.clone(),
            Arc::clone(&error_callback_ptr),
        );
        let notification_handler = JackNotificationHandler::new(error_callback_ptr);
        // NOTE(review): activation failure panics (see TODO above) — confirm
        // whether this should be surfaced to the caller instead.
        let async_client = client
            .activate_async(notification_handler, output_process_handler)
            .unwrap();
        Stream {
            playing,
            async_client,
            input_port_names: vec![],
            output_port_names: port_names,
        }
    }
    /// Connect to the standard system outputs in jack, system:playback_1 and system:playback_2
    /// This has to be done after the client is activated, doing it just after creating the ports doesn't work.
    pub fn connect_to_system_outputs(&mut self) {
        // Get the system ports
        let system_ports = self.async_client.as_client().ports(
            Some("system:playback_.*"),
            None,
            jack::PortFlags::empty(),
        );
        // Connect outputs from this client to the system playback inputs
        for i in 0..self.output_port_names.len() {
            if i >= system_ports.len() {
                break;
            }
            match self
                .async_client
                .as_client()
                .connect_ports_by_name(&self.output_port_names[i], &system_ports[i])
            {
                Ok(_) => (),
                Err(e) => println!("Unable to connect to port with error {}", e),
            }
        }
    }
    /// Connect to the standard system inputs in jack, system:capture_1 and system:capture_2
    /// This has to be done after the client is activated, doing it just after creating the ports doesn't work.
    pub fn connect_to_system_inputs(&mut self) {
        // Get the system ports
        let system_ports = self.async_client.as_client().ports(
            Some("system:capture_.*"),
            None,
            jack::PortFlags::empty(),
        );
        // Connect the system capture outputs to this client's input ports
        for i in 0..self.input_port_names.len() {
            if i >= system_ports.len() {
                break;
            }
            match self
                .async_client
                .as_client()
                .connect_ports_by_name(&system_ports[i], &self.input_port_names[i])
            {
                Ok(_) => (),
                Err(e) => println!("Unable to connect to port with error {}", e),
            }
        }
    }
}
impl StreamTrait for Stream {
    /// Resumes processing: the process callback will run the data callbacks again.
    fn play(&self) -> Result<(), PlayStreamError> {
        self.playing.store(true, Ordering::SeqCst);
        Ok(())
    }
    /// Pauses processing: the process callback becomes a no-op while the
    /// JACK client itself keeps running.
    fn pause(&self) -> Result<(), PauseStreamError> {
        self.playing.store(false, Ordering::SeqCst);
        Ok(())
    }
}
/// The JACK process handler: moves samples between the JACK ports and the
/// user's data callbacks on the real-time audio thread.
struct LocalProcessHandler {
    /// No new ports are allowed to be created after the creation of the LocalProcessHandler as that would invalidate the buffer sizes
    out_ports: Vec<jack::Port<jack::AudioOut>>,
    in_ports: Vec<jack::Port<jack::AudioIn>>,
    // Server sample rate captured at stream creation; used for timestamps.
    sample_rate: SampleRate,
    // Current period size in frames; updated by the `buffer_size` callback.
    buffer_size: usize,
    input_data_callback: Option<Box<dyn FnMut(&Data, &InputCallbackInfo) + Send + 'static>>,
    output_data_callback: Option<Box<dyn FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static>>,
    // JACK audio samples are 32-bit float (unless you do some custom dark magic)
    temp_input_buffer: Vec<f32>,
    temp_output_buffer: Vec<f32>,
    // Shared with `Stream`; checked every cycle to implement play/pause.
    playing: Arc<AtomicBool>,
    // Fallback epoch for timestamps when JACK cycle times are unavailable.
    creation_timestamp: std::time::Instant,
    /// This should not be called on `process`, only on `buffer_size` because it can block.
    error_callback_ptr: ErrorCallbackPtr,
}
impl LocalProcessHandler {
    /// Assembles a process handler for an input OR output stream (exactly one
    /// of the two callbacks is expected to be `Some`).
    fn new(
        out_ports: Vec<jack::Port<jack::AudioOut>>,
        in_ports: Vec<jack::Port<jack::AudioIn>>,
        sample_rate: SampleRate,
        buffer_size: usize,
        input_data_callback: Option<Box<dyn FnMut(&Data, &InputCallbackInfo) + Send + 'static>>,
        output_data_callback: Option<
            Box<dyn FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static>,
        >,
        playing: Arc<AtomicBool>,
        error_callback_ptr: ErrorCallbackPtr,
    ) -> Self {
        LocalProcessHandler {
            // Interleaved scratch buffers sized for one period; the
            // `buffer_size` callback reallocates them if the period changes.
            // (These fields are listed first so the port vectors are still
            // usable when the buffer lengths are computed.)
            temp_input_buffer: vec![0.0; in_ports.len() * buffer_size],
            temp_output_buffer: vec![0.0; out_ports.len() * buffer_size],
            out_ports,
            in_ports,
            sample_rate,
            buffer_size,
            input_data_callback,
            output_data_callback,
            playing,
            creation_timestamp: std::time::Instant::now(),
            error_callback_ptr,
        }
    }
}
/// Wraps the first `total_buffer_size` samples of the scratch buffer in a
/// cpal `Data` so it can be handed to a user callback.
///
/// Panics if `total_buffer_size > temp_input_buffer.len()`.
fn temp_buffer_to_data(temp_input_buffer: &mut Vec<f32>, total_buffer_size: usize) -> Data {
    // Derive the pointer from a *mutable* slice: the original cast
    // `as_ptr() as *mut ()` produced a write-capable pointer from an
    // immutable borrow, which is undefined behavior when written through.
    let slice = &mut temp_input_buffer[0..total_buffer_size];
    let len = slice.len();
    let ptr = slice.as_mut_ptr() as *mut ();
    // SAFETY: `ptr` points to `len` initialized `f32` samples matching
    // JACK_SAMPLE_FORMAT. As in the original code, the returned `Data` is
    // only used within the current process cycle while the buffer is alive.
    unsafe { Data::from_parts(ptr, len, JACK_SAMPLE_FORMAT) }
}
impl jack::ProcessHandler for LocalProcessHandler {
    /// Runs once per JACK period on the real-time audio thread: moves audio
    /// between the JACK ports and the user's callbacks.
    fn process(&mut self, _: &jack::Client, process_scope: &jack::ProcessScope) -> jack::Control {
        if !self.playing.load(Ordering::SeqCst) {
            // Paused: keep the client alive but touch no audio data.
            return jack::Control::Continue;
        }
        // This should be equal to self.buffer_size, but the implementation will
        // work even if it is less. Will panic in `temp_buffer_to_data` if greater.
        let current_frame_count = process_scope.n_frames() as usize;
        // Get timestamp data
        let cycle_times = process_scope.cycle_times();
        let current_start_usecs = match cycle_times {
            Ok(times) => times.current_usecs,
            Err(_) => {
                // jack was unable to get the current time information
                // Fall back to using Instants
                let now = std::time::Instant::now();
                let duration = now.duration_since(self.creation_timestamp);
                duration.as_micros() as u64
            }
        };
        let start_cycle_instant = micros_to_stream_instant(current_start_usecs);
        let start_callback_instant = start_cycle_instant
            .add(frames_to_duration(
                process_scope.frames_since_cycle_start() as usize,
                self.sample_rate,
            ))
            .expect("`playback` occurs beyond representation supported by `StreamInstant`");
        if let Some(input_callback) = &mut self.input_data_callback {
            // Let's get the data from the input ports and run the callback
            let num_in_channels = self.in_ports.len();
            // Read the data from the input ports into the temporary buffer
            // Go through every channel and store its data in the temporary input buffer
            for ch_ix in 0..num_in_channels {
                let input_channel = &self.in_ports[ch_ix].as_slice(process_scope);
                for i in 0..current_frame_count {
                    // Interleave: sample i of channel c lands at c + i * channels.
                    self.temp_input_buffer[ch_ix + i * num_in_channels] = input_channel[i];
                }
            }
            // Create a slice of exactly current_frame_count frames
            let data = temp_buffer_to_data(
                &mut self.temp_input_buffer,
                current_frame_count * num_in_channels,
            );
            // Create timestamp
            let frames_since_cycle_start = process_scope.frames_since_cycle_start() as usize;
            let duration_since_cycle_start =
                frames_to_duration(frames_since_cycle_start, self.sample_rate);
            let callback = start_callback_instant
                .add(duration_since_cycle_start)
                .expect("`playback` occurs beyond representation supported by `StreamInstant`");
            let capture = start_callback_instant;
            let timestamp = crate::InputStreamTimestamp { callback, capture };
            let info = crate::InputCallbackInfo { timestamp };
            input_callback(&data, &info);
        }
        if let Some(output_callback) = &mut self.output_data_callback {
            let num_out_channels = self.out_ports.len();
            // Create a slice of exactly current_frame_count frames
            let mut data = temp_buffer_to_data(
                &mut self.temp_output_buffer,
                current_frame_count * num_out_channels,
            );
            // Create timestamp
            let frames_since_cycle_start = process_scope.frames_since_cycle_start() as usize;
            let duration_since_cycle_start =
                frames_to_duration(frames_since_cycle_start, self.sample_rate);
            let callback = start_callback_instant
                .add(duration_since_cycle_start)
                .expect("`playback` occurs beyond representation supported by `StreamInstant`");
            let buffer_duration = frames_to_duration(current_frame_count, self.sample_rate);
            let playback = start_cycle_instant
                .add(buffer_duration)
                .expect("`playback` occurs beyond representation supported by `StreamInstant`");
            let timestamp = crate::OutputStreamTimestamp { callback, playback };
            let info = crate::OutputCallbackInfo { timestamp };
            output_callback(&mut data, &info);
            // Deinterlace
            for ch_ix in 0..num_out_channels {
                let output_channel = &mut self.out_ports[ch_ix].as_mut_slice(process_scope);
                for i in 0..current_frame_count {
                    output_channel[i] = self.temp_output_buffer[ch_ix + i * num_out_channels];
                }
            }
        }
        // Continue as normal
        jack::Control::Continue
    }

    /// Handles server-side period-size changes by reallocating the scratch buffers.
    fn buffer_size(&mut self, _: &jack::Client, size: jack::Frames) -> jack::Control {
        // The `buffer_size` callback is actually called on the process thread, but
        // it does not need to be suitable for real-time use. Thus we can simply allocate
        // new buffers here. It is also fine to call the error callback.
        // Details: https://github.com/RustAudio/rust-jack/issues/137
        let new_size = size as usize;
        if new_size != self.buffer_size {
            self.buffer_size = new_size;
            self.temp_input_buffer = vec![0.0; self.in_ports.len() * new_size];
            self.temp_output_buffer = vec![0.0; self.out_ports.len() * new_size];
            // Surface the change to the user as a backend-specific error.
            let description = format!("buffer size changed to: {}", new_size);
            if let Ok(mut mutex_guard) = self.error_callback_ptr.lock() {
                let err = &mut *mutex_guard;
                err(BackendSpecificError { description }.into());
            }
        }
        jack::Control::Continue
    }
}
/// Converts a timestamp in microseconds to a `StreamInstant`.
fn micros_to_stream_instant(micros: u64) -> crate::StreamInstant {
    let secs = micros / 1_000_000;
    // Multiply only the sub-second remainder so the conversion cannot
    // overflow `u64` (the previous `micros * 1000` overflowed for very
    // large timestamps). The result is always < 1_000_000_000.
    let subsec_nanos = (micros % 1_000_000) * 1_000;
    crate::StreamInstant::new(secs as i64, subsec_nanos as u32)
}
// Convert the given duration in frames at the given sample rate to a `std::time::Duration`.
fn frames_to_duration(frames: usize, rate: crate::SampleRate) -> std::time::Duration {
    // Split the fractional second count into whole seconds and the
    // remaining (truncated) nanoseconds.
    let elapsed_secs = frames as f64 / rate.0 as f64;
    let whole_secs = elapsed_secs as u64;
    let subsec_nanos = ((elapsed_secs - whole_secs as f64) * 1_000_000_000.0) as u32;
    std::time::Duration::new(whole_secs, subsec_nanos)
}
/// Receives notifications from the JACK server. It is unclear if this may be run concurrent with itself under JACK2 specs
/// so it needs to be Sync.
struct JackNotificationHandler {
    /// Used to report notifications (shutdown, xrun, rate change) as stream errors.
    error_callback_ptr: ErrorCallbackPtr,
    /// Set once the initial `sample_rate` notification (sent at client start)
    /// has been observed; any later notification means the rate truly changed.
    init_sample_rate_flag: Arc<AtomicBool>,
}
impl JackNotificationHandler {
    /// Creates a handler that reports notifications through the shared error callback.
    pub fn new(error_callback_ptr: ErrorCallbackPtr) -> Self {
        let init_sample_rate_flag = Arc::new(AtomicBool::new(false));
        Self {
            error_callback_ptr,
            init_sample_rate_flag,
        }
    }

    /// Forwards `description` to the user's error callback as a backend-specific error.
    fn send_error(&mut self, description: String) {
        // This is not the audio thread, so blocking on the mutex is fine.
        if let Ok(mut guard) = self.error_callback_ptr.lock() {
            let callback = &mut *guard;
            callback(BackendSpecificError { description }.into());
        }
    }
}
impl jack::NotificationHandler for JackNotificationHandler {
    fn shutdown(&mut self, _status: jack::ClientStatus, reason: &str) {
        self.send_error(format!("JACK was shut down for reason: {}", reason));
    }

    fn sample_rate(&mut self, _: &jack::Client, srate: jack::Frames) -> jack::Control {
        if !self.init_sample_rate_flag.load(Ordering::SeqCst) {
            // The very first notification only reports the initial rate;
            // remember we have seen it and keep running.
            self.init_sample_rate_flag.store(true, Ordering::SeqCst);
            jack::Control::Continue
        } else {
            self.send_error(format!("sample rate changed to: {}", srate));
            // Since CPAL currently has no way of signaling a sample rate change
            // to the user, the only safe reaction is to stop the client.
            jack::Control::Quit
        }
    }

    fn xrun(&mut self, _: &jack::Client) -> jack::Control {
        self.send_error(String::from("xrun (buffer over or under run)"));
        jack::Control::Continue
    }
}

30
vendor/cpal/src/host/mod.rs vendored Normal file
View File

@@ -0,0 +1,30 @@
// Backend selection: each platform backend is only compiled in on the targets
// (and with the cargo features) that can actually use it.

// ALSA: default backend on Linux and the BSDs.
#[cfg(any(
    target_os = "linux",
    target_os = "dragonfly",
    target_os = "freebsd",
    target_os = "netbsd"
))]
pub(crate) mod alsa;
// ASIO: opt-in low-latency backend on Windows.
#[cfg(all(windows, feature = "asio"))]
pub(crate) mod asio;
#[cfg(any(target_os = "macos", target_os = "ios"))]
pub(crate) mod coreaudio;
#[cfg(target_os = "emscripten")]
pub(crate) mod emscripten;
// JACK: opt-in backend on Linux/BSD, behind the `jack` feature.
#[cfg(all(
    any(
        target_os = "linux",
        target_os = "dragonfly",
        target_os = "freebsd",
        target_os = "netbsd"
    ),
    feature = "jack"
))]
pub(crate) mod jack;
// The null host is always available as a fallback with no devices.
pub(crate) mod null;
#[cfg(target_os = "android")]
pub(crate) mod oboe;
#[cfg(windows)]
pub(crate) mod wasapi;
#[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))]
pub(crate) mod webaudio;

160
vendor/cpal/src/host/null/mod.rs vendored Normal file
View File

@@ -0,0 +1,160 @@
use std::time::Duration;
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
InputCallbackInfo, OutputCallbackInfo, PauseStreamError, PlayStreamError, SampleFormat,
StreamConfig, StreamError, SupportedStreamConfig, SupportedStreamConfigRange,
SupportedStreamConfigsError,
};
/// Device iterator for the null host; it never yields an item.
#[derive(Default)]
pub struct Devices;
/// Placeholder device type; the null host never actually produces one.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;
/// The null host: a stub backend exposing no devices or streams.
pub struct Host;
/// Placeholder stream type; never constructed by the null host.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Stream;
/// Empty iterator of supported input configurations.
pub struct SupportedInputConfigs;
/// Empty iterator of supported output configurations.
pub struct SupportedOutputConfigs;
impl Host {
    /// The null host is always constructible; it simply exposes nothing.
    #[allow(dead_code)]
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
impl Devices {
    /// Build the (always empty) device iterator.
    pub fn new() -> Result<Self, DevicesError> {
        Ok(Devices)
    }
}
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;
    /// The null device reports a fixed placeholder name.
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok("null".to_owned())
    }
    // The remaining methods are unreachable in practice: this host's
    // `devices()` iterator is empty and its default-device getters return
    // `None`, so callers can never obtain a `Device` to invoke them on.
    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        unimplemented!()
    }
    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        unimplemented!()
    }
    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        unimplemented!()
    }
    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        unimplemented!()
    }
    fn build_input_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        unimplemented!()
    }
    /// Create an output stream.
    fn build_output_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        unimplemented!()
    }
}
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;
    fn is_available() -> bool {
        // The null host is a stub; report it as unavailable.
        false
    }
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }
    fn default_input_device(&self) -> Option<Device> {
        None
    }
    fn default_output_device(&self) -> Option<Device> {
        None
    }
}
impl StreamTrait for Stream {
    // Unreachable in practice: the null host never constructs a `Stream`.
    fn play(&self) -> Result<(), PlayStreamError> {
        unimplemented!()
    }
    fn pause(&self) -> Result<(), PauseStreamError> {
        unimplemented!()
    }
}
impl Iterator for Devices {
    type Item = Device;
    /// The null host has no devices to enumerate.
    #[inline]
    fn next(&mut self) -> Option<Device> {
        None
    }
}
impl Iterator for SupportedInputConfigs {
    type Item = SupportedStreamConfigRange;
    /// No input configurations exist for the null host.
    #[inline]
    fn next(&mut self) -> Option<SupportedStreamConfigRange> {
        None
    }
}
impl Iterator for SupportedOutputConfigs {
    type Item = SupportedStreamConfigRange;
    /// No output configurations exist for the null host.
    #[inline]
    fn next(&mut self) -> Option<SupportedStreamConfigRange> {
        None
    }
}

View File

@@ -0,0 +1,62 @@
use std::sync::Arc;
extern crate jni;
use self::jni::Executor;
use self::jni::{errors::Result as JResult, JNIEnv, JavaVM};
// constants from android.media.AudioFormat
// Signed 16-bit PCM samples.
pub const ENCODING_PCM_16BIT: i32 = 2;
// 32-bit IEEE-float PCM samples.
pub const ENCODING_PCM_FLOAT: i32 = 4;
// Output channel masks (mono / front-left+front-right stereo).
pub const CHANNEL_OUT_MONO: i32 = 4;
pub const CHANNEL_OUT_STEREO: i32 = 12;
/// Run `closure` with a `JNIEnv` attached to the current thread.
fn with_attached<F, R>(closure: F) -> JResult<R>
where
    F: FnOnce(&mut JNIEnv) -> JResult<R>,
{
    let android_context = ndk_context::android_context();
    // SAFETY: the raw pointer comes from the Android context published by
    // `ndk_context`, which is assumed to hold a valid process-wide JavaVM —
    // TODO(review): confirm `JavaVM::from_raw` does not take ownership here.
    let vm = Arc::new(unsafe { JavaVM::from_raw(android_context.vm().cast())? });
    Executor::new(vm).with_attached(|env| closure(env))
}
/// Call `<class>.getMinBufferSize(sampleRate, channelMask, format)` via JNI and
/// return its result. `class` is a JNI-style class path such as
/// `"android/media/AudioTrack"`.
fn get_min_buffer_size(
    class: &'static str,
    sample_rate: i32,
    channel_mask: i32,
    format: i32,
) -> i32 {
    // Unwrapping everything because these operations are not expected to fail
    // or throw exceptions. Android returns negative values for invalid parameters,
    // which is what we expect.
    with_attached(|env| {
        let class = env.find_class(class).unwrap();
        env.call_static_method(
            class,
            "getMinBufferSize",
            "(III)I",
            &[sample_rate.into(), channel_mask.into(), format.into()],
        )
        .unwrap()
        .i()
    })
    .unwrap()
}
/// Minimum playback buffer size reported by `android.media.AudioTrack`
/// (negative values indicate unsupported parameters).
pub fn get_audio_track_min_buffer_size(sample_rate: i32, channel_mask: i32, format: i32) -> i32 {
    get_min_buffer_size("android/media/AudioTrack", sample_rate, channel_mask, format)
}
/// Minimum capture buffer size reported by `android.media.AudioRecord`
/// (negative values indicate unsupported parameters).
pub fn get_audio_record_min_buffer_size(sample_rate: i32, channel_mask: i32, format: i32) -> i32 {
    get_min_buffer_size("android/media/AudioRecord", sample_rate, channel_mask, format)
}

82
vendor/cpal/src/host/oboe/convert.rs vendored Normal file
View File

@@ -0,0 +1,82 @@
use std::convert::TryInto;
use std::time::Duration;
extern crate oboe;
use crate::{
BackendSpecificError, BuildStreamError, PauseStreamError, PlayStreamError, StreamError,
StreamInstant,
};
/// Convert an elapsed `Duration` into a `StreamInstant`.
pub fn to_stream_instant(duration: Duration) -> StreamInstant {
    let whole_secs: i64 = duration.as_secs().try_into().unwrap();
    StreamInstant::new(whole_secs, duration.subsec_nanos())
}
/// Read the stream's current frame timestamp and convert it to a `StreamInstant`.
pub fn stream_instant<T: oboe::AudioStreamSafe + ?Sized>(stream: &mut T) -> StreamInstant {
    // Raw clockid value of Android's CLOCK_MONOTONIC, passed straight to oboe.
    const CLOCK_MONOTONIC: i32 = 1;
    let ts = stream
        .get_timestamp(CLOCK_MONOTONIC)
        // Fall back to the zero timestamp when the stream cannot provide one yet.
        .unwrap_or(oboe::FrameTimestamp {
            position: 0,
            timestamp: 0,
        });
    to_stream_instant(Duration::from_nanos(ts.timestamp as u64))
}
/// Map oboe errors onto CPAL's error enums. "Device gone" conditions share the
/// dedicated `DeviceNotAvailable` variant; everything else is forwarded as a
/// backend-specific error carrying the original message.
impl From<oboe::Error> for StreamError {
    fn from(error: oboe::Error) -> Self {
        use self::oboe::Error::*;
        match error {
            Disconnected | Unavailable | Closed => Self::DeviceNotAvailable,
            other => BackendSpecificError {
                description: other.to_string(),
            }
            .into(),
        }
    }
}
impl From<oboe::Error> for PlayStreamError {
    fn from(error: oboe::Error) -> Self {
        use self::oboe::Error::*;
        match error {
            Disconnected | Unavailable | Closed => Self::DeviceNotAvailable,
            other => BackendSpecificError {
                description: other.to_string(),
            }
            .into(),
        }
    }
}
impl From<oboe::Error> for PauseStreamError {
    fn from(error: oboe::Error) -> Self {
        use self::oboe::Error::*;
        match error {
            Disconnected | Unavailable | Closed => Self::DeviceNotAvailable,
            other => BackendSpecificError {
                description: other.to_string(),
            }
            .into(),
        }
    }
}
impl From<oboe::Error> for BuildStreamError {
    fn from(error: oboe::Error) -> Self {
        use self::oboe::Error::*;
        match error {
            Disconnected | Unavailable | Closed => Self::DeviceNotAvailable,
            NoFreeHandles => Self::StreamIdOverflow,
            InvalidFormat | InvalidRate => Self::StreamConfigNotSupported,
            IllegalArgument => Self::InvalidArgument,
            other => BackendSpecificError {
                description: other.to_string(),
            }
            .into(),
        }
    }
}

View File

@@ -0,0 +1,90 @@
use std::marker::PhantomData;
use std::time::Instant;
extern crate oboe;
use super::convert::{stream_instant, to_stream_instant};
use crate::{Data, InputCallbackInfo, InputStreamTimestamp, SizedSample, StreamError};
/// Adapter that exposes CPAL's boxed input callbacks as an oboe input callback.
pub struct CpalInputCallback<I, C> {
    // User-provided audio-data callback.
    data_cb: Box<dyn FnMut(&Data, &InputCallbackInfo) + Send + 'static>,
    // User-provided error callback.
    error_cb: Box<dyn FnMut(StreamError) + Send + 'static>,
    // Creation time; callback timestamps are measured relative to this instant.
    created: Instant,
    // Carry the sample-type and channel-count type parameters without storing values.
    phantom_channel: PhantomData<C>,
    phantom_input: PhantomData<I>,
}
impl<I, C> CpalInputCallback<I, C> {
    /// Wrap the user's data and error callbacks, recording the creation time so
    /// later callback timestamps can be measured relative to it.
    pub fn new<D, E>(data_cb: D, error_cb: E) -> Self
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        Self {
            created: Instant::now(),
            data_cb: Box::new(data_cb),
            error_cb: Box::new(error_cb),
            phantom_channel: PhantomData,
            phantom_input: PhantomData,
        }
    }

    /// Assemble the callback/capture timestamps for one data-callback invocation.
    fn make_callback_info(
        &self,
        audio_stream: &mut dyn oboe::AudioInputStreamSafe,
    ) -> InputCallbackInfo {
        let timestamp = InputStreamTimestamp {
            callback: to_stream_instant(self.created.elapsed()),
            capture: stream_instant(audio_stream),
        };
        InputCallbackInfo { timestamp }
    }
}
impl<T: SizedSample, C: oboe::IsChannelCount> oboe::AudioInputCallback for CpalInputCallback<T, C>
where
    (T, C): oboe::IsFrameType,
{
    type FrameType = (T, C);
    /// Forward oboe errors (pre-close) to the user's error callback.
    fn on_error_before_close(
        &mut self,
        _audio_stream: &mut dyn oboe::AudioInputStreamSafe,
        error: oboe::Error,
    ) {
        (self.error_cb)(StreamError::from(error))
    }
    /// Forward oboe errors (post-close) to the user's error callback.
    fn on_error_after_close(
        &mut self,
        _audio_stream: &mut dyn oboe::AudioInputStreamSafe,
        error: oboe::Error,
    ) {
        (self.error_cb)(StreamError::from(error))
    }
    /// Hand each captured buffer to the user's data callback, then keep the
    /// stream running.
    fn on_audio_ready(
        &mut self,
        audio_stream: &mut dyn oboe::AudioInputStreamSafe,
        audio_data: &[<<Self as oboe::AudioInputCallback>::FrameType as oboe::IsFrameType>::Type],
    ) -> oboe::DataCallbackResult {
        let cb_info = self.make_callback_info(audio_stream);
        // `audio_data.len()` counts frames; CPAL's `Data` counts samples, so
        // scale by the channel count (oboe only exposes mono or stereo).
        let channel_count = if C::CHANNEL_COUNT == oboe::ChannelCount::Mono {
            1
        } else {
            2
        };
        (self.data_cb)(
            // SAFETY: the pointer/length come from the live `audio_data` slice
            // for the duration of this call, and `T::FORMAT` matches the
            // stream's compile-time sample type. The cast to `*mut` is never
            // written through by the read-only input path — TODO(review):
            // confirm `Data`'s input usage upholds that.
            &unsafe {
                Data::from_parts(
                    audio_data.as_ptr() as *mut _,
                    audio_data.len() * channel_count,
                    T::FORMAT,
                )
            },
            &cb_info,
        );
        oboe::DataCallbackResult::Continue
    }
}

499
vendor/cpal/src/host/oboe/mod.rs vendored Normal file
View File

@@ -0,0 +1,499 @@
use std::cell::RefCell;
use std::cmp;
use std::convert::TryInto;
use std::time::Duration;
use std::vec::IntoIter as VecIntoIter;
extern crate oboe;
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError,
DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
PlayStreamError, SampleFormat, SampleRate, SizedSample, StreamConfig, StreamError,
SupportedBufferSize, SupportedStreamConfig, SupportedStreamConfigRange,
SupportedStreamConfigsError,
};
mod android_media;
mod convert;
mod input_callback;
mod output_callback;
use self::android_media::{get_audio_record_min_buffer_size, get_audio_track_min_buffer_size};
use self::input_callback::CpalInputCallback;
use self::oboe::{AudioInputStream, AudioOutputStream};
use self::output_callback::CpalOutputCallback;
// Android Java API supports up to 8 channels, but oboe API
// only exposes mono and stereo.
const CHANNEL_MASKS: [i32; 2] = [
    android_media::CHANNEL_OUT_MONO,
    android_media::CHANNEL_OUT_STEREO,
];
// Sample rates probed when a device does not report its own supported list.
const SAMPLE_RATES: [i32; 13] = [
    5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200, 96000, 176_400, 192_000,
];
/// The oboe (Android) host.
pub struct Host;
/// An Android audio device; `None` stands for the system default device.
#[derive(Clone)]
pub struct Device(Option<oboe::AudioDeviceInfo>);
/// A running oboe stream, wrapped for interior mutability since the
/// `StreamTrait` methods take `&self`.
pub enum Stream {
    Input(Box<RefCell<dyn AudioInputStream>>),
    Output(Box<RefCell<dyn AudioOutputStream>>),
}
pub type SupportedInputConfigs = VecIntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = VecIntoIter<SupportedStreamConfigRange>;
pub type Devices = VecIntoIter<Device>;
impl Host {
    /// oboe ships with the binary, so host construction always succeeds.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;

    fn is_available() -> bool {
        // oboe is linked into the binary on Android, so it is always usable.
        true
    }

    /// Enumerate the Android devices, falling back to a single "default"
    /// device when enumeration fails.
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        let devices: Vec<Device> =
            match oboe::AudioDeviceInfo::request(oboe::AudioDeviceDirection::InputOutput) {
                Ok(infos) => infos.into_iter().map(|info| Device(Some(info))).collect(),
                Err(_) => vec![Device(None)],
            };
        Ok(devices.into_iter())
    }

    fn default_input_device(&self) -> Option<Self::Device> {
        Some(Device(None))
    }

    fn default_output_device(&self) -> Option<Self::Device> {
        Some(Device(None))
    }
}
/// Ask the Java audio API for the minimum legal buffer size of the given
/// parameter combination; a non-positive result means it is unsupported.
fn buffer_size_range_for_params(
    is_output: bool,
    sample_rate: i32,
    channel_mask: i32,
    android_format: i32,
) -> SupportedBufferSize {
    let min_buffer_size = match is_output {
        true => get_audio_track_min_buffer_size(sample_rate, channel_mask, android_format),
        false => get_audio_record_min_buffer_size(sample_rate, channel_mask, android_format),
    };
    match min_buffer_size {
        min if min > 0 => SupportedBufferSize::Range {
            min: min as u32,
            max: i32::MAX as u32,
        },
        _ => SupportedBufferSize::Unknown,
    }
}
/// Build the supported-config list for the default device by probing every
/// format/channel/rate combination with `getMinBufferSize`.
fn default_supported_configs(is_output: bool) -> VecIntoIter<SupportedStreamConfigRange> {
    // Have to "brute force" the parameter combinations with getMinBufferSize
    const FORMATS: [SampleFormat; 2] = [SampleFormat::I16, SampleFormat::F32];
    let mut output = Vec::with_capacity(SAMPLE_RATES.len() * CHANNEL_MASKS.len() * FORMATS.len());
    for sample_format in &FORMATS {
        let android_format = if *sample_format == SampleFormat::I16 {
            android_media::ENCODING_PCM_16BIT
        } else {
            android_media::ENCODING_PCM_FLOAT
        };
        for (mask_idx, channel_mask) in CHANNEL_MASKS.iter().enumerate() {
            // CHANNEL_MASKS[0] is mono, CHANNEL_MASKS[1] is stereo.
            let channel_count = mask_idx + 1;
            for sample_rate in &SAMPLE_RATES {
                // Only keep combinations the platform reports as valid.
                if let SupportedBufferSize::Range { min, max } = buffer_size_range_for_params(
                    is_output,
                    *sample_rate,
                    *channel_mask,
                    android_format,
                ) {
                    output.push(SupportedStreamConfigRange {
                        channels: channel_count as u16,
                        min_sample_rate: SampleRate(*sample_rate as u32),
                        max_sample_rate: SampleRate(*sample_rate as u32),
                        buffer_size: SupportedBufferSize::Range { min, max },
                        sample_format: *sample_format,
                    });
                }
            }
        }
    }
    output.into_iter()
}
/// Build the supported-config list for a concrete enumerated device, using the
/// device-reported rates/channels/formats and falling back to the static
/// defaults for any list the device leaves empty.
fn device_supported_configs(
    device: &oboe::AudioDeviceInfo,
    is_output: bool,
) -> VecIntoIter<SupportedStreamConfigRange> {
    let sample_rates = if !device.sample_rates.is_empty() {
        device.sample_rates.as_slice()
    } else {
        &SAMPLE_RATES
    };
    const ALL_CHANNELS: [i32; 2] = [1, 2];
    let channel_counts = if !device.channel_counts.is_empty() {
        device.channel_counts.as_slice()
    } else {
        &ALL_CHANNELS
    };
    const ALL_FORMATS: [oboe::AudioFormat; 2] = [oboe::AudioFormat::I16, oboe::AudioFormat::F32];
    let formats = if !device.formats.is_empty() {
        device.formats.as_slice()
    } else {
        &ALL_FORMATS
    };
    let mut output = Vec::with_capacity(sample_rates.len() * channel_counts.len() * formats.len());
    for sample_rate in sample_rates {
        for channel_count in channel_counts {
            assert!(*channel_count > 0);
            if *channel_count > 2 {
                // could be supported by the device, but oboe does not support more than 2 channels
                continue;
            }
            let channel_mask = CHANNEL_MASKS[*channel_count as usize - 1];
            for format in formats {
                let (android_format, sample_format) = match format {
                    oboe::AudioFormat::I16 => {
                        (android_media::ENCODING_PCM_16BIT, SampleFormat::I16)
                    }
                    oboe::AudioFormat::F32 => {
                        (android_media::ENCODING_PCM_FLOAT, SampleFormat::F32)
                    }
                    _ => panic!("Unexpected format"),
                };
                // Unlike the default-device path, keep the entry even when the
                // buffer-size probe fails; it is recorded as `Unknown`.
                let buffer_size = buffer_size_range_for_params(
                    is_output,
                    *sample_rate,
                    channel_mask,
                    android_format,
                );
                output.push(SupportedStreamConfigRange {
                    channels: cmp::min(*channel_count as u16, 2u16),
                    min_sample_rate: SampleRate(*sample_rate as u32),
                    max_sample_rate: SampleRate(*sample_rate as u32),
                    buffer_size,
                    sample_format,
                });
            }
        }
    }
    output.into_iter()
}
/// Apply the device id, sample rate and (optional) fixed buffer capacity from
/// `config` to an oboe stream builder.
fn configure_for_device<D, C, I>(
    builder: oboe::AudioStreamBuilder<D, C, I>,
    device: &Device,
    config: &StreamConfig,
) -> oboe::AudioStreamBuilder<D, C, I> {
    // Target the enumerated device id when one exists; otherwise let oboe
    // pick the system default.
    let builder = match &device.0 {
        Some(info) => builder.set_device_id(info.id),
        None => builder,
    };
    let builder = builder.set_sample_rate(config.sample_rate.0.try_into().unwrap());
    match &config.buffer_size {
        BufferSize::Default => builder,
        BufferSize::Fixed(size) => builder.set_buffer_capacity_in_frames(*size as i32),
    }
}
/// Finish configuring `builder`, attach the CPAL input-callback adapter and
/// open the capture stream.
fn build_input_stream<D, E, C, T>(
    device: &Device,
    config: &StreamConfig,
    data_callback: D,
    error_callback: E,
    builder: oboe::AudioStreamBuilder<oboe::Input, C, T>,
) -> Result<Stream, BuildStreamError>
where
    T: SizedSample + oboe::IsFormat + Send + 'static,
    C: oboe::IsChannelCount + Send + 'static,
    (T, C): oboe::IsFrameType,
    D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
    E: FnMut(StreamError) + Send + 'static,
{
    let builder = configure_for_device(builder, device, config);
    // `?` converts an oboe open failure via `From<oboe::Error> for BuildStreamError`.
    let stream = builder
        .set_callback(CpalInputCallback::<T, C>::new(
            data_callback,
            error_callback,
        ))
        .open_stream()?;
    Ok(Stream::Input(Box::new(RefCell::new(stream))))
}
/// Finish configuring `builder`, attach the CPAL output-callback adapter and
/// open the playback stream.
fn build_output_stream<D, E, C, T>(
    device: &Device,
    config: &StreamConfig,
    data_callback: D,
    error_callback: E,
    builder: oboe::AudioStreamBuilder<oboe::Output, C, T>,
) -> Result<Stream, BuildStreamError>
where
    T: SizedSample + oboe::IsFormat + Send + 'static,
    C: oboe::IsChannelCount + Send + 'static,
    (T, C): oboe::IsFrameType,
    D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
    E: FnMut(StreamError) + Send + 'static,
{
    let builder = configure_for_device(builder, device, config);
    // `?` converts an oboe open failure via `From<oboe::Error> for BuildStreamError`.
    let stream = builder
        .set_callback(CpalOutputCallback::<T, C>::new(
            data_callback,
            error_callback,
        ))
        .open_stream()?;
    Ok(Stream::Output(Box::new(RefCell::new(stream))))
}
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;
    /// Device product name, or "default" for the unenumerated default device.
    fn name(&self) -> Result<String, DeviceNameError> {
        match &self.0 {
            None => Ok("default".to_owned()),
            Some(info) => Ok(info.product_name.clone()),
        }
    }
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        if let Some(info) = &self.0 {
            Ok(device_supported_configs(info, false))
        } else {
            Ok(default_supported_configs(false))
        }
    }
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        if let Some(info) = &self.0 {
            Ok(device_supported_configs(info, true))
        } else {
            Ok(default_supported_configs(true))
        }
    }
    /// Pick the "best" input config by CPAL's default heuristics, at its
    /// maximum sample rate.
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        let mut configs: Vec<_> = self.supported_input_configs().unwrap().collect();
        // Sort descending so the heuristically-best range comes first.
        configs.sort_by(|a, b| b.cmp_default_heuristics(a));
        let config = configs
            .into_iter()
            .next()
            .ok_or(DefaultStreamConfigError::StreamTypeNotSupported)?
            .with_max_sample_rate();
        Ok(config)
    }
    /// Pick the "best" output config by CPAL's default heuristics, at its
    /// maximum sample rate.
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        let mut configs: Vec<_> = self.supported_output_configs().unwrap().collect();
        // Sort descending so the heuristically-best range comes first.
        configs.sort_by(|a, b| b.cmp_default_heuristics(a));
        let config = configs
            .into_iter()
            .next()
            .ok_or(DefaultStreamConfigError::StreamTypeNotSupported)?
            .with_max_sample_rate();
        Ok(config)
    }
    // The sample format and channel count are compile-time type parameters of
    // oboe's builder, so each runtime combination must be dispatched to a
    // separately monomorphised `build_input_stream` call.
    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        match sample_format {
            SampleFormat::I16 => {
                let builder = oboe::AudioStreamBuilder::default()
                    .set_input()
                    .set_format::<i16>();
                if config.channels == 1 {
                    build_input_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_mono(),
                    )
                } else if config.channels == 2 {
                    build_input_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_stereo(),
                    )
                } else {
                    Err(BackendSpecificError {
                        description: "More than 2 channels are not supported by Oboe.".to_owned(),
                    }
                    .into())
                }
            }
            SampleFormat::F32 => {
                let builder = oboe::AudioStreamBuilder::default()
                    .set_input()
                    .set_format::<f32>();
                if config.channels == 1 {
                    build_input_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_mono(),
                    )
                } else if config.channels == 2 {
                    build_input_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_stereo(),
                    )
                } else {
                    Err(BackendSpecificError {
                        description: "More than 2 channels are not supported by Oboe.".to_owned(),
                    }
                    .into())
                }
            }
            sample_format => Err(BackendSpecificError {
                description: format!("{} format is not supported on Android.", sample_format),
            }
            .into()),
        }
    }
    // Mirrors `build_input_stream_raw`; see the dispatch note above.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        match sample_format {
            SampleFormat::I16 => {
                let builder = oboe::AudioStreamBuilder::default()
                    .set_output()
                    .set_format::<i16>();
                if config.channels == 1 {
                    build_output_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_mono(),
                    )
                } else if config.channels == 2 {
                    build_output_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_stereo(),
                    )
                } else {
                    Err(BackendSpecificError {
                        description: "More than 2 channels are not supported by Oboe.".to_owned(),
                    }
                    .into())
                }
            }
            SampleFormat::F32 => {
                let builder = oboe::AudioStreamBuilder::default()
                    .set_output()
                    .set_format::<f32>();
                if config.channels == 1 {
                    build_output_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_mono(),
                    )
                } else if config.channels == 2 {
                    build_output_stream(
                        self,
                        config,
                        data_callback,
                        error_callback,
                        builder.set_stereo(),
                    )
                } else {
                    Err(BackendSpecificError {
                        description: "More than 2 channels are not supported by Oboe.".to_owned(),
                    }
                    .into())
                }
            }
            sample_format => Err(BackendSpecificError {
                description: format!("{} format is not supported on Android.", sample_format),
            }
            .into()),
        }
    }
}
impl StreamTrait for Stream {
    /// Start (or resume) the underlying oboe stream.
    fn play(&self) -> Result<(), PlayStreamError> {
        let result = match self {
            Self::Input(stream) => stream.borrow_mut().request_start(),
            Self::Output(stream) => stream.borrow_mut().request_start(),
        };
        result.map_err(PlayStreamError::from)
    }

    /// Pause the stream. Only output streams can be paused by oboe; pausing an
    /// input stream is reported as a backend-specific error.
    fn pause(&self) -> Result<(), PauseStreamError> {
        match self {
            Self::Output(stream) => stream
                .borrow_mut()
                .request_pause()
                .map_err(PauseStreamError::from),
            Self::Input(_) => Err(BackendSpecificError {
                description: "Pause called on the input stream.".to_owned(),
            }
            .into()),
        }
    }
}

View File

@@ -0,0 +1,90 @@
use std::marker::PhantomData;
use std::time::Instant;
extern crate oboe;
use super::convert::{stream_instant, to_stream_instant};
use crate::{Data, OutputCallbackInfo, OutputStreamTimestamp, SizedSample, StreamError};
/// Adapter that exposes CPAL's boxed output callbacks as an oboe output callback.
pub struct CpalOutputCallback<I, C> {
    // User-provided audio-data callback.
    data_cb: Box<dyn FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static>,
    // User-provided error callback.
    error_cb: Box<dyn FnMut(StreamError) + Send + 'static>,
    // Creation time; callback timestamps are measured relative to this instant.
    created: Instant,
    // Carry the sample-type and channel-count type parameters without storing values.
    phantom_channel: PhantomData<C>,
    phantom_input: PhantomData<I>,
}
impl<I, C> CpalOutputCallback<I, C> {
    /// Wrap the user's data and error callbacks, recording the creation time so
    /// later callback timestamps can be measured relative to it.
    pub fn new<D, E>(data_cb: D, error_cb: E) -> Self
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        Self {
            created: Instant::now(),
            data_cb: Box::new(data_cb),
            error_cb: Box::new(error_cb),
            phantom_channel: PhantomData,
            phantom_input: PhantomData,
        }
    }

    /// Assemble the callback/playback timestamps for one data-callback invocation.
    fn make_callback_info(
        &self,
        audio_stream: &mut dyn oboe::AudioOutputStreamSafe,
    ) -> OutputCallbackInfo {
        let timestamp = OutputStreamTimestamp {
            callback: to_stream_instant(self.created.elapsed()),
            playback: stream_instant(audio_stream),
        };
        OutputCallbackInfo { timestamp }
    }
}
impl<T: SizedSample, C: oboe::IsChannelCount> oboe::AudioOutputCallback for CpalOutputCallback<T, C>
where
    (T, C): oboe::IsFrameType,
{
    type FrameType = (T, C);
    /// Forward oboe errors (pre-close) to the user's error callback.
    fn on_error_before_close(
        &mut self,
        _audio_stream: &mut dyn oboe::AudioOutputStreamSafe,
        error: oboe::Error,
    ) {
        (self.error_cb)(StreamError::from(error))
    }
    /// Forward oboe errors (post-close) to the user's error callback.
    fn on_error_after_close(
        &mut self,
        _audio_stream: &mut dyn oboe::AudioOutputStreamSafe,
        error: oboe::Error,
    ) {
        (self.error_cb)(StreamError::from(error))
    }
    /// Let the user's data callback fill each playback buffer, then keep the
    /// stream running.
    fn on_audio_ready(
        &mut self,
        audio_stream: &mut dyn oboe::AudioOutputStreamSafe,
        audio_data: &mut [<<Self as oboe::AudioOutputCallback>::FrameType as oboe::IsFrameType>::Type],
    ) -> oboe::DataCallbackResult {
        let cb_info = self.make_callback_info(audio_stream);
        // `audio_data.len()` counts frames; CPAL's `Data` counts samples, so
        // scale by the channel count (oboe only exposes mono or stereo).
        let channel_count = if C::CHANNEL_COUNT == oboe::ChannelCount::Mono {
            1
        } else {
            2
        };
        (self.data_cb)(
            // SAFETY: the pointer/length come from the live mutable
            // `audio_data` slice for the duration of this call, and
            // `T::FORMAT` matches the stream's compile-time sample type.
            &mut unsafe {
                Data::from_parts(
                    audio_data.as_mut_ptr() as *mut _,
                    audio_data.len() * channel_count,
                    T::FORMAT,
                )
            },
            &cb_info,
        );
        oboe::DataCallbackResult::Continue
    }
}

57
vendor/cpal/src/host/wasapi/com.rs vendored Normal file
View File

@@ -0,0 +1,57 @@
//! Handles COM initialization and cleanup.
use super::IoError;
use std::marker::PhantomData;
use windows::Win32::Foundation::RPC_E_CHANGED_MODE;
use windows::Win32::System::Com::{CoInitializeEx, CoUninitialize, COINIT_APARTMENTTHREADED};
// Per-thread COM initialization guard; lazily created the first time a thread
// touches COM via `com_initialized()`.
thread_local!(static COM_INITIALIZED: ComInitialized = {
    unsafe {
        // Try to initialize COM with STA by default to avoid compatibility issues with the ASIO
        // backend (where CoInitialize() is called by the ASIO SDK) or winit (where drag and drop
        // requires STA).
        // This call can fail with RPC_E_CHANGED_MODE if another library initialized COM with MTA.
        // That's OK though since COM ensures thread-safety/compatibility through marshalling when
        // necessary.
        let result = CoInitializeEx(None, COINIT_APARTMENTTHREADED);
        if result.is_ok() || result == RPC_E_CHANGED_MODE {
            ComInitialized {
                result,
                _ptr: PhantomData,
            }
        } else {
            // COM initialization failed in another way, something is really wrong.
            panic!(
                "Failed to initialize COM: {}",
                IoError::from_raw_os_error(result.0)
            );
        }
    }
});
/// RAII object that guards the fact that COM is initialized.
///
// `PhantomData<*mut ()>` removes `Send`/`Sync` from this type: COM apartment
// initialization is per-thread, so the guard must never migrate between threads.
struct ComInitialized {
    // HRESULT returned by `CoInitializeEx`; consulted in `Drop` to decide
    // whether `CoUninitialize` must be called.
    result: windows::core::HRESULT,
    _ptr: PhantomData<*mut ()>,
}
impl Drop for ComInitialized {
    #[inline]
    fn drop(&mut self) {
        // Need to avoid calling CoUninitialize() if CoInitializeEx failed since it may have
        // returned RPC_E_CHANGED_MODE - which is OK, see above.
        if self.result.is_ok() {
            unsafe { CoUninitialize() };
        }
    }
}
/// Ensures that COM is initialized in this thread.
///
/// Idempotent per thread: the first call performs the initialization and later
/// calls merely touch the thread-local guard.
#[inline]
pub fn com_initialized() {
    COM_INITIALIZED.with(|_| {});
}

1021
vendor/cpal/src/host/wasapi/device.rs vendored Normal file

File diff suppressed because it is too large Load Diff

104
vendor/cpal/src/host/wasapi/mod.rs vendored Normal file
View File

@@ -0,0 +1,104 @@
pub use self::device::{
default_input_device, default_output_device, Device, Devices, SupportedInputConfigs,
SupportedOutputConfigs,
};
pub use self::stream::Stream;
use crate::traits::HostTrait;
use crate::BackendSpecificError;
use crate::DevicesError;
use std::io::Error as IoError;
use windows::Win32::Media::Audio;
mod com;
mod device;
mod stream;
/// The WASAPI host, the default windows host type.
///
/// Note: If you use a WASAPI output device as an input device it will
/// transparently enable loopback mode (see
/// https://docs.microsoft.com/en-us/windows/win32/coreaudio/loopback-recording).
#[derive(Debug)]
pub struct Host;
impl Host {
    /// WASAPI ships with Windows, so construction always succeeds.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;
    fn is_available() -> bool {
        // Assume WASAPI is always available on Windows.
        true
    }
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }
    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }
    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
impl From<windows::core::Error> for BackendSpecificError {
    /// Wrap a raw Windows API error, routing it through `std::io::Error` to
    /// obtain the OS-level message text.
    fn from(error: windows::core::Error) -> Self {
        Self {
            description: IoError::from(error).to_string(),
        }
    }
}
// Lets `windows_err_to_cpal_err` map the "device invalidated" HRESULT onto
// each CPAL error type's dedicated variant.
trait ErrDeviceNotAvailable: From<BackendSpecificError> {
    fn device_not_available() -> Self;
}
impl ErrDeviceNotAvailable for crate::BuildStreamError {
    fn device_not_available() -> Self {
        Self::DeviceNotAvailable
    }
}
impl ErrDeviceNotAvailable for crate::SupportedStreamConfigsError {
    fn device_not_available() -> Self {
        Self::DeviceNotAvailable
    }
}
impl ErrDeviceNotAvailable for crate::DefaultStreamConfigError {
    fn device_not_available() -> Self {
        Self::DeviceNotAvailable
    }
}
impl ErrDeviceNotAvailable for crate::StreamError {
    fn device_not_available() -> Self {
        Self::DeviceNotAvailable
    }
}
/// Convert a Windows error into a CPAL error with no extra message prefix.
fn windows_err_to_cpal_err<E: ErrDeviceNotAvailable>(e: windows::core::Error) -> E {
    windows_err_to_cpal_err_message::<E>(e, "")
}

/// Convert a Windows error into a CPAL error, prefixing the backend-specific
/// description with `message`. The "device invalidated" HRESULT becomes the
/// error type's `DeviceNotAvailable` variant.
fn windows_err_to_cpal_err_message<E: ErrDeviceNotAvailable>(
    e: windows::core::Error,
    message: &str,
) -> E {
    if e.code() == Audio::AUDCLNT_E_DEVICE_INVALIDATED {
        E::device_not_available()
    } else {
        BackendSpecificError {
            description: format!("{}{}", message, e),
        }
        .into()
    }
}

559
vendor/cpal/src/host/wasapi/stream.rs vendored Normal file
View File

@@ -0,0 +1,559 @@
use super::windows_err_to_cpal_err;
use crate::traits::StreamTrait;
use crate::{
BackendSpecificError, Data, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
PlayStreamError, SampleFormat, StreamError,
};
use std::mem;
use std::ptr;
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
use std::thread::{self, JoinHandle};
use windows::Win32::Foundation;
use windows::Win32::Foundation::HANDLE;
use windows::Win32::Foundation::WAIT_OBJECT_0;
use windows::Win32::Media::Audio;
use windows::Win32::System::SystemServices;
use windows::Win32::System::Threading;
/// Handle to a running WASAPI stream; the actual audio work happens on a
/// dedicated worker thread that this struct commands over a channel.
pub struct Stream {
    /// The high-priority audio processing thread calling callbacks.
    /// Option used for moving out in destructor.
    ///
    /// TODO: Actually set the thread priority.
    thread: Option<JoinHandle<()>>,
    // Commands processed by the `run()` method that is currently running.
    // `pending_scheduled_event` must be signalled whenever a command is added here, so that it
    // will get picked up.
    commands: Sender<Command>,
    // This event is signalled after a new entry is added to `commands`, so that the `run()`
    // method can be notified.
    pending_scheduled_event: Foundation::HANDLE,
}
/// Everything the audio worker thread needs: the stream state, the wait
/// handles, and the command channel receiver.
struct RunContext {
    // Streams that have been created in this event loop.
    stream: StreamInner,
    // Handles corresponding to the `event` field of each element of `voices`. Must always be in
    // sync with `voices`, except that the first element is always `pending_scheduled_event`.
    handles: Vec<Foundation::HANDLE>,
    commands: Receiver<Command>,
}
// SAFETY: once we start running the event loop, the RunContext will not be
// moved; it is transferred to the worker thread exactly once at spawn time.
unsafe impl Send for RunContext {}
/// Commands delivered to the worker thread via the `commands` channel.
pub enum Command {
    PlayStream,
    PauseStream,
    Terminate,
}
/// Direction-specific WASAPI service client for a stream.
pub enum AudioClientFlow {
    Render {
        render_client: Audio::IAudioRenderClient,
    },
    Capture {
        capture_client: Audio::IAudioCaptureClient,
    },
}
/// Per-stream WASAPI state owned by the audio worker thread.
pub struct StreamInner {
    pub audio_client: Audio::IAudioClient,
    pub audio_clock: Audio::IAudioClock,
    // Render or capture client, depending on the stream direction.
    pub client_flow: AudioClientFlow,
    // Event that is signalled by WASAPI whenever audio data must be written.
    pub event: Foundation::HANDLE,
    // True if the stream is currently playing. False if paused.
    pub playing: bool,
    // Number of frames of audio data in the underlying buffer allocated by WASAPI.
    pub max_frames_in_buffer: u32,
    // Number of bytes that each frame occupies.
    pub bytes_per_frame: u16,
    // The configuration with which the stream was created.
    pub config: crate::StreamConfig,
    // The sample format with which the stream was created.
    pub sample_format: SampleFormat,
}
impl Stream {
    /// Spawn the capture worker thread for `stream_inner` and return the handle
    /// used to command it.
    pub(crate) fn new_input<D, E>(
        stream_inner: StreamInner,
        mut data_callback: D,
        mut error_callback: E,
    ) -> Stream
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // Auto-reset, initially unsignalled event used to wake the worker when
        // a command is queued.
        let pending_scheduled_event = unsafe {
            Threading::CreateEventA(None, false, false, windows::core::PCSTR(ptr::null()))
        }
        .expect("cpal: could not create input stream event");
        let (tx, rx) = channel();
        let run_context = RunContext {
            // Index 0 must be the command event; see `RunContext::handles`.
            handles: vec![pending_scheduled_event, stream_inner.event],
            stream: stream_inner,
            commands: rx,
        };
        let thread = thread::Builder::new()
            .name("cpal_wasapi_in".to_owned())
            .spawn(move || run_input(run_context, &mut data_callback, &mut error_callback))
            .unwrap();
        Stream {
            thread: Some(thread),
            commands: tx,
            pending_scheduled_event,
        }
    }
    /// Spawn the render worker thread for `stream_inner` and return the handle
    /// used to command it.
    pub(crate) fn new_output<D, E>(
        stream_inner: StreamInner,
        mut data_callback: D,
        mut error_callback: E,
    ) -> Stream
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // Auto-reset, initially unsignalled event used to wake the worker when
        // a command is queued.
        let pending_scheduled_event = unsafe {
            Threading::CreateEventA(None, false, false, windows::core::PCSTR(ptr::null()))
        }
        .expect("cpal: could not create output stream event")
        let (tx, rx) = channel();
        let run_context = RunContext {
            // Index 0 must be the command event; see `RunContext::handles`.
            handles: vec![pending_scheduled_event, stream_inner.event],
            stream: stream_inner,
            commands: rx,
        };
        let thread = thread::Builder::new()
            .name("cpal_wasapi_out".to_owned())
            .spawn(move || run_output(run_context, &mut data_callback, &mut error_callback))
            .unwrap();
        Stream {
            thread: Some(thread),
            commands: tx,
            pending_scheduled_event,
        }
    }
    /// Queue a command for the worker thread and signal its wake-up event.
    #[inline]
    fn push_command(&self, command: Command) -> Result<(), SendError<Command>> {
        self.commands.send(command)?;
        unsafe {
            Threading::SetEvent(self.pending_scheduled_event).unwrap();
        }
        Ok(())
    }
}
impl Drop for Stream {
    /// Shut down the worker thread and release the wake-up event handle.
    #[inline]
    fn drop(&mut self) {
        // Ask the worker thread to terminate and wait for it. If the send
        // fails, the receiving end was dropped, i.e. the worker has already
        // exited, so there is no thread left to join.
        if self.push_command(Command::Terminate).is_ok() {
            self.thread.take().unwrap().join().unwrap();
        }
        // Close the event handle on every path; closing only on the
        // successful-send path would leak it when the worker died early.
        unsafe {
            let _ = Foundation::CloseHandle(self.pending_scheduled_event);
        }
    }
}
impl StreamTrait for Stream {
fn play(&self) -> Result<(), PlayStreamError> {
self.push_command(Command::PlayStream)
.map_err(|_| crate::error::PlayStreamError::DeviceNotAvailable)?;
Ok(())
}
fn pause(&self) -> Result<(), PauseStreamError> {
self.push_command(Command::PauseStream)
.map_err(|_| crate::error::PauseStreamError::DeviceNotAvailable)?;
Ok(())
}
}
impl Drop for StreamInner {
    /// Close the WASAPI data-ready event handle to avoid leaking it.
    #[inline]
    fn drop(&mut self) {
        unsafe {
            let _ = Foundation::CloseHandle(self.event);
        }
    }
}
// Process any pending commands that are queued within the `RunContext`.
// Returns `true` if the loop should continue running, `false` if it should terminate.
fn process_commands(run_context: &mut RunContext) -> Result<bool, StreamError> {
    // Drain every command currently queued, without blocking.
    while let Ok(command) = run_context.commands.try_recv() {
        match command {
            Command::PlayStream => unsafe {
                // Only act on an actual state transition.
                if !run_context.stream.playing {
                    run_context
                        .stream
                        .audio_client
                        .Start()
                        .map_err(windows_err_to_cpal_err::<StreamError>)?;
                    run_context.stream.playing = true;
                }
            },
            Command::PauseStream => unsafe {
                if run_context.stream.playing {
                    run_context
                        .stream
                        .audio_client
                        .Stop()
                        .map_err(windows_err_to_cpal_err::<StreamError>)?;
                    run_context.stream.playing = false;
                }
            },
            Command::Terminate => return Ok(false),
        }
    }
    Ok(true)
}
// Wait for any of the given handles to be signalled.
//
// Returns the index of the `handle` that was signalled, or an `Err` if
// `WaitForMultipleObjectsEx` fails.
//
// This is called when the `run` thread is ready to wait for the next event. The
// next event might be some command submitted by the user (the first handle) or
// might indicate that one of the streams is ready to deliver or receive audio.
fn wait_for_handle_signal(handles: &[Foundation::HANDLE]) -> Result<usize, BackendSpecificError> {
    // The API cannot wait on more than `MAXIMUM_WAIT_OBJECTS` handles at once.
    debug_assert!(handles.len() <= SystemServices::MAXIMUM_WAIT_OBJECTS as usize);
    let result = unsafe {
        Threading::WaitForMultipleObjectsEx(
            handles,
            false,               // Don't wait for all, just wait for the first
            Threading::INFINITE, // TODO: allow setting a timeout
            false,               // irrelevant parameter here
        )
    };
    if result == Foundation::WAIT_FAILED {
        let err = unsafe { Foundation::GetLastError() };
        // Fixed: the message previously contained an unbalanced backtick.
        let description = format!("`WaitForMultipleObjectsEx` failed: {:?}", err);
        let err = BackendSpecificError { description };
        return Err(err);
    }
    // The return value is `WAIT_OBJECT_0 + index`; recover the index of the
    // handle that was signalled.
    let handle_idx = (result.0 - WAIT_OBJECT_0.0) as usize;
    Ok(handle_idx)
}
// Get the number of available frames that are available for writing/reading.
//
// Computed as the buffer capacity minus the frames still queued (padding).
fn get_available_frames(stream: &StreamInner) -> Result<u32, StreamError> {
    let padding = unsafe { stream.audio_client.GetCurrentPadding() }
        .map_err(windows_err_to_cpal_err::<StreamError>)?;
    Ok(stream.max_frames_in_buffer - padding)
}
// Entry point of the worker thread for an input (capture) stream.
//
// Alternates between processing user commands and draining captured packets
// until a `Terminate` command or an unrecoverable error breaks the loop.
fn run_input(
    mut run_ctxt: RunContext,
    data_callback: &mut dyn FnMut(&Data, &InputCallbackInfo),
    error_callback: &mut dyn FnMut(StreamError),
) {
    boost_current_thread_priority();
    loop {
        // Handle pending commands, then wait for WASAPI to signal data readiness.
        match process_commands_and_await_signal(&mut run_ctxt, error_callback) {
            Some(ControlFlow::Break) => break,
            Some(ControlFlow::Continue) => continue,
            None => (),
        }
        let capture_client = match run_ctxt.stream.client_flow {
            AudioClientFlow::Capture { ref capture_client } => capture_client.clone(),
            // `run_input` is only ever spawned for capture streams.
            _ => unreachable!(),
        };
        match process_input(
            &run_ctxt.stream,
            capture_client,
            data_callback,
            error_callback,
        ) {
            ControlFlow::Break => break,
            ControlFlow::Continue => continue,
        }
    }
}
// Entry point of the worker thread for an output (render) stream.
//
// Same structure as `run_input`, but fills the render buffer instead of
// draining capture packets.
fn run_output(
    mut run_ctxt: RunContext,
    data_callback: &mut dyn FnMut(&mut Data, &OutputCallbackInfo),
    error_callback: &mut dyn FnMut(StreamError),
) {
    boost_current_thread_priority();
    loop {
        // Handle pending commands, then wait for WASAPI to request more data.
        match process_commands_and_await_signal(&mut run_ctxt, error_callback) {
            Some(ControlFlow::Break) => break,
            Some(ControlFlow::Continue) => continue,
            None => (),
        }
        let render_client = match run_ctxt.stream.client_flow {
            AudioClientFlow::Render { ref render_client } => render_client.clone(),
            // `run_output` is only ever spawned for render streams.
            _ => unreachable!(),
        };
        match process_output(
            &run_ctxt.stream,
            render_client,
            data_callback,
            error_callback,
        ) {
            ControlFlow::Break => break,
            ControlFlow::Continue => continue,
        }
    }
}
fn boost_current_thread_priority() {
unsafe {
let thread_id = Threading::GetCurrentThreadId();
let _ = Threading::SetThreadPriority(
HANDLE(thread_id as isize),
Threading::THREAD_PRIORITY_TIME_CRITICAL,
);
}
}
// Tells the worker-thread loop whether to terminate (`Break`) or restart the
// current iteration (`Continue`).
enum ControlFlow {
    Break,
    Continue,
}
// Process queued commands, then block until one of the `RunContext` handles is
// signalled.
//
// Returns `Some(Break)` to terminate the loop, `Some(Continue)` when only the
// command event fired (no audio work to do), and `None` when a stream event
// fired and audio data should be processed.
fn process_commands_and_await_signal(
    run_context: &mut RunContext,
    error_callback: &mut dyn FnMut(StreamError),
) -> Option<ControlFlow> {
    // Process queued commands.
    match process_commands(run_context) {
        Ok(true) => (),
        Ok(false) => return Some(ControlFlow::Break),
        Err(err) => {
            error_callback(err);
            return Some(ControlFlow::Break);
        }
    };
    // Wait for any of the handles to be signalled.
    let handle_idx = match wait_for_handle_signal(&run_context.handles) {
        Ok(idx) => idx,
        Err(err) => {
            error_callback(err.into());
            return Some(ControlFlow::Break);
        }
    };
    // If `handle_idx` is 0, then it's `pending_scheduled_event` that was signalled in
    // order for us to pick up the pending commands. Otherwise, a stream needs data.
    if handle_idx == 0 {
        return Some(ControlFlow::Continue);
    }
    None
}
// The loop for processing pending input data.
//
// Drains every packet currently available from the capture client, invoking
// `data_callback` once per packet. Returns `Continue` when no packets remain
// and `Break` after reporting any WASAPI error via `error_callback`.
fn process_input(
    stream: &StreamInner,
    capture_client: Audio::IAudioCaptureClient,
    data_callback: &mut dyn FnMut(&Data, &InputCallbackInfo),
    error_callback: &mut dyn FnMut(StreamError),
) -> ControlFlow {
    unsafe {
        // Get the available data in the shared buffer.
        let mut buffer: *mut u8 = ptr::null_mut();
        let mut flags = mem::MaybeUninit::uninit();
        loop {
            // Zero frames means no packet is ready: yield back to the event loop.
            let mut frames_available = match capture_client.GetNextPacketSize() {
                Ok(0) => return ControlFlow::Continue,
                Ok(f) => f,
                Err(err) => {
                    error_callback(windows_err_to_cpal_err(err));
                    return ControlFlow::Break;
                }
            };
            let mut qpc_position: u64 = 0;
            let result = capture_client.GetBuffer(
                &mut buffer,
                &mut frames_available,
                flags.as_mut_ptr(),
                None,
                Some(&mut qpc_position),
            );
            match result {
                // TODO: Can this happen?
                Err(e) if e.code() == Audio::AUDCLNT_S_BUFFER_EMPTY => continue,
                Err(e) => {
                    error_callback(windows_err_to_cpal_err(e));
                    return ControlFlow::Break;
                }
                Ok(_) => (),
            }
            debug_assert!(!buffer.is_null());
            // Expose the WASAPI-owned packet to the callback as a `Data` slice
            // without copying.
            let data = buffer as *mut ();
            let len = frames_available as usize * stream.bytes_per_frame as usize
                / stream.sample_format.sample_size();
            let data = Data::from_parts(data, len, stream.sample_format);
            // `qpc_position` dates the first sample of this packet; turn it into
            // the callback's capture timestamp.
            let timestamp = match input_timestamp(stream, qpc_position) {
                Ok(ts) => ts,
                Err(err) => {
                    error_callback(err);
                    return ControlFlow::Break;
                }
            };
            let info = InputCallbackInfo { timestamp };
            data_callback(&data, &info);
            // Release the buffer.
            let result = capture_client
                .ReleaseBuffer(frames_available)
                .map_err(windows_err_to_cpal_err);
            if let Err(err) = result {
                error_callback(err);
                return ControlFlow::Break;
            }
        }
    }
}
// The loop for writing output data.
//
// Writes one buffer's worth of frames (if any are free) via `data_callback`,
// then returns `Continue`; returns `Break` after reporting any WASAPI error
// via `error_callback`.
fn process_output(
    stream: &StreamInner,
    render_client: Audio::IAudioRenderClient,
    data_callback: &mut dyn FnMut(&mut Data, &OutputCallbackInfo),
    error_callback: &mut dyn FnMut(StreamError),
) -> ControlFlow {
    // The number of frames available for writing.
    let frames_available = match get_available_frames(stream) {
        Ok(0) => return ControlFlow::Continue, // TODO: Can this happen?
        Ok(n) => n,
        Err(err) => {
            error_callback(err);
            return ControlFlow::Break;
        }
    };
    unsafe {
        let buffer = match render_client.GetBuffer(frames_available) {
            Ok(b) => b,
            Err(e) => {
                error_callback(windows_err_to_cpal_err(e));
                return ControlFlow::Break;
            }
        };
        debug_assert!(!buffer.is_null());
        // Expose the WASAPI-owned buffer to the callback as a `Data` slice
        // without copying.
        let data = buffer as *mut ();
        let len = frames_available as usize * stream.bytes_per_frame as usize
            / stream.sample_format.sample_size();
        let mut data = Data::from_parts(data, len, stream.sample_format);
        let sample_rate = stream.config.sample_rate;
        let timestamp = match output_timestamp(stream, frames_available, sample_rate) {
            Ok(ts) => ts,
            Err(err) => {
                error_callback(err);
                return ControlFlow::Break;
            }
        };
        let info = OutputCallbackInfo { timestamp };
        data_callback(&mut data, &info);
        // Hand the filled frames back to WASAPI (no flags set).
        if let Err(err) = render_client.ReleaseBuffer(frames_available, 0) {
            error_callback(windows_err_to_cpal_err(err));
            return ControlFlow::Break;
        }
    }
    ControlFlow::Continue
}
/// Convert the given duration in frames at the given sample rate to a `std::time::Duration`.
fn frames_to_duration(frames: u32, rate: crate::SampleRate) -> std::time::Duration {
    // Compute the fractional second count, then split it into whole seconds
    // and the nanosecond remainder.
    let seconds = f64::from(frames) / f64::from(rate.0);
    let whole = seconds.trunc() as u64;
    let nanos = (seconds.fract() * 1_000_000_000.0) as u32;
    std::time::Duration::new(whole, nanos)
}
/// Use the stream's `IAudioClock` to produce the current stream instant.
///
/// Uses the QPC position produced via the `GetPosition` method.
fn stream_instant(stream: &StreamInner) -> Result<crate::StreamInstant, StreamError> {
    // `position` (the device position) is required by the API call but unused here.
    let mut position: u64 = 0;
    let mut qpc_position: u64 = 0;
    unsafe {
        stream
            .audio_clock
            .GetPosition(&mut position, Some(&mut qpc_position))
            .map_err(windows_err_to_cpal_err::<StreamError>)?;
    };
    // The `qpc_position` is in 100 nanosecond units. Convert it to nanoseconds.
    let qpc_nanos = qpc_position as i128 * 100;
    let instant = crate::StreamInstant::from_nanos_i128(qpc_nanos)
        .expect("performance counter out of range of `StreamInstant` representation");
    Ok(instant)
}
/// Produce the input stream timestamp.
///
/// `buffer_qpc_position` is the `qpc_position` returned via the `GetBuffer` call on the capture
/// client. It represents the instant at which the first sample of the retrieved buffer was
/// captured.
fn input_timestamp(
    stream: &StreamInner,
    buffer_qpc_position: u64,
) -> Result<crate::InputStreamTimestamp, StreamError> {
    // The `qpc_position` is in 100 nanosecond units. Convert it to nanoseconds.
    let qpc_nanos = buffer_qpc_position as i128 * 100;
    let capture = crate::StreamInstant::from_nanos_i128(qpc_nanos)
        .expect("performance counter out of range of `StreamInstant` representation");
    // The callback instant is "now" according to the stream's audio clock.
    let callback = stream_instant(stream)?;
    Ok(crate::InputStreamTimestamp { capture, callback })
}
/// Produce the output stream timestamp.
///
/// `frames_available` is the number of frames available for writing as reported by subtracting the
/// result of `GetCurrentPadding` from the maximum buffer size.
///
/// `sample_rate` is the rate at which audio frames are processed by the device.
///
/// TODO: The returned `playback` is an estimate that assumes audio is delivered immediately after
/// `frames_available` are consumed. The reality is that there is likely a tiny amount of latency
/// after this, but not sure how to determine this.
fn output_timestamp(
    stream: &StreamInner,
    frames_available: u32,
    sample_rate: crate::SampleRate,
) -> Result<crate::OutputStreamTimestamp, StreamError> {
    let callback = stream_instant(stream)?;
    // Estimated playback instant: "now" plus the duration the free frames cover.
    let buffer_duration = frames_to_duration(frames_available, sample_rate);
    let playback = callback
        .add(buffer_duration)
        .expect("`playback` occurs beyond representation supported by `StreamInstant`");
    Ok(crate::OutputStreamTimestamp { callback, playback })
}

528
vendor/cpal/src/host/webaudio/mod.rs vendored Normal file
View File

@@ -0,0 +1,528 @@
extern crate js_sys;
extern crate wasm_bindgen;
extern crate web_sys;
use self::js_sys::eval;
use self::wasm_bindgen::prelude::*;
use self::wasm_bindgen::JsCast;
use self::web_sys::{AudioContext, AudioContextOptions};
use crate::traits::{DeviceTrait, HostTrait, StreamTrait};
use crate::{
BackendSpecificError, BufferSize, BuildStreamError, Data, DefaultStreamConfigError,
DeviceNameError, DevicesError, InputCallbackInfo, OutputCallbackInfo, PauseStreamError,
PlayStreamError, SampleFormat, SampleRate, StreamConfig, StreamError, SupportedBufferSize,
SupportedStreamConfig, SupportedStreamConfigRange, SupportedStreamConfigsError,
};
use std::ops::DerefMut;
use std::sync::{Arc, Mutex, RwLock};
use std::time::Duration;
/// Content is false if the iterator is empty.
pub struct Devices(bool);
/// The single, unnamed default device exposed by the webaudio backend.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Device;
/// The webaudio host.
pub struct Host;
/// An open webaudio output stream.
pub struct Stream {
    // The `AudioContext` on which all buffers are scheduled.
    ctx: Arc<AudioContext>,
    // Keeps the self-rescheduling `onended` closures alive for the stream's lifetime.
    on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>>,
    // Configuration the stream was built with.
    config: StreamConfig,
    // Number of frames scheduled per buffer.
    buffer_size_frames: usize,
}
pub type SupportedInputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
pub type SupportedOutputConfigs = ::std::vec::IntoIter<SupportedStreamConfigRange>;
// Bounds and defaults advertised by `supported_output_configs` /
// `default_output_config`.
const MIN_CHANNELS: u16 = 1;
const MAX_CHANNELS: u16 = 32;
const MIN_SAMPLE_RATE: SampleRate = SampleRate(8_000);
const MAX_SAMPLE_RATE: SampleRate = SampleRate(96_000);
const DEFAULT_SAMPLE_RATE: SampleRate = SampleRate(44_100);
const MIN_BUFFER_SIZE: u32 = 1;
const MAX_BUFFER_SIZE: u32 = u32::MAX;
const DEFAULT_BUFFER_SIZE: usize = 2048;
// The only sample format this backend produces.
const SUPPORTED_SAMPLE_FORMAT: SampleFormat = SampleFormat::F32;
impl Host {
    /// Construct the webaudio host; this cannot fail.
    pub fn new() -> Result<Self, crate::HostUnavailable> {
        Ok(Host)
    }
}
// All `HostTrait` methods delegate to the free functions below.
impl HostTrait for Host {
    type Devices = Devices;
    type Device = Device;
    fn is_available() -> bool {
        // Assume this host is always available on webaudio.
        true
    }
    fn devices(&self) -> Result<Self::Devices, DevicesError> {
        Devices::new()
    }
    fn default_input_device(&self) -> Option<Self::Device> {
        default_input_device()
    }
    fn default_output_device(&self) -> Option<Self::Device> {
        default_output_device()
    }
}
impl Devices {
    /// Build the device iterator; see `Default for Devices` for its contents.
    fn new() -> Result<Self, DevicesError> {
        Ok(Self::default())
    }
}
impl Device {
    /// The backend exposes a single device with a fixed display name.
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Ok("Default Device".to_owned())
    }
    /// Input is not implemented, so no input configs are advertised.
    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<SupportedInputConfigs, SupportedStreamConfigsError> {
        // TODO
        Ok(Vec::new().into_iter())
    }
    /// Advertise one config range per supported channel count, all sharing the
    /// same sample-rate bounds, buffer-size bounds and sample format.
    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<SupportedOutputConfigs, SupportedStreamConfigsError> {
        let buffer_size = SupportedBufferSize::Range {
            min: MIN_BUFFER_SIZE,
            max: MAX_BUFFER_SIZE,
        };
        let configs: Vec<_> = (MIN_CHANNELS..=MAX_CHANNELS)
            .map(|channels| SupportedStreamConfigRange {
                channels,
                min_sample_rate: MIN_SAMPLE_RATE,
                max_sample_rate: MAX_SAMPLE_RATE,
                buffer_size: buffer_size.clone(),
                sample_format: SUPPORTED_SAMPLE_FORMAT,
            })
            .collect();
        Ok(configs.into_iter())
    }
    /// Input is not implemented.
    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        // TODO
        Err(DefaultStreamConfigError::StreamTypeNotSupported)
    }
    /// Pick the best-ranked output config and pin it to the default sample rate.
    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        const EXPECT: &str = "expected at least one valid webaudio stream config";
        let config = self
            .supported_output_configs()
            .expect(EXPECT)
            .max_by(|a, b| a.cmp_default_heuristics(b))
            .unwrap()
            .with_sample_rate(DEFAULT_SAMPLE_RATE);
        Ok(config)
    }
}
impl DeviceTrait for Device {
    type SupportedInputConfigs = SupportedInputConfigs;
    type SupportedOutputConfigs = SupportedOutputConfigs;
    type Stream = Stream;
    #[inline]
    fn name(&self) -> Result<String, DeviceNameError> {
        Device::name(self)
    }
    #[inline]
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError> {
        Device::supported_input_configs(self)
    }
    #[inline]
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError> {
        Device::supported_output_configs(self)
    }
    #[inline]
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_input_config(self)
    }
    #[inline]
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError> {
        Device::default_output_config(self)
    }
    /// Input streams are not implemented on this backend yet; always errors.
    fn build_input_stream_raw<D, E>(
        &self,
        _config: &StreamConfig,
        _sample_format: SampleFormat,
        _data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // TODO
        Err(BuildStreamError::StreamConfigNotSupported)
    }
    /// Create an output stream.
    ///
    /// Builds an `AudioContext` and two self-rescheduling closures (a
    /// front/back buffer pair) that repeatedly invoke `data_callback` and
    /// schedule the produced samples via `AudioBufferSourceNode`s.
    /// `_error_callback` is currently unused by this backend.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        _error_callback: E,
        _timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        if !valid_config(config, sample_format) {
            return Err(BuildStreamError::StreamConfigNotSupported);
        }
        let n_channels = config.channels as usize;
        // A fixed buffer size of zero is rejected; `Default` uses the backend default.
        let buffer_size_frames = match config.buffer_size {
            BufferSize::Fixed(v) => {
                if v == 0 {
                    return Err(BuildStreamError::StreamConfigNotSupported);
                } else {
                    v as usize
                }
            }
            BufferSize::Default => DEFAULT_BUFFER_SIZE,
        };
        let buffer_size_samples = buffer_size_frames * n_channels;
        let buffer_time_step_secs = buffer_time_step_secs(buffer_size_frames, config.sample_rate);
        // Shared with both scheduling closures, hence `Arc<Mutex<..>>`.
        let data_callback = Arc::new(Mutex::new(Box::new(data_callback)));
        // Create the WebAudio stream.
        let mut stream_opts = AudioContextOptions::new();
        stream_opts.sample_rate(config.sample_rate.0 as f32);
        let ctx = AudioContext::new_with_context_options(&stream_opts).map_err(
            |err| -> BuildStreamError {
                let description = format!("{:?}", err);
                let err = BackendSpecificError { description };
                err.into()
            },
        )?;
        let destination = ctx.destination();
        // If possible, set the destination's channel_count to the given config.channel.
        // If not, fallback on the default destination channel_count to keep previous behavior
        // and do not return an error.
        if config.channels as u32 <= destination.max_channel_count() {
            destination.set_channel_count(config.channels as u32);
        }
        let ctx = Arc::new(ctx);
        // A container for managing the lifecycle of the audio callbacks.
        let mut on_ended_closures: Vec<Arc<RwLock<Option<Closure<dyn FnMut()>>>>> = Vec::new();
        // A cursor keeping track of the current time at which new frames should be scheduled.
        let time = Arc::new(RwLock::new(0f64));
        // Create a set of closures / callbacks which will continuously fetch and schedule sample
        // playback. Starting with two workers, e.g. a front and back buffer so that audio frames
        // can be fetched in the background.
        for _i in 0..2 {
            let data_callback_handle = data_callback.clone();
            let ctx_handle = ctx.clone();
            let time_handle = time.clone();
            // A set of temporary buffers to be used for intermediate sample transformation steps.
            let mut temporary_buffer = vec![0f32; buffer_size_samples];
            let mut temporary_channel_buffer = vec![0f32; buffer_size_frames];
            #[cfg(target_feature = "atomics")]
            let temporary_channel_array_view: js_sys::Float32Array;
            #[cfg(target_feature = "atomics")]
            {
                let temporary_channel_array = js_sys::ArrayBuffer::new(
                    (std::mem::size_of::<f32>() * buffer_size_frames) as u32,
                );
                temporary_channel_array_view = js_sys::Float32Array::new(&temporary_channel_array);
            }
            // Create a webaudio buffer which will be reused to avoid allocations.
            let ctx_buffer = ctx
                .create_buffer(
                    config.channels as u32,
                    buffer_size_frames as u32,
                    config.sample_rate.0 as f32,
                )
                .map_err(|err| -> BuildStreamError {
                    let description = format!("{:?}", err);
                    let err = BackendSpecificError { description };
                    err.into()
                })?;
            // A self reference to this closure for passing to future audio event calls.
            let on_ended_closure: Arc<RwLock<Option<Closure<dyn FnMut()>>>> =
                Arc::new(RwLock::new(None));
            let on_ended_closure_handle = on_ended_closure.clone();
            on_ended_closure
                .write()
                .unwrap()
                .replace(Closure::wrap(Box::new(move || {
                    let now = ctx_handle.current_time();
                    let time_at_start_of_buffer = {
                        let time_at_start_of_buffer = time_handle
                            .read()
                            .expect("Unable to get a read lock on the time cursor");
                        // Synchronise first buffer as necessary (eg. keep the time value
                        // referenced to the context clock).
                        if *time_at_start_of_buffer > 0.001 {
                            *time_at_start_of_buffer
                        } else {
                            // 25ms of time to fetch the first sample data, increase to avoid
                            // initial underruns.
                            now + 0.025
                        }
                    };
                    // Populate the sample data into an interleaved temporary buffer.
                    {
                        let len = temporary_buffer.len();
                        let data = temporary_buffer.as_mut_ptr() as *mut ();
                        let mut data = unsafe { Data::from_parts(data, len, sample_format) };
                        let mut data_callback = data_callback_handle.lock().unwrap();
                        let callback = crate::StreamInstant::from_secs_f64(now);
                        let playback = crate::StreamInstant::from_secs_f64(time_at_start_of_buffer);
                        let timestamp = crate::OutputStreamTimestamp { callback, playback };
                        let info = OutputCallbackInfo { timestamp };
                        (data_callback.deref_mut())(&mut data, &info);
                    }
                    // Deinterleave the sample data and copy into the audio context buffer.
                    // We do not reference the audio context buffer directly e.g. getChannelData.
                    // As wasm-bindgen only gives us a copy, not a direct reference.
                    for channel in 0..n_channels {
                        for i in 0..buffer_size_frames {
                            temporary_channel_buffer[i] =
                                temporary_buffer[n_channels * i + channel];
                        }
                        #[cfg(not(target_feature = "atomics"))]
                        {
                            ctx_buffer
                                .copy_to_channel(&mut temporary_channel_buffer, channel as i32)
                                .expect(
                                    "Unable to write sample data into the audio context buffer",
                                );
                        }
                        // copyToChannel cannot be directly copied into from a SharedArrayBuffer,
                        // which WASM memory is backed by if the 'atomics' flag is enabled.
                        // This workaround copies the data into an intermediary buffer first.
                        // There's a chance browsers may eventually relax that requirement.
                        // See this issue: https://github.com/WebAudio/web-audio-api/issues/2565
                        #[cfg(target_feature = "atomics")]
                        {
                            temporary_channel_array_view.copy_from(&mut temporary_channel_buffer);
                            ctx_buffer
                                .unchecked_ref::<ExternalArrayAudioBuffer>()
                                .copy_to_channel(&temporary_channel_array_view, channel as i32)
                                .expect(
                                    "Unable to write sample data into the audio context buffer",
                                );
                        }
                    }
                    // Create an AudioBufferSourceNode, schedule it to playback the reused buffer
                    // in the future.
                    let source = ctx_handle
                        .create_buffer_source()
                        .expect("Unable to create a webaudio buffer source");
                    source.set_buffer(Some(&ctx_buffer));
                    source
                        .connect_with_audio_node(&ctx_handle.destination())
                        .expect(
                            "Unable to connect the web audio buffer source to the context destination",
                        );
                    source.set_onended(Some(
                        on_ended_closure_handle
                            .read()
                            .unwrap()
                            .as_ref()
                            .unwrap()
                            .as_ref()
                            .unchecked_ref(),
                    ));
                    source
                        .start_with_when(time_at_start_of_buffer)
                        .expect("Unable to start the webaudio buffer source");
                    // Keep track of when the next buffer worth of samples should be played.
                    *time_handle.write().unwrap() = time_at_start_of_buffer + buffer_time_step_secs;
                }) as Box<dyn FnMut()>));
            on_ended_closures.push(on_ended_closure);
        }
        Ok(Stream {
            ctx,
            on_ended_closures,
            config: config.clone(),
            buffer_size_frames,
        })
    }
}
impl Stream {
    /// Return the [`AudioContext`](https://developer.mozilla.org/docs/Web/API/AudioContext) used
    /// by this stream.
    ///
    /// The context is shared (via `Arc`) with the stream's internal scheduling closures.
    pub fn audio_context(&self) -> &AudioContext {
        &*self.ctx
    }
}
impl StreamTrait for Stream {
    /// Resume the `AudioContext` and kick off the scheduling closures.
    fn play(&self) -> Result<(), PlayStreamError> {
        // NOTE(review): `web_sys::window()` is `None` outside a `Window` context
        // (e.g. inside a worker), in which case this `unwrap` panics — confirm
        // this is only called from the main browser thread.
        let window = web_sys::window().unwrap();
        match self.ctx.resume() {
            Ok(_) => {
                // Begin webaudio playback, initially scheduling the closures to fire on a timeout
                // event.
                let mut offset_ms = 10;
                let time_step_secs =
                    buffer_time_step_secs(self.buffer_size_frames, self.config.sample_rate);
                let time_step_ms = (time_step_secs * 1_000.0) as i32;
                for on_ended_closure in self.on_ended_closures.iter() {
                    window
                        .set_timeout_with_callback_and_timeout_and_arguments_0(
                            on_ended_closure
                                .read()
                                .unwrap()
                                .as_ref()
                                .unwrap()
                                .as_ref()
                                .unchecked_ref(),
                            offset_ms,
                        )
                        .unwrap();
                    // Stagger each worker by one buffer's duration so their
                    // buffers interleave in time.
                    offset_ms += time_step_ms;
                }
                Ok(())
            }
            Err(err) => {
                let description = format!("{:?}", err);
                let err = BackendSpecificError { description };
                Err(err.into())
            }
        }
    }
    /// Suspend the `AudioContext`, pausing playback.
    fn pause(&self) -> Result<(), PauseStreamError> {
        match self.ctx.suspend() {
            Ok(_) => Ok(()),
            Err(err) => {
                let description = format!("{:?}", err);
                let err = BackendSpecificError { description };
                Err(err.into())
            }
        }
    }
}
impl Drop for Stream {
    fn drop(&mut self) {
        // Close the `AudioContext`; any error is deliberately ignored since
        // nothing useful can be done with it during drop.
        let _ = self.ctx.close();
    }
}
impl Default for Devices {
    /// The iterator holds one device when WebAudio is available, none otherwise.
    fn default() -> Devices {
        // We produce an empty iterator if the WebAudio API isn't available.
        Devices(is_webaudio_available())
    }
}
impl Iterator for Devices {
    type Item = Device;
    /// Yields the single default device once, then `None` forever after.
    #[inline]
    fn next(&mut self) -> Option<Device> {
        if !self.0 {
            return None;
        }
        self.0 = false;
        Some(Device)
    }
}
// Input devices are not supported on this backend yet.
#[inline]
fn default_input_device() -> Option<Device> {
    // TODO
    None
}
// The default output device exists exactly when the WebAudio API is present.
#[inline]
fn default_output_device() -> Option<Device> {
    match is_webaudio_available() {
        true => Some(Device),
        false => None,
    }
}
// Detects whether the `AudioContext` global variable is available.
fn is_webaudio_available() -> bool {
if let Ok(audio_context_is_defined) = eval("typeof AudioContext !== 'undefined'") {
audio_context_is_defined.as_bool().unwrap()
} else {
false
}
}
// Whether or not the given stream configuration is valid for building a stream.
//
// Channels and sample rate must fall within the advertised bounds, and only
// the backend's single supported sample format is accepted.
fn valid_config(conf: &StreamConfig, sample_format: SampleFormat) -> bool {
    let channels_ok = (MIN_CHANNELS..=MAX_CHANNELS).contains(&conf.channels);
    let rate_ok = (MIN_SAMPLE_RATE..=MAX_SAMPLE_RATE).contains(&conf.sample_rate);
    channels_ok && rate_ok && sample_format == SUPPORTED_SAMPLE_FORMAT
}
// Duration in seconds that one buffer's worth of frames covers at `sample_rate`.
fn buffer_time_step_secs(buffer_size_frames: usize, sample_rate: SampleRate) -> f64 {
    let frames = buffer_size_frames as f64;
    let rate = sample_rate.0 as f64;
    frames / rate
}
// Hand-written binding to `AudioBuffer.copyToChannel` that takes a
// `js_sys::Float32Array` source. Used on the 'atomics' target, where (per the
// workaround comments in `build_output_stream_raw`) data cannot be copied
// directly out of the `SharedArrayBuffer`-backed WASM memory.
#[cfg(target_feature = "atomics")]
#[wasm_bindgen]
extern "C" {
    #[wasm_bindgen(js_name = AudioBuffer)]
    type ExternalArrayAudioBuffer;
    # [wasm_bindgen(catch, method, structural, js_class = "AudioBuffer", js_name = copyToChannel)]
    pub fn copy_to_channel(
        this: &ExternalArrayAudioBuffer,
        source: &js_sys::Float32Array,
        channel_number: i32,
    ) -> Result<(), JsValue>;
}

871
vendor/cpal/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,871 @@
//! # How to use cpal
//!
//! Here are some concepts cpal exposes:
//!
//! - A [`Host`] provides access to the available audio devices on the system.
//! Some platforms have more than one host available, but every platform supported by CPAL has at
//! least one [default_host] that is guaranteed to be available.
//! - A [`Device`] is an audio device that may have any number of input and
//! output streams.
//! - A [`Stream`] is an open flow of audio data. Input streams allow you to
//! receive audio data, output streams allow you to play audio data. You must choose which
//! [Device] will run your stream before you can create one. Often, a default device can be
//! retrieved via the [Host].
//!
//! The first step is to initialise the [`Host`]:
//!
//! ```
//! use cpal::traits::HostTrait;
//! let host = cpal::default_host();
//! ```
//!
//! Then choose an available [`Device`]. The easiest way is to use the default input or output
//! `Device` via the [`default_input_device()`] or [`default_output_device()`] methods on `host`.
//!
//! Alternatively, you can enumerate all the available devices with the [`devices()`] method.
//! Beware that the `default_*_device()` functions return an `Option<Device>` in case no device
//! is available for that stream type on the system.
//!
//! ```no_run
//! # use cpal::traits::HostTrait;
//! # let host = cpal::default_host();
//! let device = host.default_output_device().expect("no output device available");
//! ```
//!
//! Before we can create a stream, we must decide what the configuration of the audio stream is
//! going to be.
//! You can query all the supported configurations with the
//! [`supported_input_configs()`] and [`supported_output_configs()`] methods.
//! These produce a list of [`SupportedStreamConfigRange`] structs which can later be turned into
//! actual [`SupportedStreamConfig`] structs.
//!
//! If you don't want to query the list of configs,
//! you can also build your own [`StreamConfig`] manually, but doing so could lead to an error when
//! building the stream if the config is not supported by the device.
//!
//! > **Note**: the `supported_input/output_configs()` methods
//! > could return an error for example if the device has been disconnected.
//!
//! ```no_run
//! use cpal::traits::{DeviceTrait, HostTrait};
//! # let host = cpal::default_host();
//! # let device = host.default_output_device().unwrap();
//! let mut supported_configs_range = device.supported_output_configs()
//! .expect("error while querying configs");
//! let supported_config = supported_configs_range.next()
//! .expect("no supported config?!")
//! .with_max_sample_rate();
//! ```
//!
//! Now that we have everything for the stream, we are ready to create it from our selected device:
//!
//! ```no_run
//! use cpal::Data;
//! use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
//! # let host = cpal::default_host();
//! # let device = host.default_output_device().unwrap();
//! # let config = device.default_output_config().unwrap().into();
//! let stream = device.build_output_stream(
//! &config,
//! move |data: &mut [f32], _: &cpal::OutputCallbackInfo| {
//! // react to stream events and read or write stream data here.
//! },
//! move |err| {
//! // react to errors here.
//! },
//! None // None=blocking, Some(Duration)=timeout
//! );
//! ```
//!
//! While the stream is running, the selected audio device will periodically call the data callback
//! that was passed to the function. The callback is passed an instance of either [`&Data` or
//! `&mut Data`](Data) depending on whether the stream is an input stream or output stream respectively.
//!
//! > **Note**: Creating and running a stream will *not* block the thread. On modern platforms, the
//! > given callback is called by a dedicated, high-priority thread responsible for delivering
//! > audio data to the system's audio device in a timely manner. On older platforms that only
//! > provide a blocking API (e.g. ALSA), CPAL will create a thread in order to consistently
//! > provide non-blocking behaviour (currently this is a thread per stream, but this may change to
//! > use a single thread for all streams). *If this is an issue for your platform or design,
//! > please share your issue and use-case with the CPAL team on the GitHub issue tracker for
//! > consideration.*
//!
//! In this example, we simply fill the given output buffer with silence.
//!
//! ```no_run
//! use cpal::{Data, Sample, SampleFormat, FromSample};
//! use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
//! # let host = cpal::default_host();
//! # let device = host.default_output_device().unwrap();
//! # let supported_config = device.default_output_config().unwrap();
//! let err_fn = |err| eprintln!("an error occurred on the output audio stream: {}", err);
//! let sample_format = supported_config.sample_format();
//! let config = supported_config.into();
//! let stream = match sample_format {
//! SampleFormat::F32 => device.build_output_stream(&config, write_silence::<f32>, err_fn, None),
//! SampleFormat::I16 => device.build_output_stream(&config, write_silence::<i16>, err_fn, None),
//! SampleFormat::U16 => device.build_output_stream(&config, write_silence::<u16>, err_fn, None),
//! sample_format => panic!("Unsupported sample format '{sample_format}'")
//! }.unwrap();
//!
//! fn write_silence<T: Sample>(data: &mut [T], _: &cpal::OutputCallbackInfo) {
//! for sample in data.iter_mut() {
//! *sample = Sample::EQUILIBRIUM;
//! }
//! }
//! ```
//!
//! Not all platforms automatically run the stream upon creation. To ensure the stream has started,
//! we can use [`Stream::play`](traits::StreamTrait::play).
//!
//! ```no_run
//! # use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
//! # let host = cpal::default_host();
//! # let device = host.default_output_device().unwrap();
//! # let supported_config = device.default_output_config().unwrap();
//! # let sample_format = supported_config.sample_format();
//! # let config = supported_config.into();
//! # let data_fn = move |_data: &mut cpal::Data, _: &cpal::OutputCallbackInfo| {};
//! # let err_fn = move |_err| {};
//! # let stream = device.build_output_stream_raw(&config, sample_format, data_fn, err_fn, None).unwrap();
//! stream.play().unwrap();
//! ```
//!
//! Some devices support pausing the audio stream. This can be useful for saving energy in moments
//! of silence.
//!
//! ```no_run
//! # use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
//! # let host = cpal::default_host();
//! # let device = host.default_output_device().unwrap();
//! # let supported_config = device.default_output_config().unwrap();
//! # let sample_format = supported_config.sample_format();
//! # let config = supported_config.into();
//! # let data_fn = move |_data: &mut cpal::Data, _: &cpal::OutputCallbackInfo| {};
//! # let err_fn = move |_err| {};
//! # let stream = device.build_output_stream_raw(&config, sample_format, data_fn, err_fn, None).unwrap();
//! stream.pause().unwrap();
//! ```
//!
//! [`default_input_device()`]: traits::HostTrait::default_input_device
//! [`default_output_device()`]: traits::HostTrait::default_output_device
//! [`devices()`]: traits::HostTrait::devices
//! [`supported_input_configs()`]: traits::DeviceTrait::supported_input_configs
//! [`supported_output_configs()`]: traits::DeviceTrait::supported_output_configs
#![recursion_limit = "2048"]
// Extern crate declarations with `#[macro_use]` must unfortunately be at crate root.
#[cfg(target_os = "emscripten")]
#[macro_use]
extern crate wasm_bindgen;
#[cfg(target_os = "emscripten")]
extern crate js_sys;
#[cfg(target_os = "emscripten")]
extern crate web_sys;
pub use error::*;
pub use platform::{
available_hosts, default_host, host_from_id, Device, Devices, Host, HostId, Stream,
SupportedInputConfigs, SupportedOutputConfigs, ALL_HOSTS,
};
pub use samples_formats::{FromSample, Sample, SampleFormat, SizedSample, I24, I48, U24, U48};
use std::convert::TryInto;
use std::ops::{Div, Mul};
use std::time::Duration;
#[cfg(target_os = "emscripten")]
use wasm_bindgen::prelude::*;
mod error;
mod host;
pub mod platform;
mod samples_formats;
pub mod traits;
/// A host's device iterator yielding only *input* devices.
///
/// Wraps an arbitrary device iterator `I` in a [`std::iter::Filter`] driven by a
/// plain `fn` pointer (no captured state).
pub type InputDevices<I> = std::iter::Filter<I, fn(&<I as Iterator>::Item) -> bool>;
/// A host's device iterator yielding only *output* devices.
///
/// Same shape as [`InputDevices`], but the filter selects output-capable devices.
pub type OutputDevices<I> = std::iter::Filter<I, fn(&<I as Iterator>::Item) -> bool>;
/// Number of channels.
pub type ChannelCount = u16;
/// The number of samples processed per second for a single channel of audio.
#[cfg_attr(target_os = "emscripten", wasm_bindgen)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct SampleRate(pub u32);

impl<T> Mul<T> for SampleRate
where
    u32: Mul<T, Output = u32>,
{
    type Output = Self;

    /// Scale the rate, e.g. `SampleRate(44_100) * 2 == SampleRate(88_200)`.
    fn mul(self, rhs: T) -> Self {
        let Self(hz) = self;
        Self(hz * rhs)
    }
}

impl<T> Div<T> for SampleRate
where
    u32: Div<T, Output = u32>,
{
    type Output = Self;

    /// Divide the rate, e.g. `SampleRate(44_100) / 2 == SampleRate(22_050)`.
    fn div(self, rhs: T) -> Self {
        let Self(hz) = self;
        Self(hz / rhs)
    }
}
/// The desired number of frames for the hardware buffer.
pub type FrameCount = u32;
/// The buffer size used by the device.
///
/// [`Default`] is used when no specific buffer size is set and uses the default
/// behavior of the given host. Note, the default buffer size may be surprisingly
/// large, leading to latency issues. If low latency is desired, [`Fixed(FrameCount)`]
/// should be used in accordance with the [`SupportedBufferSize`] range produced by
/// the [`SupportedStreamConfig`] API.
///
/// [`Default`]: BufferSize::Default
/// [`Fixed(FrameCount)`]: BufferSize::Fixed
/// [`SupportedStreamConfig`]: SupportedStreamConfig::buffer_size
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum BufferSize {
    /// Use the host's default buffer size (may be large — see type-level docs).
    Default,
    /// Request a specific buffer size, expressed as a number of frames.
    Fixed(FrameCount),
}
#[cfg(target_os = "emscripten")]
impl wasm_bindgen::describe::WasmDescribe for BufferSize {
    // Intentionally empty: no WASM-level type description is emitted for `BufferSize`.
    fn describe() {}
}
#[cfg(target_os = "emscripten")]
impl wasm_bindgen::convert::IntoWasmAbi for BufferSize {
    /// ABI mapping: `Default` crosses the FFI boundary as `None`,
    /// `Fixed(frames)` as `Some(frames)`.
    type Abi = Option<u32>;
    fn into_abi(self) -> Self::Abi {
        match self {
            Self::Default => None,
            Self::Fixed(fc) => Some(fc),
        }
        .into_abi()
    }
}
/// The set of parameters used to describe how to open a stream.
///
/// The sample format is omitted in favour of using a sample type.
#[cfg_attr(target_os = "emscripten", wasm_bindgen)]
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct StreamConfig {
    /// The number of audio channels.
    pub channels: ChannelCount,
    /// The number of samples per second, per channel.
    pub sample_rate: SampleRate,
    /// The requested hardware buffer size.
    pub buffer_size: BufferSize,
}
/// Describes the minimum and maximum supported buffer size for the device
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum SupportedBufferSize {
    /// The inclusive range of buffer sizes (in frames) supported by the device.
    Range {
        min: FrameCount,
        max: FrameCount,
    },
    /// In the case that the platform provides no way of getting the default
    /// buffer size before starting a stream.
    Unknown,
}
/// Describes a range of supported stream configurations, retrieved via the
/// [`Device::supported_input/output_configs`](traits::DeviceTrait#required-methods) method.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SupportedStreamConfigRange {
    pub(crate) channels: ChannelCount,
    /// Minimum value for the sample rate of the supported formats.
    pub(crate) min_sample_rate: SampleRate,
    /// Maximum value for the sample rate of the supported formats.
    pub(crate) max_sample_rate: SampleRate,
    /// Buffer size ranges supported by the device
    pub(crate) buffer_size: SupportedBufferSize,
    /// Type of data expected by the device.
    pub(crate) sample_format: SampleFormat,
}
/// Describes a single supported stream configuration, retrieved via either a
/// [`SupportedStreamConfigRange`] instance or one of the
/// [`Device::default_input/output_config`](traits::DeviceTrait#required-methods) methods.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SupportedStreamConfig {
    // Fields are private; read access is provided by the getter methods below.
    channels: ChannelCount,
    sample_rate: SampleRate,
    buffer_size: SupportedBufferSize,
    sample_format: SampleFormat,
}
/// A buffer of dynamically typed audio data, passed to raw stream callbacks.
///
/// Raw input stream callbacks receive `&Data`, while raw output stream callbacks expect `&mut
/// Data`.
#[cfg_attr(target_os = "emscripten", wasm_bindgen)]
#[derive(Debug)]
pub struct Data {
    // Pointer to the first sample; validity invariants are documented on `Data::from_parts`.
    data: *mut (),
    // Buffer length in *samples* (not bytes).
    len: usize,
    // The sample format the raw bytes must be interpreted as.
    sample_format: SampleFormat,
}
/// A monotonic time instance associated with a stream, retrieved from either:
///
/// 1. A timestamp provided to the stream's underlying audio data callback or
/// 2. The same time source used to generate timestamps for a stream's underlying audio data
///    callback.
///
/// `StreamInstant` represents a duration since some unspecified origin occurring either before
/// or equal to the moment the stream from which it was created begins.
///
/// ## Host `StreamInstant` Sources
///
/// | Host | Source |
/// | ---- | ------ |
/// | alsa | `snd_pcm_status_get_htstamp` |
/// | coreaudio | `mach_absolute_time` |
/// | wasapi | `QueryPerformanceCounter` |
/// | asio | `timeGetTime` |
/// | emscripten | `AudioContext.getOutputTimestamp` |
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct StreamInstant {
    // Whole seconds relative to the stream's (unspecified) origin; may be negative.
    secs: i64,
    // Sub-second nanoseconds; `as_nanos` computes `secs * 1e9 + nanos`.
    nanos: u32,
}
/// A timestamp associated with a call to an input stream's data callback.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct InputStreamTimestamp {
    /// The instant the stream's data callback was invoked.
    pub callback: StreamInstant,
    /// The instant that data was captured from the device.
    ///
    /// E.g. The instant data was read from an ADC.
    pub capture: StreamInstant,
}
/// A timestamp associated with a call to an output stream's data callback.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub struct OutputStreamTimestamp {
    /// The instant the stream's data callback was invoked.
    pub callback: StreamInstant,
    /// The predicted instant that data written will be delivered to the device for playback.
    ///
    /// E.g. The instant data will be played by a DAC.
    pub playback: StreamInstant,
}
/// Information relevant to a single call to the user's input stream data callback.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct InputCallbackInfo {
    // Private; exposed via `InputCallbackInfo::timestamp`.
    timestamp: InputStreamTimestamp,
}
/// Information relevant to a single call to the user's output stream data callback.
#[cfg_attr(target_os = "emscripten", wasm_bindgen)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct OutputCallbackInfo {
    // Private; exposed via `OutputCallbackInfo::timestamp`.
    timestamp: OutputStreamTimestamp,
}
impl SupportedStreamConfig {
pub fn new(
channels: ChannelCount,
sample_rate: SampleRate,
buffer_size: SupportedBufferSize,
sample_format: SampleFormat,
) -> Self {
Self {
channels,
sample_rate,
buffer_size,
sample_format,
}
}
pub fn channels(&self) -> ChannelCount {
self.channels
}
pub fn sample_rate(&self) -> SampleRate {
self.sample_rate
}
pub fn buffer_size(&self) -> &SupportedBufferSize {
&self.buffer_size
}
pub fn sample_format(&self) -> SampleFormat {
self.sample_format
}
pub fn config(&self) -> StreamConfig {
StreamConfig {
channels: self.channels,
sample_rate: self.sample_rate,
buffer_size: BufferSize::Default,
}
}
}
impl StreamInstant {
    /// The amount of time elapsed from another instant to this one.
    ///
    /// Returns `None` if `earlier` is later than self, or if the difference
    /// does not fit in a [`Duration`].
    pub fn duration_since(&self, earlier: &Self) -> Option<Duration> {
        if self < earlier {
            None
        } else {
            (self.as_nanos() - earlier.as_nanos())
                .try_into()
                .ok()
                .map(Duration::from_nanos)
        }
    }
    /// Returns the instant in time after the given duration has passed.
    ///
    /// Returns `None` if the resulting instant would exceed the bounds of the underlying data
    /// structure.
    pub fn add(&self, duration: Duration) -> Option<Self> {
        self.as_nanos()
            .checked_add(duration.as_nanos() as i128)
            .and_then(Self::from_nanos_i128)
    }
    /// Returns the instant in time one `duration` ago.
    ///
    /// Returns `None` if the resulting instant would underflow. As a result, it is important to
    /// consider that on some platforms the [`StreamInstant`] may begin at `0` from the moment the
    /// source stream is created.
    pub fn sub(&self, duration: Duration) -> Option<Self> {
        self.as_nanos()
            .checked_sub(duration.as_nanos() as i128)
            .and_then(Self::from_nanos_i128)
    }
    /// The instant as a single signed nanosecond count relative to the origin.
    fn as_nanos(&self) -> i128 {
        (self.secs as i128 * 1_000_000_000) + self.nanos as i128
    }
    #[allow(dead_code)]
    fn from_nanos(nanos: i64) -> Self {
        // Euclidean division keeps the sub-second component in
        // `0..1_000_000_000` even for negative totals (e.g. -1.5 s becomes
        // secs == -2, nanos == 500_000_000). Truncating division here would
        // yield a negative remainder that wraps in the `as u32` cast below,
        // corrupting the instant.
        let secs = nanos.div_euclid(1_000_000_000);
        let subsec_nanos = nanos.rem_euclid(1_000_000_000);
        Self::new(secs, subsec_nanos as u32)
    }
    #[allow(dead_code)]
    fn from_nanos_i128(nanos: i128) -> Option<Self> {
        // Same Euclidean decomposition as `from_nanos`, plus a range check on
        // the seconds component: `None` when it does not fit in an `i64`.
        let secs = nanos.div_euclid(1_000_000_000);
        let subsec_nanos = nanos.rem_euclid(1_000_000_000);
        debug_assert!((0..1_000_000_000).contains(&subsec_nanos));
        i64::try_from(secs)
            .ok()
            .map(|secs| Self::new(secs, subsec_nanos as u32))
    }
    #[allow(dead_code)]
    fn from_secs_f64(secs: f64) -> crate::StreamInstant {
        let s = secs.floor() as i64;
        let ns = ((secs - s as f64) * 1_000_000_000.0) as u32;
        // Clamp guards against f64 rounding producing a fraction of exactly
        // one second, which would violate the `nanos < 1e9` invariant.
        Self::new(s, ns.min(999_999_999))
    }
    fn new(secs: i64, nanos: u32) -> Self {
        StreamInstant { secs, nanos }
    }
}
impl InputCallbackInfo {
    /// The timestamp associated with the call to an input stream's data callback.
    pub fn timestamp(&self) -> InputStreamTimestamp {
        let Self { timestamp } = self;
        *timestamp
    }
}
impl OutputCallbackInfo {
    /// The timestamp associated with the call to an output stream's data callback.
    pub fn timestamp(&self) -> OutputStreamTimestamp {
        let Self { timestamp } = self;
        *timestamp
    }
}
#[allow(clippy::len_without_is_empty)]
impl Data {
    // Internal constructor for host implementations to use.
    //
    // The following requirements must be met in order for the safety of `Data`'s public API:
    //
    // - The `data` pointer must point to the first sample in the slice containing all samples.
    // - The `len` must describe the length of the buffer as a number of samples in the expected
    //   format specified via the `sample_format` argument.
    // - The `sample_format` must correctly represent the underlying sample data delivered/expected
    //   by the stream.
    pub(crate) unsafe fn from_parts(
        data: *mut (),
        len: usize,
        sample_format: SampleFormat,
    ) -> Self {
        Data {
            data,
            len,
            sample_format,
        }
    }
    /// The sample format of the internal audio data.
    pub fn sample_format(&self) -> SampleFormat {
        self.sample_format
    }
    /// The full length of the buffer in samples.
    ///
    /// The returned length is the same length as the slice of type `T` that would be returned via
    /// [`as_slice`](Self::as_slice) given a sample type that matches the inner sample format.
    pub fn len(&self) -> usize {
        self.len
    }
    /// The raw slice of memory representing the underlying audio data as a slice of bytes.
    ///
    /// It is up to the user to interpret the slice of memory based on [`Data::sample_format`].
    pub fn bytes(&self) -> &[u8] {
        // Byte length = sample count * bytes per sample.
        let len = self.len * self.sample_format.sample_size();
        // SAFETY: relies on correct construction of the `Data` instance.
        // See the unsafe `from_parts` constructor for these requirements.
        unsafe { std::slice::from_raw_parts(self.data as *const u8, len) }
    }
    /// The raw slice of memory representing the underlying audio data as a slice of bytes.
    ///
    /// It is up to the user to interpret the slice of memory based on [`Data::sample_format`].
    pub fn bytes_mut(&mut self) -> &mut [u8] {
        // Byte length = sample count * bytes per sample.
        let len = self.len * self.sample_format.sample_size();
        // SAFETY: relies on correct construction of the `Data` instance. See
        // the unsafe `from_parts` constructor for these requirements.
        unsafe { std::slice::from_raw_parts_mut(self.data as *mut u8, len) }
    }
    /// Access the data as a slice of sample type `T`.
    ///
    /// Returns `None` if the sample type does not match the expected sample format.
    pub fn as_slice<T>(&self) -> Option<&[T]>
    where
        T: SizedSample,
    {
        // The format check is what makes the cast below sound: `T` is only
        // reinterpreted from the raw buffer when its declared format matches.
        if T::FORMAT == self.sample_format {
            // SAFETY: relies on correct construction of the `Data` instance. See
            // the unsafe `from_parts` constructor for these requirements.
            unsafe { Some(std::slice::from_raw_parts(self.data as *const T, self.len)) }
        } else {
            None
        }
    }
    /// Access the data as a slice of sample type `T`.
    ///
    /// Returns `None` if the sample type does not match the expected sample format.
    pub fn as_slice_mut<T>(&mut self) -> Option<&mut [T]>
    where
        T: SizedSample,
    {
        if T::FORMAT == self.sample_format {
            // SAFETY: relies on correct construction of the `Data` instance. See
            // the unsafe `from_parts` constructor for these requirements.
            unsafe {
                Some(std::slice::from_raw_parts_mut(
                    self.data as *mut T,
                    self.len,
                ))
            }
        } else {
            None
        }
    }
}
impl SupportedStreamConfigRange {
    /// Construct a new range from its parts.
    pub fn new(
        channels: ChannelCount,
        min_sample_rate: SampleRate,
        max_sample_rate: SampleRate,
        buffer_size: SupportedBufferSize,
        sample_format: SampleFormat,
    ) -> Self {
        Self {
            channels,
            min_sample_rate,
            max_sample_rate,
            buffer_size,
            sample_format,
        }
    }
    /// The number of audio channels.
    pub fn channels(&self) -> ChannelCount {
        self.channels
    }
    /// Minimum supported sample rate.
    pub fn min_sample_rate(&self) -> SampleRate {
        self.min_sample_rate
    }
    /// Maximum supported sample rate.
    pub fn max_sample_rate(&self) -> SampleRate {
        self.max_sample_rate
    }
    /// The buffer size range supported by the device.
    pub fn buffer_size(&self) -> &SupportedBufferSize {
        &self.buffer_size
    }
    /// The sample format expected by the device.
    pub fn sample_format(&self) -> SampleFormat {
        self.sample_format
    }
    /// Retrieve a [`SupportedStreamConfig`] with the given sample rate and buffer size.
    ///
    /// # Panics
    ///
    /// Panics if the given `sample_rate` is outside the range specified within
    /// this [`SupportedStreamConfigRange`] instance. For a non-panicking
    /// variant, use [`try_with_sample_rate`](#method.try_with_sample_rate).
    pub fn with_sample_rate(self, sample_rate: SampleRate) -> SupportedStreamConfig {
        self.try_with_sample_rate(sample_rate)
            .expect("sample rate out of range")
    }
    /// Retrieve a [`SupportedStreamConfig`] with the given sample rate and buffer size.
    ///
    /// Returns `None` if the given sample rate is outside the range specified
    /// within this [`SupportedStreamConfigRange`] instance.
    pub fn try_with_sample_rate(self, sample_rate: SampleRate) -> Option<SupportedStreamConfig> {
        // Inclusive bounds check on both ends of the supported range.
        if self.min_sample_rate <= sample_rate && sample_rate <= self.max_sample_rate {
            Some(SupportedStreamConfig {
                channels: self.channels,
                sample_rate,
                sample_format: self.sample_format,
                buffer_size: self.buffer_size,
            })
        } else {
            None
        }
    }
    /// Turns this [`SupportedStreamConfigRange`] into a [`SupportedStreamConfig`] corresponding to the maximum samples rate.
    #[inline]
    pub fn with_max_sample_rate(self) -> SupportedStreamConfig {
        SupportedStreamConfig {
            channels: self.channels,
            sample_rate: self.max_sample_rate,
            sample_format: self.sample_format,
            buffer_size: self.buffer_size,
        }
    }
    /// A comparison function which compares two [`SupportedStreamConfigRange`]s in terms of their priority of
    /// use as a default stream format.
    ///
    /// Some backends do not provide a default stream format for their audio devices. In these
    /// cases, CPAL attempts to decide on a reasonable default format for the user. To do this we
    /// use the "greatest" of all supported stream formats when compared with this method.
    ///
    /// SupportedStreamConfigs are prioritised by the following heuristics:
    ///
    /// **Channels**:
    ///
    /// - Stereo
    /// - Mono
    /// - Max available channels
    ///
    /// **Sample format**:
    /// - f32
    /// - i16
    /// - u16
    ///
    /// **Sample rate**:
    ///
    /// - 44100 (cd quality)
    /// - Max sample rate
    pub fn cmp_default_heuristics(&self, other: &Self) -> std::cmp::Ordering {
        use std::cmp::Ordering::Equal;
        use SampleFormat::{F32, I16, U16};
        // Each criterion below is a `bool` comparison (`true > false`), so the
        // side satisfying the property ranks greater. The first non-equal
        // criterion decides; later criteria are tie-breakers only.
        // 1. Prefer stereo.
        let cmp_stereo = (self.channels == 2).cmp(&(other.channels == 2));
        if cmp_stereo != Equal {
            return cmp_stereo;
        }
        // 2. Then mono.
        let cmp_mono = (self.channels == 1).cmp(&(other.channels == 1));
        if cmp_mono != Equal {
            return cmp_mono;
        }
        // 3. Then the higher channel count.
        let cmp_channels = self.channels.cmp(&other.channels);
        if cmp_channels != Equal {
            return cmp_channels;
        }
        // 4. Sample format preference: f32 > i16 > u16.
        let cmp_f32 = (self.sample_format == F32).cmp(&(other.sample_format == F32));
        if cmp_f32 != Equal {
            return cmp_f32;
        }
        let cmp_i16 = (self.sample_format == I16).cmp(&(other.sample_format == I16));
        if cmp_i16 != Equal {
            return cmp_i16;
        }
        let cmp_u16 = (self.sample_format == U16).cmp(&(other.sample_format == U16));
        if cmp_u16 != Equal {
            return cmp_u16;
        }
        // 5. Prefer ranges that can produce CD-quality 44.1 kHz.
        const HZ_44100: SampleRate = SampleRate(44_100);
        let r44100_in_self = self.min_sample_rate <= HZ_44100 && HZ_44100 <= self.max_sample_rate;
        let r44100_in_other =
            other.min_sample_rate <= HZ_44100 && HZ_44100 <= other.max_sample_rate;
        let cmp_r44100 = r44100_in_self.cmp(&r44100_in_other);
        if cmp_r44100 != Equal {
            return cmp_r44100;
        }
        // 6. Finally, the higher maximum sample rate wins.
        self.max_sample_rate.cmp(&other.max_sample_rate)
    }
}
#[test]
fn test_cmp_default_heuristics() {
    // All entries share the same buffer size and minimum sample rate; only
    // channels, maximum rate and sample format vary.
    let fmt = |channels: ChannelCount, max_rate: u32, sample_format: SampleFormat| {
        SupportedStreamConfigRange {
            buffer_size: SupportedBufferSize::Range { min: 256, max: 512 },
            channels,
            min_sample_rate: SampleRate(1),
            max_sample_rate: SampleRate(max_rate),
            sample_format,
        }
    };
    let mut formats = [
        fmt(2, 96000, SampleFormat::F32),
        fmt(1, 96000, SampleFormat::F32),
        fmt(2, 96000, SampleFormat::I16),
        fmt(2, 96000, SampleFormat::U16),
        fmt(2, 22050, SampleFormat::F32),
    ];
    formats.sort_by(|a, b| a.cmp_default_heuristics(b));
    // Expected order, lowest priority first: (sample format, max rate, channels).
    let expected = [
        (SampleFormat::F32, 96000u32, 1u16),
        (SampleFormat::U16, 96000, 2),
        (SampleFormat::I16, 96000, 2),
        (SampleFormat::F32, 22050, 2),
        (SampleFormat::F32, 96000, 2),
    ];
    for (f, &(sample_format, max_rate, channels)) in formats.iter().zip(expected.iter()) {
        assert_eq!(f.sample_format(), sample_format);
        assert_eq!(f.min_sample_rate(), SampleRate(1));
        assert_eq!(f.max_sample_rate(), SampleRate(max_rate));
        assert_eq!(f.channels(), channels);
    }
}
impl From<SupportedStreamConfig> for StreamConfig {
fn from(conf: SupportedStreamConfig) -> Self {
conf.config()
}
}
// If a backend does not provide an API for retrieving supported formats, we query it with a bunch
// of commonly used rates. This is always the case for wasapi and is sometimes the case for alsa.
//
// If a rate you desire is missing from this list, feel free to add it!
// (The list is kept in ascending order.)
#[cfg(target_os = "windows")]
const COMMON_SAMPLE_RATES: &[SampleRate] = &[
    SampleRate(5512),
    SampleRate(8000),
    SampleRate(11025),
    SampleRate(16000),
    SampleRate(22050),
    SampleRate(32000),
    SampleRate(44100),
    SampleRate(48000),
    SampleRate(64000),
    SampleRate(88200),
    SampleRate(96000),
    SampleRate(176400),
    SampleRate(192000),
];
#[test]
fn test_stream_instant() {
    let a = StreamInstant::new(2, 0);
    let b = StreamInstant::new(-2, 0);
    let min = StreamInstant::new(i64::MIN, 0);
    let max = StreamInstant::new(i64::MAX, 0);
    // Subtracting whole seconds from `a` walks it backwards across zero.
    for (secs, expected) in [(1u64, 1i64), (2, 0), (3, -1)] {
        assert_eq!(
            a.sub(Duration::from_secs(secs)),
            Some(StreamInstant::new(expected, 0))
        );
    }
    // Underflow past the representable minimum yields `None`.
    assert_eq!(min.sub(Duration::from_secs(1)), None);
    // Adding whole seconds to `b` walks it forwards across zero.
    for (secs, expected) in [(1u64, -1i64), (2, 0), (3, 1)] {
        assert_eq!(
            b.add(Duration::from_secs(secs)),
            Some(StreamInstant::new(expected, 0))
        );
    }
    // Overflow past the representable maximum yields `None`.
    assert_eq!(max.add(Duration::from_secs(1)), None);
}

742
vendor/cpal/src/platform/mod.rs vendored Normal file
View File

@@ -0,0 +1,742 @@
//! Platform-specific items.
//!
//! This module also contains the implementation of the platform's dynamically dispatched [`Host`]
//! type and its associated [`Device`], [`Stream`] and other associated types. These
//! types are useful in the case that users require switching between audio host APIs at runtime.
#[doc(inline)]
pub use self::platform_impl::*;
/// A macro to assist with implementing a platform's dynamically dispatched [`Host`] type.
///
/// These dynamically dispatched types are necessary to allow for users to switch between hosts at
/// runtime.
///
/// For example, the invocation `impl_platform_host(Wasapi wasapi "WASAPI", Asio asio "ASIO")`
/// should expand to:
///
// This sample code block is marked as text because it's not a valid test,
// it's just illustrative. (see rust issue #96573)
/// ```text
/// pub enum HostId {
/// Wasapi,
/// Asio,
/// }
///
/// pub enum Host {
/// Wasapi(crate::host::wasapi::Host),
/// Asio(crate::host::asio::Host),
/// }
/// ```
///
/// And so on for Device, Devices, Host, Stream, SupportedInputConfigs,
/// SupportedOutputConfigs and all their necessary trait implementations.
///
macro_rules! impl_platform_host {
($($(#[cfg($feat: meta)])? $HostVariant:ident $host_mod:ident $host_name:literal),*) => {
/// All hosts supported by CPAL on this platform.
pub const ALL_HOSTS: &'static [HostId] = &[
$(
$(#[cfg($feat)])?
HostId::$HostVariant,
)*
];
/// The platform's dynamically dispatched `Host` type.
///
/// An instance of this `Host` type may represent one of the `Host`s available
/// on the platform.
///
/// Use this type if you require switching between available hosts at runtime.
///
/// This type may be constructed via the [`host_from_id`] function. [`HostId`]s may
/// be acquired via the [`ALL_HOSTS`] const, and the [`available_hosts`] function.
pub struct Host(HostInner);
/// The `Device` implementation associated with the platform's dynamically dispatched
/// [`Host`] type.
#[derive(Clone)]
pub struct Device(DeviceInner);
/// The `Devices` iterator associated with the platform's dynamically dispatched [`Host`]
/// type.
pub struct Devices(DevicesInner);
/// The `Stream` implementation associated with the platform's dynamically dispatched
/// [`Host`] type.
// Streams cannot be `Send` or `Sync` if we plan to support Android's AAudio API. This is
// because the stream API is not thread-safe, and the API prohibits calling certain
// functions within the callback.
//
// TODO: Confirm this and add more specific detail and references.
#[must_use = "If the stream is not stored it will not play."]
pub struct Stream(StreamInner, crate::platform::NotSendSyncAcrossAllPlatforms);
/// The `SupportedInputConfigs` iterator associated with the platform's dynamically
/// dispatched [`Host`] type.
pub struct SupportedInputConfigs(SupportedInputConfigsInner);
/// The `SupportedOutputConfigs` iterator associated with the platform's dynamically
/// dispatched [`Host`] type.
pub struct SupportedOutputConfigs(SupportedOutputConfigsInner);
/// Unique identifier for available hosts on the platform.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum HostId {
$(
$(#[cfg($feat)])?
$HostVariant,
)*
}
/// Contains a platform specific [`Device`] implementation.
#[derive(Clone)]
pub enum DeviceInner {
$(
$(#[cfg($feat)])?
$HostVariant(crate::host::$host_mod::Device),
)*
}
/// Contains a platform specific [`Devices`] implementation.
pub enum DevicesInner {
$(
$(#[cfg($feat)])?
$HostVariant(crate::host::$host_mod::Devices),
)*
}
/// Contains a platform specific [`Host`] implementation.
pub enum HostInner {
$(
$(#[cfg($feat)])?
$HostVariant(crate::host::$host_mod::Host),
)*
}
/// Contains a platform specific [`Stream`] implementation.
pub enum StreamInner {
$(
$(#[cfg($feat)])?
$HostVariant(crate::host::$host_mod::Stream),
)*
}
enum SupportedInputConfigsInner {
$(
$(#[cfg($feat)])?
$HostVariant(crate::host::$host_mod::SupportedInputConfigs),
)*
}
enum SupportedOutputConfigsInner {
$(
$(#[cfg($feat)])?
$HostVariant(crate::host::$host_mod::SupportedOutputConfigs),
)*
}
impl HostId {
pub fn name(&self) -> &'static str {
match self {
$(
$(#[cfg($feat)])?
HostId::$HostVariant => $host_name,
)*
}
}
}
impl Devices {
/// Returns a reference to the underlying platform specific implementation of this
/// `Devices`.
pub fn as_inner(&self) -> &DevicesInner {
&self.0
}
/// Returns a mutable reference to the underlying platform specific implementation of
/// this `Devices`.
pub fn as_inner_mut(&mut self) -> &mut DevicesInner {
&mut self.0
}
/// Returns the underlying platform specific implementation of this `Devices`.
pub fn into_inner(self) -> DevicesInner {
self.0
}
}
impl Device {
/// Returns a reference to the underlying platform specific implementation of this
/// `Device`.
pub fn as_inner(&self) -> &DeviceInner {
&self.0
}
/// Returns a mutable reference to the underlying platform specific implementation of
/// this `Device`.
pub fn as_inner_mut(&mut self) -> &mut DeviceInner {
&mut self.0
}
/// Returns the underlying platform specific implementation of this `Device`.
pub fn into_inner(self) -> DeviceInner {
self.0
}
}
impl Host {
/// The unique identifier associated with this `Host`.
pub fn id(&self) -> HostId {
match self.0 {
$(
$(#[cfg($feat)])?
HostInner::$HostVariant(_) => HostId::$HostVariant,
)*
}
}
/// Returns a reference to the underlying platform specific implementation of this
/// `Host`.
pub fn as_inner(&self) -> &HostInner {
&self.0
}
/// Returns a mutable reference to the underlying platform specific implementation of
/// this `Host`.
pub fn as_inner_mut(&mut self) -> &mut HostInner {
&mut self.0
}
/// Returns the underlying platform specific implementation of this `Host`.
pub fn into_inner(self) -> HostInner {
self.0
}
}
impl Stream {
/// Returns a reference to the underlying platform specific implementation of this
/// `Stream`.
pub fn as_inner(&self) -> &StreamInner {
&self.0
}
/// Returns a mutable reference to the underlying platform specific implementation of
/// this `Stream`.
pub fn as_inner_mut(&mut self) -> &mut StreamInner {
&mut self.0
}
/// Returns the underlying platform specific implementation of this `Stream`.
pub fn into_inner(self) -> StreamInner {
self.0
}
}
impl Iterator for Devices {
type Item = Device;
fn next(&mut self) -> Option<Self::Item> {
match self.0 {
$(
$(#[cfg($feat)])?
DevicesInner::$HostVariant(ref mut d) => {
d.next().map(DeviceInner::$HostVariant).map(Device::from)
}
)*
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.0 {
$(
$(#[cfg($feat)])?
DevicesInner::$HostVariant(ref d) => d.size_hint(),
)*
}
}
}
impl Iterator for SupportedInputConfigs {
type Item = crate::SupportedStreamConfigRange;
fn next(&mut self) -> Option<Self::Item> {
match self.0 {
$(
$(#[cfg($feat)])?
SupportedInputConfigsInner::$HostVariant(ref mut s) => s.next(),
)*
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.0 {
$(
$(#[cfg($feat)])?
SupportedInputConfigsInner::$HostVariant(ref d) => d.size_hint(),
)*
}
}
}
impl Iterator for SupportedOutputConfigs {
type Item = crate::SupportedStreamConfigRange;
fn next(&mut self) -> Option<Self::Item> {
match self.0 {
$(
$(#[cfg($feat)])?
SupportedOutputConfigsInner::$HostVariant(ref mut s) => s.next(),
)*
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
match self.0 {
$(
$(#[cfg($feat)])?
SupportedOutputConfigsInner::$HostVariant(ref d) => d.size_hint(),
)*
}
}
}
impl crate::traits::DeviceTrait for Device {
type SupportedInputConfigs = SupportedInputConfigs;
type SupportedOutputConfigs = SupportedOutputConfigs;
type Stream = Stream;
fn name(&self) -> Result<String, crate::DeviceNameError> {
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => d.name(),
)*
}
}
fn supported_input_configs(&self) -> Result<Self::SupportedInputConfigs, crate::SupportedStreamConfigsError> {
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => {
d.supported_input_configs()
.map(SupportedInputConfigsInner::$HostVariant)
.map(SupportedInputConfigs)
}
)*
}
}
fn supported_output_configs(&self) -> Result<Self::SupportedOutputConfigs, crate::SupportedStreamConfigsError> {
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => {
d.supported_output_configs()
.map(SupportedOutputConfigsInner::$HostVariant)
.map(SupportedOutputConfigs)
}
)*
}
}
fn default_input_config(&self) -> Result<crate::SupportedStreamConfig, crate::DefaultStreamConfigError> {
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => d.default_input_config(),
)*
}
}
fn default_output_config(&self) -> Result<crate::SupportedStreamConfig, crate::DefaultStreamConfigError> {
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => d.default_output_config(),
)*
}
}
fn build_input_stream_raw<D, E>(
&self,
config: &crate::StreamConfig,
sample_format: crate::SampleFormat,
data_callback: D,
error_callback: E,
timeout: Option<std::time::Duration>,
) -> Result<Self::Stream, crate::BuildStreamError>
where
D: FnMut(&crate::Data, &crate::InputCallbackInfo) + Send + 'static,
E: FnMut(crate::StreamError) + Send + 'static,
{
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => d
.build_input_stream_raw(
config,
sample_format,
data_callback,
error_callback,
timeout,
)
.map(StreamInner::$HostVariant)
.map(Stream::from),
)*
}
}
fn build_output_stream_raw<D, E>(
&self,
config: &crate::StreamConfig,
sample_format: crate::SampleFormat,
data_callback: D,
error_callback: E,
timeout: Option<std::time::Duration>,
) -> Result<Self::Stream, crate::BuildStreamError>
where
D: FnMut(&mut crate::Data, &crate::OutputCallbackInfo) + Send + 'static,
E: FnMut(crate::StreamError) + Send + 'static,
{
match self.0 {
$(
$(#[cfg($feat)])?
DeviceInner::$HostVariant(ref d) => d
.build_output_stream_raw(
config,
sample_format,
data_callback,
error_callback,
timeout,
)
.map(StreamInner::$HostVariant)
.map(Stream::from),
)*
}
}
}
impl crate::traits::HostTrait for Host {
type Devices = Devices;
type Device = Device;
fn is_available() -> bool {
$(
$(#[cfg($feat)])?
if crate::host::$host_mod::Host::is_available() { return true; }
)*
false
}
fn devices(&self) -> Result<Self::Devices, crate::DevicesError> {
match self.0 {
$(
$(#[cfg($feat)])?
HostInner::$HostVariant(ref h) => {
h.devices().map(DevicesInner::$HostVariant).map(Devices::from)
}
)*
}
}
fn default_input_device(&self) -> Option<Self::Device> {
match self.0 {
$(
$(#[cfg($feat)])?
HostInner::$HostVariant(ref h) => {
h.default_input_device().map(DeviceInner::$HostVariant).map(Device::from)
}
)*
}
}
fn default_output_device(&self) -> Option<Self::Device> {
match self.0 {
$(
$(#[cfg($feat)])?
HostInner::$HostVariant(ref h) => {
h.default_output_device().map(DeviceInner::$HostVariant).map(Device::from)
}
)*
}
}
}
impl crate::traits::StreamTrait for Stream {
fn play(&self) -> Result<(), crate::PlayStreamError> {
match self.0 {
$(
$(#[cfg($feat)])?
StreamInner::$HostVariant(ref s) => {
s.play()
}
)*
}
}
fn pause(&self) -> Result<(), crate::PauseStreamError> {
match self.0 {
$(
$(#[cfg($feat)])?
StreamInner::$HostVariant(ref s) => {
s.pause()
}
)*
}
}
}
impl From<DeviceInner> for Device {
fn from(d: DeviceInner) -> Self {
Device(d)
}
}
impl From<DevicesInner> for Devices {
fn from(d: DevicesInner) -> Self {
Devices(d)
}
}
impl From<HostInner> for Host {
fn from(h: HostInner) -> Self {
Host(h)
}
}
impl From<StreamInner> for Stream {
fn from(s: StreamInner) -> Self {
Stream(s, Default::default())
}
}
$(
$(#[cfg($feat)])?
impl From<crate::host::$host_mod::Device> for Device {
fn from(h: crate::host::$host_mod::Device) -> Self {
DeviceInner::$HostVariant(h).into()
}
}
$(#[cfg($feat)])?
impl From<crate::host::$host_mod::Devices> for Devices {
fn from(h: crate::host::$host_mod::Devices) -> Self {
DevicesInner::$HostVariant(h).into()
}
}
$(#[cfg($feat)])?
impl From<crate::host::$host_mod::Host> for Host {
fn from(h: crate::host::$host_mod::Host) -> Self {
HostInner::$HostVariant(h).into()
}
}
$(#[cfg($feat)])?
impl From<crate::host::$host_mod::Stream> for Stream {
fn from(h: crate::host::$host_mod::Stream) -> Self {
StreamInner::$HostVariant(h).into()
}
}
)*
/// Produces a list of hosts that are currently available on the system.
pub fn available_hosts() -> Vec<HostId> {
let mut host_ids = vec![];
$(
$(#[cfg($feat)])?
if <crate::host::$host_mod::Host as crate::traits::HostTrait>::is_available() {
host_ids.push(HostId::$HostVariant);
}
)*
host_ids
}
/// Given a unique host identifier, initialise and produce the host if it is available.
pub fn host_from_id(id: HostId) -> Result<Host, crate::HostUnavailable> {
match id {
$(
$(#[cfg($feat)])?
HostId::$HostVariant => {
crate::host::$host_mod::Host::new()
.map(HostInner::$HostVariant)
.map(Host::from)
}
)*
}
}
};
}
// TODO: Add pulseaudio and jack here eventually.
#[cfg(any(
    target_os = "linux",
    target_os = "dragonfly",
    target_os = "freebsd",
    target_os = "netbsd"
))]
mod platform_impl {
    //! Platform backends for Linux and the BSDs: ALSA unconditionally, plus
    //! JACK when the `jack` cargo feature is enabled.
    // Re-export each backend's types under platform-qualified aliases so that
    // the `impl_platform_host!` expansion (and downstream users) can name them.
    pub use crate::host::alsa::{
        Device as AlsaDevice, Devices as AlsaDevices, Host as AlsaHost, Stream as AlsaStream,
        SupportedInputConfigs as AlsaSupportedInputConfigs,
        SupportedOutputConfigs as AlsaSupportedOutputConfigs,
    };
    #[cfg(feature = "jack")]
    pub use crate::host::jack::{
        Device as JackDevice, Devices as JackDevices, Host as JackHost, Stream as JackStream,
        SupportedInputConfigs as JackSupportedInputConfigs,
        SupportedOutputConfigs as JackSupportedOutputConfigs,
    };
    impl_platform_host!(#[cfg(feature = "jack")] Jack jack "JACK", Alsa alsa "ALSA");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        // ALSA is the baseline backend on these targets; per the `expect`
        // message, constructing it is not expected to fail.
        AlsaHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
#[cfg(any(target_os = "macos", target_os = "ios"))]
mod platform_impl {
    //! Platform backend for Apple targets (macOS and iOS): CoreAudio.
    // Platform-qualified aliases consumed by the `impl_platform_host!` expansion.
    pub use crate::host::coreaudio::{
        Device as CoreAudioDevice, Devices as CoreAudioDevices, Host as CoreAudioHost,
        Stream as CoreAudioStream, SupportedInputConfigs as CoreAudioSupportedInputConfigs,
        SupportedOutputConfigs as CoreAudioSupportedOutputConfigs,
    };
    impl_platform_host!(CoreAudio coreaudio "CoreAudio");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        // Per the `expect` message, CoreAudio construction is not expected to fail.
        CoreAudioHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
#[cfg(target_os = "emscripten")]
mod platform_impl {
    //! Platform backend for Emscripten (browser) targets.
    // Platform-qualified aliases consumed by the `impl_platform_host!` expansion.
    pub use crate::host::emscripten::{
        Device as EmscriptenDevice, Devices as EmscriptenDevices, Host as EmscriptenHost,
        Stream as EmscriptenStream, SupportedInputConfigs as EmscriptenSupportedInputConfigs,
        SupportedOutputConfigs as EmscriptenSupportedOutputConfigs,
    };
    impl_platform_host!(Emscripten emscripten "Emscripten");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        EmscriptenHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
#[cfg(all(target_arch = "wasm32", feature = "wasm-bindgen"))]
mod platform_impl {
    //! Platform backend for `wasm32` targets built with the `wasm-bindgen`
    //! feature: the WebAudio host.
    // Platform-qualified aliases consumed by the `impl_platform_host!` expansion.
    pub use crate::host::webaudio::{
        Device as WebAudioDevice, Devices as WebAudioDevices, Host as WebAudioHost,
        Stream as WebAudioStream, SupportedInputConfigs as WebAudioSupportedInputConfigs,
        SupportedOutputConfigs as WebAudioSupportedOutputConfigs,
    };
    impl_platform_host!(WebAudio webaudio "WebAudio");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        WebAudioHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
#[cfg(windows)]
mod platform_impl {
    //! Platform backends for Windows: WASAPI unconditionally, plus ASIO when
    //! the `asio` cargo feature is enabled.
    // Platform-qualified aliases consumed by the `impl_platform_host!` expansion.
    #[cfg(feature = "asio")]
    pub use crate::host::asio::{
        Device as AsioDevice, Devices as AsioDevices, Host as AsioHost, Stream as AsioStream,
        SupportedInputConfigs as AsioSupportedInputConfigs,
        SupportedOutputConfigs as AsioSupportedOutputConfigs,
    };
    pub use crate::host::wasapi::{
        Device as WasapiDevice, Devices as WasapiDevices, Host as WasapiHost,
        Stream as WasapiStream, SupportedInputConfigs as WasapiSupportedInputConfigs,
        SupportedOutputConfigs as WasapiSupportedOutputConfigs,
    };
    impl_platform_host!(#[cfg(feature = "asio")] Asio asio "ASIO", Wasapi wasapi "WASAPI");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        // WASAPI ships with Windows; per the `expect` message, construction
        // is not expected to fail.
        WasapiHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
#[cfg(target_os = "android")]
mod platform_impl {
    //! Platform backend for Android: the `oboe` host.
    // Platform-qualified aliases consumed by the `impl_platform_host!` expansion.
    pub use crate::host::oboe::{
        Device as OboeDevice, Devices as OboeDevices, Host as OboeHost, Stream as OboeStream,
        SupportedInputConfigs as OboeSupportedInputConfigs,
        SupportedOutputConfigs as OboeSupportedOutputConfigs,
    };
    impl_platform_host!(Oboe oboe "Oboe");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        OboeHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
#[cfg(not(any(
    windows,
    target_os = "linux",
    target_os = "dragonfly",
    target_os = "freebsd",
    target_os = "netbsd",
    target_os = "macos",
    target_os = "ios",
    target_os = "emscripten",
    target_os = "android",
    all(target_arch = "wasm32", feature = "wasm-bindgen"),
)))]
mod platform_impl {
    //! Fallback for targets with no dedicated audio backend: the `null` host.
    // Platform-qualified aliases consumed by the `impl_platform_host!` expansion.
    // NOTE(review): unlike the other platform modules, no `Stream` alias is
    // re-exported here — presumably `host::null` does not define one; confirm.
    pub use crate::host::null::{
        Device as NullDevice, Devices as NullDevices, Host as NullHost,
        SupportedInputConfigs as NullSupportedInputConfigs,
        SupportedOutputConfigs as NullSupportedOutputConfigs,
    };
    impl_platform_host!(Null null "Null");
    /// The default host for the current compilation target platform.
    pub fn default_host() -> Host {
        NullHost::new()
            .expect("the default host should always be available")
            .into()
    }
}
// The following zero-sized types are for applying Send/Sync restrictions to ensure
// consistent behaviour across different platforms. These verbosely named types are used
// (rather than using the markers directly) in the hope of making the compile errors
// slightly more helpful.
//
// TODO: Remove these in favour of using negative trait bounds if they stabilise.
// A marker used to remove the `Send` and `Sync` traits.
// Zero-sized marker: `PhantomData<*mut ()>` opts the containing type out of
// both `Send` and `Sync`, since raw pointers implement neither.
struct NotSendSyncAcrossAllPlatforms(std::marker::PhantomData<*mut ()>);

impl Default for NotSendSyncAcrossAllPlatforms {
    // The marker carries no data, so `Default` simply wraps a fresh `PhantomData`.
    fn default() -> Self {
        Self(std::marker::PhantomData)
    }
}

167
vendor/cpal/src/samples_formats.rs vendored Normal file
View File

@@ -0,0 +1,167 @@
use std::{fmt::Display, mem};
#[cfg(target_os = "emscripten")]
use wasm_bindgen::prelude::*;
pub use dasp_sample::{FromSample, Sample, I24, I48, U24, U48};
/// Format that each sample has.
///
/// The commented-out variants (`I24`, `I48`, `U24`, `U48`) are placeholders
/// for packed 24-/48-bit formats that are not currently enabled.
#[cfg_attr(target_os = "emscripten", wasm_bindgen)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum SampleFormat {
    /// `i8` with a valid range of `i8::MIN..=i8::MAX` with `0` being the origin
    I8,
    /// `i16` with a valid range of `i16::MIN..=i16::MAX` with `0` being the origin
    I16,
    // /// `I24` with a valid range of '-(1 << 23)..(1 << 23)' with `0` being the origin
    // I24,
    /// `i32` with a valid range of `i32::MIN..=i32::MAX` with `0` being the origin
    I32,
    // /// `I48` with a valid range of '-(1 << 47)..(1 << 47)' with `0` being the origin
    // I48,
    /// `i64` with a valid range of `i64::MIN..=i64::MAX` with `0` being the origin
    I64,
    /// `u8` with a valid range of `u8::MIN..=u8::MAX` with `1 << 7 == 128` being the origin
    U8,
    /// `u16` with a valid range of `u16::MIN..=u16::MAX` with `1 << 15 == 32768` being the origin
    U16,
    // /// `U24` with a valid range of '0..16777216' with `1 << 23 == 8388608` being the origin
    // U24,
    /// `u32` with a valid range of `u32::MIN..=u32::MAX` with `1 << 31` being the origin
    U32,
    // /// `U48` with a valid range of '0..(1 << 48)' with `1 << 47` being the origin
    // U48,
    /// `u64` with a valid range of `u64::MIN..=u64::MAX` with `1 << 63` being the origin
    U64,
    /// `f32` with a valid range of `-1.0..1.0` with `0.0` being the origin
    F32,
    /// `f64` with a valid range of `-1.0..1.0` with `0.0` being the origin
    F64,
}

impl SampleFormat {
    /// Returns the size in bytes of a sample of this format.
    #[inline]
    #[must_use]
    pub fn sample_size(&self) -> usize {
        match *self {
            SampleFormat::I8 | SampleFormat::U8 => mem::size_of::<i8>(),
            SampleFormat::I16 | SampleFormat::U16 => mem::size_of::<i16>(),
            // SampleFormat::I24 | SampleFormat::U24 => 3,
            SampleFormat::I32 | SampleFormat::U32 => mem::size_of::<i32>(),
            // SampleFormat::I48 | SampleFormat::U48 => 6,
            SampleFormat::I64 | SampleFormat::U64 => mem::size_of::<i64>(),
            SampleFormat::F32 => mem::size_of::<f32>(),
            SampleFormat::F64 => mem::size_of::<f64>(),
        }
    }

    /// Whether this is a signed-integer format (`I8`/`I16`/`I32`/`I64`).
    #[inline]
    #[must_use]
    pub fn is_int(&self) -> bool {
        //matches!(*self, SampleFormat::I8 | SampleFormat::I16 | SampleFormat::I24 | SampleFormat::I32 | SampleFormat::I48 | SampleFormat::I64)
        matches!(
            *self,
            SampleFormat::I8 | SampleFormat::I16 | SampleFormat::I32 | SampleFormat::I64
        )
    }

    /// Whether this is an unsigned-integer format (`U8`/`U16`/`U32`/`U64`).
    #[inline]
    #[must_use]
    pub fn is_uint(&self) -> bool {
        //matches!(*self, SampleFormat::U8 | SampleFormat::U16 | SampleFormat::U24 | SampleFormat::U32 | SampleFormat::U48 | SampleFormat::U64)
        matches!(
            *self,
            SampleFormat::U8 | SampleFormat::U16 | SampleFormat::U32 | SampleFormat::U64
        )
    }

    /// Whether this is a floating-point format (`F32`/`F64`).
    #[inline]
    #[must_use]
    pub fn is_float(&self) -> bool {
        matches!(*self, SampleFormat::F32 | SampleFormat::F64)
    }
}
impl Display for SampleFormat {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Resolve the static name first, then delegate to `str`'s `Display`
        // impl so that width/alignment/precision flags are still honoured.
        let name = match *self {
            SampleFormat::I8 => "i8",
            SampleFormat::I16 => "i16",
            // SampleFormat::I24 => "i24",
            SampleFormat::I32 => "i32",
            // SampleFormat::I48 => "i48",
            SampleFormat::I64 => "i64",
            SampleFormat::U8 => "u8",
            SampleFormat::U16 => "u16",
            // SampleFormat::U24 => "u24",
            SampleFormat::U32 => "u32",
            // SampleFormat::U48 => "u48",
            SampleFormat::U64 => "u64",
            SampleFormat::F32 => "f32",
            SampleFormat::F64 => "f64",
        };
        name.fmt(f)
    }
}
/// A [`Sample`] whose runtime representation is described by a compile-time
/// [`SampleFormat`].
pub trait SizedSample: Sample {
    /// The [`SampleFormat`] variant corresponding to `Self`.
    const FORMAT: SampleFormat;
}
// One impl per supported primitive sample type. The commented-out impls
// mirror the commented-out 24-/48-bit variants on `SampleFormat`.
impl SizedSample for i8 {
    const FORMAT: SampleFormat = SampleFormat::I8;
}
impl SizedSample for i16 {
    const FORMAT: SampleFormat = SampleFormat::I16;
}
// impl SizedSample for I24 { const FORMAT: SampleFormat = SampleFormat::I24; }
impl SizedSample for i32 {
    const FORMAT: SampleFormat = SampleFormat::I32;
}
// impl SizedSample for I48 { const FORMAT: SampleFormat = SampleFormat::I48; }
impl SizedSample for i64 {
    const FORMAT: SampleFormat = SampleFormat::I64;
}
impl SizedSample for u8 {
    const FORMAT: SampleFormat = SampleFormat::U8;
}
impl SizedSample for u16 {
    const FORMAT: SampleFormat = SampleFormat::U16;
}
// impl SizedSample for U24 { const FORMAT: SampleFormat = SampleFormat::U24; }
impl SizedSample for u32 {
    const FORMAT: SampleFormat = SampleFormat::U32;
}
// impl SizedSample for U48 { const FORMAT: SampleFormat = SampleFormat::U48; }
impl SizedSample for u64 {
    const FORMAT: SampleFormat = SampleFormat::U64;
}
impl SizedSample for f32 {
    const FORMAT: SampleFormat = SampleFormat::F32;
}
impl SizedSample for f64 {
    const FORMAT: SampleFormat = SampleFormat::F64;
}

221
vendor/cpal/src/traits.rs vendored Normal file
View File

@@ -0,0 +1,221 @@
//! The suite of traits allowing CPAL to abstract over hosts, devices, event loops and stream IDs.
use std::time::Duration;
use crate::{
BuildStreamError, Data, DefaultStreamConfigError, DeviceNameError, DevicesError,
InputCallbackInfo, InputDevices, OutputCallbackInfo, OutputDevices, PauseStreamError,
PlayStreamError, SampleFormat, SizedSample, StreamConfig, StreamError, SupportedStreamConfig,
SupportedStreamConfigRange, SupportedStreamConfigsError,
};
/// A [`Host`] provides access to the available audio devices on the system.
///
/// Each platform may have a number of available hosts depending on the system, each with their own
/// pros and cons.
///
/// For example, WASAPI is the standard audio host API that ships with the Windows operating
/// system. However, due to historical limitations with respect to performance and flexibility,
/// Steinberg created the ASIO API providing better audio device support for pro audio and
/// low-latency applications. As a result, it is common for some devices and device capabilities to
/// only be available via ASIO, while others are only available via WASAPI.
///
/// Another great example is the Linux platform. While the ALSA host API is the lowest-level API
/// available to almost all distributions of Linux, its flexibility is limited as it requires that
/// each process have exclusive access to the devices with which they establish streams. PulseAudio
/// is another popular host API that aims to solve this issue by providing user-space mixing,
/// however it has its own limitations w.r.t. low-latency and high-performance audio applications.
/// JACK is yet another host API that is more suitable to pro-audio applications, however it is
/// less readily available by default in many Linux distributions and is known to be tricky to
/// set up.
///
/// [`Host`]: crate::Host
pub trait HostTrait {
    /// The type used for enumerating available devices by the host.
    type Devices: Iterator<Item = Self::Device>;
    /// The `Device` type yielded by the host.
    type Device: DeviceTrait;
    /// Whether or not the host is available on the system.
    fn is_available() -> bool;
    /// An iterator yielding all [`Device`](DeviceTrait)s currently available to the host on the system.
    ///
    /// Can be empty if the system does not support audio in general.
    fn devices(&self) -> Result<Self::Devices, DevicesError>;
    /// The default input audio device on the system.
    ///
    /// Returns `None` if no input device is available.
    fn default_input_device(&self) -> Option<Self::Device>;
    /// The default output audio device on the system.
    ///
    /// Returns `None` if no output device is available.
    fn default_output_device(&self) -> Option<Self::Device>;
    /// An iterator yielding all `Device`s currently available to the system that support one or more
    /// input stream formats.
    ///
    /// Can be empty if the system does not support audio input.
    fn input_devices(&self) -> Result<InputDevices<Self::Devices>, DevicesError> {
        // A named function (rather than a closure) is used so the predicate
        // coerces to a plain `fn` pointer, which the `InputDevices` filter
        // alias presumably requires — confirm against its definition in lib.rs
        // before restructuring.
        fn supports_input<D: DeviceTrait>(device: &D) -> bool {
            // A device counts as an input device if it reports at least one
            // supported input config; enumeration errors are treated as "no".
            device
                .supported_input_configs()
                .map(|mut iter| iter.next().is_some())
                .unwrap_or(false)
        }
        Ok(self.devices()?.filter(supports_input::<Self::Device>))
    }
    /// An iterator yielding all `Device`s currently available to the system that support one or more
    /// output stream formats.
    ///
    /// Can be empty if the system does not support audio output.
    fn output_devices(&self) -> Result<OutputDevices<Self::Devices>, DevicesError> {
        // Mirror of `input_devices` for the output direction.
        fn supports_output<D: DeviceTrait>(device: &D) -> bool {
            device
                .supported_output_configs()
                .map(|mut iter| iter.next().is_some())
                .unwrap_or(false)
        }
        Ok(self.devices()?.filter(supports_output::<Self::Device>))
    }
}
/// A device that is capable of audio input and/or output.
///
/// Please note that `Device`s may become invalid if they get disconnected. Therefore, all the
/// methods that involve a device return a `Result` allowing the user to handle this case.
pub trait DeviceTrait {
    /// The iterator type yielding supported input stream formats.
    type SupportedInputConfigs: Iterator<Item = SupportedStreamConfigRange>;
    /// The iterator type yielding supported output stream formats.
    type SupportedOutputConfigs: Iterator<Item = SupportedStreamConfigRange>;
    /// The stream type created by [`build_input_stream_raw`] and [`build_output_stream_raw`].
    ///
    /// [`build_input_stream_raw`]: Self::build_input_stream_raw
    /// [`build_output_stream_raw`]: Self::build_output_stream_raw
    type Stream: StreamTrait;
    /// The human-readable name of the device.
    fn name(&self) -> Result<String, DeviceNameError>;
    /// An iterator yielding formats that are supported by the backend.
    ///
    /// Can return an error if the device is no longer valid (e.g. it has been disconnected).
    fn supported_input_configs(
        &self,
    ) -> Result<Self::SupportedInputConfigs, SupportedStreamConfigsError>;
    /// An iterator yielding output stream formats that are supported by the device.
    ///
    /// Can return an error if the device is no longer valid (e.g. it has been disconnected).
    fn supported_output_configs(
        &self,
    ) -> Result<Self::SupportedOutputConfigs, SupportedStreamConfigsError>;
    /// The default input stream format for the device.
    fn default_input_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>;
    /// The default output stream format for the device.
    fn default_output_config(&self) -> Result<SupportedStreamConfig, DefaultStreamConfigError>;
    /// Create an input stream.
    ///
    /// Convenience wrapper over [`build_input_stream_raw`](Self::build_input_stream_raw) for a
    /// statically-known sample type `T`.
    ///
    /// # Panics
    ///
    /// The data callback panics if the host supplies a buffer whose sample type does not match
    /// `T::FORMAT` (which would indicate a backend bug).
    fn build_input_stream<T, D, E>(
        &self,
        config: &StreamConfig,
        mut data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        T: SizedSample,
        D: FnMut(&[T], &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // Adapt the typed, slice-based callback onto the dynamically typed
        // `Data` interface expected by the raw builder.
        self.build_input_stream_raw(
            config,
            T::FORMAT,
            move |data, info| {
                data_callback(
                    // `as_slice` fails only when the buffer's sample format
                    // disagrees with `T::FORMAT`, i.e. a backend bug.
                    data.as_slice()
                        .expect("host supplied incorrect sample type"),
                    info,
                )
            },
            error_callback,
            timeout,
        )
    }
    /// Create an output stream.
    ///
    /// Convenience wrapper over [`build_output_stream_raw`](Self::build_output_stream_raw) for a
    /// statically-known sample type `T`.
    ///
    /// # Panics
    ///
    /// The data callback panics if the host supplies a buffer whose sample type does not match
    /// `T::FORMAT` (which would indicate a backend bug).
    fn build_output_stream<T, D, E>(
        &self,
        config: &StreamConfig,
        mut data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        T: SizedSample,
        D: FnMut(&mut [T], &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static,
    {
        // Mirror of `build_input_stream`, using the mutable slice view.
        self.build_output_stream_raw(
            config,
            T::FORMAT,
            move |data, info| {
                data_callback(
                    data.as_slice_mut()
                        .expect("host supplied incorrect sample type"),
                    info,
                )
            },
            error_callback,
            timeout,
        )
    }
    /// Create a dynamically typed input stream.
    fn build_input_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&Data, &InputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static;
    /// Create a dynamically typed output stream.
    fn build_output_stream_raw<D, E>(
        &self,
        config: &StreamConfig,
        sample_format: SampleFormat,
        data_callback: D,
        error_callback: E,
        timeout: Option<Duration>,
    ) -> Result<Self::Stream, BuildStreamError>
    where
        D: FnMut(&mut Data, &OutputCallbackInfo) + Send + 'static,
        E: FnMut(StreamError) + Send + 'static;
}
/// A stream created from [`Device`](DeviceTrait), with methods to control playback.
pub trait StreamTrait {
    /// Run the stream.
    ///
    /// Note: Not all platforms automatically run the stream upon creation, so it is important to
    /// call `play` after creation if it is expected that the stream should run immediately.
    fn play(&self) -> Result<(), PlayStreamError>;
    /// Some devices support pausing the audio stream. This can be useful for saving energy in
    /// moments of silence.
    ///
    /// Note: Not all devices support suspending the stream at the hardware level. This method may
    /// fail in these cases.
    fn pause(&self) -> Result<(), PauseStreamError>;
    // NOTE(review): there is deliberately no `stop` method; presumably dropping
    // the stream releases the underlying resources — confirm with backend impls.
}