//! Functions and filters for the sampling of pixels. // See http://cs.brown.edu/courses/cs123/lectures/08_Image_Processing_IV.pdf // for some of the theory behind image scaling and convolution use num_traits::{NumCast, ToPrimitive, Zero}; use std::f32; use std::ops::Mul; use crate::imageops::filter_1d::{ filter_2d_sep_la, filter_2d_sep_la_f32, filter_2d_sep_la_u16, filter_2d_sep_plane, filter_2d_sep_plane_f32, filter_2d_sep_plane_u16, filter_2d_sep_rgb, filter_2d_sep_rgb_f32, filter_2d_sep_rgb_u16, filter_2d_sep_rgba, filter_2d_sep_rgba_f32, filter_2d_sep_rgba_u16, FilterImageSize, }; use crate::images::buffer::{Gray16Image, GrayAlpha16Image, Rgb16Image, Rgba16Image}; use crate::traits::{Enlargeable, Pixel, Primitive}; use crate::utils::clamp; use crate::{ DynamicImage, GenericImage, GenericImageView, GrayAlphaImage, GrayImage, ImageBuffer, Rgb32FImage, RgbImage, Rgba32FImage, RgbaImage, }; /// Available Sampling Filters. /// /// ## Examples /// /// To test the different sampling filters on a real example, you can find two /// examples called /// [`scaledown`](https://github.com/image-rs/image/tree/main/examples/scaledown) /// and /// [`scaleup`](https://github.com/image-rs/image/tree/main/examples/scaleup) /// in the `examples` directory of the crate source code. /// /// Here is a 3.58 MiB /// [test image](https://github.com/image-rs/image/blob/main/examples/scaledown/test.jpg) /// that has been scaled down to 300x225 px: /// /// ///





/// | Filter | Time |
/// |---|---|
/// | `Nearest` | 31 ms |
/// | `Triangle` | 414 ms |
/// | `CatmullRom` | 817 ms |
/// | `Gaussian` | 1180 ms |
/// | `Lanczos3` | 1170 ms |
( image: &Rgba32FImage, new_width: u32, filter: &mut Filter, ) -> ImageBuffer
>
where
P: Pixel {
if ![u, v].iter().all(|c| (0.0..=1.0).contains(c)) {
return None;
}
let (w, h) = img.dimensions();
if w == 0 || h == 0 {
return None;
}
let ui = w as f32 * u - 0.5;
let vi = h as f32 * v - 0.5;
interpolate_bilinear(
img,
ui.max(0.).min((w - 1) as f32),
vi.max(0.).min((h - 1) as f32),
)
}
/// Sample from an image using coordinates in [0, 1], taking the nearest coordinate.
pub fn sample_nearest {
if ![u, v].iter().all(|c| (0.0..=1.0).contains(c)) {
return None;
}
let (w, h) = img.dimensions();
let ui = w as f32 * u - 0.5;
let ui = ui.max(0.).min((w.saturating_sub(1)) as f32);
let vi = h as f32 * v - 0.5;
let vi = vi.max(0.).min((h.saturating_sub(1)) as f32);
interpolate_nearest(img, ui, vi)
}
/// Sample from an image using coordinates in [0, w-1] and [0, h-1], taking the
/// nearest pixel.
///
/// Coordinates outside the image bounds will return `None`, however the
/// behavior for points within half a pixel of the image bounds may change in
/// the future.
pub fn interpolate_nearest {
let (w, h) = img.dimensions();
if w == 0 || h == 0 {
return None;
}
if !(0.0..=((w - 1) as f32)).contains(&x) {
return None;
}
if !(0.0..=((h - 1) as f32)).contains(&y) {
return None;
}
Some(img.get_pixel(x.round() as u32, y.round() as u32))
}
/// Linearly sample from an image using coordinates in [0, w-1] and [0, h-1].
pub fn interpolate_bilinear {
// assumption needed for correctness of pixel creation
assert!(P::CHANNEL_COUNT <= 4);
let (w, h) = img.dimensions();
if w == 0 || h == 0 {
return None;
}
if !(0.0..=((w - 1) as f32)).contains(&x) {
return None;
}
if !(0.0..=((h - 1) as f32)).contains(&y) {
return None;
}
// keep these as integers, for fewer FLOPs
let uf = x.floor() as u32;
let vf = y.floor() as u32;
let uc = (uf + 1).min(w - 1);
let vc = (vf + 1).min(h - 1);
// clamp coords to the range of the image
let mut sxx = [[0.; 4]; 4];
// do not use Array::map, as it can be slow with high stack usage,
// for [[f32; 4]; 4].
// convert samples to f32
// currently rgba is the largest one,
// so just store as many items as necessary,
// because there's not a simple way to be generic over all of them.
let mut compute = |u: u32, v: u32, i| {
let s = img.get_pixel(u, v);
for (j, c) in s.channels().iter().enumerate() {
sxx[j][i] = c.to_f32().unwrap();
}
s
};
// hacky reuse since cannot construct a generic Pixel
let mut out: P = compute(uf, vf, 0);
compute(uf, vc, 1);
compute(uc, vf, 2);
compute(uc, vc, 3);
// weights, the later two are independent from the first 2 for better vectorization.
let ufw = x - uf as f32;
let vfw = y - vf as f32;
let ucw = (uf + 1) as f32 - x;
let vcw = (vf + 1) as f32 - y;
// https://en.wikipedia.org/wiki/Bilinear_interpolation#Weighted_mean
// the distance between pixels is 1 so there is no denominator
let wff = ucw * vcw;
let wfc = ucw * vfw;
let wcf = ufw * vcw;
let wcc = ufw * vfw;
// was originally assert, but is actually not a cheap computation
debug_assert!(f32::abs((wff + wfc + wcf + wcc) - 1.) < 1e-3);
// hack to see if primitive is an integer or a float
let is_float = P::Subpixel::DEFAULT_MAX_VALUE.to_f32().unwrap() == 1.0;
for (i, c) in out.channels_mut().iter_mut().enumerate() {
let v = wff * sxx[i][0] + wfc * sxx[i][1] + wcf * sxx[i][2] + wcc * sxx[i][3];
// this rounding may introduce quantization errors,
// Specifically what is meant is that many samples may deviate
// from the mean value of the originals, but it's not possible to fix that.
*c = >
where
I: GenericImageView >
where
I: GenericImageView >
where
I: GenericImageView {
// Construct an accumulator with all four channel sums set to zero.
// Sums are held in the wider `S::Larger` type so that accumulating many
// samples of `S` does not overflow.
fn zeroed() -> Self {
    ThumbnailSum(
        S::Larger::zero(),
        S::Larger::zero(),
        S::Larger::zero(),
        S::Larger::zero(),
    )
}
fn sample_val(val: S) -> S::Larger {
::from(
fact_left * leftv.to_f32().unwrap() + fact_right * rightv.to_f32().unwrap(),
)
.expect("Average sample value should fit into sample type")
};
(
mix_left_and_right(sum_left.0, sum_right.0),
mix_left_and_right(sum_left.1, sum_right.1),
mix_left_and_right(sum_left.2, sum_right.2),
mix_left_and_right(sum_left.3, sum_right.3),
)
}
/// Get a thumbnail pixel where the input window encloses at least a horizontal pixel.
fn thumbnail_sample_fraction_vertical(
image: &I,
left: u32,
right: u32,
bottom: u32,
fraction_vertical: f32,
) -> (S, S, S, S)
where
I: GenericImageView::from(fact_bot * botv.to_f32().unwrap() + fact_top * topv.to_f32().unwrap())
.expect("Average sample value should fit into sample type")
};
(
mix_bot_and_top(sum_bot.0, sum_top.0),
mix_bot_and_top(sum_bot.1, sum_top.1),
mix_bot_and_top(sum_bot.2, sum_top.2),
mix_bot_and_top(sum_bot.3, sum_top.3),
)
}
/// Get a single pixel for a thumbnail where the input window does not enclose any full pixel.
fn thumbnail_sample_fraction_both(
image: &I,
left: u32,
fraction_vertical: f32,
bottom: u32,
fraction_horizontal: f32,
) -> (S, S, S, S)
where
I: GenericImageView::from(
fact_br * br.to_f32().unwrap()
+ fact_tr * tr.to_f32().unwrap()
+ fact_bl * bl.to_f32().unwrap()
+ fact_tl * tl.to_f32().unwrap(),
)
.expect("Average sample value should fit into sample type")
};
(
mix(k_br.0, k_tr.0, k_bl.0, k_tl.0),
mix(k_br.1, k_tr.1, k_bl.1, k_tl.1),
mix(k_br.2, k_tr.2, k_bl.2, k_tl.2),
mix(k_br.3, k_tr.3, k_bl.3, k_tl.3),
)
}
/// Perform a 3x3 box filter on the supplied image.
///
/// # Arguments:
///
/// * `image` - source image.
/// * `kernel` - is an array of the filter weights of length 9.
///
/// This method typically assumes that the input is scene-linear light.
/// If it is not, color distortion may occur.
pub fn filter3x3(image: &I, kernel: &[f32]) -> ImageBuffer