Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

448
vendor/image/src/animation.rs vendored Normal file
View File

@@ -0,0 +1,448 @@
use std::cmp::Ordering;
use std::time::Duration;
use crate::error::ImageResult;
use crate::RgbaImage;
/// An implementation dependent iterator, reading the frames as requested
pub struct Frames<'a> {
    // Boxed trait object so each codec supplies its own lazy frame source;
    // frames are decoded on demand as the iterator is advanced.
    iterator: Box<dyn Iterator<Item = ImageResult<Frame>> + 'a>,
}
impl<'a> Frames<'a> {
    /// Creates a new `Frames` from an implementation specific iterator.
    #[must_use]
    pub fn new(iterator: Box<dyn Iterator<Item = ImageResult<Frame>> + 'a>) -> Self {
        Frames { iterator }
    }

    /// Steps through the iterator from the current frame until the end and pushes each frame into
    /// a `Vec`.
    /// If an error is encountered that error is returned instead.
    ///
    /// Note: This is equivalent to `Frames::collect::<ImageResult<Vec<Frame>>>()`
    pub fn collect_frames(self) -> ImageResult<Vec<Frame>> {
        // `collect` on an iterator of `Result`s short-circuits on the first error.
        self.collect()
    }
}
impl Iterator for Frames<'_> {
    type Item = ImageResult<Frame>;

    /// Delegates to the codec-provided inner iterator.
    fn next(&mut self) -> Option<Self::Item> {
        self.iterator.next()
    }
}
/// A single animation frame
pub struct Frame {
    /// Delay between the frames in milliseconds
    delay: Delay,
    /// x offset
    left: u32,
    /// y offset
    top: u32,
    // Pixel data for this frame; always stored as RGBA.
    buffer: RgbaImage,
}
impl Clone for Frame {
    // Manual impl (instead of `derive`) so `clone_from` can reuse the
    // destination's existing pixel allocation.
    fn clone(&self) -> Self {
        Self {
            buffer: self.buffer.clone(),
            delay: self.delay,
            left: self.left,
            top: self.top,
        }
    }

    fn clone_from(&mut self, source: &Self) {
        // Clone pixels into the existing allocation, then copy the cheap fields.
        self.buffer.clone_from(&source.buffer);
        self.delay = source.delay;
        self.left = source.left;
        self.top = source.top;
    }
}
/// The delay of a frame relative to the previous one.
///
/// The ratio is reduced on construction which means equality comparison is reliable even when
/// mixing different bases. Note however that there is an upper limit to the delays that can be
/// represented exactly when using [`Self::from_saturating_duration`] which depends on the
/// granularity of the interval.
///
/// ```
/// use image::Delay;
/// let delay_10ms = Delay::from_numer_denom_ms(10, 1);
/// let delay_10000us = Delay::from_numer_denom_ms(10_000, 1_000);
///
/// assert_eq!(delay_10ms, delay_10000us);
/// ```
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)]
pub struct Delay {
    // Delay in milliseconds as a reduced fraction; see `Ratio` below.
    ratio: Ratio,
}
impl Frame {
    /// Constructs a new frame without any delay.
    #[must_use]
    pub fn new(buffer: RgbaImage) -> Frame {
        // Zero offset, zero delay (0/1 milliseconds).
        Self::from_parts(buffer, 0, 0, Delay::from_ratio(Ratio { numer: 0, denom: 1 }))
    }

    /// Constructs a new frame
    #[must_use]
    pub fn from_parts(buffer: RgbaImage, left: u32, top: u32, delay: Delay) -> Frame {
        Self {
            delay,
            left,
            top,
            buffer,
        }
    }

    /// Delay of this frame
    #[must_use]
    pub fn delay(&self) -> Delay {
        self.delay
    }

    /// Returns the image buffer
    #[must_use]
    pub fn buffer(&self) -> &RgbaImage {
        &self.buffer
    }

    /// Returns a mutable image buffer
    pub fn buffer_mut(&mut self) -> &mut RgbaImage {
        &mut self.buffer
    }

    /// Returns the image buffer
    #[must_use]
    pub fn into_buffer(self) -> RgbaImage {
        self.buffer
    }

    /// Returns the x offset
    #[must_use]
    pub fn left(&self) -> u32 {
        self.left
    }

    /// Returns the y offset
    #[must_use]
    pub fn top(&self) -> u32 {
        self.top
    }
}
impl Delay {
    /// Create a delay from a ratio of milliseconds.
    ///
    /// # Examples
    ///
    /// ```
    /// use image::Delay;
    /// let delay_10ms = Delay::from_numer_denom_ms(10, 1);
    /// ```
    #[must_use]
    pub fn from_numer_denom_ms(numerator: u32, denominator: u32) -> Self {
        Delay {
            ratio: Ratio::new(numerator, denominator),
        }
    }

    /// Convert from a duration, clamped between 0 and an implemented defined maximum.
    ///
    /// The maximum is *at least* `i32::MAX` milliseconds. It should be noted that the accuracy of
    /// the result may be relative and very large delays have a coarse resolution.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use image::Delay;
    ///
    /// let duration = Duration::from_millis(20);
    /// let delay = Delay::from_saturating_duration(duration);
    /// ```
    #[must_use]
    pub fn from_saturating_duration(duration: Duration) -> Self {
        // A few notes: The largest number we can represent as a ratio is u32::MAX but we can
        // sometimes represent much smaller numbers.
        //
        // We can represent duration as `millis+a/b` (where a < b, b > 0).
        // We must thus bound b with `b·millis + (b-1) <= u32::MAX` or
        // > `0 < b <= (u32::MAX + 1)/(millis + 1)`
        // Corollary: millis <= u32::MAX
        const MILLIS_BOUND: u128 = u32::MAX as u128;

        // Saturate whole milliseconds at the representable bound.
        let millis = duration.as_millis().min(MILLIS_BOUND);
        // Sub-millisecond remainder, expressed as `submillis / 1_000_000` ms.
        let submillis = (duration.as_nanos() % 1_000_000) as u32;

        // Largest denominator `b` so that `a + b * millis` cannot overflow u32
        // (derivation in the note above).
        let max_b = if millis > 0 {
            ((MILLIS_BOUND + 1) / (millis + 1)) as u32
        } else {
            MILLIS_BOUND as u32
        };
        let millis = millis as u32;

        let (a, b) = Self::closest_bounded_fraction(max_b, submillis, 1_000_000);
        Self::from_numer_denom_ms(a + b * millis, b)
    }

    /// The numerator and denominator of the delay in milliseconds.
    ///
    /// This is guaranteed to be an exact conversion if the `Delay` was previously created with the
    /// `from_numer_denom_ms` constructor.
    #[must_use]
    pub fn numer_denom_ms(self) -> (u32, u32) {
        (self.ratio.numer, self.ratio.denom)
    }

    // Crate-internal constructor bypassing `Ratio::new`'s reduction (callers
    // pass an already-valid ratio).
    pub(crate) fn from_ratio(ratio: Ratio) -> Self {
        Delay { ratio }
    }

    // Crate-internal accessor for the underlying reduced fraction.
    pub(crate) fn into_ratio(self) -> Ratio {
        self.ratio
    }

    /// Given some fraction, compute an approximation with denominator bounded.
    ///
    /// Note that `denom_bound` bounds numerator and denominator of all intermediate
    /// approximations and the end result.
    fn closest_bounded_fraction(denom_bound: u32, nom: u32, denom: u32) -> (u32, u32) {
        use std::cmp::Ordering::*;
        assert!(0 < denom);
        assert!(0 < denom_bound);
        assert!(nom < denom);
        // NOTE(review): `nom * 2` below multiplies in u32. That is fine for the
        // only in-file caller (`denom == 1_000_000`, so `nom < 1_000_000`), but
        // would overflow for `nom > u32::MAX / 2` — confirm before reusing this
        // helper with larger fractions.

        // Avoid a few type troubles. All intermediate results are bounded by `denom_bound` which
        // is in turn bounded by u32::MAX. Representing with u64 allows multiplication of any two
        // values without fears of overflow.

        // Compare two fractions whose parts fit into a u32.
        fn compare_fraction((an, ad): (u64, u64), (bn, bd): (u64, u64)) -> Ordering {
            (an * bd).cmp(&(bn * ad))
        }

        // Computes the numerator of the absolute difference between two such fractions.
        fn abs_diff_nom((an, ad): (u64, u64), (bn, bd): (u64, u64)) -> u64 {
            let c0 = an * bd;
            let c1 = ad * bn;

            let d0 = c0.max(c1);
            let d1 = c0.min(c1);
            d0 - d1
        }

        let exact = (u64::from(nom), u64::from(denom));
        // The lower bound fraction, numerator and denominator.
        let mut lower = (0u64, 1u64);
        // The upper bound fraction, numerator and denominator.
        let mut upper = (1u64, 1u64);
        // The closest approximation for now. Starts as 0/1 or 1/1, whichever is
        // nearer to the exact fraction.
        let mut guess = (u64::from(nom * 2 > denom), 1u64);

        // loop invariant: ad, bd <= denom_bound
        // iterates the Farey sequence.
        loop {
            // Break if we are done.
            if compare_fraction(guess, exact) == Equal {
                break;
            }

            // Break if next Farey number is out-of-range.
            if u64::from(denom_bound) - lower.1 < upper.1 {
                break;
            }

            // Next Farey approximation n between a and b
            let next = (lower.0 + upper.0, lower.1 + upper.1);
            // if F < n then replace the upper bound, else replace lower.
            if compare_fraction(exact, next) == Less {
                upper = next;
            } else {
                lower = next;
            }

            // Now correct the closest guess.
            // In other words, if |c - f| > |n - f| then replace it with the new guess.
            // This favors the guess with smaller denominator on equality.

            // |g - f| = |g_diff_nom|/(gd*fd);
            let g_diff_nom = abs_diff_nom(guess, exact);
            // |n - f| = |n_diff_nom|/(nd*fd);
            let n_diff_nom = abs_diff_nom(next, exact);

            // The difference |n - f| is smaller than |g - f| if either the integral part of the
            // fraction |n_diff_nom|/nd is smaller than the one of |g_diff_nom|/gd or if they are
            // the same but the fractional part is larger.
            if match (n_diff_nom / next.1).cmp(&(g_diff_nom / guess.1)) {
                Less => true,
                Greater => false,
                // Note that the numerator for the fractional part is smaller than its denominator
                // which is smaller than u32 and can't overflow the multiplication with the other
                // denominator, that is we can compare these fractions by multiplication with the
                // respective other denominator.
                Equal => {
                    compare_fraction(
                        (n_diff_nom % next.1, next.1),
                        (g_diff_nom % guess.1, guess.1),
                    ) == Less
                }
            } {
                guess = next;
            }
        }

        (guess.0 as u32, guess.1 as u32)
    }
}
impl From<Delay> for Duration {
    /// Converts the rational millisecond delay into a `Duration`, truncating
    /// any remainder finer than one nanosecond.
    fn from(delay: Delay) -> Self {
        let ratio = delay.into_ratio();
        // Whole milliseconds plus the fractional remainder as nanoseconds.
        let whole_ms = u64::from(ratio.to_integer());
        let remainder = u64::from(ratio.numer % ratio.denom);
        let nanos = remainder * 1_000_000 / u64::from(ratio.denom);
        Duration::from_millis(whole_ms) + Duration::from_nanos(nanos)
    }
}
/// Greatest common divisor of `a` and `b` via the iterative Euclidean
/// algorithm. `gcd(0, 0)` is `0`; `gcd(x, 0)` and `gcd(0, x)` are `x`.
#[inline]
const fn gcd(mut a: u32, mut b: u32) -> u32 {
    // For unsigned integers `%` coincides with `rem_euclid`.
    while b != 0 {
        let r = a % b;
        a = b;
        b = r;
    }
    a
}
/// A rational number, kept in lowest terms by construction.
#[derive(Copy, Clone, Debug)]
pub(crate) struct Ratio {
    // Invariant (established by `Ratio::new`): `denom != 0` and the fraction
    // `numer / denom` is fully reduced.
    numer: u32,
    denom: u32,
}
impl Ratio {
#[inline]
pub(crate) fn new(numerator: u32, denominator: u32) -> Self {
assert_ne!(denominator, 0);
let divisor = gcd(numerator, denominator);
Self {
numer: numerator / divisor,
denom: denominator / divisor,
}
}
#[inline]
pub(crate) fn to_integer(self) -> u32 {
self.numer / self.denom
}
}
impl PartialEq for Ratio {
    fn eq(&self, other: &Self) -> bool {
        // Delegate to the total order so `==` always agrees with `cmp`.
        matches!(self.cmp(other), Ordering::Equal)
    }
}

impl Eq for Ratio {}

impl PartialOrd for Ratio {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Ratio {
    fn cmp(&self, other: &Self) -> Ordering {
        // a/b <cmp> c/d  <=>  a*d <cmp> c*b: denominators are non-zero and
        // unsigned, so cross-multiplying preserves the ordering. Widening to
        // u64 guarantees the products cannot overflow.
        let lhs = u64::from(self.numer) * u64::from(other.denom);
        let rhs = u64::from(other.numer) * u64::from(self.denom);
        lhs.cmp(&rhs)
    }
}
#[cfg(test)]
mod tests {
    use super::{Delay, Duration, Ratio};

    #[test]
    fn simple() {
        // One full second expressed as 1000/1 milliseconds.
        let one_second = Delay::from_numer_denom_ms(1000, 1);
        assert_eq!(Duration::from(one_second), Duration::from_secs(1));
    }

    #[test]
    fn fps_30() {
        // 1000/30 ms per frame; the conversion truncates below 1 ns.
        let frame_delay = Delay::from_numer_denom_ms(1000, 30);
        let duration = Duration::from(frame_delay);
        assert_eq!(duration.as_secs(), 0);
        assert_eq!(duration.subsec_millis(), 33);
        assert_eq!(duration.subsec_nanos(), 33_333_333);
    }

    #[test]
    fn duration_outlier() {
        // Durations beyond the representable range saturate at u32::MAX ms.
        let out_of_bounds = Duration::from_secs(0xFFFF_FFFF);
        let delay = Delay::from_saturating_duration(out_of_bounds);
        assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));
    }

    #[test]
    fn duration_approx() {
        // Slightly above the maximum saturates to u32::MAX/1 …
        let above = Duration::from_millis(0xFFFF_FFFF) + Duration::from_micros(1);
        let delay = Delay::from_saturating_duration(above);
        assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));

        // … and slightly below rounds to the same ratio.
        let below = Duration::from_millis(0xFFFF_FFFF) - Duration::from_micros(1);
        let delay = Delay::from_saturating_duration(below);
        assert_eq!(delay.numer_denom_ms(), (0xFFFF_FFFF, 1));

        let fine =
            Duration::from_millis(0xFFFF_FFFF / 1000) + Duration::from_micros(0xFFFF_FFFF % 1000);
        let delay = Delay::from_saturating_duration(fine);
        // Funnily, 0xFFFF_FFFF is divisible by 5, thus we compare with a `Ratio`.
        assert_eq!(delay.into_ratio(), Ratio::new(0xFFFF_FFFF, 1000));
    }

    #[test]
    fn precise() {
        // The ratio has only 32 bits in the numerator, too imprecise to get more than 11 digits
        // correct. But it may be expressed as 1_000_000/3 instead.
        let exceed = Duration::from_secs(333) + Duration::from_nanos(333_333_333);
        let delay = Delay::from_saturating_duration(exceed);
        assert_eq!(Duration::from(delay), exceed);
    }

    #[test]
    fn small() {
        // Not quite a delay of `1 ms`.
        let delay = Delay::from_numer_denom_ms(1 << 16, (1 << 16) + 1);
        let duration = Duration::from(delay);
        assert_eq!(duration.as_millis(), 0);
        // Round-tripping is not exact, but the result stays below one millisecond.
        let delay = Delay::from_saturating_duration(duration);
        assert_eq!(delay.into_ratio().to_integer(), 0);
    }
}

673
vendor/image/src/codecs/avif/decoder.rs vendored Normal file
View File

@@ -0,0 +1,673 @@
//! Decoding of AVIF images.
use crate::error::{
DecodingError, ImageFormatHint, LimitError, LimitErrorKind, UnsupportedError,
UnsupportedErrorKind,
};
use crate::{ColorType, ImageDecoder, ImageError, ImageFormat, ImageResult};
///
/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec.
///
/// [AVIF]: https://aomediacodec.github.io/av1-avif/
use std::error::Error;
use std::fmt::{Display, Formatter};
use std::io::Read;
use std::marker::PhantomData;
use crate::codecs::avif::ycgco::{
ycgco420_to_rgba10, ycgco420_to_rgba12, ycgco420_to_rgba8, ycgco422_to_rgba10,
ycgco422_to_rgba12, ycgco422_to_rgba8, ycgco444_to_rgba10, ycgco444_to_rgba12,
ycgco444_to_rgba8,
};
use crate::codecs::avif::yuv::*;
use dav1d::{PixelLayout, PlanarImageComponent};
use mp4parse::{read_avif, ParseStrictness};
/// Wraps any boxable error into an AVIF [`ImageError::Decoding`] error.
fn error_map<E: Into<Box<dyn Error + Send + Sync>>>(err: E) -> ImageError {
    let decoding = DecodingError::new(ImageFormat::Avif.into(), err);
    ImageError::Decoding(decoding)
}
/// AVIF Decoder.
///
/// Reads one image into the chosen input.
pub struct AvifDecoder<R> {
    // Marker for the reader type; the stream is consumed entirely in `new`.
    inner: PhantomData<R>,
    // Decoded primary (color) picture.
    picture: dav1d::Picture,
    // Optional separately coded alpha picture (expected to be 4:0:0).
    alpha_picture: Option<dav1d::Picture>,
    // Raw ICC color-profile bytes from the container, when present.
    icc_profile: Option<Vec<u8>>,
}
/// Decoder-specific failure conditions, surfaced as decoding errors.
#[derive(Debug, Clone, PartialEq, Eq)]
enum AvifDecoderError {
    // The alpha picture was not monochrome (4:0:0).
    AlphaPlaneFormat(PixelLayout),
    // The 'Identity' matrix requires 4:4:4 chroma sampling, but got this layout.
    YuvLayoutOnIdentityMatrix(PixelLayout),
    // No conversion routine exists for this layout/matrix pair.
    UnsupportedLayoutAndMatrix(PixelLayout, YuvMatrixStrategy),
}
impl Display for AvifDecoderError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
AvifDecoderError::AlphaPlaneFormat(pixel_layout) => match pixel_layout {
PixelLayout::I400 => unreachable!("This option must be handled correctly"),
PixelLayout::I420 => f.write_str("Alpha layout must be 4:0:0, but it was 4:2:0"),
PixelLayout::I422 => f.write_str("Alpha layout must be 4:0:0, but it was 4:2:2"),
PixelLayout::I444 => f.write_str("Alpha layout must be 4:0:0, but it was 4:4:4"),
},
AvifDecoderError::YuvLayoutOnIdentityMatrix(pixel_layout) => match pixel_layout {
PixelLayout::I400 => {
f.write_str("YUV layout on 'Identity' matrix must be 4:4:4, but it was 4:0:0")
}
PixelLayout::I420 => {
f.write_str("YUV layout on 'Identity' matrix must be 4:4:4, but it was 4:2:0")
}
PixelLayout::I422 => {
f.write_str("YUV layout on 'Identity' matrix must be 4:4:4, but it was 4:2:2")
}
PixelLayout::I444 => unreachable!("This option must be handled correctly"),
},
AvifDecoderError::UnsupportedLayoutAndMatrix(layout, matrix) => f.write_fmt(
format_args!("YUV layout {layout:?} on matrix {matrix:?} is not supported",),
),
}
}
}
impl Error for AvifDecoderError {}
impl<R: Read> AvifDecoder<R> {
    /// Create a new decoder that reads its input from `r`.
    ///
    /// Parses the AVIF container, decodes the primary AV1 item and — when
    /// present — the separately coded alpha item, and captures any ICC
    /// profile. Bit depths other than 8, 10 or 12 are rejected.
    pub fn new(mut r: R) -> ImageResult<Self> {
        // Parse the ISOBMFF/AVIF container structure.
        let ctx = read_avif(&mut r, ParseStrictness::Normal).map_err(error_map)?;
        let coded = ctx.primary_item_coded_data().unwrap_or_default();
        let mut primary_decoder = dav1d::Decoder::new().map_err(error_map)?;
        primary_decoder
            .send_data(coded.to_vec(), None, None, None)
            .map_err(error_map)?;
        let picture = read_until_ready(&mut primary_decoder)?;
        // Alpha, if present, is an independent AV1 stream with its own decoder.
        let alpha_item = ctx.alpha_item_coded_data().unwrap_or_default();
        let alpha_picture = if !alpha_item.is_empty() {
            let mut alpha_decoder = dav1d::Decoder::new().map_err(error_map)?;
            alpha_decoder
                .send_data(alpha_item.to_vec(), None, None, None)
                .map_err(error_map)?;
            Some(read_until_ready(&mut alpha_decoder)?)
        } else {
            None
        };
        // Keep the raw ICC payload; a malformed profile is treated as absent.
        let icc_profile = ctx
            .icc_colour_information()
            .map(|x| x.ok().unwrap_or_default())
            .map(|x| x.to_vec());
        // AV1 defines only 8-, 10- and 12-bit depths; reject anything else.
        match picture.bit_depth() {
            8 => (),
            10 | 12 => (),
            _ => {
                return ImageResult::Err(ImageError::Decoding(DecodingError::new(
                    ImageFormatHint::Exact(ImageFormat::Avif),
                    format!(
                        "Avif format does not support {} bit depth",
                        picture.bit_depth()
                    ),
                )))
            }
        };
        Ok(AvifDecoder {
            inner: PhantomData,
            picture,
            alpha_picture,
            icc_profile,
        })
    }
}
/// Reshaping incorrectly aligned or sized FFI data into Rust constraints
fn reshape_plane(source: &[u8], stride: usize, width: usize, height: usize) -> Vec<u16> {
let mut target_plane = vec![0u16; width * height];
for (shaped_row, src_row) in target_plane
.chunks_exact_mut(width)
.zip(source.chunks_exact(stride))
{
for (dst, src) in shaped_row.iter_mut().zip(src_row.chunks_exact(2)) {
*dst = u16::from_ne_bytes([src[0], src[1]]);
}
}
target_plane
}
/// A 16-bit plane view: either borrowed directly from FFI memory (zero-copy)
/// or an owned, repacked buffer, together with its row stride in `u16` units.
struct Plane16View<'a> {
    data: std::borrow::Cow<'a, [u16]>,
    stride: usize,
}
impl Default for Plane16View<'_> {
fn default() -> Self {
Plane16View {
data: std::borrow::Cow::Owned(vec![]),
stride: 0,
}
}
}
/// Reinterprets (or, when that is impossible, repacks) 16-bit luma/alpha plane
/// data handed over from FFI into a [`Plane16View`].
fn transmute_y_plane16(
    plane: &dav1d::Plane,
    stride: usize,
    width: usize,
    height: usize,
) -> Plane16View<'_> {
    let plane_ref = plane.as_ref();
    // Fast path: an even byte stride may allow a zero-copy cast to `[u16]`
    // (bytemuck still verifies alignment and length).
    if stride % 2 == 0 {
        if let Ok(slice) = bytemuck::try_cast_slice(plane_ref) {
            return Plane16View {
                data: std::borrow::Cow::Borrowed(slice),
                stride: stride >> 1,
            };
        }
    }
    // Slow path: copy into a tightly packed buffer with stride == width.
    Plane16View {
        data: std::borrow::Cow::Owned(reshape_plane(plane_ref, stride, width, height)),
        stride: width,
    }
}
/// Reinterprets (or, when that is impossible, repacks) 16-bit chroma plane
/// data handed over from FFI into a [`Plane16View`], taking the subsampled
/// chroma dimensions of `pixel_layout` into account.
fn transmute_chroma_plane16(
    plane: &dav1d::Plane,
    pixel_layout: PixelLayout,
    stride: usize,
    width: usize,
    height: usize,
) -> Plane16View<'_> {
    let plane_ref = plane.as_ref();
    // Fast path: an even byte stride may allow a zero-copy cast to `[u16]`
    // (bytemuck still verifies alignment and length).
    if stride % 2 == 0 {
        if let Ok(slice) = bytemuck::try_cast_slice(plane_ref) {
            return Plane16View {
                data: std::borrow::Cow::Borrowed(slice),
                stride: stride >> 1,
            };
        }
    }
    // Slow path: repack. Chroma plane dimensions depend on the subsampling.
    let chroma_width = match pixel_layout {
        PixelLayout::I400 => unreachable!(),
        PixelLayout::I420 | PixelLayout::I422 => width.div_ceil(2),
        PixelLayout::I444 => width,
    };
    let chroma_height = match pixel_layout {
        PixelLayout::I400 => unreachable!(),
        PixelLayout::I420 => height.div_ceil(2),
        PixelLayout::I422 | PixelLayout::I444 => height,
    };
    Plane16View {
        data: std::borrow::Cow::Owned(reshape_plane(
            plane_ref,
            stride,
            chroma_width,
            chroma_height,
        )),
        stride: chroma_width,
    }
}
/// How decoded YUV planes are converted to RGB.
#[derive(Copy, Clone, Debug, PartialOrd, Eq, PartialEq)]
enum YuvMatrixStrategy {
    // A standard Kr/Kb-derived matrix (BT.601, BT.709, BT.2020, …).
    KrKb(YuvStandardMatrix),
    // YCgCo: converted by the dedicated `ycgco*` routines, no standard matrix.
    CgCo,
    // 'Identity': planes hold GBR values directly (handled by `gbr_to_rgba*`).
    Identity,
}
/// Getting one of prebuilt matrix of fails
fn get_matrix(
david_matrix: dav1d::pixel::MatrixCoefficients,
) -> Result<YuvMatrixStrategy, ImageError> {
match david_matrix {
dav1d::pixel::MatrixCoefficients::Identity => Ok(YuvMatrixStrategy::Identity),
dav1d::pixel::MatrixCoefficients::BT709 => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Bt709))
}
// This is arguable, some applications prefer to go with Bt.709 as default,
// and some applications prefer Bt.601 as default.
// For ex. `Chrome` always prefer Bt.709 even for SD content
// However, nowadays standard should be Bt.709 for HD+ size otherwise Bt.601
dav1d::pixel::MatrixCoefficients::Unspecified => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Bt709))
}
dav1d::pixel::MatrixCoefficients::Reserved => Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::Avif.into(),
UnsupportedErrorKind::GenericFeature(
"Using 'Reserved' color matrix is not supported".to_string(),
),
),
)),
dav1d::pixel::MatrixCoefficients::BT470M => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Bt470_6))
}
dav1d::pixel::MatrixCoefficients::BT470BG => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Bt601))
}
dav1d::pixel::MatrixCoefficients::ST170M => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Smpte240))
}
dav1d::pixel::MatrixCoefficients::ST240M => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Smpte240))
}
dav1d::pixel::MatrixCoefficients::YCgCo => Ok(YuvMatrixStrategy::CgCo),
dav1d::pixel::MatrixCoefficients::BT2020NonConstantLuminance => {
Ok(YuvMatrixStrategy::KrKb(YuvStandardMatrix::Bt2020))
}
dav1d::pixel::MatrixCoefficients::BT2020ConstantLuminance => {
// This matrix significantly differs from others because linearize values is required
// to compute Y instead of Y'.
// Actually it is almost everywhere is not implemented.
// Libavif + libheif missing this also so actually AVIF images
// with CL BT.2020 might be made only by mistake
Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::Avif.into(),
UnsupportedErrorKind::GenericFeature(
"BT2020ConstantLuminance matrix is not supported".to_string(),
),
),
))
}
dav1d::pixel::MatrixCoefficients::ST2085 => Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::Avif.into(),
UnsupportedErrorKind::GenericFeature("ST2085 matrix is not supported".to_string()),
),
)),
dav1d::pixel::MatrixCoefficients::ChromaticityDerivedConstantLuminance
| dav1d::pixel::MatrixCoefficients::ChromaticityDerivedNonConstantLuminance => Err(
ImageError::Unsupported(UnsupportedError::from_format_and_kind(
ImageFormat::Avif.into(),
UnsupportedErrorKind::GenericFeature(
"Chromaticity Derived Luminance matrix is not supported".to_string(),
),
)),
),
dav1d::pixel::MatrixCoefficients::ICtCp => Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::Avif.into(),
UnsupportedErrorKind::GenericFeature(
"ICtCp Derived Luminance matrix is not supported".to_string(),
),
),
)),
}
}
impl<R: Read> ImageDecoder for AvifDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        (self.picture.width(), self.picture.height())
    }

    fn color_type(&self) -> ColorType {
        // 10- and 12-bit pictures are reported (and expanded) as 16-bit RGBA.
        if self.picture.bit_depth() == 8 {
            ColorType::Rgba8
        } else {
            ColorType::Rgba16
        }
    }

    fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
        Ok(self.icc_profile.clone())
    }

    /// Converts the decoded picture (and optional alpha) into interleaved
    /// RGBA samples in `buf`.
    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        let bit_depth = self.picture.bit_depth();
        // Normally this should never happen,
        // if this happens then there is an incorrect implementation somewhere else
        assert!(bit_depth == 8 || bit_depth == 10 || bit_depth == 12);
        let (width, height) = self.dimensions();
        // This is suspicious if this happens, better fail early
        if width == 0 || height == 0 {
            return Err(ImageError::Limits(LimitError::from_kind(
                LimitErrorKind::DimensionError,
            )));
        }
        let yuv_range = match self.picture.color_range() {
            dav1d::pixel::YUVRange::Limited => YuvIntensityRange::Tv,
            dav1d::pixel::YUVRange::Full => YuvIntensityRange::Pc,
        };
        let matrix_strategy = get_matrix(self.picture.matrix_coefficients())?;
        // Identity matrix should be possible only on 4:4:4
        if matrix_strategy == YuvMatrixStrategy::Identity
            && self.picture.pixel_layout() != PixelLayout::I444
        {
            return Err(ImageError::Decoding(DecodingError::new(
                ImageFormat::Avif.into(),
                AvifDecoderError::YuvLayoutOnIdentityMatrix(self.picture.pixel_layout()),
            )));
        }
        // There is no YCgCo conversion routine for monochrome (4:0:0) input.
        if matrix_strategy == YuvMatrixStrategy::CgCo
            && self.picture.pixel_layout() == PixelLayout::I400
        {
            return Err(ImageError::Decoding(DecodingError::new(
                ImageFormat::Avif.into(),
                AvifDecoderError::UnsupportedLayoutAndMatrix(
                    self.picture.pixel_layout(),
                    matrix_strategy,
                ),
            )));
        }
        if bit_depth == 8 {
            // 8-bit path: planes can be used directly as byte slices.
            let ref_y = self.picture.plane(PlanarImageComponent::Y);
            let ref_u = self.picture.plane(PlanarImageComponent::U);
            let ref_v = self.picture.plane(PlanarImageComponent::V);
            let image = YuvPlanarImage {
                y_plane: ref_y.as_ref(),
                y_stride: self.picture.stride(PlanarImageComponent::Y) as usize,
                u_plane: ref_u.as_ref(),
                u_stride: self.picture.stride(PlanarImageComponent::U) as usize,
                v_plane: ref_v.as_ref(),
                v_stride: self.picture.stride(PlanarImageComponent::V) as usize,
                width: width as usize,
                height: height as usize,
            };
            // Select the conversion routine for this layout/matrix pair; the
            // `unreachable!` arms were rejected by the checks above.
            match matrix_strategy {
                YuvMatrixStrategy::KrKb(standard) => {
                    let worker = match self.picture.pixel_layout() {
                        PixelLayout::I400 => yuv400_to_rgba8,
                        PixelLayout::I420 => yuv420_to_rgba8,
                        PixelLayout::I422 => yuv422_to_rgba8,
                        PixelLayout::I444 => yuv444_to_rgba8,
                    };
                    worker(image, buf, yuv_range, standard)?;
                }
                YuvMatrixStrategy::CgCo => {
                    let worker = match self.picture.pixel_layout() {
                        PixelLayout::I400 => unreachable!(),
                        PixelLayout::I420 => ycgco420_to_rgba8,
                        PixelLayout::I422 => ycgco422_to_rgba8,
                        PixelLayout::I444 => ycgco444_to_rgba8,
                    };
                    worker(image, buf, yuv_range)?;
                }
                YuvMatrixStrategy::Identity => {
                    let worker = match self.picture.pixel_layout() {
                        PixelLayout::I400 => unreachable!(),
                        PixelLayout::I420 => unreachable!(),
                        PixelLayout::I422 => unreachable!(),
                        PixelLayout::I444 => gbr_to_rgba8,
                    };
                    worker(image, buf, yuv_range)?;
                }
            }
            // Squashing alpha plane into a picture
            if let Some(picture) = self.alpha_picture {
                if picture.pixel_layout() != PixelLayout::I400 {
                    return Err(ImageError::Decoding(DecodingError::new(
                        ImageFormat::Avif.into(),
                        AvifDecoderError::AlphaPlaneFormat(picture.pixel_layout()),
                    )));
                }
                let stride = picture.stride(PlanarImageComponent::Y) as usize;
                let plane = picture.plane(PlanarImageComponent::Y);
                // Copy each alpha sample into the A channel of the RGBA rows.
                for (buf, slice) in Iterator::zip(
                    buf.chunks_exact_mut(width as usize * 4),
                    plane.as_ref().chunks_exact(stride),
                ) {
                    for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) {
                        rgba[3] = *a_src;
                    }
                }
            }
        } else {
            // 10/12-bit path: output samples are u16.
            if let Ok(buf) = bytemuck::try_cast_slice_mut(buf) {
                let target_slice: &mut [u16] = buf;
                self.process_16bit_picture(target_slice, yuv_range, matrix_strategy)?;
            } else {
                // If buffer from Decoder is unaligned
                let mut aligned_store = vec![0u16; buf.len() / 2];
                self.process_16bit_picture(&mut aligned_store, yuv_range, matrix_strategy)?;
                // Copy the aligned result back in native byte order.
                for (dst, src) in buf.chunks_exact_mut(2).zip(aligned_store.iter()) {
                    let bytes = src.to_ne_bytes();
                    dst[0] = bytes[0];
                    dst[1] = bytes[1];
                }
            }
        }
        Ok(())
    }

    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
impl<R: Read> AvifDecoder<R> {
    /// Converts the decoded 10/12-bit picture (plus optional alpha) into RGBA
    /// samples in `target`, then expands them to the full 16-bit range.
    fn process_16bit_picture(
        &self,
        target: &mut [u16],
        yuv_range: YuvIntensityRange,
        matrix_strategy: YuvMatrixStrategy,
    ) -> ImageResult<()> {
        let y_dav1d_plane = self.picture.plane(PlanarImageComponent::Y);
        let (width, height) = (self.picture.width(), self.picture.height());
        let bit_depth = self.picture.bit_depth();
        // dav1d may return not aligned and not correctly constrained data,
        // or at least I can't find guarantees on that,
        // so if that happens, instead of casting we'll need to reshape it into a target slice.
        // Required criteria: bytemuck allows the alignment of this data, and stride must be divisible by 2.
        let y_plane_view = transmute_y_plane16(
            &y_dav1d_plane,
            self.picture.stride(PlanarImageComponent::Y) as usize,
            width as usize,
            height as usize,
        );
        let u_dav1d_plane = self.picture.plane(PlanarImageComponent::U);
        let v_dav1d_plane = self.picture.plane(PlanarImageComponent::V);
        let mut u_plane_view = Plane16View::default();
        let mut v_plane_view = Plane16View::default();
        // Monochrome (4:0:0) pictures have no chroma planes to reshape.
        if self.picture.pixel_layout() != PixelLayout::I400 {
            u_plane_view = transmute_chroma_plane16(
                &u_dav1d_plane,
                self.picture.pixel_layout(),
                self.picture.stride(PlanarImageComponent::U) as usize,
                width as usize,
                height as usize,
            );
            v_plane_view = transmute_chroma_plane16(
                &v_dav1d_plane,
                self.picture.pixel_layout(),
                self.picture.stride(PlanarImageComponent::V) as usize,
                width as usize,
                height as usize,
            );
        }
        let image = YuvPlanarImage {
            y_plane: y_plane_view.data.as_ref(),
            y_stride: y_plane_view.stride,
            u_plane: u_plane_view.data.as_ref(),
            u_stride: u_plane_view.stride,
            v_plane: v_plane_view.data.as_ref(),
            v_stride: v_plane_view.stride,
            width: width as usize,
            height: height as usize,
        };
        // Select the conversion routine for this layout/matrix/bit-depth
        // combination; `unreachable!` arms were rejected in `read_image`.
        match matrix_strategy {
            YuvMatrixStrategy::KrKb(standard) => {
                let worker = match self.picture.pixel_layout() {
                    PixelLayout::I400 => {
                        if bit_depth == 10 {
                            yuv400_to_rgba10
                        } else {
                            yuv400_to_rgba12
                        }
                    }
                    PixelLayout::I420 => {
                        if bit_depth == 10 {
                            yuv420_to_rgba10
                        } else {
                            yuv420_to_rgba12
                        }
                    }
                    PixelLayout::I422 => {
                        if bit_depth == 10 {
                            yuv422_to_rgba10
                        } else {
                            yuv422_to_rgba12
                        }
                    }
                    PixelLayout::I444 => {
                        if bit_depth == 10 {
                            yuv444_to_rgba10
                        } else {
                            yuv444_to_rgba12
                        }
                    }
                };
                worker(image, target, yuv_range, standard)?;
            }
            YuvMatrixStrategy::CgCo => {
                let worker = match self.picture.pixel_layout() {
                    PixelLayout::I400 => unreachable!(),
                    PixelLayout::I420 => {
                        if bit_depth == 10 {
                            ycgco420_to_rgba10
                        } else {
                            ycgco420_to_rgba12
                        }
                    }
                    PixelLayout::I422 => {
                        if bit_depth == 10 {
                            ycgco422_to_rgba10
                        } else {
                            ycgco422_to_rgba12
                        }
                    }
                    PixelLayout::I444 => {
                        if bit_depth == 10 {
                            ycgco444_to_rgba10
                        } else {
                            ycgco444_to_rgba12
                        }
                    }
                };
                worker(image, target, yuv_range)?;
            }
            YuvMatrixStrategy::Identity => {
                let worker = match self.picture.pixel_layout() {
                    PixelLayout::I400 => unreachable!(),
                    PixelLayout::I420 => unreachable!(),
                    PixelLayout::I422 => unreachable!(),
                    PixelLayout::I444 => {
                        if bit_depth == 10 {
                            gbr_to_rgba10
                        } else {
                            gbr_to_rgba12
                        }
                    }
                };
                worker(image, target, yuv_range)?;
            }
        }
        // Squashing alpha plane into a picture
        if let Some(picture) = &self.alpha_picture {
            if picture.pixel_layout() != PixelLayout::I400 {
                return Err(ImageError::Decoding(DecodingError::new(
                    ImageFormat::Avif.into(),
                    AvifDecoderError::AlphaPlaneFormat(picture.pixel_layout()),
                )));
            }
            let a_dav1d_plane = picture.plane(PlanarImageComponent::Y);
            let a_plane_view = transmute_y_plane16(
                &a_dav1d_plane,
                picture.stride(PlanarImageComponent::Y) as usize,
                width as usize,
                height as usize,
            );
            // Copy each alpha sample into the A channel of the RGBA rows.
            for (buf, slice) in Iterator::zip(
                target.chunks_exact_mut(width as usize * 4),
                a_plane_view.data.as_ref().chunks_exact(a_plane_view.stride),
            ) {
                for (rgba, a_src) in buf.chunks_exact_mut(4).zip(slice) {
                    rgba[3] = *a_src;
                }
            }
        }
        // Expand current bit depth to target 16: shift left and replicate the
        // most significant bits into the vacated low bits.
        let target_expand_bits = 16u32 - self.picture.bit_depth() as u32;
        for item in target.iter_mut() {
            *item = (*item << target_expand_bits) | (*item >> (16 - target_expand_bits));
        }
        Ok(())
    }
}
/// `get_picture` and `send_pending_data` yield `Again` as a non-fatal error requesting more data is sent to the decoder
/// This ensures that in the case of `Again` all pending data is submitted
/// This should be called after `send_data` (which does not yield `Again` when called the first time)
fn read_until_ready(decoder: &mut dav1d::Decoder) -> ImageResult<dav1d::Picture> {
loop {
match decoder.get_picture() {
Err(dav1d::Error::Again) => match decoder.send_pending_data() {
Ok(()) => {}
Err(dav1d::Error::Again) => {}
Err(e) => return Err(error_map(e)),
},
r => return r.map_err(error_map),
}
}
}

286
vendor/image/src/codecs/avif/encoder.rs vendored Normal file
View File

@@ -0,0 +1,286 @@
//! Encoding of AVIF images.
///
/// The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec.
///
/// [AVIF]: https://aomediacodec.github.io/av1-avif/
use std::borrow::Cow;
use std::cmp::min;
use std::io::Write;
use std::mem::size_of;
use crate::buffer::ConvertBuffer;
use crate::color::{FromColor, Luma, LumaA, Rgb, Rgba};
use crate::error::{
EncodingError, ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
};
use crate::{ExtendedColorType, ImageBuffer, ImageEncoder, ImageFormat, Pixel};
use crate::{ImageError, ImageResult};
use bytemuck::{try_cast_slice, try_cast_slice_mut, Pod, PodCastError};
use num_traits::Zero;
use ravif::{BitDepth, Encoder, Img, RGB8, RGBA8};
use rgb::AsPixels;
/// AVIF Encoder.
///
/// Writes one image into the chosen output.
pub struct AvifEncoder<W> {
    // Destination writer for the encoded AVIF payload.
    inner: W,
    // Configured `ravif` encoder (quality, speed, color model, threads).
    encoder: Encoder,
}
/// An enumeration over supported AVIF color spaces
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[non_exhaustive]
pub enum ColorSpace {
    /// sRGB colorspace
    Srgb,
    /// BT.709 colorspace
    Bt709,
}
impl ColorSpace {
    /// Maps this color space to the internal color model used by `ravif`.
    fn to_ravif(self) -> ravif::ColorModel {
        if self == Self::Srgb {
            ravif::ColorModel::RGB
        } else {
            ravif::ColorModel::YCbCr
        }
    }
}
/// Borrowed pixel data in one of the strongly typed forms accepted by `ravif`.
enum RgbColor<'buf> {
    // RGB, 8 bits per channel.
    Rgb8(Img<&'buf [RGB8]>),
    // RGBA, 8 bits per channel.
    Rgba8(Img<&'buf [RGBA8]>),
}
impl<W: Write> AvifEncoder<W> {
    /// Create a new encoder that writes its output to `w`.
    pub fn new(w: W) -> Self {
        // `cavif` uses these defaults
        Self::new_with_speed_quality(w, 4, 80)
    }

    /// Create a new encoder with a specified speed and quality that writes its output to `w`.
    /// `speed` accepts a value in the range 1-10, where 1 is the slowest and 10 is the fastest.
    /// Slower speeds generally yield better compression results.
    /// `quality` accepts a value in the range 1-100, where 1 is the worst and 100 is the best.
    pub fn new_with_speed_quality(w: W, speed: u8, quality: u8) -> Self {
        // Clamp both knobs to their documented upper bounds.
        let quality = quality.min(100);
        let speed = speed.min(10);
        let encoder = Encoder::new()
            .with_quality(f32::from(quality))
            .with_alpha_quality(f32::from(quality))
            .with_speed(speed)
            .with_bit_depth(BitDepth::Eight);
        AvifEncoder { inner: w, encoder }
    }

    /// Encode with the specified `color_space`.
    pub fn with_colorspace(mut self, color_space: ColorSpace) -> Self {
        self.encoder = self
            .encoder
            .with_internal_color_model(color_space.to_ravif());
        self
    }

    /// Configures `rayon` thread pool size.
    /// The default `None` is to use all threads in the default `rayon` thread pool.
    pub fn with_num_threads(mut self, num_threads: Option<usize>) -> Self {
        self.encoder = self.encoder.with_num_threads(num_threads);
        self
    }
}
impl<W: Write> ImageEncoder for AvifEncoder<W> {
    /// Encode image data with the indicated color type.
    ///
    /// The encoder currently requires all data to be RGBA8, it will be converted internally if
    /// necessary. When data is suitably aligned, i.e. u16 channels to two bytes, then the
    /// conversion may be more efficient.
    ///
    /// # Panics
    ///
    /// Panics if `data.len()` does not match `color.buffer_size(width, height)`.
    #[track_caller]
    fn write_image(
        mut self,
        data: &[u8],
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        let expected_buffer_len = color.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            data.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            data.len(),
        );
        self.set_color(color);
        // `ravif` needs strongly typed data so let's convert. We can either use a temporarily
        // owned version in our own buffer or zero-copy if possible by using the input buffer.
        // This requires going through `rgb`.
        let mut fallback = vec![]; // This vector is used if we need to do a color conversion.
        let result = match Self::encode_as_img(&mut fallback, data, width, height, color)? {
            RgbColor::Rgb8(buffer) => self.encoder.encode_rgb(buffer),
            RgbColor::Rgba8(buffer) => self.encoder.encode_rgba(buffer),
        };
        // Wrap any `ravif` failure as a format-level encoding error.
        let data = result.map_err(|err| {
            ImageError::Encoding(EncodingError::new(ImageFormat::Avif.into(), err))
        })?;
        self.inner.write_all(&data.avif_file)?;
        Ok(())
    }
}
impl<W: Write> AvifEncoder<W> {
    // Does not currently do anything. Mirrors behaviour of old config function.
    fn set_color(&mut self, _color: ExtendedColorType) {
        // self.config.color_space = ColorSpace::RGB;
    }
    /// Views (or converts) `data` as strongly typed 8-bit RGB(A) pixels for `ravif`.
    ///
    /// `Rgb8`/`Rgba8` input is reinterpreted zero-copy; every other supported color type is
    /// converted into `fallback`, which then backs the returned view. Unsupported color types
    /// produce an `ImageError::Unsupported`.
    fn encode_as_img<'buf>(
        fallback: &'buf mut Vec<u8>,
        data: &'buf [u8],
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<RgbColor<'buf>> {
        // Error wrapping utility for color dependent buffer dimensions.
        fn try_from_raw<P: Pixel + 'static>(
            data: &[P::Subpixel],
            width: u32,
            height: u32,
        ) -> ImageResult<ImageBuffer<P, &[P::Subpixel]>> {
            ImageBuffer::from_raw(width, height, data).ok_or_else(|| {
                ImageError::Parameter(ParameterError::from_kind(
                    ParameterErrorKind::DimensionMismatch,
                ))
            })
        }
        // Convert to target color type using few buffer allocations.
        fn convert_into<'buf, P>(
            buf: &'buf mut Vec<u8>,
            image: ImageBuffer<P, &[P::Subpixel]>,
        ) -> Img<&'buf [RGBA8]>
        where
            P: Pixel + 'static,
            Rgba<u8>: FromColor<P>,
        {
            let (width, height) = image.dimensions();
            // TODO: conversion re-using the target buffer?
            let image: ImageBuffer<Rgba<u8>, _> = image.convert();
            *buf = image.into_raw();
            Img::new(buf.as_pixels(), width as usize, height as usize)
        }
        // Cast the input slice using few buffer allocations if possible.
        // In particular try not to allocate if the caller did the infallible reverse.
        fn cast_buffer<Channel>(buf: &[u8]) -> ImageResult<Cow<'_, [Channel]>>
        where
            Channel: Pod + Zero,
        {
            match try_cast_slice(buf) {
                Ok(slice) => Ok(Cow::Borrowed(slice)),
                Err(PodCastError::OutputSliceWouldHaveSlop) => Err(ImageError::Parameter(
                    ParameterError::from_kind(ParameterErrorKind::DimensionMismatch),
                )),
                Err(PodCastError::TargetAlignmentGreaterAndInputNotAligned) => {
                    // Sad, but let's allocate.
                    // bytemuck checks alignment _before_ slop but size mismatch before this..
                    if buf.len() % size_of::<Channel>() != 0 {
                        Err(ImageError::Parameter(ParameterError::from_kind(
                            ParameterErrorKind::DimensionMismatch,
                        )))
                    } else {
                        // Copy into a correctly aligned buffer of the channel type.
                        let len = buf.len() / size_of::<Channel>();
                        let mut data = vec![Channel::zero(); len];
                        let view = try_cast_slice_mut::<_, u8>(data.as_mut_slice()).unwrap();
                        view.copy_from_slice(buf);
                        Ok(Cow::Owned(data))
                    }
                }
                Err(err) => {
                    // Are you trying to encode a ZST??
                    Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(format!("{err:?}")),
                    )))
                }
            }
        }
        match color {
            ExtendedColorType::Rgb8 => {
                // ravif doesn't do any checks but has some asserts, so we do the checks.
                let img = try_from_raw::<Rgb<u8>>(data, width, height)?;
                // Now, internally ravif uses u32 but it takes usize. We could do some checked
                // conversion but instead we use that a non-empty image must be addressable.
                if img.pixels().len() == 0 {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::DimensionMismatch,
                    )));
                }
                Ok(RgbColor::Rgb8(Img::new(
                    AsPixels::as_pixels(data),
                    width as usize,
                    height as usize,
                )))
            }
            ExtendedColorType::Rgba8 => {
                // ravif doesn't do any checks but has some asserts, so we do the checks.
                let img = try_from_raw::<Rgba<u8>>(data, width, height)?;
                // Now, internally ravif uses u32 but it takes usize. We could do some checked
                // conversion but instead we use that a non-empty image must be addressable.
                if img.pixels().len() == 0 {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::DimensionMismatch,
                    )));
                }
                Ok(RgbColor::Rgba8(Img::new(
                    AsPixels::as_pixels(data),
                    width as usize,
                    height as usize,
                )))
            }
            // we need a separate buffer..
            ExtendedColorType::L8 => {
                let image = try_from_raw::<Luma<u8>>(data, width, height)?;
                Ok(RgbColor::Rgba8(convert_into(fallback, image)))
            }
            ExtendedColorType::La8 => {
                let image = try_from_raw::<LumaA<u8>>(data, width, height)?;
                Ok(RgbColor::Rgba8(convert_into(fallback, image)))
            }
            // we need to really convert data..
            ExtendedColorType::L16 => {
                let buffer = cast_buffer(data)?;
                let image = try_from_raw::<Luma<u16>>(&buffer, width, height)?;
                Ok(RgbColor::Rgba8(convert_into(fallback, image)))
            }
            ExtendedColorType::La16 => {
                let buffer = cast_buffer(data)?;
                let image = try_from_raw::<LumaA<u16>>(&buffer, width, height)?;
                Ok(RgbColor::Rgba8(convert_into(fallback, image)))
            }
            ExtendedColorType::Rgb16 => {
                let buffer = cast_buffer(data)?;
                let image = try_from_raw::<Rgb<u16>>(&buffer, width, height)?;
                Ok(RgbColor::Rgba8(convert_into(fallback, image)))
            }
            ExtendedColorType::Rgba16 => {
                let buffer = cast_buffer(data)?;
                let image = try_from_raw::<Rgba<u16>>(&buffer, width, height)?;
                Ok(RgbColor::Rgba8(convert_into(fallback, image)))
            }
            // for cases we do not support at all?
            _ => Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Avif.into(),
                    UnsupportedErrorKind::Color(color),
                ),
            )),
        }
    }
}

18
vendor/image/src/codecs/avif/mod.rs vendored Normal file
View File

@@ -0,0 +1,18 @@
//! Encoding of AVIF images.
//!
//! The [AVIF] specification defines an image derivative of the AV1 bitstream, an open video codec.
//!
//! [AVIF]: https://aomediacodec.github.io/av1-avif/
#[cfg(feature = "avif-native")]
pub use self::decoder::AvifDecoder;
#[cfg(feature = "avif")]
pub use self::encoder::{AvifEncoder, ColorSpace};
#[cfg(feature = "avif-native")]
mod decoder;
#[cfg(feature = "avif")]
mod encoder;
#[cfg(feature = "avif-native")]
mod ycgco;
#[cfg(feature = "avif-native")]
mod yuv;

419
vendor/image/src/codecs/avif/ycgco.rs vendored Normal file
View File

@@ -0,0 +1,419 @@
use crate::codecs::avif::yuv::{
check_rgb_preconditions, check_yuv_plane_preconditions, qrshr, yuv420_to_rgbx_invoker,
yuv422_to_rgbx_invoker, CbCrInverseTransform, HalvedRowHandler, PlaneDefinition,
YuvChromaRange, YuvIntensityRange, YuvPlanarImage, YuvStandardMatrix,
};
use crate::ImageError;
use num_traits::AsPrimitive;
/// Computes YCgCo inverse in limited range
/// # Arguments
/// - `dst` - dest buffer
/// - `y_value` - Y value with subtracted bias
/// - `cg` - Cg value with subtracted bias
/// - `co` - Co value with subtracted bias
/// - `scale` - Q`PRECISION` fixed-point coefficient expanding limited range to full range
#[inline(always)]
fn ycgco_execute_limited<
    V: Copy + AsPrimitive<i32> + 'static + Sized,
    const PRECISION: i32,
    const CHANNELS: usize,
    const BIT_DEPTH: usize,
>(
    dst: &mut [V; CHANNELS],
    y_value: i32,
    cg: i32,
    co: i32,
    scale: i32,
) where
    i32: AsPrimitive<V>,
{
    // YCgCo inverse: R = Y - Cg + Co, G = Y + Cg, B = Y - Cg - Co,
    // scaled back to full range and rounded/clamped via `qrshr`.
    let t0 = y_value - cg;
    let r = qrshr::<PRECISION, BIT_DEPTH>((t0 + co) * scale);
    let b = qrshr::<PRECISION, BIT_DEPTH>((t0 - co) * scale);
    let g = qrshr::<PRECISION, BIT_DEPTH>((y_value + cg) * scale);
    if CHANNELS == 4 {
        dst[0] = r.as_();
        dst[1] = g.as_();
        dst[2] = b.as_();
        // Alpha is the maximum representable value for the bit depth (opaque).
        dst[3] = ((1i32 << BIT_DEPTH) - 1).as_();
    } else if CHANNELS == 3 {
        dst[0] = r.as_();
        dst[1] = g.as_();
        dst[2] = b.as_();
    } else {
        unreachable!();
    }
}
/// Computes YCgCo inverse in full range
/// # Arguments
/// - `dst` - dest buffer
/// - `y_value` - Y value with subtracted bias
/// - `cg` - Cg value with subtracted bias
/// - `co` - Co value with subtracted bias
#[inline(always)]
fn ycgco_execute_full<
    V: Copy + AsPrimitive<i32> + 'static + Sized,
    const PRECISION: i32,
    const CHANNELS: usize,
    const BIT_DEPTH: usize,
>(
    dst: &mut [V; CHANNELS],
    y_value: i32,
    cg: i32,
    co: i32,
) where
    i32: AsPrimitive<V>,
{
    // Full-range YCgCo inverse: R = Y - Cg + Co, G = Y + Cg, B = Y - Cg - Co,
    // clamped to the representable range of the target bit depth.
    let max_value = (1i32 << BIT_DEPTH) - 1;
    let y_minus_cg = y_value - cg;
    let red = (y_minus_cg + co).clamp(0, max_value);
    let blue = (y_minus_cg - co).clamp(0, max_value);
    let green = (y_value + cg).clamp(0, max_value);
    match CHANNELS {
        3 => {
            dst[0] = red.as_();
            dst[1] = green.as_();
            dst[2] = blue.as_();
        }
        4 => {
            dst[0] = red.as_();
            dst[1] = green.as_();
            dst[2] = blue.as_();
            // Fully opaque alpha.
            dst[3] = max_value.as_();
        }
        _ => unreachable!(),
    }
}
/// Converts one row of horizontally subsampled (4:2:x) YCgCo data to RGB(A): each chroma
/// sample is shared by a pair of adjacent luma samples.
///
/// The ignored `CbCrInverseTransform` parameter keeps the signature compatible with the
/// row-handler type expected by the `yuv` invokers.
#[inline(always)]
fn process_halved_chroma_row_cgco<
    V: Copy + AsPrimitive<i32> + 'static + Sized,
    const PRECISION: i32,
    const CHANNELS: usize,
    const BIT_DEPTH: usize,
>(
    image: YuvPlanarImage<V>,
    rgba: &mut [V],
    _: &CbCrInverseTransform<i32>,
    range: &YuvChromaRange,
) where
    i32: AsPrimitive<V>,
{
    let max_value = (1i32 << BIT_DEPTH) - 1;
    // If the stride is larger than the plane size,
    // it might contain junk data beyond the actual valid region.
    // To avoid processing artifacts when working with odd-sized images,
    // the buffer is reshaped to its actual size,
    // preventing accidental use of invalid values from the trailing region.
    let y_plane = &image.y_plane[..image.width];
    let chroma_size = image.width.div_ceil(2);
    let u_plane = &image.u_plane[..chroma_size];
    let v_plane = &image.v_plane[..chroma_size];
    let rgba = &mut rgba[..image.width * CHANNELS];
    let bias_y = range.bias_y as i32;
    let bias_uv = range.bias_uv as i32;
    // Walk luma in pairs; each pair shares one (Cg, Co) chroma sample.
    let y_iter = y_plane.chunks_exact(2);
    let rgb_chunks = rgba.chunks_exact_mut(CHANNELS * 2);
    // Fixed-point (Q`PRECISION`) coefficient expanding the limited luma range to full range.
    let scale_coef = ((max_value as f32 / range.range_y as f32) * (1 << PRECISION) as f32) as i32;
    for (((y_src, &u_src), &v_src), rgb_dst) in y_iter.zip(u_plane).zip(v_plane).zip(rgb_chunks) {
        let y_value0: i32 = y_src[0].as_() - bias_y;
        let cg_value: i32 = u_src.as_() - bias_uv;
        let co_value: i32 = v_src.as_() - bias_uv;
        let dst0 = &mut rgb_dst[..CHANNELS];
        ycgco_execute_limited::<V, PRECISION, CHANNELS, BIT_DEPTH>(
            dst0.try_into().unwrap(),
            y_value0,
            cg_value,
            co_value,
            scale_coef,
        );
        let y_value1 = y_src[1].as_() - bias_y;
        let dst1 = &mut rgb_dst[CHANNELS..2 * CHANNELS];
        ycgco_execute_limited::<V, PRECISION, CHANNELS, BIT_DEPTH>(
            dst1.try_into().unwrap(),
            y_value1,
            cg_value,
            co_value,
            scale_coef,
        );
    }
    // Process remainder if width is odd.
    if image.width & 1 != 0 {
        let y_left = y_plane.chunks_exact(2).remainder();
        let rgb_chunks = rgba
            .chunks_exact_mut(CHANNELS * 2)
            .into_remainder()
            .chunks_exact_mut(CHANNELS);
        // `y_left` has at most one element; iterating chroma in reverse pairs that leftover
        // luma sample with the final chroma sample of the row.
        let u_iter = u_plane.iter().rev();
        let v_iter = v_plane.iter().rev();
        for (((y_src, u_src), v_src), rgb_dst) in
            y_left.iter().zip(u_iter).zip(v_iter).zip(rgb_chunks)
        {
            let y_value = y_src.as_() - bias_y;
            let cg_value = u_src.as_() - bias_uv;
            let co_value = v_src.as_() - bias_uv;
            ycgco_execute_limited::<V, PRECISION, CHANNELS, BIT_DEPTH>(
                rgb_dst.try_into().unwrap(),
                y_value,
                cg_value,
                co_value,
                scale_coef,
            );
        }
    }
}
/// Converts YCgCo 444 planar format to Rgba
///
/// # Arguments
///
/// * `image`: see [YuvPlanarImage]
/// * `rgba`: RGB image layout
/// * `yuv_range`: see [YuvIntensityRange]
///
fn ycgco444_to_rgbx_impl<
    V: Copy + AsPrimitive<i32> + 'static + Sized,
    const CHANNELS: usize,
    const BIT_DEPTH: usize,
>(
    image: YuvPlanarImage<V>,
    rgba: &mut [V],
    yuv_range: YuvIntensityRange,
) -> Result<(), ImageError>
where
    i32: AsPrimitive<V>,
{
    assert!(
        CHANNELS == 3 || CHANNELS == 4,
        "YUV 4:4:4 -> RGB is implemented only on 3 and 4 channels"
    );
    assert!(
        (8..=16).contains(&BIT_DEPTH),
        "Invalid bit depth is provided"
    );
    assert!(
        if BIT_DEPTH > 8 {
            size_of::<V>() == 2
        } else {
            size_of::<V>() == 1
        },
        "Unsupported bit depth and data type combination"
    );
    let y_plane = image.y_plane;
    let u_plane = image.u_plane;
    let v_plane = image.v_plane;
    let y_stride = image.y_stride;
    let u_stride = image.u_stride;
    let v_stride = image.v_stride;
    let height = image.height;
    let width = image.width;
    // Validate that every plane (and the output) is large enough for the claimed geometry.
    check_yuv_plane_preconditions(y_plane, PlaneDefinition::Y, y_stride, height)?;
    check_yuv_plane_preconditions(u_plane, PlaneDefinition::U, u_stride, height)?;
    check_yuv_plane_preconditions(v_plane, PlaneDefinition::V, v_stride, height)?;
    check_rgb_preconditions(rgba, image.width * CHANNELS, height)?;
    let range = yuv_range.get_yuv_range(BIT_DEPTH as u32);
    // Q13 fixed-point precision used for the limited-range scale coefficient.
    const PRECISION: i32 = 13;
    let bias_y = range.bias_y as i32;
    let bias_uv = range.bias_uv as i32;
    let rgb_stride = width * CHANNELS;
    let y_iter = y_plane.chunks_exact(y_stride);
    let rgb_iter = rgba.chunks_exact_mut(rgb_stride);
    let u_iter = u_plane.chunks_exact(u_stride);
    let v_iter = v_plane.chunks_exact(v_stride);
    let max_value: i32 = (1 << BIT_DEPTH) - 1;
    // All branches on generic const will be optimized out.
    for (((y_src, u_src), v_src), rgb) in y_iter.zip(u_iter).zip(v_iter).zip(rgb_iter) {
        let rgb_chunks = rgb.chunks_exact_mut(CHANNELS);
        match yuv_range {
            YuvIntensityRange::Tv => {
                // Limited (TV) range: expand to full range with a fixed-point coefficient.
                let y_coef =
                    ((max_value as f32 / range.range_y as f32) * (1 << PRECISION) as f32) as i32;
                for (((y_src, u_src), v_src), rgb_dst) in
                    y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks)
                {
                    let y_value = y_src.as_() - bias_y;
                    let cg_value = u_src.as_() - bias_uv;
                    let co_value = v_src.as_() - bias_uv;
                    ycgco_execute_limited::<V, PRECISION, CHANNELS, BIT_DEPTH>(
                        rgb_dst.try_into().unwrap(),
                        y_value,
                        cg_value,
                        co_value,
                        y_coef,
                    );
                }
            }
            YuvIntensityRange::Pc => {
                // Full (PC) range: no scaling required, only clamping.
                for (((y_src, u_src), v_src), rgb_dst) in
                    y_src.iter().zip(u_src).zip(v_src).zip(rgb_chunks)
                {
                    let y_value = y_src.as_() - bias_y;
                    let cg_value = u_src.as_() - bias_uv;
                    let co_value = v_src.as_() - bias_uv;
                    ycgco_execute_full::<V, PRECISION, CHANNELS, BIT_DEPTH>(
                        rgb_dst.try_into().unwrap(),
                        y_value,
                        cg_value,
                        co_value,
                    );
                }
            }
        }
    }
    Ok(())
}
/// Generates a `pub(crate)` converter from a horizontally subsampled (4:2:0 / 4:2:2) YCgCo
/// planar image to RGBA, delegating to the given `yuv` invoker at Q13 fixed-point precision.
///
/// The previously documented `matrix` argument was removed from the generated docs: the
/// generated function takes no such parameter.
macro_rules! define_ycgco_half_chroma {
    ($name: ident, $invoker: ident, $storage: ident, $cn: expr, $bp: expr, $description: expr) => {
        #[doc = concat!($description, "
# Arguments
* `image`: see [YuvPlanarImage]
* `rgb`: RGB image layout
* `range`: see [YuvIntensityRange]")]
        pub(crate) fn $name(
            image: YuvPlanarImage<$storage>,
            rgb: &mut [$storage],
            range: YuvIntensityRange,
        ) -> Result<(), ImageError> {
            const P: i32 = 13;
            // The matrix is required by the invoker's signature but is unused here: the YCgCo
            // row handler ignores its `CbCrInverseTransform` argument.
            $invoker::<$storage, HalvedRowHandler<$storage>, P, $cn, $bp>(
                image,
                rgb,
                range,
                YuvStandardMatrix::Bt709,
                process_halved_chroma_row_cgco::<$storage, P, $cn, $bp>,
            )
        }
    };
}
/// Channel count of an RGBA pixel; every generated converter below emits RGBA output.
const RGBA_CN: usize = 4;
define_ycgco_half_chroma!(
    ycgco420_to_rgba8,
    yuv420_to_rgbx_invoker,
    u8,
    RGBA_CN,
    8,
    "Converts YCgCo 420 8-bit planar format to Rgba 8-bit"
);
define_ycgco_half_chroma!(
    ycgco422_to_rgba8,
    yuv422_to_rgbx_invoker,
    u8,
    RGBA_CN,
    8,
    // Description fixed: this is the 4:2:2 variant (was a copy-paste of the 4:2:0 text).
    "Converts YCgCo 422 8-bit planar format to Rgba 8-bit"
);
// Higher bit-depth (10- and 12-bit) variants store samples in `u16`.
define_ycgco_half_chroma!(
    ycgco420_to_rgba10,
    yuv420_to_rgbx_invoker,
    u16,
    RGBA_CN,
    10,
    "Converts YCgCo 420 10-bit planar format to Rgba 10-bit"
);
define_ycgco_half_chroma!(
    ycgco422_to_rgba10,
    yuv422_to_rgbx_invoker,
    u16,
    RGBA_CN,
    10,
    "Converts YCgCo 422 10-bit planar format to Rgba 10-bit"
);
define_ycgco_half_chroma!(
    ycgco420_to_rgba12,
    yuv420_to_rgbx_invoker,
    u16,
    RGBA_CN,
    12,
    "Converts YCgCo 420 12-bit planar format to Rgba 12-bit"
);
define_ycgco_half_chroma!(
    ycgco422_to_rgba12,
    yuv422_to_rgbx_invoker,
    u16,
    RGBA_CN,
    12,
    "Converts YCgCo 422 12-bit planar format to Rgba 12-bit"
);
/// Generates a `pub(crate)` converter from a non-subsampled (4:4:4) YCgCo planar image to
/// RGBA via [`ycgco444_to_rgbx_impl`].
///
/// NOTE(review): the macro name says `ycgcg`, presumably a typo for `ycgco`; renaming would
/// require updating every invocation, so the name is kept. The previously documented `matrix`
/// argument was removed from the generated docs: the generated function takes no such
/// parameter and YCgCo needs no standard matrix.
macro_rules! define_ycgcg_full_chroma {
    ($name: ident, $storage: ident, $cn: expr, $bp: expr, $description: expr) => {
        #[doc = concat!($description, "
# Arguments
* `image`: see [YuvPlanarImage]
* `rgba`: RGB image layout
* `range`: see [YuvIntensityRange]
")]
        pub(crate) fn $name(
            image: YuvPlanarImage<$storage>,
            rgba: &mut [$storage],
            range: YuvIntensityRange,
        ) -> Result<(), ImageError> {
            ycgco444_to_rgbx_impl::<$storage, $cn, $bp>(image, rgba, range)
        }
    };
}
// Non-subsampled 4:4:4 converters for 8-, 10- and 12-bit storage.
define_ycgcg_full_chroma!(
    ycgco444_to_rgba8,
    u8,
    RGBA_CN,
    8,
    "Converts YCgCo 444 planar format 8 bit-depth to Rgba 8 bit"
);
define_ycgcg_full_chroma!(
    ycgco444_to_rgba10,
    u16,
    RGBA_CN,
    10,
    "Converts YCgCo 444 planar format 10 bit-depth to Rgba 10 bit"
);
define_ycgcg_full_chroma!(
    ycgco444_to_rgba12,
    u16,
    RGBA_CN,
    12,
    "Converts YCgCo 444 planar format 12 bit-depth to Rgba 12 bit"
);

1279
vendor/image/src/codecs/avif/yuv.rs vendored Normal file

File diff suppressed because it is too large Load Diff

1480
vendor/image/src/codecs/bmp/decoder.rs vendored Normal file

File diff suppressed because it is too large Load Diff

424
vendor/image/src/codecs/bmp/encoder.rs vendored Normal file
View File

@@ -0,0 +1,424 @@
use byteorder_lite::{LittleEndian, WriteBytesExt};
use std::io::{self, Write};
use crate::error::{
EncodingError, ImageError, ImageFormatHint, ImageResult, ParameterError, ParameterErrorKind,
UnsupportedError, UnsupportedErrorKind,
};
use crate::{DynamicImage, ExtendedColorType, ImageEncoder, ImageFormat};
/// Size in bytes of the BITMAPFILEHEADER structure.
const BITMAPFILEHEADER_SIZE: u32 = 14;
/// Size in bytes of the BITMAPINFOHEADER DIB header.
const BITMAPINFOHEADER_SIZE: u32 = 40;
/// Size in bytes of the BITMAPV4HEADER DIB header (used when writing RGBA with bitfield masks).
const BITMAPV4HEADER_SIZE: u32 = 108;
/// The representation of a BMP encoder.
pub struct BmpEncoder<'a, W: 'a> {
    /// Sink receiving the encoded BMP bytes.
    writer: &'a mut W,
}
impl<'a, W: Write + 'a> BmpEncoder<'a, W> {
    /// Create a new encoder that writes its output to ```w```.
    pub fn new(w: &'a mut W) -> Self {
        BmpEncoder { writer: w }
    }
    /// Encodes the image `image` that has dimensions `width` and `height` and `ExtendedColorType` `c`.
    ///
    /// # Panics
    ///
    /// Panics if `width * height * c.bytes_per_pixel() != image.len()`.
    #[track_caller]
    pub fn encode(
        &mut self,
        image: &[u8],
        width: u32,
        height: u32,
        c: ExtendedColorType,
    ) -> ImageResult<()> {
        self.encode_with_palette(image, width, height, c, None)
    }
    /// Same as `encode`, but allow a palette to be passed in. The `palette` is ignored for color
    /// types other than Luma/Luma-with-alpha.
    ///
    /// # Panics
    ///
    /// Panics if `width * height * c.bytes_per_pixel() != image.len()`.
    #[track_caller]
    pub fn encode_with_palette(
        &mut self,
        image: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
        palette: Option<&[[u8; 3]]>,
    ) -> ImageResult<()> {
        if palette.is_some()
            && color_type != ExtendedColorType::L8
            && color_type != ExtendedColorType::La8
        {
            return Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::Generic(
                    "Palette given which must only be used with L8 or La8 color types".to_string(),
                ),
            )));
        }
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            image.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            image.len(),
        );
        let bmp_header_size = BITMAPFILEHEADER_SIZE;
        let (dib_header_size, written_pixel_size, palette_color_count) =
            get_pixel_info(color_type, palette)?;
        // Each row must be padded to a multiple of 4 bytes. `wrapping_mul` is sound here:
        // 4 divides 2^32, so the product modulo 4 is unaffected by wrap-around, and any
        // actual overflow is rejected by the checked `image_size` computation below.
        let row_pad_size = (4 - width.wrapping_mul(written_pixel_size) % 4) % 4;
        let image_size = width
            .checked_mul(height)
            .and_then(|v| v.checked_mul(written_pixel_size))
            // The total padding must also be computed checked: `height * row_pad_size`
            // alone can exceed `u32::MAX` (e.g. L8, width = 1, height > u32::MAX / 3).
            .and_then(|v| {
                height
                    .checked_mul(row_pad_size)
                    .and_then(|pad| v.checked_add(pad))
            })
            .ok_or_else(|| {
                ImageError::Parameter(ParameterError::from_kind(
                    ParameterErrorKind::DimensionMismatch,
                ))
            })?;
        let palette_size = palette_color_count * 4; // all palette colors are BGRA
        let file_size = bmp_header_size
            .checked_add(dib_header_size)
            .and_then(|v| v.checked_add(palette_size))
            .and_then(|v| v.checked_add(image_size))
            .ok_or_else(|| {
                ImageError::Encoding(EncodingError::new(
                    ImageFormatHint::Exact(ImageFormat::Bmp),
                    "calculated BMP header size larger than 2^32",
                ))
            })?;
        // write BMP header
        self.writer.write_u8(b'B')?;
        self.writer.write_u8(b'M')?;
        self.writer.write_u32::<LittleEndian>(file_size)?; // file size
        self.writer.write_u16::<LittleEndian>(0)?; // reserved 1
        self.writer.write_u16::<LittleEndian>(0)?; // reserved 2
        self.writer
            .write_u32::<LittleEndian>(bmp_header_size + dib_header_size + palette_size)?; // image data offset
        // write DIB header
        self.writer.write_u32::<LittleEndian>(dib_header_size)?;
        self.writer.write_i32::<LittleEndian>(width as i32)?;
        self.writer.write_i32::<LittleEndian>(height as i32)?;
        self.writer.write_u16::<LittleEndian>(1)?; // color planes
        self.writer
            .write_u16::<LittleEndian>((written_pixel_size * 8) as u16)?; // bits per pixel
        if dib_header_size >= BITMAPV4HEADER_SIZE {
            // Assume BGRA32
            self.writer.write_u32::<LittleEndian>(3)?; // compression method - bitfields
        } else {
            self.writer.write_u32::<LittleEndian>(0)?; // compression method - no compression
        }
        self.writer.write_u32::<LittleEndian>(image_size)?;
        self.writer.write_i32::<LittleEndian>(0)?; // horizontal ppm
        self.writer.write_i32::<LittleEndian>(0)?; // vertical ppm
        self.writer.write_u32::<LittleEndian>(palette_color_count)?;
        self.writer.write_u32::<LittleEndian>(0)?; // all colors are important
        if dib_header_size >= BITMAPV4HEADER_SIZE {
            // Assume BGRA32
            self.writer.write_u32::<LittleEndian>(0xff << 16)?; // red mask
            self.writer.write_u32::<LittleEndian>(0xff << 8)?; // green mask
            self.writer.write_u32::<LittleEndian>(0xff)?; // blue mask
            self.writer.write_u32::<LittleEndian>(0xff << 24)?; // alpha mask
            self.writer.write_u32::<LittleEndian>(0x7352_4742)?; // colorspace - sRGB
            // endpoints (3x3) and gamma (3)
            for _ in 0..12 {
                self.writer.write_u32::<LittleEndian>(0)?;
            }
        }
        // write image data
        match color_type {
            ExtendedColorType::Rgb8 => self.encode_rgb(image, width, height, row_pad_size, 3)?,
            ExtendedColorType::Rgba8 => self.encode_rgba(image, width, height, row_pad_size, 4)?,
            ExtendedColorType::L8 => {
                self.encode_gray(image, width, height, row_pad_size, 1, palette)?;
            }
            ExtendedColorType::La8 => {
                self.encode_gray(image, width, height, row_pad_size, 2, palette)?;
            }
            _ => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Bmp.into(),
                        UnsupportedErrorKind::Color(color_type),
                    ),
                ));
            }
        }
        Ok(())
    }
    /// Writes RGB pixel data bottom-up as padded BGR rows.
    fn encode_rgb(
        &mut self,
        image: &[u8],
        width: u32,
        height: u32,
        row_pad_size: u32,
        bytes_per_pixel: u32,
    ) -> io::Result<()> {
        let width = width as usize;
        let height = height as usize;
        let x_stride = bytes_per_pixel as usize;
        let y_stride = width * x_stride;
        for row in (0..height).rev() {
            // from the bottom up
            let row_start = row * y_stride;
            for px in image[row_start..][..y_stride].chunks_exact(x_stride) {
                let r = px[0];
                let g = px[1];
                let b = px[2];
                // written as BGR
                self.writer.write_all(&[b, g, r])?;
            }
            self.write_row_pad(row_pad_size)?;
        }
        Ok(())
    }
    /// Writes RGBA pixel data bottom-up as padded BGRA rows.
    fn encode_rgba(
        &mut self,
        image: &[u8],
        width: u32,
        height: u32,
        row_pad_size: u32,
        bytes_per_pixel: u32,
    ) -> io::Result<()> {
        let width = width as usize;
        let height = height as usize;
        let x_stride = bytes_per_pixel as usize;
        let y_stride = width * x_stride;
        for row in (0..height).rev() {
            // from the bottom up
            let row_start = row * y_stride;
            for px in image[row_start..][..y_stride].chunks_exact(x_stride) {
                let r = px[0];
                let g = px[1];
                let b = px[2];
                let a = px[3];
                // written as BGRA
                self.writer.write_all(&[b, g, r, a])?;
            }
            self.write_row_pad(row_pad_size)?;
        }
        Ok(())
    }
    /// Writes the grayscale palette followed by palette-indexed pixel data, bottom-up.
    /// The alpha channel of `La8` input is discarded.
    fn encode_gray(
        &mut self,
        image: &[u8],
        width: u32,
        height: u32,
        row_pad_size: u32,
        bytes_per_pixel: u32,
        palette: Option<&[[u8; 3]]>,
    ) -> io::Result<()> {
        // write grayscale palette
        if let Some(palette) = palette {
            for item in palette {
                // each color is written as BGRA, where A is always 0
                self.writer.write_all(&[item[2], item[1], item[0], 0])?;
            }
        } else {
            for val in 0u8..=255 {
                // each color is written as BGRA, where A is always 0 and since only grayscale is being written, B = G = R = index
                self.writer.write_all(&[val, val, val, 0])?;
            }
        }
        // write image data
        // Use `usize` offset arithmetic like `encode_rgb`/`encode_rgba`: with La8 input the
        // buffer holds 2 * width * height bytes, so u32 products could overflow.
        let width = width as usize;
        let height = height as usize;
        let x_stride = bytes_per_pixel as usize;
        let y_stride = width * x_stride;
        for row in (0..height).rev() {
            // from the bottom up
            let row_start = row * y_stride;
            // color value is equal to the palette index
            if x_stride == 1 {
                // improve performance by writing the whole row at once
                self.writer.write_all(&image[row_start..][..y_stride])?;
            } else {
                for col in 0..width {
                    let pixel_start = row_start + (col * x_stride);
                    self.writer.write_u8(image[pixel_start])?;
                    // alpha is never written as it's not widely supported
                }
            }
            self.write_row_pad(row_pad_size)?;
        }
        Ok(())
    }
    /// Writes `row_pad_size` zero bytes to pad the current row to a 4-byte boundary.
    fn write_row_pad(&mut self, row_pad_size: u32) -> io::Result<()> {
        for _ in 0..row_pad_size {
            self.writer.write_u8(0)?;
        }
        Ok(())
    }
}
impl<W: Write> ImageEncoder for BmpEncoder<'_, W> {
    /// Encodes `buf` as a BMP image; see [`BmpEncoder::encode`].
    ///
    /// # Panics
    ///
    /// Panics if `buf.len()` does not match `color_type.buffer_size(width, height)`.
    #[track_caller]
    fn write_image(
        mut self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        self.encode(buf, width, height, color_type)
    }
    // Delegates to the crate-wide helper that converts dynamic images to an 8-bit variant
    // (the only channel depth this encoder writes).
    fn make_compatible_img(
        &self,
        _: crate::io::encoder::MethodSealedToImage,
        img: &DynamicImage,
    ) -> Option<DynamicImage> {
        crate::io::encoder::dynimage_conversion_8bit(img)
    }
}
/// Builds the error text reported for color types the BMP encoder cannot write.
fn get_unsupported_error_message(c: ExtendedColorType) -> String {
    let supported = "RGB(8), RGBA(8), Gray(8), GrayA(8)";
    format!("Unsupported color type {c:?}. Supported types: {supported}.")
}
/// Returns a tuple representing: (dib header size, written pixel size, palette color count).
fn get_pixel_info(
    c: ExtendedColorType,
    palette: Option<&[[u8; 3]]>,
) -> io::Result<(u32, u32, u32)> {
    // Grayscale output is palette-indexed: a caller-supplied palette fixes the color count,
    // otherwise a full 256-entry grayscale ramp is assumed.
    let palette_colors = palette.map_or(256, |p| p.len()) as u32;
    match c {
        ExtendedColorType::Rgb8 => Ok((BITMAPINFOHEADER_SIZE, 3, 0)),
        ExtendedColorType::Rgba8 => Ok((BITMAPV4HEADER_SIZE, 4, 0)),
        ExtendedColorType::L8 | ExtendedColorType::La8 => {
            // Both grayscale variants write one byte per pixel (alpha is dropped for La8).
            Ok((BITMAPINFOHEADER_SIZE, 1, palette_colors))
        }
        _ => Err(io::Error::new(
            io::ErrorKind::InvalidInput,
            &get_unsupported_error_message(c)[..],
        )),
    }
}
/// Round-trip encode/decode tests for the BMP encoder.
#[cfg(test)]
mod tests {
    use super::super::BmpDecoder;
    use super::BmpEncoder;
    use crate::ExtendedColorType;
    use crate::ImageDecoder as _;
    use std::io::Cursor;
    // Encodes `image` as BMP and immediately decodes it back, returning the decoded bytes.
    fn round_trip_image(image: &[u8], width: u32, height: u32, c: ExtendedColorType) -> Vec<u8> {
        let mut encoded_data = Vec::new();
        {
            let mut encoder = BmpEncoder::new(&mut encoded_data);
            encoder
                .encode(image, width, height, c)
                .expect("could not encode image");
        }
        let decoder = BmpDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode");
        let mut buf = vec![0; decoder.total_bytes() as usize];
        decoder.read_image(&mut buf).expect("failed to decode");
        buf
    }
    #[test]
    fn round_trip_single_pixel_rgb() {
        let image = [255u8, 0, 0]; // single red pixel
        let decoded = round_trip_image(&image, 1, 1, ExtendedColorType::Rgb8);
        assert_eq!(3, decoded.len());
        assert_eq!(255, decoded[0]);
        assert_eq!(0, decoded[1]);
        assert_eq!(0, decoded[2]);
    }
    // The computed file size exceeds u32::MAX, which the encoder must reject, not wrap.
    #[test]
    #[cfg(target_pointer_width = "64")]
    fn huge_files_return_error() {
        let mut encoded_data = Vec::new();
        let image = vec![0u8; 3 * 40_000 * 40_000]; // 40_000x40_000 pixels, 3 bytes per pixel, allocated on the heap
        let mut encoder = BmpEncoder::new(&mut encoded_data);
        let result = encoder.encode(&image, 40_000, 40_000, ExtendedColorType::Rgb8);
        assert!(result.is_err());
    }
    #[test]
    fn round_trip_single_pixel_rgba() {
        let image = [1, 2, 3, 4];
        let decoded = round_trip_image(&image, 1, 1, ExtendedColorType::Rgba8);
        assert_eq!(&decoded[..], &image[..]);
    }
    #[test]
    fn round_trip_3px_rgb() {
        let image = [0u8; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel
        let _decoded = round_trip_image(&image, 3, 3, ExtendedColorType::Rgb8);
    }
    #[test]
    fn round_trip_gray() {
        let image = [0u8, 1, 2]; // 3 pixels
        let decoded = round_trip_image(&image, 3, 1, ExtendedColorType::L8);
        // should be read back as 3 RGB pixels
        assert_eq!(9, decoded.len());
        assert_eq!(0, decoded[0]);
        assert_eq!(0, decoded[1]);
        assert_eq!(0, decoded[2]);
        assert_eq!(1, decoded[3]);
        assert_eq!(1, decoded[4]);
        assert_eq!(1, decoded[5]);
        assert_eq!(2, decoded[6]);
        assert_eq!(2, decoded[7]);
        assert_eq!(2, decoded[8]);
    }
    #[test]
    fn round_trip_graya() {
        let image = [0u8, 0, 1, 0, 2, 0]; // 3 pixels, each with an alpha channel
        let decoded = round_trip_image(&image, 1, 3, ExtendedColorType::La8);
        // should be read back as 3 RGB pixels (the alpha channel is discarded on encode)
        assert_eq!(9, decoded.len());
        assert_eq!(0, decoded[0]);
        assert_eq!(0, decoded[1]);
        assert_eq!(0, decoded[2]);
        assert_eq!(1, decoded[3]);
        assert_eq!(1, decoded[4]);
        assert_eq!(1, decoded[5]);
        assert_eq!(2, decoded[6]);
        assert_eq!(2, decoded[7]);
        assert_eq!(2, decoded[8]);
    }
}

13
vendor/image/src/codecs/bmp/mod.rs vendored Normal file
View File

@@ -0,0 +1,13 @@
//! Decoding and Encoding of BMP Images
//!
//! A decoder and encoder for BMP (Windows Bitmap) images
//!
//! # Related Links
//! * <https://msdn.microsoft.com/en-us/library/windows/desktop/dd183375%28v=vs.85%29.aspx>
//! * <https://en.wikipedia.org/wiki/BMP_file_format>
pub use self::decoder::BmpDecoder;
pub use self::encoder::BmpEncoder;
mod decoder;
mod encoder;

366
vendor/image/src/codecs/dds.rs vendored Normal file
View File

@@ -0,0 +1,366 @@
//! Decoding of DDS images
//!
//! DDS (DirectDraw Surface) is a container format for storing DXT (S3TC) compressed images.
//!
//! # Related Links
//! * <https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dx-graphics-dds-pguide> - Description of the DDS format.
use std::io::Read;
use std::{error, fmt};
use byteorder_lite::{LittleEndian, ReadBytesExt};
#[allow(deprecated)]
use crate::codecs::dxt::{DxtDecoder, DxtVariant};
use crate::color::ColorType;
use crate::error::{
DecodingError, ImageError, ImageFormatHint, ImageResult, UnsupportedError, UnsupportedErrorKind,
};
use crate::{ImageDecoder, ImageFormat};
/// Errors that can occur during decoding and parsing a DDS image
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
#[allow(clippy::enum_variant_names)] // the shared `…Invalid` suffix is deliberate
enum DecoderError {
    /// Wrong DDS channel width
    PixelFormatSizeInvalid(u32),
    /// Wrong DDS header size
    HeaderSizeInvalid(u32),
    /// Wrong DDS header flags
    HeaderFlagsInvalid(u32),
    /// Invalid DXGI format in DX10 header
    DxgiFormatInvalid(u32),
    /// Invalid resource dimension
    ResourceDimensionInvalid(u32),
    /// Invalid flags in DX10 header
    Dx10FlagsInvalid(u32),
    /// Invalid array size in DX10 header
    Dx10ArraySizeInvalid(u32),
    /// DDS "DDS " signature invalid or missing
    DdsSignatureInvalid,
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DecoderError::PixelFormatSizeInvalid(s) => {
f.write_fmt(format_args!("Invalid DDS PixelFormat size: {s}"))
}
DecoderError::HeaderSizeInvalid(s) => {
f.write_fmt(format_args!("Invalid DDS header size: {s}"))
}
DecoderError::HeaderFlagsInvalid(fs) => {
f.write_fmt(format_args!("Invalid DDS header flags: {fs:#010X}"))
}
DecoderError::DxgiFormatInvalid(df) => {
f.write_fmt(format_args!("Invalid DDS DXGI format: {df}"))
}
DecoderError::ResourceDimensionInvalid(d) => {
f.write_fmt(format_args!("Invalid DDS resource dimension: {d}"))
}
DecoderError::Dx10FlagsInvalid(fs) => {
f.write_fmt(format_args!("Invalid DDS DX10 header flags: {fs:#010X}"))
}
DecoderError::Dx10ArraySizeInvalid(s) => {
f.write_fmt(format_args!("Invalid DDS DX10 array size: {s}"))
}
DecoderError::DdsSignatureInvalid => f.write_str("DDS signature not found"),
}
}
}
// Decoder errors surface to callers as format-level decoding errors.
impl From<DecoderError> for ImageError {
    fn from(e: DecoderError) -> ImageError {
        ImageError::Decoding(DecodingError::new(ImageFormat::Dds.into(), e))
    }
}
// `Display` supplies the description; there is no underlying source error to expose.
impl error::Error for DecoderError {}
/// Header used by DDS image files
///
/// Mirrors the on-disk `DDS_HEADER` layout (minus the leading `dwSize`).
/// Fields prefixed with `_` are parsed but currently unused by the decoder.
#[derive(Debug)]
struct Header {
    /// Raw `dwFlags` bitfield (validated in `from_reader`)
    _flags: u32,
    /// Image height in pixels
    height: u32,
    /// Image width in pixels
    width: u32,
    _pitch_or_linear_size: u32,
    _depth: u32,
    _mipmap_count: u32,
    /// Pixel format description (flags + FourCC + channel masks)
    pixel_format: PixelFormat,
    _caps: u32,
    _caps2: u32,
}
/// Extended DX10 header used by some DDS image files
///
/// Present only when the pixel-format FourCC is `"DX10"`; see `validate`
/// for the accepted value ranges of each field.
#[derive(Debug)]
struct DX10Header {
    /// DXGI_FORMAT enum value (accepted range 0..=132)
    dxgi_format: u32,
    /// Resource dimension: 2 = 1D, 3 = 2D, 4 = 3D
    resource_dimension: u32,
    /// Misc flags; only 0x0 or 0x4 (texture cube) are accepted
    misc_flag: u32,
    /// Number of array elements (must be 1 for 3D textures)
    array_size: u32,
    /// Alpha-mode flags; values above 0x4 are rejected
    misc_flags_2: u32,
}
/// DDS pixel format
///
/// Mirrors the on-disk `DDS_PIXELFORMAT` structure (minus the leading
/// `dwSize`, which is validated to be 32 while parsing).
#[derive(Debug)]
struct PixelFormat {
    /// Bitfield describing which members hold valid data;
    /// bit 0x4 signals that `fourcc` is meaningful
    flags: u32,
    /// Four-character compression code, e.g. `b"DXT1"` or `b"DX10"`
    fourcc: [u8; 4],
    _rgb_bit_count: u32,
    _r_bit_mask: u32,
    _g_bit_mask: u32,
    _b_bit_mask: u32,
    _a_bit_mask: u32,
}
impl PixelFormat {
    /// Parses a `DDS_PIXELFORMAT` from `r`.
    ///
    /// Fails with `DecoderError::PixelFormatSizeInvalid` if the leading
    /// size field is not 32, the only valid size for this structure.
    fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
        let size = r.read_u32::<LittleEndian>()?;
        if size != 32 {
            return Err(DecoderError::PixelFormatSizeInvalid(size).into());
        }
        // Remaining fields are read in on-disk order, all little endian.
        Ok(Self {
            flags: r.read_u32::<LittleEndian>()?,
            fourcc: {
                let mut v = [0; 4];
                r.read_exact(&mut v)?;
                v
            },
            _rgb_bit_count: r.read_u32::<LittleEndian>()?,
            _r_bit_mask: r.read_u32::<LittleEndian>()?,
            _g_bit_mask: r.read_u32::<LittleEndian>()?,
            _b_bit_mask: r.read_u32::<LittleEndian>()?,
            _a_bit_mask: r.read_u32::<LittleEndian>()?,
        })
    }
}
impl Header {
    /// Parses a `DDS_HEADER` from `r`; the "DDS " magic must already have
    /// been consumed by the caller.
    ///
    /// Validates the structure size (124) and the `dwFlags` bitfield before
    /// reading the remaining fields.
    fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
        let size = r.read_u32::<LittleEndian>()?;
        if size != 124 {
            return Err(DecoderError::HeaderSizeInvalid(size).into());
        }
        // DDSD_CAPS | DDSD_HEIGHT | DDSD_WIDTH | DDSD_PIXELFORMAT:
        // bits every valid DDS header must set.
        const REQUIRED_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x1000;
        // Additionally allowed: DDSD_PITCH (0x8), DDSD_MIPMAPCOUNT (0x20000),
        // DDSD_LINEARSIZE (0x80000), DDSD_DEPTH (0x0080_0000).
        const VALID_FLAGS: u32 = 0x1 | 0x2 | 0x4 | 0x8 | 0x1000 | 0x20000 | 0x80000 | 0x0080_0000;
        let flags = r.read_u32::<LittleEndian>()?;
        // Fail if any required bit is missing or any unknown bit is set.
        if flags & (REQUIRED_FLAGS | !VALID_FLAGS) != REQUIRED_FLAGS {
            return Err(DecoderError::HeaderFlagsInvalid(flags).into());
        }
        let height = r.read_u32::<LittleEndian>()?;
        let width = r.read_u32::<LittleEndian>()?;
        let pitch_or_linear_size = r.read_u32::<LittleEndian>()?;
        let depth = r.read_u32::<LittleEndian>()?;
        let mipmap_count = r.read_u32::<LittleEndian>()?;
        // Skip `dwReserved1` (11 unused u32s)
        {
            let mut skipped = [0; 4 * 11];
            r.read_exact(&mut skipped)?;
        }
        let pixel_format = PixelFormat::from_reader(r)?;
        let caps = r.read_u32::<LittleEndian>()?;
        let caps2 = r.read_u32::<LittleEndian>()?;
        // Skip `dwCaps3`, `dwCaps4`, `dwReserved2` (unused)
        {
            let mut skipped = [0; 4 + 4 + 4];
            r.read_exact(&mut skipped)?;
        }
        Ok(Self {
            _flags: flags,
            height,
            width,
            _pitch_or_linear_size: pitch_or_linear_size,
            _depth: depth,
            _mipmap_count: mipmap_count,
            pixel_format,
            _caps: caps,
            _caps2: caps2,
        })
    }
}
impl DX10Header {
    /// Parses a DX10 extension header from `r` and validates every field.
    fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
        let dxgi_format = r.read_u32::<LittleEndian>()?;
        let resource_dimension = r.read_u32::<LittleEndian>()?;
        let misc_flag = r.read_u32::<LittleEndian>()?;
        let array_size = r.read_u32::<LittleEndian>()?;
        let misc_flags_2 = r.read_u32::<LittleEndian>()?;
        let dx10_header = Self {
            dxgi_format,
            resource_dimension,
            misc_flag,
            array_size,
            misc_flags_2,
        };
        dx10_header.validate()?;
        Ok(dx10_header)
    }
    /// Checks each field against the value ranges the format permits,
    /// returning the matching `DecoderError` for the first violation.
    fn validate(&self) -> Result<(), ImageError> {
        // Note: see https://docs.microsoft.com/en-us/windows/win32/direct3ddds/dds-header-dxt10 for info on valid values
        if self.dxgi_format > 132 {
            // Invalid format
            // (132 is the largest defined DXGI_FORMAT enum value)
            return Err(DecoderError::DxgiFormatInvalid(self.dxgi_format).into());
        }
        if self.resource_dimension < 2 || self.resource_dimension > 4 {
            // Invalid dimension
            // Only 1D (2), 2D (3) and 3D (4) resource dimensions are allowed
            return Err(DecoderError::ResourceDimensionInvalid(self.resource_dimension).into());
        }
        if self.misc_flag != 0x0 && self.misc_flag != 0x4 {
            // Invalid flag
            // Only no (0x0) and DDS_RESOURCE_MISC_TEXTURECUBE (0x4) flags are allowed
            return Err(DecoderError::Dx10FlagsInvalid(self.misc_flag).into());
        }
        if self.resource_dimension == 4 && self.array_size != 1 {
            // Invalid array size
            // 3D textures (resource dimension == 4) must have an array size of 1
            return Err(DecoderError::Dx10ArraySizeInvalid(self.array_size).into());
        }
        if self.misc_flags_2 > 0x4 {
            // Invalid alpha flags
            return Err(DecoderError::Dx10FlagsInvalid(self.misc_flags_2).into());
        }
        Ok(())
    }
}
/// The representation of a DDS decoder
///
/// Thin wrapper around the crate-internal DXT decoder: the DDS files
/// supported here always carry DXT1/DXT3/DXT5 compressed payloads.
pub struct DdsDecoder<R: Read> {
    #[allow(deprecated)]
    inner: DxtDecoder<R>,
}
impl<R: Read> DdsDecoder<R> {
    /// Create a new decoder that decodes from the stream `r`
    ///
    /// Reads and validates the "DDS " signature and the header (plus the
    /// DX10 extension header when the FourCC is "DX10"), then sets up the
    /// inner DXT decoder for the detected variant. Non-FourCC pixel formats
    /// and FourCCs other than DXT1/DXT3/DXT5/DX10 are rejected as
    /// unsupported.
    pub fn new(mut r: R) -> ImageResult<Self> {
        let mut magic = [0; 4];
        r.read_exact(&mut magic)?;
        if magic != b"DDS "[..] {
            return Err(DecoderError::DdsSignatureInvalid.into());
        }
        let header = Header::from_reader(&mut r)?;
        // 0x4 is DDPF_FOURCC: the `fourcc` field holds a compression code.
        if header.pixel_format.flags & 0x4 != 0 {
            #[allow(deprecated)]
            let variant = match &header.pixel_format.fourcc {
                b"DXT1" => DxtVariant::DXT1,
                b"DXT3" => DxtVariant::DXT3,
                b"DXT5" => DxtVariant::DXT5,
                b"DX10" => {
                    let dx10_header = DX10Header::from_reader(&mut r)?;
                    // Format equivalents were taken from https://docs.microsoft.com/en-us/windows/win32/direct3d11/texture-block-compression-in-direct3d-11
                    // The enum integer values were taken from https://docs.microsoft.com/en-us/windows/win32/api/dxgiformat/ne-dxgiformat-dxgi_format
                    // DXT1 represents the different BC1 variants, DTX3 represents the different BC2 variants and DTX5 represents the different BC3 variants
                    match dx10_header.dxgi_format {
                        70..=72 => DxtVariant::DXT1, // DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM or DXGI_FORMAT_BC1_UNORM_SRGB
                        73..=75 => DxtVariant::DXT3, // DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM or DXGI_FORMAT_BC2_UNORM_SRGB
                        76..=78 => DxtVariant::DXT5, // DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM or DXGI_FORMAT_BC3_UNORM_SRGB
                        _ => {
                            return Err(ImageError::Unsupported(
                                UnsupportedError::from_format_and_kind(
                                    ImageFormat::Dds.into(),
                                    UnsupportedErrorKind::GenericFeature(format!(
                                        "DDS DXGI Format {}",
                                        dx10_header.dxgi_format
                                    )),
                                ),
                            ))
                        }
                    }
                }
                fourcc => {
                    return Err(ImageError::Unsupported(
                        UnsupportedError::from_format_and_kind(
                            ImageFormat::Dds.into(),
                            UnsupportedErrorKind::GenericFeature(format!("DDS FourCC {fourcc:?}")),
                        ),
                    ))
                }
            };
            #[allow(deprecated)]
            let bytes_per_pixel = variant.color_type().bytes_per_pixel();
            // Reject sizes whose decoded buffer length would overflow usize.
            if crate::utils::check_dimension_overflow(header.width, header.height, bytes_per_pixel)
            {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Dds.into(),
                        UnsupportedErrorKind::GenericFeature(format!(
                            "Image dimensions ({}x{}) are too large",
                            header.width, header.height
                        )),
                    ),
                ));
            }
            #[allow(deprecated)]
            let inner = DxtDecoder::new(r, header.width, header.height, variant)?;
            Ok(Self { inner })
        } else {
            // For now, supports only DXT variants
            Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Dds.into(),
                    UnsupportedErrorKind::Format(ImageFormatHint::Name("DDS".to_string())),
                ),
            ))
        }
    }
}
impl<R: Read> ImageDecoder for DdsDecoder<R> {
    /// (width, height) in pixels, delegated to the inner DXT decoder.
    fn dimensions(&self) -> (u32, u32) {
        self.inner.dimensions()
    }
    /// Rgb8 for DXT1 payloads, Rgba8 for DXT3/DXT5 (see `DxtVariant::color_type`).
    fn color_type(&self) -> ColorType {
        self.inner.color_type()
    }
    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
        self.inner.read_image(buf)
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn dimension_overflow() {
        // A DXT1 header set to 0xFFFF_FFFC width and height (the highest u32%4 == 0)
        // Decoding must fail: width * height * bytes_per_pixel overflows,
        // which `DdsDecoder::new` rejects via `check_dimension_overflow`.
        let header = [
            0x44, 0x44, 0x53, 0x20, 0x7C, 0x0, 0x0, 0x0, 0x7, 0x10, 0x8, 0x0, 0xFC, 0xFF, 0xFF,
            0xFF, 0xFC, 0xFF, 0xFF, 0xFF, 0x0, 0xC0, 0x12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0,
            0x0, 0x49, 0x4D, 0x41, 0x47, 0x45, 0x4D, 0x41, 0x47, 0x49, 0x43, 0x4B, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0,
            0x4, 0x0, 0x0, 0x0, 0x44, 0x58, 0x54, 0x31, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x10, 0x0, 0x0, 0x0,
            0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
        ];
        assert!(DdsDecoder::new(&header[..]).is_err());
    }
}

349
vendor/image/src/codecs/dxt.rs vendored Normal file
View File

@@ -0,0 +1,349 @@
//! Decoding of DXT (S3TC) compression
//!
//! DXT is an image format that supports lossy compression
//!
//! # Related Links
//! * <https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_compression_s3tc.txt> - Description of the DXT compression OpenGL extensions.
//!
//! Note: this module only implements bare DXT encoding/decoding, it does not parse formats that can contain DXT files like .dds
use std::io::{self, Read};
use crate::color::ColorType;
use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind};
use crate::io::ReadExt;
use crate::ImageDecoder;
/// What version of DXT compression are we using?
/// Note that DXT2 and DXT4 are left away as they're
/// just DXT3 and DXT5 with premultiplied alpha
///
/// Block sizes per variant are exposed via `decoded_bytes_per_block` /
/// `encoded_bytes_per_block` below.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum DxtVariant {
    /// The DXT1 format. 48 bytes of RGB data in a 4x4 pixel square is
    /// compressed into an 8 byte block of DXT1 data
    DXT1,
    /// The DXT3 format. 64 bytes of RGBA data in a 4x4 pixel square is
    /// compressed into a 16 byte block of DXT3 data
    DXT3,
    /// The DXT5 format. 64 bytes of RGBA data in a 4x4 pixel square is
    /// compressed into a 16 byte block of DXT5 data
    DXT5,
}
impl DxtVariant {
    /// Amount of raw image data encoded in a single DXTn block:
    /// 16 pixels at 3 bytes (RGB) for DXT1, at 4 bytes (RGBA) otherwise.
    fn decoded_bytes_per_block(self) -> usize {
        if self == DxtVariant::DXT1 {
            48
        } else {
            64
        }
    }
    /// Amount of bytes one encoded DXTn block occupies in the stream.
    fn encoded_bytes_per_block(self) -> usize {
        if self == DxtVariant::DXT1 {
            8
        } else {
            16
        }
    }
    /// Color layout of the decoded output for this DXT variant.
    pub(crate) fn color_type(self) -> ColorType {
        if self == DxtVariant::DXT1 {
            ColorType::Rgb8
        } else {
            ColorType::Rgba8
        }
    }
}
/// DXT decoder
///
/// Streams one "scanline" at a time, where a scanline is a full row of
/// 4x4 blocks (i.e. four rows of pixels).
pub(crate) struct DxtDecoder<R: Read> {
    /// Source of the raw encoded DXT data
    inner: R,
    /// Image width in 4-pixel blocks (width / 4)
    width_blocks: u32,
    /// Image height in 4-pixel blocks (height / 4)
    height_blocks: u32,
    /// Which DXT flavor the stream contains
    variant: DxtVariant,
    /// Index of the next block-row to be decoded
    row: u32,
}
impl<R: Read> DxtDecoder<R> {
    /// Create a new DXT decoder that decodes from the stream ```r```.
    /// As DXT is often stored as raw buffers with the width/height
    /// somewhere else the width and height of the image need
    /// to be passed in ```width``` and ```height```, as well as the
    /// DXT variant in ```variant```.
    /// width and height are required to be multiples of 4 (the format
    /// encodes fixed 4x4 pixel blocks), otherwise an error will be returned.
    pub(crate) fn new(
        r: R,
        width: u32,
        height: u32,
        variant: DxtVariant,
    ) -> Result<DxtDecoder<R>, ImageError> {
        if width % 4 != 0 || height % 4 != 0 {
            // TODO: this is actually a bit of a weird case. We could return `DecodingError` but
            // it's not really the format that is wrong However, the encoder should surely return
            // `EncodingError` so it would be the logical choice for symmetry.
            return Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::DimensionMismatch,
            )));
        }
        let width_blocks = width / 4;
        let height_blocks = height / 4;
        Ok(DxtDecoder {
            inner: r,
            width_blocks,
            height_blocks,
            variant,
            row: 0,
        })
    }
    /// Bytes of decoded output produced per scanline (one row of blocks).
    fn scanline_bytes(&self) -> u64 {
        self.variant.decoded_bytes_per_block() as u64 * u64::from(self.width_blocks)
    }
    /// Reads and decodes one row of blocks into `buf`.
    ///
    /// `buf` must be exactly `scanline_bytes()` long; returns the number of
    /// bytes written (always the full buffer).
    fn read_scanline(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        assert_eq!(
            u64::try_from(buf.len()),
            Ok(
                #[allow(deprecated)]
                self.scanline_bytes()
            )
        );
        let len = self.variant.encoded_bytes_per_block() * self.width_blocks as usize;
        let mut src = Vec::new();
        self.inner.read_exact_vec(&mut src, len)?;
        match self.variant {
            DxtVariant::DXT1 => decode_dxt1_row(&src, buf),
            DxtVariant::DXT3 => decode_dxt3_row(&src, buf),
            DxtVariant::DXT5 => decode_dxt5_row(&src, buf),
        }
        self.row += 1;
        Ok(buf.len())
    }
}
// Note that, due to the way that DXT compression works, a scanline is considered to consist out of
// 4 lines of pixels.
impl<R: Read> ImageDecoder for DxtDecoder<R> {
    /// Dimensions in pixels, reconstructed from the block grid (blocks * 4).
    fn dimensions(&self) -> (u32, u32) {
        (self.width_blocks * 4, self.height_blocks * 4)
    }
    fn color_type(&self) -> ColorType {
        self.variant.color_type()
    }
    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        // Decode one block-row (four pixel rows) at a time straight into `buf`.
        #[allow(deprecated)]
        for chunk in buf.chunks_mut(self.scanline_bytes().max(1) as usize) {
            self.read_scanline(chunk)?;
        }
        Ok(())
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
/**
 * Actual encoding/decoding logic below.
 */
type Rgb = [u8; 3];
/// Expands a packed 16-bit 5-6-5 color (R in the top 5 bits, G in the middle
/// 6, B in the low 5) into 8-bit-per-channel RGB.
///
/// Channels are rescaled so the representable extremes are preserved:
/// 0x00 -> 0x00 and the channel maximum (0x1F or 0x3F) -> 0xFF.
fn enc565_decode(value: u16) -> Rgb {
    let r5 = value >> 11;
    let g6 = (value >> 5) & 0x3F;
    let b5 = value & 0x1F;
    [
        (r5 * 0xFF / 0x1F) as u8,
        (g6 * 0xFF / 0x3F) as u8,
        (b5 * 0xFF / 0x1F) as u8,
    ]
}
/*
 * Functions for decoding DXT compression
 */
/// Builds the 8-entry DXT5 alpha lookup table from the two stored endpoints.
///
/// * `alpha0 > alpha1`: `[a0, a1, six values interpolated from a0 to a1]`
/// * `alpha0 <= alpha1`: `[a0, a1, four interpolated values, 0x00, 0xFF]`
fn alpha_table_dxt5(alpha0: u8, alpha1: u8) -> [u8; 8] {
    let (a0, a1) = (u16::from(alpha0), u16::from(alpha1));
    let mut table = [alpha0, alpha1, 0, 0, 0, 0, 0, 0xFF];
    if alpha0 > alpha1 {
        // Eight-alpha mode: slots 2..8 blend between the endpoints in sevenths.
        for (i, slot) in table.iter_mut().enumerate().skip(2) {
            let i = i as u16;
            *slot = (((8 - i) * a0 + (i - 1) * a1) / 7) as u8;
        }
    } else {
        // Six-alpha mode: slots 2..6 blend in fifths; slots 6 and 7 stay
        // at the preset 0x00 / 0xFF extremes.
        for (i, slot) in table.iter_mut().enumerate().skip(2).take(4) {
            let i = i as u16;
            *slot = (((6 - i) * a0 + (i - 1) * a1) / 5) as u8;
        }
    }
    table
}
/// Decodes an 8-byte DXT color block into the RGB channels of a 16xRGB or
/// 16xRGBA block. `source` must be 8 bytes, `dest` 48 (RGB) or 64 (RGBA).
fn decode_dxt_colors(source: &[u8], dest: &mut [u8], is_dxt1: bool) {
    // Sanity checks; these also let the compiler elide the bound checks below.
    assert!(source.len() == 8 && (dest.len() == 48 || dest.len() == 64));
    // Bytes per output pixel: 3 for RGB targets, 4 for RGBA targets.
    let pitch = dest.len() / 16;
    // The two 5-6-5 endpoint colors and the 32 bits of 2-bit palette
    // indices, all stored little endian.
    let color0 = u16::from_le_bytes([source[0], source[1]]);
    let color1 = u16::from_le_bytes([source[2], source[3]]);
    let color_table = u32::from_le_bytes([source[4], source[5], source[6], source[7]]);
    // Decode the endpoints, then derive the remaining two palette entries.
    let mut colors = [[0u8; 3]; 4];
    colors[0] = enc565_decode(color0);
    colors[1] = enc565_decode(color1);
    if color0 > color1 || !is_dxt1 {
        // Four-color mode: entries 2 and 3 sit at thirds between the endpoints.
        for ch in 0..3 {
            let (a, b) = (u16::from(colors[0][ch]), u16::from(colors[1][ch]));
            colors[2][ch] = ((a * 2 + b + 1) / 3) as u8;
            colors[3][ch] = ((a + b * 2 + 1) / 3) as u8;
        }
    } else {
        // DXT1 three-color mode: entry 2 is the midpoint (rounded up);
        // entry 3 stays black (used with 1-bit transparency).
        for ch in 0..3 {
            let sum = u16::from(colors[0][ch]) + u16::from(colors[1][ch]);
            colors[2][ch] = sum.div_ceil(2) as u8;
        }
    }
    // Each pixel's 2-bit index in `color_table` picks one of the 4 palette
    // entries; write it into the pixel's RGB channels.
    for (px, out) in dest.chunks_exact_mut(pitch).enumerate() {
        let idx = (color_table >> (px * 2)) as usize & 3;
        out[..3].copy_from_slice(&colors[idx]);
    }
}
/// Decodes a 16-byte block of dxt5 data to a 16xRGBA block
fn decode_dxt5_block(source: &[u8], dest: &mut [u8]) {
    assert!(source.len() == 16 && dest.len() == 64);
    // extract alpha index table (stored as little endian 64-bit value)
    // (only the six index bytes 2..8 are folded in; bytes 0 and 1 hold
    // the two alpha endpoints)
    let alpha_table = source[2..8]
        .iter()
        .rev()
        .fold(0, |t, &b| (t << 8) | u64::from(b));
    // alpha level decode
    let alphas = alpha_table_dxt5(source[0], source[1]);
    // serialize alpha
    // (each pixel's 3-bit index selects one of the 8 alpha levels)
    for i in 0..16 {
        dest[i * 4 + 3] = alphas[(alpha_table >> (i * 3)) as usize & 7];
    }
    // handle colors
    decode_dxt_colors(&source[8..16], dest, false);
}
/// Decodes a 16-byte block of dxt3 data to a 16xRGBA block
fn decode_dxt3_block(source: &[u8], dest: &mut [u8]) {
    assert!(source.len() == 16 && dest.len() == 64);
    // extract alpha index table (stored as little endian 64-bit value)
    let alpha_table = source[0..8]
        .iter()
        .rev()
        .fold(0, |t, &b| (t << 8) | u64::from(b));
    // serialize alpha (stored as 4-bit values, expanded to 8 bits by * 0x11
    // so 0x0 -> 0x00 and 0xF -> 0xFF)
    for i in 0..16 {
        dest[i * 4 + 3] = ((alpha_table >> (i * 4)) as u8 & 0xF) * 0x11;
    }
    // handle colors
    decode_dxt_colors(&source[8..16], dest, false);
}
/// Decodes an 8-byte block of dxt1 data to a 16xRGB block
/// (DXT1 carries no separate alpha data, only the color block)
fn decode_dxt1_block(source: &[u8], dest: &mut [u8]) {
    assert!(source.len() == 8 && dest.len() == 48);
    decode_dxt_colors(source, dest, true);
}
/// Decode a row of DXT1 data to four rows of RGB data.
/// `source.len()` should be a multiple of 8, otherwise this panics.
fn decode_dxt1_row(source: &[u8], dest: &mut [u8]) {
    assert!(source.len() % 8 == 0);
    let block_count = source.len() / 8;
    assert!(dest.len() >= block_count * 48);
    // contains the 16 decoded pixels per block
    let mut decoded_block = [0u8; 48];
    for (x, encoded_block) in source.chunks(8).enumerate() {
        decode_dxt1_block(encoded_block, &mut decoded_block);
        // copy the values from the decoded block to linewise RGB layout
        // (each output line is block_count * 4 pixels * 3 bytes = block_count * 12 bytes)
        for line in 0..4 {
            let offset = (block_count * line + x) * 12;
            dest[offset..offset + 12].copy_from_slice(&decoded_block[line * 12..(line + 1) * 12]);
        }
    }
}
/// Decode a row of DXT3 data to four rows of RGBA data.
/// `source.len()` should be a multiple of 16, otherwise this panics.
fn decode_dxt3_row(source: &[u8], dest: &mut [u8]) {
    assert!(source.len() % 16 == 0);
    let block_count = source.len() / 16;
    assert!(dest.len() >= block_count * 64);
    // contains the 16 decoded pixels per block
    let mut decoded_block = [0u8; 64];
    for (x, encoded_block) in source.chunks(16).enumerate() {
        decode_dxt3_block(encoded_block, &mut decoded_block);
        // copy the values from the decoded block to linewise RGBA layout
        // (each output line is block_count * 4 pixels * 4 bytes = block_count * 16 bytes)
        for line in 0..4 {
            let offset = (block_count * line + x) * 16;
            dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]);
        }
    }
}
/// Decode a row of DXT5 data to four rows of RGBA data.
/// `source.len()` should be a multiple of 16, otherwise this panics.
fn decode_dxt5_row(source: &[u8], dest: &mut [u8]) {
    assert!(source.len() % 16 == 0);
    let block_count = source.len() / 16;
    assert!(dest.len() >= block_count * 64);
    // contains the 16 decoded pixels per block
    let mut decoded_block = [0u8; 64];
    for (x, encoded_block) in source.chunks(16).enumerate() {
        decode_dxt5_block(encoded_block, &mut decoded_block);
        // copy the values from the decoded block to linewise RGBA layout
        // (each output line is block_count * 4 pixels * 4 bytes = block_count * 16 bytes)
        for line in 0..4 {
            let offset = (block_count * line + x) * 16;
            dest[offset..offset + 16].copy_from_slice(&decoded_block[line * 16..(line + 1) * 16]);
        }
    }
}

407
vendor/image/src/codecs/farbfeld.rs vendored Normal file
View File

@@ -0,0 +1,407 @@
//! Decoding of farbfeld images
//!
//! farbfeld is a lossless image format which is easy to parse, pipe and compress.
//!
//! It has the following format:
//!
//! | Bytes | Description |
//! |--------|---------------------------------------------------------|
//! | 8 | "farbfeld" magic value |
//! | 4 | 32-Bit BE unsigned integer (width) |
//! | 4 | 32-Bit BE unsigned integer (height) |
//! | [2222] | 4⋅16-Bit BE unsigned integers [RGBA] / pixel, row-major |
//!
//! The RGB-data should be sRGB for best interoperability and not alpha-premultiplied.
//!
//! # Related Links
//! * <https://tools.suckless.org/farbfeld/> - the farbfeld specification
use std::io::{self, Read, Seek, SeekFrom, Write};
use crate::color::ExtendedColorType;
use crate::error::{
DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
};
use crate::io::free_functions::load_rect;
use crate::{ColorType, ImageDecoder, ImageDecoderRect, ImageEncoder, ImageFormat};
/// farbfeld Reader
///
/// Exposes the pixel payload of a farbfeld stream as native-endian u16
/// channel data through the `Read`/`Seek` impls below.
pub struct FarbfeldReader<R: Read> {
    /// Image width in pixels, parsed from the header
    width: u32,
    /// Image height in pixels, parsed from the header
    height: u32,
    inner: R,
    /// Relative to the start of the pixel data
    current_offset: u64,
    /// Second byte of a converted channel not yet handed to the caller
    cached_byte: Option<u8>,
}
impl<R: Read> FarbfeldReader<R> {
    /// Parses the farbfeld header ("farbfeld" magic plus big-endian width
    /// and height) and leaves the reader positioned at the pixel data.
    ///
    /// Errors with a decoding error on a bad magic or truncated header, and
    /// with an unsupported error if width * height * 8 bytes would overflow.
    fn new(mut buffered_read: R) -> ImageResult<FarbfeldReader<R>> {
        // Reads one big-endian u32 dimension field.
        fn read_dimm<R: Read>(from: &mut R) -> ImageResult<u32> {
            let mut buf = [0u8; 4];
            from.read_exact(&mut buf).map_err(|err| {
                ImageError::Decoding(DecodingError::new(ImageFormat::Farbfeld.into(), err))
            })?;
            Ok(u32::from_be_bytes(buf))
        }
        let mut magic = [0u8; 8];
        buffered_read.read_exact(&mut magic).map_err(|err| {
            ImageError::Decoding(DecodingError::new(ImageFormat::Farbfeld.into(), err))
        })?;
        if &magic != b"farbfeld" {
            return Err(ImageError::Decoding(DecodingError::new(
                ImageFormat::Farbfeld.into(),
                format!("Invalid magic: {magic:02x?}"),
            )));
        }
        let reader = FarbfeldReader {
            width: read_dimm(&mut buffered_read)?,
            height: read_dimm(&mut buffered_read)?,
            inner: buffered_read,
            current_offset: 0,
            cached_byte: None,
        };
        if crate::utils::check_dimension_overflow(
            reader.width,
            reader.height,
            // ExtendedColorType is always rgba16
            8,
        ) {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Farbfeld.into(),
                    UnsupportedErrorKind::GenericFeature(format!(
                        "Image dimensions ({}x{}) are too large",
                        reader.width, reader.height
                    )),
                ),
            ));
        }
        Ok(reader)
    }
}
impl<R: Read> Read for FarbfeldReader<R> {
    /// Streams the pixel data as native-endian `u16` channel values.
    ///
    /// Channels are stored big-endian on disk and converted on the fly.
    /// Because a channel is two bytes wide, an odd-length read caches the
    /// second byte of the last converted channel for the next call.
    fn read(&mut self, mut buf: &mut [u8]) -> io::Result<usize> {
        // `Read` contract: an empty buffer must succeed with 0 bytes.
        // Without this guard a pending cached byte would be written via
        // `buf[0]` and panic on the empty slice.
        if buf.is_empty() {
            return Ok(0);
        }
        let mut bytes_written = 0;
        if let Some(byte) = self.cached_byte.take() {
            buf[0] = byte;
            buf = &mut buf[1..];
            bytes_written = 1;
            self.current_offset += 1;
        }
        if buf.len() == 1 {
            // Can't fit a whole channel: convert one and cache its second byte.
            buf[0] = cache_byte(&mut self.inner, &mut self.cached_byte)?;
            bytes_written += 1;
            self.current_offset += 1;
        } else {
            // Convert as many whole channels as fit; a trailing odd byte
            // (if any) is left unfilled, which `Read` permits.
            for channel_out in buf.chunks_exact_mut(2) {
                consume_channel(&mut self.inner, channel_out)?;
                bytes_written += 2;
                self.current_offset += 2;
            }
        }
        Ok(bytes_written)
    }
}
impl<R: Read + Seek> Seek for FarbfeldReader<R> {
    /// Seeks within the pixel data; positions are bytes of the native-endian
    /// output stream, relative to the start of the pixel data.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        // Translates `pos` into a signed delta from `original_offset`;
        // `None` means the target lies before the start of the pixel data
        // or cannot be represented.
        fn parse_offset(original_offset: u64, end_offset: u64, pos: SeekFrom) -> Option<i64> {
            match pos {
                SeekFrom::Start(off) => i64::try_from(off)
                    .ok()?
                    .checked_sub(i64::try_from(original_offset).ok()?),
                SeekFrom::End(off) => {
                    // BUGFIX: the bound must be negative -- seeking from the
                    // end is only invalid when it moves more than `end_offset`
                    // bytes backwards. The previous `off < end_offset` test
                    // rejected every backward and zero seek from the end.
                    if off < -i64::try_from(end_offset).unwrap_or(i64::MAX) {
                        None
                    } else {
                        Some(i64::try_from(end_offset.checked_sub(original_offset)?).ok()? + off)
                    }
                }
                SeekFrom::Current(off) => {
                    // BUGFIX: same sign error -- only deltas that land before
                    // the start of the data (off < -original_offset) are
                    // invalid. The previous `off < original_offset` test
                    // rejected valid forward seeks and `stream_position()`
                    // (Current(0)) whenever the current offset was positive.
                    if off < -i64::try_from(original_offset).unwrap_or(i64::MAX) {
                        None
                    } else {
                        Some(off)
                    }
                }
            }
        }
        let original_offset = self.current_offset;
        // Total length of the decoded pixel stream: width * height pixels,
        // 4 channels of 2 bytes each. BUGFIX: was `* 2`, which covered only
        // one channel per pixel and disagreed with `current_offset`'s
        // byte-based accounting in `read`.
        let end_offset = u64::from(self.width) * u64::from(self.height) * 8;
        let offset_from_current =
            parse_offset(original_offset, end_offset, pos).ok_or_else(|| {
                io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "invalid seek to a negative or overflowing position",
                )
            })?;
        // TODO: convert to seek_relative() once that gets stabilised
        self.inner.seek(SeekFrom::Current(offset_from_current))?;
        self.current_offset = if offset_from_current < 0 {
            original_offset.checked_sub(offset_from_current.wrapping_neg() as u64)
        } else {
            original_offset.checked_add(offset_from_current as u64)
        }
        .expect("This should've been checked above");
        // Landing on an odd offset splits a channel in half: pre-convert the
        // channel at the new position, cache its second byte, and restore
        // the inner reader's position.
        if self.current_offset < end_offset && self.current_offset % 2 == 1 {
            let curr = self.inner.seek(SeekFrom::Current(-1))?;
            cache_byte(&mut self.inner, &mut self.cached_byte)?;
            self.inner.seek(SeekFrom::Start(curr))?;
        } else {
            self.cached_byte = None;
        }
        // BUGFIX: `Seek::seek` must report the NEW position; this previously
        // returned `original_offset` (the pre-seek position).
        Ok(self.current_offset)
    }
}
/// Reads one big-endian `u16` channel from `from` and writes it to `to` in
/// native byte order.
fn consume_channel<R: Read>(from: &mut R, mut to: &mut [u8]) -> io::Result<()> {
    let mut raw = [0u8; 2];
    from.read_exact(&mut raw)?;
    let native = u16::from_be_bytes(raw).to_ne_bytes();
    to.write_all(&native)?;
    Ok(())
}
/// Converts one channel but returns only its first native-order byte,
/// stashing the second one in `cached_byte` for a later read.
fn cache_byte<R: Read>(from: &mut R, cached_byte: &mut Option<u8>) -> io::Result<u8> {
    let mut converted = [0u8; 2];
    consume_channel(from, &mut converted)?;
    *cached_byte = Some(converted[1]);
    Ok(converted[0])
}
/// farbfeld decoder
pub struct FarbfeldDecoder<R: Read> {
    /// Header-validated reader positioned at the start of the pixel data
    reader: FarbfeldReader<R>,
}
impl<R: Read> FarbfeldDecoder<R> {
    /// Creates a new decoder that decodes from the stream ```r```
    ///
    /// Parses and validates the farbfeld header up front.
    pub fn new(buffered_read: R) -> ImageResult<FarbfeldDecoder<R>> {
        Ok(FarbfeldDecoder {
            reader: FarbfeldReader::new(buffered_read)?,
        })
    }
}
impl<R: Read> ImageDecoder for FarbfeldDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        (self.reader.width, self.reader.height)
    }
    /// farbfeld is always 16-bit RGBA.
    fn color_type(&self) -> ColorType {
        ColorType::Rgba16
    }
    /// Fills `buf` with the whole image as native-endian u16 channel data.
    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        self.reader.read_exact(buf)?;
        Ok(())
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
impl<R: Read + Seek> ImageDecoderRect for FarbfeldDecoder<R> {
    /// Decodes the `width`x`height` sub-rectangle at (`x`, `y`) into `buf`,
    /// restoring the reader's stream position afterwards.
    fn read_rect(
        &mut self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
        buf: &mut [u8],
        row_pitch: usize,
    ) -> ImageResult<()> {
        // A "scanline" (defined as "shortest non-caching read" in the doc) is just one channel in this case
        let start = self.reader.stream_position()?;
        load_rect(
            x,
            y,
            width,
            height,
            buf,
            row_pitch,
            self,
            2,
            // NOTE(review): `scanline` appears to be a channel index here,
            // hence the *2 byte scaling -- confirm against `load_rect`'s
            // contract.
            |s, scanline| s.reader.seek(SeekFrom::Start(scanline * 2)).map(|_| ()),
            |s, buf| s.reader.read_exact(buf),
        )?;
        self.reader.seek(SeekFrom::Start(start))?;
        Ok(())
    }
}
/// farbfeld encoder
pub struct FarbfeldEncoder<W: Write> {
    /// Destination for the encoded stream; should be buffered by the caller
    w: W,
}
impl<W: Write> FarbfeldEncoder<W> {
    /// Create a new encoder that writes its output to ```w```. The writer should be buffered.
    pub fn new(buffered_writer: W) -> FarbfeldEncoder<W> {
        FarbfeldEncoder { w: buffered_writer }
    }
    /// Encodes the image `data` (native endian) that has dimensions `width` and `height`.
    ///
    /// # Panics
    ///
    /// Panics if `width * height * 8 != data.len()`.
    #[track_caller]
    pub fn encode(self, data: &[u8], width: u32, height: u32) -> ImageResult<()> {
        // 8 bytes per pixel: 4 channels of 2 bytes each (RGBA16).
        let expected_buffer_len = (u64::from(width) * u64::from(height)).saturating_mul(8);
        assert_eq!(
            expected_buffer_len,
            data.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            data.len(),
        );
        self.encode_impl(data, width, height)?;
        Ok(())
    }
    /// Writes the header (magic + big-endian dimensions) followed by every
    /// channel converted from native to big-endian byte order.
    fn encode_impl(mut self, data: &[u8], width: u32, height: u32) -> io::Result<()> {
        self.w.write_all(b"farbfeld")?;
        self.w.write_all(&width.to_be_bytes())?;
        self.w.write_all(&height.to_be_bytes())?;
        for channel in data.chunks_exact(2) {
            self.w
                .write_all(&u16::from_ne_bytes(channel.try_into().unwrap()).to_be_bytes())?;
        }
        Ok(())
    }
}
impl<W: Write> ImageEncoder for FarbfeldEncoder<W> {
    /// Writes `buf` as a farbfeld image.
    ///
    /// Only `ExtendedColorType::Rgba16` input is accepted; any other color
    /// type yields an unsupported-color error.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        if color_type != ExtendedColorType::Rgba16 {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Farbfeld.into(),
                    UnsupportedErrorKind::Color(color_type),
                ),
            ));
        }
        self.encode(buf, width, height)
    }
}
#[cfg(test)]
mod tests {
    // Tests exercise `read_rect` against a fixed 2x3 RGBA16 farbfeld image.
    use crate::codecs::farbfeld::FarbfeldDecoder;
    use crate::ImageDecoderRect;
    use byteorder_lite::{ByteOrder, NativeEndian};
    use std::io::{Cursor, Seek, SeekFrom};
    // 2x3 image: "farbfeld" magic, big-endian dims, then 6 RGBA16 pixels.
    static RECTANGLE_IN: &[u8] = b"farbfeld\
        \x00\x00\x00\x02\x00\x00\x00\x03\
        \xFF\x01\xFE\x02\xFD\x03\xFC\x04\xFB\x05\xFA\x06\xF9\x07\xF8\x08\
        \xF7\x09\xF6\x0A\xF5\x0B\xF4\x0C\xF3\x0D\xF2\x0E\xF1\x0F\xF0\x10\
        \xEF\x11\xEE\x12\xED\x13\xEC\x14\xEB\x15\xEA\x16\xE9\x17\xE8\x18";
    #[test]
    fn read_rect_1x2() {
        static RECTANGLE_OUT: &[u16] = &[
            0xF30D, 0xF20E, 0xF10F, 0xF010, 0xEB15, 0xEA16, 0xE917, 0xE818,
        ];
        read_rect(1, 1, 1, 2, RECTANGLE_OUT);
    }
    #[test]
    fn read_rect_2x2() {
        static RECTANGLE_OUT: &[u16] = &[
            0xFF01, 0xFE02, 0xFD03, 0xFC04, 0xFB05, 0xFA06, 0xF907, 0xF808, 0xF709, 0xF60A, 0xF50B,
            0xF40C, 0xF30D, 0xF20E, 0xF10F, 0xF010,
        ];
        read_rect(0, 0, 2, 2, RECTANGLE_OUT);
    }
    #[test]
    fn read_rect_2x1() {
        static RECTANGLE_OUT: &[u16] = &[
            0xEF11, 0xEE12, 0xED13, 0xEC14, 0xEB15, 0xEA16, 0xE917, 0xE818,
        ];
        read_rect(0, 2, 2, 1, RECTANGLE_OUT);
    }
    #[test]
    fn read_rect_2x3() {
        static RECTANGLE_OUT: &[u16] = &[
            0xFF01, 0xFE02, 0xFD03, 0xFC04, 0xFB05, 0xFA06, 0xF907, 0xF808, 0xF709, 0xF60A, 0xF50B,
            0xF40C, 0xF30D, 0xF20E, 0xF10F, 0xF010, 0xEF11, 0xEE12, 0xED13, 0xEC14, 0xEB15, 0xEA16,
            0xE917, 0xE818,
        ];
        read_rect(0, 0, 2, 3, RECTANGLE_OUT);
    }
    #[test]
    fn read_rect_in_stream() {
        // Decoding must work when the image is not at stream position 0.
        static RECTANGLE_OUT: &[u16] = &[0xEF11, 0xEE12, 0xED13, 0xEC14];
        let mut input = vec![];
        input.extend_from_slice(b"This is a 31-byte-long prologue");
        input.extend_from_slice(RECTANGLE_IN);
        let mut input_cur = Cursor::new(input);
        input_cur.seek(SeekFrom::Start(31)).unwrap();
        let mut out_buf = [0u8; 64];
        FarbfeldDecoder::new(input_cur)
            .unwrap()
            .read_rect(0, 2, 1, 1, &mut out_buf, 8)
            .unwrap();
        let exp = degenerate_pixels(RECTANGLE_OUT);
        assert_eq!(&out_buf[..exp.len()], &exp[..]);
    }
    #[test]
    fn dimension_overflow() {
        // 0xFFFFFFFF x 0xFFFFFFFF must be rejected by the header check.
        let header = b"farbfeld\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF";
        assert!(FarbfeldDecoder::new(Cursor::new(header)).is_err());
    }
    // Shared driver: decode a rect and compare against the expected pixels.
    fn read_rect(x: u32, y: u32, width: u32, height: u32, exp_wide: &[u16]) {
        let mut out_buf = [0u8; 64];
        FarbfeldDecoder::new(Cursor::new(RECTANGLE_IN))
            .unwrap()
            .read_rect(x, y, width, height, &mut out_buf, width as usize * 8)
            .unwrap();
        let exp = degenerate_pixels(exp_wide);
        assert_eq!(&out_buf[..exp.len()], &exp[..]);
    }
    // Converts the expected u16 values into the decoder's native-endian bytes.
    fn degenerate_pixels(exp_wide: &[u16]) -> Vec<u8> {
        let mut exp = vec![0u8; exp_wide.len() * 2];
        NativeEndian::write_u16_into(exp_wide, &mut exp);
        exp
    }
}

676
vendor/image/src/codecs/gif.rs vendored Normal file
View File

@@ -0,0 +1,676 @@
//! Decoding of GIF Images
//!
//! GIF (Graphics Interchange Format) is an image format that supports lossless compression.
//!
//! # Related Links
//! * <http://www.w3.org/Graphics/GIF/spec-gif89a.txt> - The GIF Specification
//!
//! # Examples
//! ```rust,no_run
//! use image::codecs::gif::{GifDecoder, GifEncoder};
//! use image::{ImageDecoder, AnimationDecoder};
//! use std::fs::File;
//! use std::io::BufReader;
//! # fn main() -> std::io::Result<()> {
//! // Decode a gif into frames
//! let file_in = BufReader::new(File::open("foo.gif")?);
//! let mut decoder = GifDecoder::new(file_in).unwrap();
//! let frames = decoder.into_frames();
//! let frames = frames.collect_frames().expect("error decoding gif");
//!
//! // Encode frames into a gif and save to a file
//! let mut file_out = File::create("out.gif")?;
//! let mut encoder = GifEncoder::new(file_out);
//! encoder.encode_frames(frames.into_iter());
//! # Ok(())
//! # }
//! ```
#![allow(clippy::while_let_loop)]
use std::io::{self, BufRead, Cursor, Read, Seek, Write};
use std::marker::PhantomData;
use std::mem;
use gif::ColorOutput;
use gif::{DisposalMethod, Frame};
use crate::animation::{self, Ratio};
use crate::color::{ColorType, Rgba};
use crate::error::{
DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind,
ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
};
use crate::traits::Pixel;
use crate::{
AnimationDecoder, ExtendedColorType, ImageBuffer, ImageDecoder, ImageEncoder, ImageFormat,
Limits,
};
/// GIF decoder
pub struct GifDecoder<R: Read> {
    /// Underlying `gif` crate decoder, configured for RGBA output in `new`
    reader: gif::Decoder<R>,
    /// Resource limits consulted while decoding frame buffers
    limits: Limits,
}
impl<R: Read> GifDecoder<R> {
    /// Creates a new decoder that decodes the input stream `r`
    ///
    /// The underlying reader is configured to emit RGBA frame data; no
    /// resource limits are applied until `set_limits` is called.
    pub fn new(r: R) -> ImageResult<GifDecoder<R>> {
        let mut decoder = gif::DecodeOptions::new();
        decoder.set_color_output(ColorOutput::RGBA);
        Ok(GifDecoder {
            reader: decoder.read_info(r).map_err(ImageError::from_decoding)?,
            limits: Limits::no_limits(),
        })
    }
}
/// Wrapper struct around a `Cursor<Vec<u8>>`
///
/// Deprecated: kept only for backwards compatibility of the public API.
/// The `PhantomData<R>` preserves the original generic signature without
/// actually holding a reader.
#[allow(dead_code)]
#[deprecated]
pub struct GifReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
#[allow(deprecated)]
impl<R> Read for GifReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    /// Fast path: if nothing has been read yet and `buf` is empty, hand the
    /// whole internal buffer over without copying.
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        if self.0.position() == 0 && buf.is_empty() {
            mem::swap(buf, self.0.get_mut());
            Ok(buf.len())
        } else {
            self.0.read_to_end(buf)
        }
    }
}
impl<R: BufRead + Seek> ImageDecoder for GifDecoder<R> {
    /// Width and height of the GIF's logical screen, in pixels.
    fn dimensions(&self) -> (u32, u32) {
        (
            u32::from(self.reader.width()),
            u32::from(self.reader.height()),
        )
    }
    /// Always Rgba8: the reader is configured for RGBA output in `new`.
    fn color_type(&self) -> ColorType {
        ColorType::Rgba8
    }
    /// Installs resource limits, first verifying that they are supported
    /// and that the logical screen dimensions already satisfy them.
    fn set_limits(&mut self, limits: Limits) -> ImageResult<()> {
        limits.check_support(&crate::LimitSupport::default())?;
        let (width, height) = self.dimensions();
        limits.check_dimensions(width, height)?;
        self.limits = limits;
        Ok(())
    }
fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
let frame = match self
.reader
.next_frame_info()
.map_err(ImageError::from_decoding)?
{
Some(frame) => FrameInfo::new_from_frame(frame),
None => {
return Err(ImageError::Parameter(ParameterError::from_kind(
ParameterErrorKind::NoMoreData,
)))
}
};
let (width, height) = self.dimensions();
if frame.left == 0
&& frame.width == width
&& (u64::from(frame.top) + u64::from(frame.height) <= u64::from(height))
{
// If the frame matches the logical screen, or, as a more general case,
// fits into it and touches its left and right borders, then
// we can directly write it into the buffer without causing line wraparound.
let line_length = usize::try_from(width)
.unwrap()
.checked_mul(self.color_type().bytes_per_pixel() as usize)
.unwrap();
// isolate the portion of the buffer to read the frame data into.
// the chunks above and below it are going to be zeroed.
let (blank_top, rest) =
buf.split_at_mut(line_length.checked_mul(frame.top as usize).unwrap());
let (buf, blank_bottom) =
rest.split_at_mut(line_length.checked_mul(frame.height as usize).unwrap());
debug_assert_eq!(buf.len(), self.reader.buffer_size());
// this is only necessary in case the buffer is not zeroed
for b in blank_top {
*b = 0;
}
// fill the middle section with the frame data
self.reader
.read_into_buffer(buf)
.map_err(ImageError::from_decoding)?;
// this is only necessary in case the buffer is not zeroed
for b in blank_bottom {
*b = 0;
}
} else {
// If the frame does not match the logical screen, read into an extra buffer
// and 'insert' the frame from left/top to logical screen width/height.
let buffer_size = (frame.width as usize)
.checked_mul(frame.height as usize)
.and_then(|s| s.checked_mul(4))
.ok_or(ImageError::Limits(LimitError::from_kind(
LimitErrorKind::InsufficientMemory,
)))?;
self.limits.reserve_usize(buffer_size)?;
let mut frame_buffer = vec![0; buffer_size];
self.limits.free_usize(buffer_size);
self.reader
.read_into_buffer(&mut frame_buffer[..])
.map_err(ImageError::from_decoding)?;
let frame_buffer = ImageBuffer::from_raw(frame.width, frame.height, frame_buffer);
let image_buffer = ImageBuffer::from_raw(width, height, buf);
// `buffer_size` uses wrapping arithmetic, thus might not report the
// correct storage requirement if the result does not fit in `usize`.
// `ImageBuffer::from_raw` detects overflow and reports by returning `None`.
if frame_buffer.is_none() || image_buffer.is_none() {
return Err(ImageError::Unsupported(
UnsupportedError::from_format_and_kind(
ImageFormat::Gif.into(),
UnsupportedErrorKind::GenericFeature(format!(
"Image dimensions ({}, {}) are too large",
frame.width, frame.height
)),
),
));
}
let frame_buffer = frame_buffer.unwrap();
let mut image_buffer = image_buffer.unwrap();
for (x, y, pixel) in image_buffer.enumerate_pixels_mut() {
let frame_x = x.wrapping_sub(frame.left);
let frame_y = y.wrapping_sub(frame.top);
if frame_x < frame.width && frame_y < frame.height {
*pixel = *frame_buffer.get_pixel(frame_x, frame_y);
} else {
// this is only necessary in case the buffer is not zeroed
*pixel = Rgba([0, 0, 0, 0]);
}
}
}
Ok(())
}
fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
(*self).read_image(buf)
}
}
/// Iterator over the frames of a GIF animation, yielding RGBA frames fully
/// composited onto the logical screen.
struct GifFrameIterator<R: Read> {
    reader: gif::Decoder<R>,
    // Logical screen width in pixels.
    width: u32,
    // Logical screen height in pixels.
    height: u32,
    // Canvas carrying the pixels left behind by previous frames according to
    // their disposal methods. Allocated lazily in `next()` so allocation-limit
    // failures can be reported as an iterator item.
    non_disposed_frame: Option<ImageBuffer<Rgba<u8>, Vec<u8>>>,
    limits: Limits,
    // `is_end` is used to indicate whether the iterator has reached the end of
    // the frames, or encountered an unrecoverable error.
    is_end: bool,
}
impl<R: BufRead + Seek> GifFrameIterator<R> {
    /// Builds a frame iterator from a decoder, inheriting its logical screen
    /// dimensions and resource limits.
    fn new(decoder: GifDecoder<R>) -> GifFrameIterator<R> {
        let (width, height) = decoder.dimensions();
        let limits = decoder.limits.clone();
        // intentionally ignore the background color for web compatibility
        GifFrameIterator {
            reader: decoder.reader,
            width,
            height,
            // Allocated lazily on the first `next()` call so that a limit
            // violation can be surfaced as an `Err` item.
            non_disposed_frame: None,
            limits,
            is_end: false,
        }
    }
}
// Produces one fully-composited RGBA frame per iteration, blending each raw
// GIF frame with the residue of previous frames and applying its disposal
// method.
impl<R: Read> Iterator for GifFrameIterator<R> {
    type Item = ImageResult<animation::Frame>;
    fn next(&mut self) -> Option<ImageResult<animation::Frame>> {
        if self.is_end {
            return None;
        }
        // The iterator always produces RGBA8 images
        const COLOR_TYPE: ColorType = ColorType::Rgba8;
        // Allocate the buffer for the previous frame.
        // This is done here and not in the constructor because
        // the constructor cannot return an error when the allocation limit is exceeded.
        if self.non_disposed_frame.is_none() {
            if let Err(e) = self
                .limits
                .reserve_buffer(self.width, self.height, COLOR_TYPE)
            {
                return Some(Err(e));
            }
            self.non_disposed_frame = Some(ImageBuffer::from_pixel(
                self.width,
                self.height,
                Rgba([0, 0, 0, 0]),
            ));
        }
        // Bind to a variable to avoid repeated `.unwrap()` calls
        let non_disposed_frame = self.non_disposed_frame.as_mut().unwrap();
        // begin looping over each frame
        let frame = match self.reader.next_frame_info() {
            Ok(frame_info) => {
                if let Some(frame) = frame_info {
                    FrameInfo::new_from_frame(frame)
                } else {
                    // no more frames
                    return None;
                }
            }
            Err(err) => match err {
                gif::DecodingError::Io(ref e) => {
                    if e.kind() == io::ErrorKind::UnexpectedEof {
                        // end of file reached, no more frames
                        self.is_end = true;
                    }
                    return Some(Err(ImageError::from_decoding(err)));
                }
                _ => {
                    return Some(Err(ImageError::from_decoding(err)));
                }
            },
        };
        // All allocations we do from now on will be freed at the end of this function.
        // Therefore, do not count them towards the persistent limits.
        // Instead, create a local instance of `Limits` for this function alone
        // which will be dropped along with all the buffers when they go out of scope.
        let mut local_limits = self.limits.clone();
        // Check the allocation we're about to perform against the limits
        if let Err(e) = local_limits.reserve_buffer(frame.width, frame.height, COLOR_TYPE) {
            return Some(Err(e));
        }
        // Allocate the buffer now that the limits allowed it
        let mut vec = vec![0; self.reader.buffer_size()];
        if let Err(err) = self.reader.read_into_buffer(&mut vec) {
            return Some(Err(ImageError::from_decoding(err)));
        }
        // create the image buffer from the raw frame.
        // `buffer_size` uses wrapping arithmetic, thus might not report the
        // correct storage requirement if the result does not fit in `usize`.
        // on the other hand, `ImageBuffer::from_raw` detects overflow and
        // reports by returning `None`.
        let Some(mut frame_buffer) = ImageBuffer::from_raw(frame.width, frame.height, vec) else {
            return Some(Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Gif.into(),
                    UnsupportedErrorKind::GenericFeature(format!(
                        "Image dimensions ({}, {}) are too large",
                        frame.width, frame.height
                    )),
                ),
            )));
        };
        // blend the current frame with the non-disposed frame, then update
        // the non-disposed frame according to the disposal method.
        fn blend_and_dispose_pixel(
            dispose: DisposalMethod,
            previous: &mut Rgba<u8>,
            current: &mut Rgba<u8>,
        ) {
            // Fully transparent pixels show the previous frame through.
            let pixel_alpha = current.channels()[3];
            if pixel_alpha == 0 {
                *current = *previous;
            }
            match dispose {
                DisposalMethod::Any | DisposalMethod::Keep => {
                    // do not dispose
                    // (keep pixels from this frame)
                    // note: the `Any` disposal method is underspecified in the GIF
                    // spec, but most viewers treat it identically to `Keep`
                    *previous = *current;
                }
                DisposalMethod::Background => {
                    // restore to background color
                    // (background shows through transparent pixels in the next frame)
                    *previous = Rgba([0, 0, 0, 0]);
                }
                DisposalMethod::Previous => {
                    // restore to previous
                    // (dispose frames leaving the last none disposal frame)
                }
            }
        }
        // if `frame_buffer`'s frame exactly matches the entire image, then
        // use it directly, else create a new buffer to hold the composited
        // image.
        let image_buffer = if (frame.left, frame.top) == (0, 0)
            && (self.width, self.height) == frame_buffer.dimensions()
        {
            for (x, y, pixel) in frame_buffer.enumerate_pixels_mut() {
                let previous_pixel = non_disposed_frame.get_pixel_mut(x, y);
                blend_and_dispose_pixel(frame.disposal_method, previous_pixel, pixel);
            }
            frame_buffer
        } else {
            // Check limits before allocating the buffer
            if let Err(e) = local_limits.reserve_buffer(self.width, self.height, COLOR_TYPE) {
                return Some(Err(e));
            }
            ImageBuffer::from_fn(self.width, self.height, |x, y| {
                // wrapping_sub lets a single `<` comparison reject coordinates
                // both left of/above and right of/below the frame rectangle.
                let frame_x = x.wrapping_sub(frame.left);
                let frame_y = y.wrapping_sub(frame.top);
                let previous_pixel = non_disposed_frame.get_pixel_mut(x, y);
                if frame_x < frame_buffer.width() && frame_y < frame_buffer.height() {
                    let mut pixel = *frame_buffer.get_pixel(frame_x, frame_y);
                    blend_and_dispose_pixel(frame.disposal_method, previous_pixel, &mut pixel);
                    pixel
                } else {
                    // out of bounds, return pixel from previous frame
                    *previous_pixel
                }
            })
        };
        // (0, 0) offsets: the frame has already been composited onto the full
        // logical screen above, so no further positioning is needed.
        Some(Ok(animation::Frame::from_parts(
            image_buffer,
            0,
            0,
            frame.delay,
        )))
    }
}
impl<'a, R: BufRead + Seek + 'a> AnimationDecoder<'a> for GifDecoder<R> {
    /// Converts the decoder into an iterator over its animation frames.
    fn into_frames(self) -> animation::Frames<'a> {
        let frames = GifFrameIterator::new(self);
        animation::Frames::new(Box::new(frames))
    }
}
/// Per-frame metadata extracted from the `gif` crate's `Frame`, with the
/// geometry widened to `u32` and the delay converted to our representation.
struct FrameInfo {
    // x offset of the frame within the logical screen, in pixels.
    left: u32,
    // y offset of the frame within the logical screen, in pixels.
    top: u32,
    // Frame width in pixels.
    width: u32,
    // Frame height in pixels.
    height: u32,
    // How the frame's pixels are treated once the next frame is shown.
    disposal_method: DisposalMethod,
    // Delay before the next frame, in milliseconds.
    delay: animation::Delay,
}
impl FrameInfo {
    /// Extracts our frame metadata from the `gif` crate's frame header,
    /// widening the `u16` geometry fields to `u32`.
    fn new_from_frame(frame: &Frame) -> FrameInfo {
        // GIF stores delays in centiseconds; convert to milliseconds.
        let delay_ms = u32::from(frame.delay) * 10;
        FrameInfo {
            left: frame.left.into(),
            top: frame.top.into(),
            width: frame.width.into(),
            height: frame.height.into(),
            disposal_method: frame.dispose,
            delay: animation::Delay::from_ratio(Ratio::new(delay_ms, 1)),
        }
    }
}
/// Number of repetitions for a GIF animation
///
/// Mirrors `gif::Repeat`; see [`Repeat::to_gif_enum`] for the conversion.
#[derive(Clone, Copy, Debug)]
pub enum Repeat {
    /// Finite number of repetitions
    Finite(u16),
    /// Looping GIF
    Infinite,
}
impl Repeat {
    /// Translates this value into the equivalent `gif` crate repeat setting.
    pub(crate) fn to_gif_enum(self) -> gif::Repeat {
        match self {
            Self::Finite(n) => gif::Repeat::Finite(n),
            Self::Infinite => gif::Repeat::Infinite,
        }
    }
}
/// GIF encoder.
pub struct GifEncoder<W: Write> {
    // Output writer; `take`n when the inner encoder is lazily created in
    // `encode_gif`.
    w: Option<W>,
    // Created on the first frame write, since `gif::Encoder::new` is given
    // the dimensions of that first frame.
    gif_encoder: Option<gif::Encoder<W>>,
    // Quality/performance trade-off forwarded to `Frame::from_*_speed`;
    // must lie in the range [1, 30].
    speed: i32,
    // Requested repeat behaviour; remembered so it can be applied when the
    // inner encoder is created later.
    repeat: Option<Repeat>,
}
impl<W: Write> GifEncoder<W> {
    /// Creates a new GIF encoder with a speed of 1. This prioritizes quality over performance at any cost.
    pub fn new(w: W) -> GifEncoder<W> {
        Self::new_with_speed(w, 1)
    }
    /// Create a new GIF encoder, and has the speed parameter `speed`. See
    /// [`Frame::from_rgba_speed`](https://docs.rs/gif/latest/gif/struct.Frame.html#method.from_rgba_speed)
    /// for more information.
    ///
    /// # Panics
    ///
    /// Panics if `speed` is outside the range `[1, 30]`.
    pub fn new_with_speed(w: W, speed: i32) -> GifEncoder<W> {
        assert!(
            (1..=30).contains(&speed),
            "speed needs to be in the range [1, 30]"
        );
        GifEncoder {
            w: Some(w),
            gif_encoder: None,
            speed,
            repeat: None,
        }
    }
    /// Set the repeat behaviour of the encoded GIF
    pub fn set_repeat(&mut self, repeat: Repeat) -> ImageResult<()> {
        // Apply immediately if the inner encoder already exists; it is also
        // stored so `encode_gif` can apply it when the encoder is created.
        if let Some(ref mut encoder) = self.gif_encoder {
            encoder
                .set_repeat(repeat.to_gif_enum())
                .map_err(ImageError::from_encoding)?;
        }
        self.repeat = Some(repeat);
        Ok(())
    }
    /// Encode a single image.
    ///
    /// Only `Rgb8` and `Rgba8` input data is accepted; any other color type
    /// yields an `Unsupported` error.
    pub fn encode(
        &mut self,
        data: &[u8],
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        let (width, height) = self.gif_dimensions(width, height)?;
        match color {
            ExtendedColorType::Rgb8 => {
                self.encode_gif(Frame::from_rgb_speed(width, height, data, self.speed))
            }
            // `from_rgba_speed` takes `&mut`, hence the owned copy of `data`.
            ExtendedColorType::Rgba8 => self.encode_gif(Frame::from_rgba_speed(
                width,
                height,
                &mut data.to_owned(),
                self.speed,
            )),
            _ => Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Gif.into(),
                    UnsupportedErrorKind::Color(color),
                ),
            )),
        }
    }
    /// Encode one frame of animation.
    pub fn encode_frame(&mut self, img_frame: animation::Frame) -> ImageResult<()> {
        let frame = self.convert_frame(img_frame)?;
        self.encode_gif(frame)
    }
    /// Encodes Frames.
    /// Consider using `try_encode_frames` instead to encode an `animation::Frames` like iterator.
    pub fn encode_frames<F>(&mut self, frames: F) -> ImageResult<()>
    where
        F: IntoIterator<Item = animation::Frame>,
    {
        for img_frame in frames {
            self.encode_frame(img_frame)?;
        }
        Ok(())
    }
    /// Try to encode a collection of `ImageResult<animation::Frame>` objects.
    /// Use this function to encode an `animation::Frames` like iterator.
    /// Whenever an `Err` item is encountered, that value is returned without further actions.
    pub fn try_encode_frames<F>(&mut self, frames: F) -> ImageResult<()>
    where
        F: IntoIterator<Item = ImageResult<animation::Frame>>,
    {
        for img_frame in frames {
            self.encode_frame(img_frame?)?;
        }
        Ok(())
    }
    /// Converts an `animation::Frame` into a `gif::Frame` via
    /// `Frame::from_rgba_speed`, translating the delay from milliseconds to
    /// GIF's 10 ms units (saturating at `u16::MAX`).
    pub(crate) fn convert_frame(
        &mut self,
        img_frame: animation::Frame,
    ) -> ImageResult<Frame<'static>> {
        // get the delay before converting img_frame
        let frame_delay = img_frame.delay().into_ratio().to_integer();
        // convert img_frame into RgbaImage
        let mut rbga_frame = img_frame.into_buffer();
        let (width, height) = self.gif_dimensions(rbga_frame.width(), rbga_frame.height())?;
        // Create the gif::Frame from the animation::Frame
        let mut frame = Frame::from_rgba_speed(width, height, &mut rbga_frame, self.speed);
        // Saturate the conversion to u16::MAX instead of returning an error as that
        // would require a new special cased variant in ParameterErrorKind which most
        // likely couldn't be reused for other cases. This isn't a bad trade-off given
        // that the current algorithm is already lossy.
        frame.delay = (frame_delay / 10).try_into().unwrap_or(u16::MAX);
        Ok(frame)
    }
    /// Checks that both dimensions fit into GIF's `u16` size fields.
    fn gif_dimensions(&self, width: u32, height: u32) -> ImageResult<(u16, u16)> {
        // Helper so a single `?`-style chain can narrow both values.
        fn inner_dimensions(width: u32, height: u32) -> Option<(u16, u16)> {
            let width = u16::try_from(width).ok()?;
            let height = u16::try_from(height).ok()?;
            Some((width, height))
        }
        // TODO: this is not very idiomatic yet. Should return an EncodingError.
        inner_dimensions(width, height).ok_or_else(|| {
            ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::DimensionMismatch,
            ))
        })
    }
    /// Writes `frame` out, lazily creating the inner `gif::Encoder` (sized
    /// from this first frame) if it does not exist yet.
    pub(crate) fn encode_gif(&mut self, mut frame: Frame) -> ImageResult<()> {
        let gif_encoder;
        if let Some(ref mut encoder) = self.gif_encoder {
            gif_encoder = encoder;
        } else {
            let writer = self.w.take().unwrap();
            let mut encoder = gif::Encoder::new(writer, frame.width, frame.height, &[])
                .map_err(ImageError::from_encoding)?;
            // Apply any repeat behaviour requested before the encoder existed.
            if let Some(ref repeat) = self.repeat {
                encoder
                    .set_repeat(repeat.to_gif_enum())
                    .map_err(ImageError::from_encoding)?;
            }
            self.gif_encoder = Some(encoder);
            gif_encoder = self.gif_encoder.as_mut().unwrap();
        }
        // Every frame is written with `Background` disposal, i.e. the canvas
        // is cleared before the next frame is drawn.
        frame.dispose = DisposalMethod::Background;
        gif_encoder
            .write_frame(&frame)
            .map_err(ImageError::from_encoding)
    }
}
impl<W: Write> ImageEncoder for GifEncoder<W> {
    /// Encodes `buf` as a single-frame GIF; see [`GifEncoder::encode`] for
    /// the supported color types.
    fn write_image(
        mut self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        self.encode(buf, width, height, color_type)
    }
}
impl ImageError {
    /// Translates a `gif` crate decoding error into an [`ImageError`],
    /// unwrapping I/O errors and wrapping format errors.
    fn from_decoding(err: gif::DecodingError) -> ImageError {
        match err {
            gif::DecodingError::Io(io_err) => ImageError::IoError(io_err),
            format_err @ gif::DecodingError::Format(_) => {
                ImageError::Decoding(DecodingError::new(ImageFormat::Gif.into(), format_err))
            }
        }
    }
    /// Translates a `gif` crate encoding error into an [`ImageError`],
    /// unwrapping I/O errors and wrapping format errors.
    fn from_encoding(err: gif::EncodingError) -> ImageError {
        match err {
            gif::EncodingError::Io(io_err) => ImageError::IoError(io_err),
            format_err @ gif::EncodingError::Format(_) => {
                ImageError::Encoding(EncodingError::new(ImageFormat::Gif.into(), format_err))
            }
        }
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Regression test: a frame larger than the logical screen must be clipped
    // instead of panicking or overrunning the output buffer.
    #[test]
    fn frames_exceeding_logical_screen_size() {
        // This is a gif with 10x10 logical screen, but a 16x16 frame + 6px offset inside.
        let data = vec![
            0x47, 0x49, 0x46, 0x38, 0x39, 0x61, 0x0A, 0x00, 0x0A, 0x00, 0xF0, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x0E, 0xFF, 0x1F, 0x21, 0xF9, 0x04, 0x09, 0x64, 0x00, 0x00, 0x00, 0x2C,
            0x06, 0x00, 0x06, 0x00, 0x10, 0x00, 0x10, 0x00, 0x00, 0x02, 0x23, 0x84, 0x8F, 0xA9,
            0xBB, 0xE1, 0xE8, 0x42, 0x8A, 0x0F, 0x50, 0x79, 0xAE, 0xD1, 0xF9, 0x7A, 0xE8, 0x71,
            0x5B, 0x48, 0x81, 0x64, 0xD5, 0x91, 0xCA, 0x89, 0x4D, 0x21, 0x63, 0x89, 0x4C, 0x09,
            0x77, 0xF5, 0x6D, 0x14, 0x00, 0x3B,
        ];
        let decoder = GifDecoder::new(Cursor::new(data)).unwrap();
        let mut buf = vec![0u8; decoder.total_bytes() as usize];
        assert!(decoder.read_image(&mut buf).is_ok());
    }
}

752
vendor/image/src/codecs/hdr/decoder.rs vendored Normal file
View File

@@ -0,0 +1,752 @@
use std::io::{self, Read};
use std::num::{ParseFloatError, ParseIntError};
use std::{error, fmt};
use crate::error::{
DecodingError, ImageError, ImageFormatHint, ImageResult, UnsupportedError, UnsupportedErrorKind,
};
use crate::{ColorType, ImageDecoder, ImageFormat, Rgb};
/// Errors that can occur during decoding and parsing of a HDR image
#[derive(Debug, Clone, PartialEq, Eq)]
enum DecoderError {
    /// HDR's "#?RADIANCE" signature wrong or missing
    RadianceHdrSignatureInvalid,
    /// EOF before end of header
    TruncatedHeader,
    /// EOF instead of image dimensions
    TruncatedDimensions,
    /// A value couldn't be parsed as an `f32` on the given header line
    UnparsableF32(LineType, ParseFloatError),
    /// A value couldn't be parsed as a `u32` on the given header line
    UnparsableU32(LineType, ParseIntError),
    /// Not enough numbers in line
    LineTooShort(LineType),
    /// COLORCORR contains too many numbers in strict mode
    ExtraneousColorcorrNumbers,
    /// Dimensions line had too few elements (actual count, expected count)
    DimensionsLineTooShort(usize, usize),
    /// Dimensions line had too many elements (expected count)
    DimensionsLineTooLong(usize),
    /// The length of a decoded scanline (first field) wasn't a match for the
    /// specified length (second field)
    WrongScanlineLength(usize, usize),
    /// First pixel of a scanline is a run length marker
    FirstPixelRlMarker,
}
impl fmt::Display for DecoderError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DecoderError::RadianceHdrSignatureInvalid => {
f.write_str("Radiance HDR signature not found")
}
DecoderError::TruncatedHeader => f.write_str("EOF in header"),
DecoderError::TruncatedDimensions => f.write_str("EOF in dimensions line"),
DecoderError::UnparsableF32(line, pe) => {
f.write_fmt(format_args!("Cannot parse {line} value as f32: {pe}"))
}
DecoderError::UnparsableU32(line, pe) => {
f.write_fmt(format_args!("Cannot parse {line} value as u32: {pe}"))
}
DecoderError::LineTooShort(line) => {
f.write_fmt(format_args!("Not enough numbers in {line}"))
}
DecoderError::ExtraneousColorcorrNumbers => f.write_str("Extra numbers in COLORCORR"),
DecoderError::DimensionsLineTooShort(elements, expected) => f.write_fmt(format_args!(
"Dimensions line too short: have {elements} elements, expected {expected}"
)),
DecoderError::DimensionsLineTooLong(expected) => f.write_fmt(format_args!(
"Dimensions line too long, expected {expected} elements"
)),
DecoderError::WrongScanlineLength(len, expected) => f.write_fmt(format_args!(
"Wrong length of decoded scanline: got {len}, expected {expected}"
)),
DecoderError::FirstPixelRlMarker => {
f.write_str("First pixel of a scanline shouldn't be run length marker")
}
}
}
}
impl From<DecoderError> for ImageError {
    /// Wraps an HDR decoder error into the crate-wide error type,
    /// tagging it with the HDR format.
    fn from(e: DecoderError) -> ImageError {
        let inner = DecodingError::new(ImageFormat::Hdr.into(), e);
        ImageError::Decoding(inner)
    }
}
impl error::Error for DecoderError {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
DecoderError::UnparsableF32(_, err) => Some(err),
DecoderError::UnparsableU32(_, err) => Some(err),
_ => None,
}
}
}
/// Lines which contain parsable data that can fail
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
enum LineType {
    Exposure,
    Pixaspect,
    Colorcorr,
    DimensionsHeight,
    DimensionsWidth,
}
impl fmt::Display for LineType {
    /// Prints the header keyword (or a description for dimension fields)
    /// used in error messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let name = match self {
            Self::Exposure => "EXPOSURE",
            Self::Pixaspect => "PIXASPECT",
            Self::Colorcorr => "COLORCORR",
            Self::DimensionsHeight => "height dimension",
            Self::DimensionsWidth => "width dimension",
        };
        f.write_str(name)
    }
}
/// Radiance HDR file signature
pub const SIGNATURE: &[u8] = b"#?RADIANCE";
/// Length of the signature in bytes, derived from `SIGNATURE` itself so the
/// two constants can never drift apart (previously a hard-coded `10`).
const SIGNATURE_LENGTH: usize = SIGNATURE.len();
/// An Radiance HDR decoder
#[derive(Debug)]
pub struct HdrDecoder<R> {
    // Source stream, positioned just past the header after construction.
    r: R,
    // Image width, parsed from the dimensions line.
    width: u32,
    // Image height, parsed from the dimensions line.
    height: u32,
    // Parsed header attributes; also carries copies of width/height.
    meta: HdrMetadata,
}
/// Refer to [wikipedia](https://en.wikipedia.org/wiki/RGBE_image_format)
///
/// One pixel of the shared-exponent RGBE encoding: three 8-bit color
/// mantissas sharing a single 8-bit exponent.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub(crate) struct Rgbe8Pixel {
    /// Color components
    pub(crate) c: [u8; 3],
    /// Exponent
    pub(crate) e: u8,
}
/// Creates `Rgbe8Pixel` from components
pub(crate) fn rgbe8(r: u8, g: u8, b: u8, e: u8) -> Rgbe8Pixel {
    let c = [r, g, b];
    Rgbe8Pixel { c, e }
}
impl Rgbe8Pixel {
    /// Converts `Rgbe8Pixel` into `Rgb<f32>` linearly.
    ///
    /// An exponent byte of 0 encodes pure black; otherwise every mantissa
    /// byte is scaled by the reference factor `ldexp(1, e - (128 + 8))`,
    /// computed here as `exp2(e - 136)`.
    #[inline]
    pub(crate) fn to_hdr(self) -> Rgb<f32> {
        if self.e == 0 {
            return Rgb([0.0, 0.0, 0.0]);
        }
        let scale = (f32::from(self.e) - 136.0).exp2();
        Rgb(self.c.map(|mantissa| scale * f32::from(mantissa)))
    }
}
impl<R: Read> HdrDecoder<R> {
    /// Reads Radiance HDR image header from stream ```r```
    /// if the header is valid, creates `HdrDecoder`
    /// strict mode is enabled
    pub fn new(reader: R) -> ImageResult<Self> {
        HdrDecoder::with_strictness(reader, true)
    }
    /// Allows reading old Radiance HDR images
    pub fn new_nonstrict(reader: R) -> ImageResult<Self> {
        Self::with_strictness(reader, false)
    }
    /// Reads Radiance HDR image header from stream `reader`,
    /// if the header is valid, creates `HdrDecoder`.
    ///
    /// strict enables strict mode
    ///
    /// Warning! Reading wrong file in non-strict mode
    /// could consume file size worth of memory in the process.
    pub fn with_strictness(mut reader: R, strict: bool) -> ImageResult<HdrDecoder<R>> {
        let mut attributes = HdrMetadata::new();
        {
            // scope to make borrowck happy
            let r = &mut reader;
            if strict {
                // Strict mode requires the "#?RADIANCE" signature up front.
                let mut signature = [0; SIGNATURE_LENGTH];
                r.read_exact(&mut signature)?;
                if signature != SIGNATURE {
                    return Err(DecoderError::RadianceHdrSignatureInvalid.into());
                } // no else
                // skip signature line ending
                read_line_u8(r)?;
            } else {
                // Old Radiance HDR files (*.pic) don't use signature
                // Let them be parsed in non-strict mode
            }
            // read header data until empty line
            loop {
                match read_line_u8(r)? {
                    None => {
                        // EOF before end of header
                        return Err(DecoderError::TruncatedHeader.into());
                    }
                    Some(line) => {
                        if line.is_empty() {
                            // end of header
                            break;
                        } else if line[0] == b'#' {
                            // line[0] will not panic, line.len() == 0 is false here
                            // skip comments
                            continue;
                        } // no else
                        // process attribute line
                        let line = String::from_utf8_lossy(&line[..]);
                        attributes.update_header_info(&line, strict)?;
                    } // <= Some(line)
                } // match read_line_u8()
            } // loop
        } // scope to end borrow of reader
        // parse dimensions
        let (width, height) = match read_line_u8(&mut reader)? {
            None => {
                // EOF instead of image dimensions
                return Err(DecoderError::TruncatedDimensions.into());
            }
            Some(dimensions) => {
                let dimensions = String::from_utf8_lossy(&dimensions[..]);
                parse_dimensions_line(&dimensions, strict)?
            }
        };
        // color type is always rgb8
        if crate::utils::check_dimension_overflow(width, height, ColorType::Rgb8.bytes_per_pixel())
        {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Hdr.into(),
                    UnsupportedErrorKind::GenericFeature(format!(
                        "Image dimensions ({width}x{height}) are too large"
                    )),
                ),
            ));
        }
        Ok(HdrDecoder {
            r: reader,
            width,
            height,
            // Keep width/height duplicated in the metadata for callers of
            // `metadata()`.
            meta: HdrMetadata {
                width,
                height,
                ..attributes
            },
        })
    } // end with_strictness
    /// Returns file metadata. Refer to `HdrMetadata` for details.
    pub fn metadata(&self) -> HdrMetadata {
        self.meta.clone()
    }
    /// Consumes the decoder and writes transformed pixels into `output_slice`.
    ///
    /// `f` maps each raw RGBE pixel to the output element type;
    /// `output_slice` must hold exactly `width * height` elements.
    fn read_image_transform<T: Send, F: Send + Sync + Fn(Rgbe8Pixel) -> T>(
        mut self,
        f: F,
        output_slice: &mut [T],
    ) -> ImageResult<()> {
        assert_eq!(
            output_slice.len(),
            self.width as usize * self.height as usize
        );
        // Don't read anything if image is empty
        if self.width == 0 || self.height == 0 {
            return Ok(());
        }
        // One chunk per scanline.
        let chunks_iter = output_slice.chunks_mut(self.width as usize);
        let mut buf = vec![Default::default(); self.width as usize];
        for chunk in chunks_iter {
            // read_scanline overwrites the entire buffer or returns an Err,
            // so not resetting the buffer here is ok.
            read_scanline(&mut self.r, &mut buf[..])?;
            for (dst, &pix) in chunk.iter_mut().zip(buf.iter()) {
                *dst = f(pix);
            }
        }
        Ok(())
    }
}
impl<R: Read> ImageDecoder for HdrDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        (self.meta.width, self.meta.height)
    }
    /// Decoded output is always 3-channel 32-bit float RGB.
    fn color_type(&self) -> ColorType {
        ColorType::Rgb32F
    }
    /// Decodes the whole image into `buf` as `f32` RGB triples; `buf` must be
    /// exactly `total_bytes()` long.
    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        let mut img = vec![Rgb([0.0, 0.0, 0.0]); self.width as usize * self.height as usize];
        self.read_image_transform(|pix| pix.to_hdr(), &mut img[..])?;
        // 12 bytes per pixel: 3 channels * 4 bytes per f32.
        for (i, Rgb(data)) in img.into_iter().enumerate() {
            buf[(i * 12)..][..12].copy_from_slice(bytemuck::cast_slice(&data));
        }
        Ok(())
    }
    /// Object-safe forwarding to `read_image`.
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
// Reads one scanline from `r` into `buf`, choosing between the two RLE
// schemes based on the first four bytes.
// Precondition: buf.len() > 0
fn read_scanline<R: Read>(r: &mut R, buf: &mut [Rgbe8Pixel]) -> ImageResult<()> {
    assert!(!buf.is_empty());
    let width = buf.len();
    // The first 4 bytes of a scanline determine the compression method:
    // the denormalized pixel value (2, 2, <128, _) marks the newer
    // per-component RLE encoding.
    let first = read_rgbe(r)?;
    if !(first.c[0] == 2 && first.c[1] == 2 && first.c[2] < 128) {
        // old RLE method (it was considered old around 1991, should it be here?)
        return decode_old_rle(r, first, buf);
    }
    // decode_component guarantees that offset is within 0 .. width,
    // so the closures can index `buf` without further bounds handling.
    decode_component(r, width, |offset, value| buf[offset].c[0] = value)?;
    decode_component(r, width, |offset, value| buf[offset].c[1] = value)?;
    decode_component(r, width, |offset, value| buf[offset].c[2] = value)?;
    decode_component(r, width, |offset, value| buf[offset].e = value)?;
    Ok(())
}
// Reads exactly one byte from `r`, failing with the underlying I/O error
// (e.g. `UnexpectedEof`) if none is available.
#[inline(always)]
fn read_byte<R: Read>(r: &mut R) -> io::Result<u8> {
    let mut byte = [0u8; 1];
    r.read_exact(&mut byte)?;
    Ok(byte[0])
}
// Guarantees that first parameter of set_component will be within pos .. pos+width
#[inline]
fn decode_component<R: Read, S: FnMut(usize, u8)>(
    r: &mut R,
    width: usize,
    mut set_component: S,
) -> ImageResult<()> {
    // Scratch space for literal runs; the encoding caps a run at 128 values.
    let mut scratch = [0u8; 128];
    let mut pos = 0;
    while pos < width {
        let marker = read_byte(r)?;
        let count = if marker <= 128 {
            // Literal run: `marker` raw values follow in the stream.
            let count = marker as usize;
            // sanity check against overrunning the scanline
            if pos + count > width {
                return Err(DecoderError::WrongScanlineLength(pos + count, width).into());
            }
            r.read_exact(&mut scratch[..count])?;
            for (offset, &value) in scratch[..count].iter().enumerate() {
                set_component(pos + offset, value);
            }
            count
        } else {
            // RLE run: a single value repeated `marker - 128` times.
            let count = (marker - 128) as usize;
            // sanity check against overrunning the scanline
            if pos + count > width {
                return Err(DecoderError::WrongScanlineLength(pos + count, width).into());
            }
            let value = read_byte(r)?;
            for offset in 0..count {
                set_component(pos + offset, value);
            }
            count
        };
        pos += count;
    }
    if pos == width {
        Ok(())
    } else {
        Err(DecoderError::WrongScanlineLength(pos, width).into())
    }
}
// Decodes scanline, places it into buf
// Precondition: buf.len() > 0
// fb - first 4 bytes of scanline
fn decode_old_rle<R: Read>(r: &mut R, fb: Rgbe8Pixel, buf: &mut [Rgbe8Pixel]) -> ImageResult<()> {
    assert!(!buf.is_empty());
    let width = buf.len();
    // convenience function.
    // returns run length if pixel is a run length marker
    // (mantissas all equal to 1; the exponent byte carries the count)
    #[inline]
    fn rl_marker(pix: Rgbe8Pixel) -> Option<usize> {
        if pix.c == [1, 1, 1] {
            Some(pix.e as usize)
        } else {
            None
        }
    }
    // first pixel in scanline should not be run length marker
    // it is error if it is
    if rl_marker(fb).is_some() {
        return Err(DecoderError::FirstPixelRlMarker.into());
    }
    buf[0] = fb; // set first pixel of scanline
    let mut x_off = 1; // current offset from beginning of a scanline
    let mut rl_mult = 1; // current run length multiplier
    let mut prev_pixel = fb;
    while x_off < width {
        let pix = read_rgbe(r)?;
        // it's harder to forget to increase x_off if I write this this way.
        x_off += {
            if let Some(rl) = rl_marker(pix) {
                // rl_mult takes care of consecutive RL markers:
                // each further marker in a chain scales its count by another
                // factor of 256, allowing runs longer than 255.
                let rl = rl * rl_mult;
                rl_mult *= 256;
                if x_off + rl <= width {
                    // do run: repeat the last literal pixel `rl` times
                    for b in &mut buf[x_off..x_off + rl] {
                        *b = prev_pixel;
                    }
                } else {
                    return Err(DecoderError::WrongScanlineLength(x_off + rl, width).into());
                };
                rl // value to increase x_off by
            } else {
                rl_mult = 1; // chain of consecutive RL markers is broken
                prev_pixel = pix;
                buf[x_off] = pix;
                1 // value to increase x_off by
            }
        };
    }
    if x_off != width {
        return Err(DecoderError::WrongScanlineLength(x_off, width).into());
    }
    Ok(())
}
// Reads one raw RGBE pixel (three mantissa bytes followed by the exponent)
// from `r`.
fn read_rgbe<R: Read>(r: &mut R) -> io::Result<Rgbe8Pixel> {
    let mut raw = [0u8; 4];
    r.read_exact(&mut raw)?;
    let [red, green, blue, e] = raw;
    Ok(Rgbe8Pixel {
        c: [red, green, blue],
        e,
    })
}
/// Metadata for Radiance HDR image
#[derive(Debug, Clone)]
pub struct HdrMetadata {
    /// Width of decoded image. It could be either scanline length,
    /// or scanline count, depending on image orientation.
    pub width: u32,
    /// Height of decoded image. It depends on orientation too.
    pub height: u32,
    /// Orientation matrix. For standard orientation it is ((1,0),(0,1)) - left to right, top to bottom.
    /// First pair tells how resulting pixel coordinates change along a scanline.
    /// Second pair tells how they change from one scanline to the next.
    pub orientation: ((i8, i8), (i8, i8)),
    /// Divide color values by exposure to get physical radiance in
    /// watts/steradian/m<sup>2</sup>
    ///
    /// Image may not contain physical data, even if this field is set.
    pub exposure: Option<f32>,
    /// Divide color values by corresponding tuple member (r, g, b) to get physical radiance
    /// in watts/steradian/m<sup>2</sup>
    ///
    /// Image may not contain physical data, even if this field is set.
    pub color_correction: Option<(f32, f32, f32)>,
    /// Pixel height divided by pixel width
    pub pixel_aspect_ratio: Option<f32>,
    /// All lines contained in image header are put here. Ordering of lines is preserved.
    /// Lines in the form "key=value" are represented as ("key", "value").
    /// All other lines are ("", "line")
    pub custom_attributes: Vec<(String, String)>,
}
impl HdrMetadata {
    // Default metadata: zero dimensions, standard orientation, no physical
    // calibration data, no recorded header lines.
    fn new() -> HdrMetadata {
        HdrMetadata {
            width: 0,
            height: 0,
            orientation: ((1, 0), (0, 1)),
            exposure: None,
            color_correction: None,
            pixel_aspect_ratio: None,
            custom_attributes: vec![],
        }
    }
    // Updates header info, in strict mode returns error for malformed lines (no '=' separator)
    // unknown attributes are skipped
    fn update_header_info(&mut self, line: &str, strict: bool) -> ImageResult<()> {
        // split line at first '='
        // old Radiance HDR files (*.pic) feature tabs in key, so vvv trim
        let maybe_key_value = split_at_first(line, "=").map(|(key, value)| (key.trim(), value));
        // save all header lines in custom_attributes
        match maybe_key_value {
            Some((key, val)) => self
                .custom_attributes
                .push((key.to_owned(), val.to_owned())),
            None => self
                .custom_attributes
                .push((String::new(), line.to_owned())),
        }
        // parse known attributes
        match maybe_key_value {
            Some(("FORMAT", val)) => {
                if val.trim() != "32-bit_rle_rgbe" {
                    // XYZE isn't supported yet
                    return Err(ImageError::Unsupported(
                        UnsupportedError::from_format_and_kind(
                            ImageFormat::Hdr.into(),
                            UnsupportedErrorKind::Format(ImageFormatHint::Name(limit_string_len(
                                val, 20,
                            ))),
                        ),
                    ));
                }
            }
            Some(("EXPOSURE", val)) => {
                match val.trim().parse::<f32>() {
                    Ok(v) => {
                        self.exposure = Some(self.exposure.unwrap_or(1.0) * v); // all encountered exposure values should be multiplied
                    }
                    Err(parse_error) => {
                        if strict {
                            return Err(DecoderError::UnparsableF32(
                                LineType::Exposure,
                                parse_error,
                            )
                            .into());
                        } // no else, skip this line in non-strict mode
                    }
                }
            }
            Some(("PIXASPECT", val)) => {
                match val.trim().parse::<f32>() {
                    Ok(v) => {
                        self.pixel_aspect_ratio = Some(self.pixel_aspect_ratio.unwrap_or(1.0) * v);
                        // all encountered pixel aspect values should be multiplied
                    }
                    Err(parse_error) => {
                        if strict {
                            return Err(DecoderError::UnparsableF32(
                                LineType::Pixaspect,
                                parse_error,
                            )
                            .into());
                        } // no else, skip this line in non-strict mode
                    }
                }
            }
            Some(("COLORCORR", val)) => {
                let mut rgbcorr = [1.0, 1.0, 1.0];
                match parse_space_separated_f32(val, &mut rgbcorr, LineType::Colorcorr) {
                    Ok(extra_numbers) => {
                        if strict && extra_numbers {
                            return Err(DecoderError::ExtraneousColorcorrNumbers.into());
                        } // no else, just ignore extra numbers
                        // all encountered color corrections are multiplied component-wise
                        let (rc, gc, bc) = self.color_correction.unwrap_or((1.0, 1.0, 1.0));
                        self.color_correction =
                            Some((rc * rgbcorr[0], gc * rgbcorr[1], bc * rgbcorr[2]));
                    }
                    Err(err) => {
                        if strict {
                            return Err(err);
                        } // no else, skip malformed line in non-strict mode
                    }
                }
            }
            None => {
                // old Radiance HDR files (*.pic) contain commands in a header
                // just skip them
            }
            _ => {
                // skip unknown attribute
            }
        } // match attributes
        Ok(())
    }
}
/// Fills `vals` with whitespace-separated `f32` values parsed from `line`.
///
/// Returns `Ok(true)` when the line holds more numbers than `vals` can take
/// (the extras are left unparsed), `Ok(false)` when the counts match exactly.
/// Fails with `UnparsableF32` on a bad token or `LineTooShort` when the line
/// runs out of tokens before `vals` is full.
fn parse_space_separated_f32(line: &str, vals: &mut [f32], line_tp: LineType) -> ImageResult<bool> {
    let mut tokens = line.split_whitespace();
    for slot in vals.iter_mut() {
        let Some(token) = tokens.next() else {
            // Fewer numbers on the line than the caller expected.
            return Err(DecoderError::LineTooShort(line_tp).into());
        };
        *slot = token
            .parse::<f32>()
            .map_err(|err| DecoderError::UnparsableF32(line_tp, err))?;
    }
    // Report whether unconsumed numbers remain.
    Ok(tokens.next().is_some())
}
// Parses the Radiance HDR dimensions line, e.g. "-Y height +X width".
// Returns (width, height), or an error for malformed/unsupported lines.
fn parse_dimensions_line(line: &str, strict: bool) -> ImageResult<(u32, u32)> {
    const DIMENSIONS_COUNT: usize = 4;
    let mut parts = line.split_whitespace();
    // Fetches the token at `index`, mapping exhaustion to a TooShort error.
    let mut next_part = |index: usize| {
        parts
            .next()
            .ok_or(DecoderError::DimensionsLineTooShort(index, DIMENSIONS_COUNT))
    };
    let c1_tag = next_part(0)?;
    let c1_str = next_part(1)?;
    let c2_tag = next_part(2)?;
    let c2_str = next_part(3)?;
    if strict && parts.next().is_some() {
        // Trailing data after the four expected tokens.
        return Err(DecoderError::DimensionsLineTooLong(DIMENSIONS_COUNT).into());
    }
    // There are 8 possible orientations (+Y +X, +X -Y, ...); only the common
    // left-to-right, top-down layout is supported.
    match (c1_tag, c2_tag) {
        ("-Y", "+X") => {
            // c1_str is height, c2_str is width
            let height = c1_str
                .parse::<u32>()
                .map_err(|pe| DecoderError::UnparsableU32(LineType::DimensionsHeight, pe))?;
            let width = c2_str
                .parse::<u32>()
                .map_err(|pe| DecoderError::UnparsableU32(LineType::DimensionsWidth, pe))?;
            Ok((width, height))
        }
        _ => Err(ImageError::Unsupported(
            UnsupportedError::from_format_and_kind(
                ImageFormat::Hdr.into(),
                UnsupportedErrorKind::GenericFeature(format!(
                    "Orientation {} {}",
                    limit_string_len(c1_tag, 4),
                    limit_string_len(c2_tag, 4)
                )),
            ),
        )),
    }
}
// Truncates `s` to at most `len` characters, appending "..." when shortened,
// so the result never exceeds len + 3 characters.
fn limit_string_len(s: &str, len: usize) -> String {
    // `nth(len)` is Some only when the string has more than `len` chars;
    // its byte offset marks where the first `len` chars end.
    match s.char_indices().nth(len) {
        Some((byte_pos, _)) => format!("{}...", &s[..byte_pos]),
        None => s.to_owned(),
    }
}
// Splits `s` around the first occurrence of `separator`, yielding the text
// before and after it. Returns None when the separator is absent or when
// either side of the split would be empty (this also rejects an empty
// separator, which `find` locates at position 0).
fn split_at_first<'a>(s: &'a str, separator: &str) -> Option<(&'a str, &'a str)> {
    let pos = s.find(separator)?;
    let before = &s[..pos];
    let after = &s[pos + separator.len()..];
    if before.is_empty() || after.is_empty() {
        None
    } else {
        Some((before, after))
    }
}
// Reads bytes from `r` up to (and consuming) the next b'\n' or EOF.
// Returns the bytes without the newline, or None when the reader was
// already exhausted before anything could be read.
fn read_line_u8<R: Read>(r: &mut R) -> io::Result<Option<Vec<u8>>> {
    // A fresh allocation per call keeps the interface simple; header lines are short.
    #[allow(clippy::disallowed_methods)]
    let mut line = Vec::with_capacity(16);
    let mut byte = [0u8; 1];
    loop {
        // A zero-byte read means EOF for a one-byte buffer.
        let got_byte = r.read(&mut byte)? != 0;
        if got_byte && byte[0] != b'\n' {
            line.push(byte[0]);
            continue;
        }
        // Terminated by newline or EOF. EOF with nothing accumulated
        // signals end of input; a bare newline yields an empty line.
        return if !got_byte && line.is_empty() {
            Ok(None)
        } else {
            Ok(Some(line))
        };
    }
}
#[cfg(test)]
mod tests {
    use std::{borrow::Cow, io::Cursor};
    use super::*;
    // Separator placement edge cases: empty input, separator at either end,
    // multi-character separators, and the (rejected) empty separator.
    #[test]
    fn split_at_first_test() {
        assert_eq!(split_at_first(&Cow::Owned(String::new()), "="), None);
        assert_eq!(split_at_first(&Cow::Owned("=".into()), "="), None);
        assert_eq!(split_at_first(&Cow::Owned("= ".into()), "="), None);
        assert_eq!(
            split_at_first(&Cow::Owned(" = ".into()), "="),
            Some((" ", " "))
        );
        assert_eq!(
            split_at_first(&Cow::Owned("EXPOSURE= ".into()), "="),
            Some(("EXPOSURE", " "))
        );
        assert_eq!(
            split_at_first(&Cow::Owned("EXPOSURE= =".into()), "="),
            Some(("EXPOSURE", " ="))
        );
        assert_eq!(
            split_at_first(&Cow::Owned("EXPOSURE== =".into()), "=="),
            Some(("EXPOSURE", " ="))
        );
        assert_eq!(split_at_first(&Cow::Owned("EXPOSURE".into()), ""), None);
    }
    // Line splitting, empty lines, and the EOF -> None sentinel.
    #[test]
    fn read_line_u8_test() {
        let buf: Vec<_> = (&b"One\nTwo\nThree\nFour\n\n\n"[..]).into();
        let input = &mut Cursor::new(buf);
        assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"One"[..]);
        assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Two"[..]);
        assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Three"[..]);
        assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b"Four"[..]);
        assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]);
        assert_eq!(&read_line_u8(input).unwrap().unwrap()[..], &b""[..]);
        assert_eq!(read_line_u8(input).unwrap(), None);
    }
    // 4294967295 x 4294967295 exceeds any representable buffer size; both
    // strict and non-strict construction must fail instead of overflowing.
    #[test]
    fn dimension_overflow() {
        let data = b"#?RADIANCE\nFORMAT=32-bit_rle_rgbe\n\n -Y 4294967295 +X 4294967295";
        assert!(HdrDecoder::new(Cursor::new(data)).is_err());
        assert!(HdrDecoder::new_nonstrict(Cursor::new(data)).is_err());
    }
}

480
vendor/image/src/codecs/hdr/encoder.rs vendored Normal file
View File

@@ -0,0 +1,480 @@
use std::cmp::Ordering;
use std::io::{Result, Write};
use crate::codecs::hdr::{rgbe8, Rgbe8Pixel, SIGNATURE};
use crate::color::Rgb;
use crate::error::{ImageResult, UnsupportedError, UnsupportedErrorKind};
use crate::{ExtendedColorType, ImageEncoder, ImageError, ImageFormat};
/// Radiance HDR encoder
pub struct HdrEncoder<W: Write> {
    /// Destination for the encoded HDR byte stream.
    w: W,
}
impl<W: Write> ImageEncoder for HdrEncoder<W> {
    /// Writes `unaligned_bytes` (raw `Rgb32F` samples) as a Radiance HDR image.
    ///
    /// Only `ExtendedColorType::Rgb32F` is accepted; any other color type
    /// yields `ImageError::Unsupported`.
    fn write_image(
        self,
        unaligned_bytes: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        match color_type {
            ExtendedColorType::Rgb32F => {
                let bytes_per_pixel = color_type.bits_per_pixel() as usize / 8;
                // `pod_read_unaligned` tolerates arbitrary alignment of the input slice.
                let rgbe_pixels = unaligned_bytes
                    .chunks_exact(bytes_per_pixel)
                    .map(|bytes| to_rgbe8(Rgb::<f32>(bytemuck::pod_read_unaligned(bytes))));
                // the length will be checked inside encode_pixels
                self.encode_pixels(rgbe_pixels, width as usize, height as usize)
            }
            _ => Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Hdr.into(),
                    UnsupportedErrorKind::Color(color_type),
                ),
            )),
        }
    }
}
impl<W: Write> HdrEncoder<W> {
    /// Creates encoder
    pub fn new(w: W) -> HdrEncoder<W> {
        HdrEncoder { w }
    }
    /// Encodes the image ```rgb```
    /// that has dimensions ```width``` and ```height```
    pub fn encode(self, rgb: &[Rgb<f32>], width: usize, height: usize) -> ImageResult<()> {
        self.encode_pixels(rgb.iter().map(|&rgb| to_rgbe8(rgb)), width, height)
    }
    /// Encodes the image ```flattened_rgbe_pixels```
    /// that has dimensions ```width``` and ```height```.
    /// The callback must return the color for the given flattened index of the pixel (row major).
    ///
    /// Writes the text header, then each scanline either flat (4 bytes per
    /// pixel) or with the "new" per-component RLE scheme, depending on width.
    ///
    /// # Panics
    /// Panics when the iterator yields fewer than `width * height` pixels.
    fn encode_pixels(
        mut self,
        mut flattened_rgbe_pixels: impl ExactSizeIterator<Item = Rgbe8Pixel>,
        width: usize,
        height: usize,
    ) -> ImageResult<()> {
        assert!(
            flattened_rgbe_pixels.len() >= width * height,
            "not enough pixels provided"
        ); // bonus: this might elide some bounds checks
        let w = &mut self.w;
        w.write_all(SIGNATURE)?;
        w.write_all(b"\n")?;
        w.write_all(b"# Rust HDR encoder\n")?;
        w.write_all(b"FORMAT=32-bit_rle_rgbe\n\n")?;
        w.write_all(format!("-Y {height} +X {width}\n").as_bytes())?;
        // New-style RLE scanlines are only emitted for widths 8..=32768
        // (see branch below); everything else is written uncompressed.
        if !(8..=32_768).contains(&width) {
            for pixel in flattened_rgbe_pixels {
                write_rgbe8(w, pixel)?;
            }
        } else {
            // new RLE marker contains scanline width
            let marker = rgbe8(2, 2, (width / 256) as u8, (width % 256) as u8);
            // buffers for encoded pixels
            let mut bufr = vec![0; width];
            let mut bufg = vec![0; width];
            let mut bufb = vec![0; width];
            let mut bufe = vec![0; width];
            let mut rle_buf = vec![0; width];
            for _scanline_index in 0..height {
                assert!(flattened_rgbe_pixels.len() >= width); // may reduce the bound checks
                // De-interleave the scanline into one buffer per component;
                // each component stream is RLE-compressed separately below.
                for ((((r, g), b), e), pixel) in bufr
                    .iter_mut()
                    .zip(bufg.iter_mut())
                    .zip(bufb.iter_mut())
                    .zip(bufe.iter_mut())
                    .zip(&mut flattened_rgbe_pixels)
                {
                    *r = pixel.c[0];
                    *g = pixel.c[1];
                    *b = pixel.c[2];
                    *e = pixel.e;
                }
                write_rgbe8(w, marker)?; // New RLE encoding marker
                rle_buf.clear();
                rle_compress(&bufr[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufg[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufb[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
                rle_buf.clear();
                rle_compress(&bufe[..], &mut rle_buf);
                w.write_all(&rle_buf[..])?;
            }
        }
        Ok(())
    }
}
/// A chunk classified for RLE encoding: either a run of one repeated byte
/// or a stretch of literal (non-run) data addressed by its start index.
#[derive(Debug, PartialEq, Eq)]
enum RunOrNot {
    /// Run(repeated byte, run length)
    Run(u8, usize),
    /// Norun(start index into the source slice, length)
    Norun(usize, usize),
}
use self::RunOrNot::{Norun, Run};
/// Longest run one RLE packet can hold (length byte is 128 + len).
const RUN_MAX_LEN: usize = 127;
/// Longest literal stretch one packet can hold (length byte is len).
const NORUN_MAX_LEN: usize = 128;
/// Scans a byte slice into raw `Run`/`Norun` chunks (no literal merging;
/// see `NorunCombineIterator` for that).
struct RunIterator<'a> {
    /// Source bytes being scanned.
    data: &'a [u8],
    /// Index of the next unconsumed byte.
    curidx: usize,
}
impl<'a> RunIterator<'a> {
    fn new(data: &'a [u8]) -> RunIterator<'a> {
        RunIterator { data, curidx: 0 }
    }
}
impl Iterator for RunIterator<'_> {
    type Item = RunOrNot;

    /// Yields the next chunk: a `Run` for 3 or more repeats of one byte
    /// (capped at `RUN_MAX_LEN`), otherwise a 1-2 byte `Norun`.
    fn next(&mut self) -> Option<Self::Item> {
        let remaining = self.data.get(self.curidx..)?;
        let &first = remaining.first()?;
        // Length of the leading run of `first`, capped at RUN_MAX_LEN.
        let run_len = remaining
            .iter()
            .take(RUN_MAX_LEN)
            .take_while(|&&b| b == first)
            .count();
        let start = self.curidx;
        self.curidx += run_len;
        // Runs shorter than 3 bytes are cheaper as literals.
        if run_len > 2 {
            Some(Run(first, run_len))
        } else {
            Some(Norun(start, run_len))
        }
    }
}
/// Adapter over `RunIterator` that merges adjacent `Norun` chunks up to
/// `NORUN_MAX_LEN`, producing the final packet stream for `rle_compress`.
struct NorunCombineIterator<'a> {
    /// Underlying raw chunk scanner.
    runiter: RunIterator<'a>,
    /// Chunk carried over from the previous step (pending combination or emit).
    prev: Option<RunOrNot>,
}
impl<'a> NorunCombineIterator<'a> {
    fn new(data: &'a [u8]) -> NorunCombineIterator<'a> {
        NorunCombineIterator {
            runiter: RunIterator::new(data),
            prev: None,
        }
    }
}
// Combines sequential noruns produced by RunIterator
// (RunIterator caps noruns at 2 bytes; this adapter grows them up to
// NORUN_MAX_LEN, splitting when a combined norun would exceed the cap).
impl Iterator for NorunCombineIterator<'_> {
    type Item = RunOrNot;
    fn next(&mut self) -> Option<Self::Item> {
        loop {
            match self.prev.take() {
                Some(Run(c, len)) => {
                    // Just return stored run
                    return Some(Run(c, len));
                }
                Some(Norun(idx, len)) => {
                    // Let's see if we need to continue norun
                    match self.runiter.next() {
                        Some(Norun(_, len1)) => {
                            // norun continues
                            let clen = len + len1; // combined length
                            match clen.cmp(&NORUN_MAX_LEN) {
                                Ordering::Equal => return Some(Norun(idx, clen)),
                                Ordering::Greater => {
                                    // combined norun exceeds maximum length. store extra part of norun
                                    self.prev =
                                        Some(Norun(idx + NORUN_MAX_LEN, clen - NORUN_MAX_LEN));
                                    // then return maximal norun
                                    return Some(Norun(idx, NORUN_MAX_LEN));
                                }
                                Ordering::Less => {
                                    // len + len1 < NORUN_MAX_LEN
                                    self.prev = Some(Norun(idx, len + len1));
                                    // combine and continue loop
                                }
                            }
                        }
                        Some(Run(c, len1)) => {
                            // Run encountered. Store it
                            self.prev = Some(Run(c, len1));
                            return Some(Norun(idx, len)); // and return combined norun
                        }
                        None => {
                            // End of sequence
                            return Some(Norun(idx, len)); // return combined norun
                        }
                    }
                } // End match self.prev.take() == Some(NoRun())
                None => {
                    // No norun to combine
                    match self.runiter.next() {
                        Some(Norun(idx, len)) => {
                            self.prev = Some(Norun(idx, len));
                            // store for combine and continue the loop
                        }
                        Some(Run(c, len)) => {
                            // Some run. Just return it
                            return Some(Run(c, len));
                        }
                        None => {
                            // That's all, folks
                            return None;
                        }
                    }
                } // End match self.prev.take() == None
            } // End match
        } // End loop
    }
}
// Appends RLE compressed ```data``` to ```rle```
fn rle_compress(data: &[u8], rle: &mut Vec<u8>) {
rle.clear();
if data.is_empty() {
rle.push(0); // Technically correct. It means read next 0 bytes.
return;
}
// Task: split data into chunks of repeating (max 127) and non-repeating bytes (max 128)
// Prepend non-repeating chunk with its length
// Replace repeating byte with (run length + 128) and the byte
for rnr in NorunCombineIterator::new(data) {
match rnr {
Run(c, len) => {
assert!(len <= 127);
rle.push(128u8 + len as u8);
rle.push(c);
}
Norun(idx, len) => {
assert!(len <= 128);
rle.push(len as u8);
rle.extend_from_slice(&data[idx..idx + len]);
}
}
}
}
// Writes one RGBE pixel (three mantissa bytes plus the shared exponent) to `w`.
fn write_rgbe8<W: Write>(w: &mut W, v: Rgbe8Pixel) -> Result<()> {
    w.write_all(&[v.c[0], v.c[1], v.c[2], v.e])
}
/// Converts ```Rgb<f32>``` into ```Rgbe8Pixel``` (shared-exponent encoding).
pub(crate) fn to_rgbe8(pix: Rgb<f32>) -> Rgbe8Pixel {
    let [r, g, b] = pix.0;
    // The shared exponent is derived from the largest channel.
    let mx = f32::max(r, f32::max(g, b));
    if mx <= 0.0 {
        // Non-positive maximum (all-zero or negative input) encodes as black.
        return Rgbe8Pixel { c: [0, 0, 0], e: 0 };
    }
    // Exponent such that mx / 2^exp lies in [0.5, 1); scaled by 256 the
    // mantissas then fit a byte. (f32::frexp is still unstable.)
    let exp = mx.log2().floor() as i32 + 1;
    let mul = f32::powi(2.0, exp);
    let quantize = |channel: f32| f32::trunc(channel / mul * 256.0) as u8;
    Rgbe8Pixel {
        c: [quantize(r), quantize(g), quantize(b)],
        // Exponent byte is biased by 128.
        e: (exp + 128) as u8,
    }
}
#[test]
fn to_rgbe8_test() {
    use crate::codecs::hdr::rgbe8;
    // Exact round-trip cases first: encode -> decode -> encode must be lossless.
    let test_cases = vec![rgbe8(0, 0, 0, 0), rgbe8(1, 1, 128, 128)];
    for &pix in &test_cases {
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
    }
    for mc in 128..255 {
        // `128..255` excludes mantissa 255; widening to `..=255` would change
        // test coverage, so the original bound is kept.
        let pix = rgbe8(mc, mc, mc, 100);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(mc, 0, mc, 130);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(0, 0, mc, 140);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(1, 0, mc, 150);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        let pix = rgbe8(1, mc, 10, 128);
        assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        for c in 0..255 {
            // Radiance HDR seems to be pre IEEE 754.
            // exponent can be -128 (represented as 0u8), so some colors cannot be represented in normalized f32
            // Let's exclude exponent value of -128 (0u8) from testing
            let pix = rgbe8(1, mc, c, if c == 0 { 1 } else { c });
            assert_eq!(pix, to_rgbe8(pix.to_hdr()));
        }
    }
    // Relative error metric: largest channel difference over largest value.
    fn relative_dist(a: Rgb<f32>, b: Rgb<f32>) -> f32 {
        // maximal difference divided by maximal value
        let max_diff =
            a.0.iter()
                .zip(b.0.iter())
                .fold(0.0, |diff, (&a, &b)| f32::max(diff, (a - b).abs()));
        let max_val =
            a.0.iter()
                .chain(b.0.iter())
                .fold(0.0, |maxv, &a| f32::max(maxv, a));
        if max_val == 0.0 {
            0.0
        } else {
            max_diff / max_val
        }
    }
    // Lossy round-trips across ~12 orders of magnitude must stay within the
    // 8-bit mantissa precision bound.
    let test_values = vec![
        0.000_001, 0.000_02, 0.000_3, 0.004, 0.05, 0.6, 7.0, 80.0, 900.0, 1_000.0, 20_000.0,
        300_000.0,
    ];
    for &r in &test_values {
        for &g in &test_values {
            for &b in &test_values {
                let c1 = Rgb([r, g, b]);
                let c2 = to_rgbe8(c1).to_hdr();
                let rel_dist = relative_dist(c1, c2);
                // Maximal value is normalized to the range 128..256, thus we have 1/128 precision
                assert!(
                    rel_dist <= 1.0 / 128.0,
                    "Relative distance ({rel_dist}) exceeds 1/128 for {c1:?} and {c2:?}"
                );
            }
        }
    }
}
#[test]
fn runiterator_test() {
    // Empty input yields no chunks.
    let data = [];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), None);
    // 1-2 equal bytes are literals (noruns), 3+ become runs.
    let data = [5];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 1)));
    assert_eq!(run_iter.next(), None);
    let data = [1, 1];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [0, 0, 0];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
    assert_eq!(run_iter.next(), None);
    let data = [0, 0, 1, 1];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
    assert_eq!(run_iter.next(), Some(Norun(2, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [0, 0, 0, 1, 1];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(0u8, 3)));
    assert_eq!(run_iter.next(), Some(Norun(3, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [1, 2, 2, 2];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 1)));
    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
    assert_eq!(run_iter.next(), None);
    let data = [1, 1, 2, 2, 2];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Norun(0, 2)));
    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
    assert_eq!(run_iter.next(), None);
    // Runs are capped at RUN_MAX_LEN (127); the remainder becomes new chunks.
    let data = [2; 128];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
    assert_eq!(run_iter.next(), Some(Norun(127, 1)));
    assert_eq!(run_iter.next(), None);
    let data = [2; 129];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
    assert_eq!(run_iter.next(), Some(Norun(127, 2)));
    assert_eq!(run_iter.next(), None);
    let data = [2; 130];
    let mut run_iter = RunIterator::new(&data[..]);
    assert_eq!(run_iter.next(), Some(Run(2u8, 127)));
    assert_eq!(run_iter.next(), Some(Run(2u8, 3)));
    assert_eq!(run_iter.next(), None);
}
#[test]
fn noruncombine_test() {
    // Helper: concatenates two vectors.
    fn a<T>(mut v: Vec<T>, mut other: Vec<T>) -> Vec<T> {
        v.append(&mut other);
        v
    }
    let v = [];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), None);
    let v = [1];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 1)));
    assert_eq!(rsi.next(), None);
    let v = [2, 2];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 2)));
    assert_eq!(rsi.next(), None);
    let v = [3, 3, 3];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Run(3, 3)));
    assert_eq!(rsi.next(), None);
    let v = [4, 4, 3, 3, 3];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 2)));
    assert_eq!(rsi.next(), Some(Run(3, 3)));
    assert_eq!(rsi.next(), None);
    // Long runs are emitted in RUN_MAX_LEN (127) pieces.
    let v = vec![40; 400];
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Run(40, 127)));
    assert_eq!(rsi.next(), Some(Run(40, 127)));
    assert_eq!(rsi.next(), Some(Run(40, 127)));
    assert_eq!(rsi.next(), Some(Run(40, 19)));
    assert_eq!(rsi.next(), None);
    let v = a(a(vec![5; 3], vec![6; 129]), vec![7, 3, 7, 10, 255]);
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Run(5, 3)));
    assert_eq!(rsi.next(), Some(Run(6, 127)));
    assert_eq!(rsi.next(), Some(Norun(130, 7)));
    assert_eq!(rsi.next(), None);
    let v = a(a(vec![5; 2], vec![6; 129]), vec![7, 3, 7, 7, 255]);
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 2)));
    assert_eq!(rsi.next(), Some(Run(6, 127)));
    assert_eq!(rsi.next(), Some(Norun(129, 7)));
    assert_eq!(rsi.next(), None);
    // Adjacent short noruns are merged up to NORUN_MAX_LEN (128), then split.
    let v: Vec<_> = std::iter::repeat(())
        .flat_map(|()| 0..2)
        .take(257)
        .collect();
    let mut rsi = NorunCombineIterator::new(&v[..]);
    assert_eq!(rsi.next(), Some(Norun(0, 128)));
    assert_eq!(rsi.next(), Some(Norun(128, 128)));
    assert_eq!(rsi.next(), Some(Norun(256, 1)));
    assert_eq!(rsi.next(), None);
}

14
vendor/image/src/codecs/hdr/mod.rs vendored Normal file
View File

@@ -0,0 +1,14 @@
//! Decoding of Radiance HDR Images
//!
//! A decoder for Radiance HDR images
//!
//! # Related Links
//!
//! * <http://radsite.lbl.gov/radiance/refer/filefmts.pdf>
//! * <http://www.graphics.cornell.edu/~bjw/rgbe/rgbe.c>
mod decoder;
mod encoder;
pub use self::decoder::*;
pub use self::encoder::*;

446
vendor/image/src/codecs/ico/decoder.rs vendored Normal file
View File

@@ -0,0 +1,446 @@
use byteorder_lite::{LittleEndian, ReadBytesExt};
use std::io::{BufRead, Read, Seek, SeekFrom};
use std::{error, fmt};
use crate::color::ColorType;
use crate::error::{
DecodingError, ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind,
};
use crate::{ImageDecoder, ImageFormat};
use self::InnerDecoder::*;
use crate::codecs::bmp::BmpDecoder;
use crate::codecs::png::{PngDecoder, PNG_SIGNATURE};
/// Errors that can occur during decoding and parsing an ICO image or one of its enclosed images.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
enum DecoderError {
    /// The ICO directory is empty
    NoEntries,
    /// The number of color planes (0 or 1), or the horizontal coordinate of the hotspot for CUR files too big.
    IcoEntryTooManyPlanesOrHotspot,
    /// The bit depth (may be 0 meaning unspecified), or the vertical coordinate of the hotspot for CUR files too big.
    IcoEntryTooManyBitsPerPixelOrHotspot,
    /// The entry is in PNG format and specified a length that is shorter than PNG header.
    PngShorterThanHeader,
    /// The enclosed PNG is not in RGBA, which is invalid: <https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473>/.
    PngNotRgba,
    /// The entry is in BMP format and specified a data size that is not correct for the image and optional mask data.
    InvalidDataSize,
    /// The dimensions specified by the entry does not match the dimensions in the header of the enclosed image.
    ImageEntryDimensionMismatch {
        /// The mismatched subimage's type
        format: IcoEntryImageFormat,
        /// The dimensions specified by the entry
        entry: (u16, u16),
        /// The dimensions of the image itself
        image: (u32, u32),
    },
}
impl fmt::Display for DecoderError {
    /// Human-readable description for each decoding failure.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            DecoderError::NoEntries => write!(f, "ICO directory contains no image"),
            DecoderError::IcoEntryTooManyPlanesOrHotspot => write!(
                f,
                "ICO image entry has too many color planes or too large hotspot value"
            ),
            DecoderError::IcoEntryTooManyBitsPerPixelOrHotspot => write!(
                f,
                "ICO image entry has too many bits per pixel or too large hotspot value"
            ),
            DecoderError::PngShorterThanHeader => {
                write!(f, "Entry specified a length that is shorter than PNG header!")
            }
            DecoderError::PngNotRgba => write!(f, "The PNG is not in RGBA format!"),
            DecoderError::InvalidDataSize => {
                write!(f, "ICO image data size did not match expected size")
            }
            DecoderError::ImageEntryDimensionMismatch {
                format,
                entry,
                image,
            } => write!(
                f,
                "Entry{entry:?} and {format}{image:?} dimensions do not match!"
            ),
        }
    }
}
impl From<DecoderError> for ImageError {
    // Wraps the format-specific error into the crate-wide error type.
    fn from(e: DecoderError) -> ImageError {
        ImageError::Decoding(DecodingError::new(ImageFormat::Ico.into(), e))
    }
}
// Display + Debug suffice; there is no inner source() to forward.
impl error::Error for DecoderError {}
/// The image formats an ICO may contain
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
enum IcoEntryImageFormat {
    /// PNG in ARGB
    Png,
    /// BMP with optional alpha mask
    Bmp,
}
impl fmt::Display for IcoEntryImageFormat {
    // Short format name, used in error messages.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            IcoEntryImageFormat::Png => "PNG",
            IcoEntryImageFormat::Bmp => "BMP",
        })
    }
}
impl From<IcoEntryImageFormat> for ImageFormat {
    // Maps the inner payload format to the crate-wide format enum.
    fn from(val: IcoEntryImageFormat) -> Self {
        match val {
            IcoEntryImageFormat::Png => ImageFormat::Png,
            IcoEntryImageFormat::Bmp => ImageFormat::Bmp,
        }
    }
}
/// An ico decoder
pub struct IcoDecoder<R: BufRead + Seek> {
    /// Directory entry selected by `best_entry` (highest bit depth, then area).
    selected_entry: DirEntry,
    /// Decoder for the image format embedded in that entry.
    inner_decoder: InnerDecoder<R>,
}
/// Decoder for the payload format actually stored in the chosen ICO entry.
enum InnerDecoder<R: BufRead + Seek> {
    Bmp(BmpDecoder<R>),
    Png(Box<PngDecoder<R>>),
}
/// One 16-byte ICONDIRENTRY from the ICO directory.
#[derive(Clone, Copy, Default)]
struct DirEntry {
    // Stored width byte; 0 encodes 256 (see `real_width`).
    width: u8,
    // Stored height byte; 0 encodes 256 (see `real_height`).
    height: u8,
    // We ignore some header fields as they will be replicated in the PNG, BMP and they are not
    // necessary for determining the best_entry.
    #[allow(unused)]
    color_count: u8,
    // Wikipedia has this to say:
    // Although Microsoft's technical documentation states that this value must be zero, the icon
    // encoder built into .NET (System.Drawing.Icon.Save) sets this value to 255. It appears that
    // the operating system ignores this value altogether.
    #[allow(unused)]
    reserved: u8,
    // We ignore some header fields as they will be replicated in the PNG, BMP and they are not
    // necessary for determining the best_entry.
    #[allow(unused)]
    num_color_planes: u16,
    bits_per_pixel: u16,
    // Payload length and absolute offset within the ICO file, in bytes.
    image_length: u32,
    image_offset: u32,
}
impl<R: BufRead + Seek> IcoDecoder<R> {
    /// Create a new decoder that decodes from the stream ```r```
    pub fn new(mut r: R) -> ImageResult<IcoDecoder<R>> {
        // Pick the best directory entry, then hand the stream to the decoder
        // matching the image format that entry embeds.
        let selected_entry = best_entry(read_entries(&mut r)?)?;
        let inner_decoder = selected_entry.decoder(r)?;
        Ok(IcoDecoder {
            selected_entry,
            inner_decoder,
        })
    }
}
/// Reads the ICONDIR header and all directory entries that follow it.
fn read_entries<R: Read>(r: &mut R) -> ImageResult<Vec<DirEntry>> {
    // ICONDIR: reserved word, resource type word, entry count.
    let _reserved = r.read_u16::<LittleEndian>()?;
    let _type = r.read_u16::<LittleEndian>()?;
    let count = r.read_u16::<LittleEndian>()?;
    // `count` is a u16, so this capacity is bounded by 65535.
    let mut entries = Vec::with_capacity(usize::from(count));
    for _ in 0..count {
        entries.push(read_entry(r)?);
    }
    Ok(entries)
}
/// Reads a single 16-byte ICONDIRENTRY, validating the plane/depth fields.
fn read_entry<R: Read>(r: &mut R) -> ImageResult<DirEntry> {
    Ok(DirEntry {
        width: r.read_u8()?,
        height: r.read_u8()?,
        color_count: r.read_u8()?,
        reserved: r.read_u8()?,
        num_color_planes: {
            // This may be either the number of color planes (0 or 1), or the horizontal coordinate
            // of the hotspot for CUR files.
            let num = r.read_u16::<LittleEndian>()?;
            if num > 256 {
                return Err(DecoderError::IcoEntryTooManyPlanesOrHotspot.into());
            }
            num
        },
        bits_per_pixel: {
            // This may be either the bit depth (may be 0 meaning unspecified),
            // or the vertical coordinate of the hotspot for CUR files.
            let num = r.read_u16::<LittleEndian>()?;
            if num > 256 {
                return Err(DecoderError::IcoEntryTooManyBitsPerPixelOrHotspot.into());
            }
            num
        },
        image_length: r.read_u32::<LittleEndian>()?,
        image_offset: r.read_u32::<LittleEndian>()?,
    })
}
/// Find the entry with the highest (color depth, size).
///
/// Uses strictly-greater comparison while scanning, so ties keep the
/// earliest candidate examined (the popped last entry, then front-to-back).
fn best_entry(mut entries: Vec<DirEntry>) -> ImageResult<DirEntry> {
    // Ranking key: bit depth first, then pixel area.
    let score = |e: &DirEntry| {
        (
            e.bits_per_pixel,
            u32::from(e.real_width()) * u32::from(e.real_height()),
        )
    };
    let mut best = entries.pop().ok_or(DecoderError::NoEntries)?;
    let mut best_score = score(&best);
    for entry in entries {
        let entry_score = score(&entry);
        if entry_score > best_score {
            best_score = entry_score;
            best = entry;
        }
    }
    Ok(best)
}
impl DirEntry {
    /// Entry width in pixels; the stored byte value 0 encodes 256.
    fn real_width(&self) -> u16 {
        if self.width == 0 {
            256
        } else {
            self.width.into()
        }
    }
    /// Entry height in pixels; the stored byte value 0 encodes 256.
    fn real_height(&self) -> u16 {
        if self.height == 0 {
            256
        } else {
            self.height.into()
        }
    }
    /// Whether the embedded image's dimensions agree with this entry.
    /// Entries cannot represent more than 256, hence the clamp.
    fn matches_dimensions(&self, width: u32, height: u32) -> bool {
        u32::from(self.real_width()) == width.min(256)
            && u32::from(self.real_height()) == height.min(256)
    }
    /// Positions `r` at the first byte of this entry's image data.
    fn seek_to_start<R: Read + Seek>(&self, r: &mut R) -> ImageResult<()> {
        r.seek(SeekFrom::Start(u64::from(self.image_offset)))?;
        Ok(())
    }
    /// Sniffs the first 8 payload bytes for the PNG signature.
    fn is_png<R: Read + Seek>(&self, r: &mut R) -> ImageResult<bool> {
        self.seek_to_start(r)?;
        let mut signature = [0u8; 8];
        r.read_exact(&mut signature)?;
        Ok(signature == PNG_SIGNATURE)
    }
    /// Builds the inner decoder matching the entry's payload format,
    /// leaving the stream rewound to the start of the payload.
    fn decoder<R: BufRead + Seek>(&self, mut r: R) -> ImageResult<InnerDecoder<R>> {
        let png = self.is_png(&mut r)?;
        self.seek_to_start(&mut r)?;
        Ok(if png {
            Png(Box::new(PngDecoder::new(r)?))
        } else {
            Bmp(BmpDecoder::new_with_ico_format(r)?)
        })
    }
}
impl<R: BufRead + Seek> ImageDecoder for IcoDecoder<R> {
    /// Dimensions reported by the embedded PNG/BMP (not the directory entry).
    fn dimensions(&self) -> (u32, u32) {
        match self.inner_decoder {
            Bmp(ref decoder) => decoder.dimensions(),
            Png(ref decoder) => decoder.dimensions(),
        }
    }
    fn color_type(&self) -> ColorType {
        match self.inner_decoder {
            Bmp(ref decoder) => decoder.color_type(),
            Png(ref decoder) => decoder.color_type(),
        }
    }
    /// Decodes the selected entry into `buf`, validating that the embedded
    /// image matches its directory entry and, for BMP payloads, applying the
    /// optional AND transparency mask that may follow the pixel data.
    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        match self.inner_decoder {
            Png(decoder) => {
                if self.selected_entry.image_length < PNG_SIGNATURE.len() as u32 {
                    return Err(DecoderError::PngShorterThanHeader.into());
                }
                // Check if the image dimensions match the ones in the image data.
                let (width, height) = decoder.dimensions();
                if !self.selected_entry.matches_dimensions(width, height) {
                    return Err(DecoderError::ImageEntryDimensionMismatch {
                        format: IcoEntryImageFormat::Png,
                        entry: (
                            self.selected_entry.real_width(),
                            self.selected_entry.real_height(),
                        ),
                        image: (width, height),
                    }
                    .into());
                }
                // Embedded PNG images can only be of the 32BPP RGBA format.
                // https://blogs.msdn.microsoft.com/oldnewthing/20101022-00/?p=12473/
                if decoder.color_type() != ColorType::Rgba8 {
                    return Err(DecoderError::PngNotRgba.into());
                }
                decoder.read_image(buf)
            }
            Bmp(mut decoder) => {
                let (width, height) = decoder.dimensions();
                if !self.selected_entry.matches_dimensions(width, height) {
                    return Err(DecoderError::ImageEntryDimensionMismatch {
                        format: IcoEntryImageFormat::Bmp,
                        entry: (
                            self.selected_entry.real_width(),
                            self.selected_entry.real_height(),
                        ),
                        image: (width, height),
                    }
                    .into());
                }
                // The ICO decoder needs an alpha channel to apply the AND mask.
                if decoder.color_type() != ColorType::Rgba8 {
                    return Err(ImageError::Unsupported(
                        UnsupportedError::from_format_and_kind(
                            ImageFormat::Bmp.into(),
                            UnsupportedErrorKind::Color(decoder.color_type().into()),
                        ),
                    ));
                }
                decoder.read_image_data(buf)?;
                let r = decoder.reader();
                let image_end = r.stream_position()?;
                let data_end = u64::from(self.selected_entry.image_offset)
                    + u64::from(self.selected_entry.image_length);
                // Mask rows are 1 bit per pixel, padded to a 32-bit boundary.
                let mask_row_bytes = width.div_ceil(32) * 4;
                let mask_length = u64::from(mask_row_bytes) * u64::from(height);
                // data_end should be image_end + the mask length (mask_row_bytes * height).
                // According to
                // https://devblogs.microsoft.com/oldnewthing/20101021-00/?p=12483
                // the mask is required, but according to Wikipedia
                // https://en.wikipedia.org/wiki/ICO_(file_format)
                // the mask is not required. Unfortunately, Wikipedia does not have a citation
                // for that claim, so we can't be sure which is correct.
                if data_end >= image_end + mask_length {
                    // If there's an AND mask following the image, read and apply it.
                    for y in 0..height {
                        let mut x = 0;
                        for _ in 0..mask_row_bytes {
                            // Apply the bits of each byte until we reach the end of the row.
                            let mask_byte = r.read_u8()?;
                            for bit in (0..8).rev() {
                                if x >= width {
                                    break;
                                }
                                if mask_byte & (1 << bit) != 0 {
                                    // Set alpha channel to transparent.
                                    // Rows are stored bottom-up, hence the flipped row index.
                                    buf[((height - y - 1) * width + x) as usize * 4 + 3] = 0;
                                }
                                x += 1;
                            }
                        }
                    }
                    Ok(())
                } else if data_end == image_end {
                    // accept images with no mask data
                    Ok(())
                } else {
                    Err(DecoderError::InvalidDataSize.into())
                }
            }
        }
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Test if BMP images without alpha channel inside ICOs don't panic.
    // Because the test data is invalid decoding should produce an error.
    // (Fuzzer-derived fixture; the exact bytes matter — do not reformat.)
    #[test]
    fn bmp_16_with_missing_alpha_channel() {
        let data = vec![
            0x00, 0x00, 0x01, 0x00, 0x01, 0x00, 0x0e, 0x04, 0xc3, 0x7e, 0x00, 0x00, 0x00, 0x00,
            0x7c, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x01, 0x00,
            0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x8f, 0xf6, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x20, 0x66, 0x74, 0x83, 0x70, 0x61, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
            0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xeb, 0x00, 0x9b, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x47, 0x0d,
            0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x62, 0x49, 0x48, 0x44, 0x52, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x0c,
            0x00, 0x00, 0x00, 0xc3, 0x3f, 0x94, 0x61, 0xaa, 0x17, 0x4d, 0x8d, 0x79, 0x1d, 0x8b,
            0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2e, 0x28, 0x40, 0xe5, 0x9f,
            0x4b, 0x4d, 0xe9, 0x87, 0xd3, 0xda, 0xd6, 0x89, 0x81, 0xc5, 0xa4, 0xa1, 0x60, 0x98,
            0x31, 0xc7, 0x1d, 0xb6, 0x8f, 0x20, 0xc8, 0x3e, 0xee, 0xd8, 0xe4, 0x8f, 0xee, 0x7b,
            0x48, 0x9b, 0x88, 0x25, 0x13, 0xda, 0xa4, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x40,
            0x16, 0x01, 0xff, 0xff, 0xff, 0xff, 0xe9, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0xa3, 0x66, 0x64, 0x41, 0x54, 0xa3, 0xa3, 0x00, 0x00, 0x00, 0xb8, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x66, 0x64, 0x41, 0x54, 0xa3, 0xa3,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xf6, 0xff, 0xff,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x66, 0x74, 0x83, 0x70, 0x61, 0x76,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
            0xeb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x62, 0x49,
            0x48, 0x44, 0x52, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00,
            0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xc8, 0x00, 0x02, 0x0c, 0x00, 0xff, 0xff, 0xc6,
            0x84, 0x00, 0x2a, 0x75, 0x03, 0xa3, 0x05, 0xfb, 0xe1, 0x6e, 0xe8, 0x27, 0xd6, 0xd3,
            0x96, 0xc1, 0xe4, 0x30, 0x0c, 0x05, 0xb9, 0xa3, 0x8b, 0x29, 0xda, 0xa4, 0xf1, 0x4d,
            0xf3, 0xb2, 0x98, 0x2b, 0xe6, 0x93, 0x07, 0xf9, 0xca, 0x2b, 0xc2, 0x39, 0x20, 0xba,
            0x7c, 0xa0, 0xb1, 0x43, 0xe6, 0xf9, 0xdc, 0xd1, 0xc2, 0x52, 0xdc, 0x41, 0xc1, 0x2f,
            0x29, 0xf7, 0x46, 0x32, 0xda, 0x1b, 0x72, 0x8c, 0xe6, 0x2b, 0x01, 0xe5, 0x49, 0x21,
            0x89, 0x89, 0xe4, 0x3d, 0xa1, 0xdb, 0x3b, 0x4a, 0x0b, 0x52, 0x86, 0x52, 0x33, 0x9d,
            0xb2, 0xcf, 0x4a, 0x86, 0x53, 0xd7, 0xa9, 0x4b, 0xaf, 0x62, 0x06, 0x49, 0x53, 0x00,
            0xc3, 0x3f, 0x94, 0x61, 0xaa, 0x17, 0x4d, 0x8d, 0x79, 0x1d, 0x8b, 0x10, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2e, 0x28, 0x40, 0xe5, 0x9f, 0x4b, 0x4d, 0xe9,
            0x87, 0xd3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0xc5, 0x00,
            0x02, 0x00, 0x00, 0x00, 0x06, 0x00, 0x0b, 0x00, 0x50, 0x31, 0x00, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x76, 0x76, 0x01, 0x00, 0x00, 0x00, 0x76, 0x00,
            0x00, 0x23, 0x3f, 0x52, 0x41, 0x44, 0x49, 0x41, 0x4e, 0x43, 0x45, 0x61, 0x50, 0x35,
            0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x4d, 0x47, 0x49, 0x46, 0x38, 0x37, 0x61, 0x05,
            0x50, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x37, 0x61,
        ];
        let decoder = IcoDecoder::new(std::io::Cursor::new(&data)).unwrap();
        let mut buf = vec![0; usize::try_from(decoder.total_bytes()).unwrap()];
        assert!(decoder.read_image(&mut buf).is_err());
    }
}

189
vendor/image/src/codecs/ico/encoder.rs vendored Normal file
View File

@@ -0,0 +1,189 @@
use byteorder_lite::{LittleEndian, WriteBytesExt};
use std::borrow::Cow;
use std::io::{self, Write};
use crate::codecs::png::PngEncoder;
use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind};
use crate::{ExtendedColorType, ImageEncoder};
// Enum value indicating an ICO image (as opposed to a CUR image):
const ICO_IMAGE_TYPE: u16 = 1;
// The length of an ICO file ICONDIR structure, in bytes:
const ICO_ICONDIR_SIZE: u32 = 6;
// The length of an ICO file DIRENTRY structure, in bytes:
const ICO_DIRENTRY_SIZE: u32 = 16;
/// ICO encoder
///
/// Writes one or more [`IcoFrame`]s into the ICO container format:
/// an ICONDIR header, a DIRENTRY table, then the image payloads.
pub struct IcoEncoder<W: Write> {
    // Destination for the encoded ICO bytes.
    w: W,
}
/// An ICO image entry
pub struct IcoFrame<'a> {
    // Pre-encoded PNG or BMP payload, written verbatim into the container.
    encoded_image: Cow<'a, [u8]>,
    // Stored as `0 => 256, n => n` (the on-disk DIRENTRY byte encoding,
    // where a zero byte means the maximum dimension of 256).
    width: u8,
    // Stored as `0 => 256, n => n`
    height: u8,
    // Color type of the pixel data; used for the DIRENTRY bits-per-pixel field.
    color_type: ExtendedColorType,
}
impl<'a> IcoFrame<'a> {
    /// Construct a new `IcoFrame` using a pre-encoded PNG or BMP
    ///
    /// The `width` and `height` must be between 1 and 256 (inclusive).
    pub fn with_encoded(
        encoded_image: impl Into<Cow<'a, [u8]>>,
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<Self> {
        // Both dimensions must fit the DIRENTRY encoding (1..=256).
        Self::check_dimension(width, "width")?;
        Self::check_dimension(height, "height")?;
        Ok(Self {
            encoded_image: encoded_image.into(),
            // `256 as u8` is 0, which is exactly the on-disk encoding
            // for the maximum dimension, so this narrowing is intended.
            width: width as u8,
            height: height as u8,
            color_type,
        })
    }

    // Validates that a single dimension lies in `1..=256`, producing the
    // parameter error documented on the public constructors.
    fn check_dimension(value: u32, name: &str) -> ImageResult<()> {
        if (1..=256).contains(&value) {
            Ok(())
        } else {
            Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::Generic(format!(
                    "the image {name} must be `1..=256`, instead {name} {value} was provided",
                )),
            )))
        }
    }

    /// Construct a new `IcoFrame` by encoding `buf` as a PNG
    ///
    /// The `width` and `height` must be between 1 and 256 (inclusive)
    pub fn as_png(
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<Self> {
        // Encode the raw pixels to PNG first, then wrap the bytes.
        let mut image_data: Vec<u8> = Vec::new();
        PngEncoder::new(&mut image_data).write_image(buf, width, height, color_type)?;
        Self::with_encoded(image_data, width, height, color_type)
    }
}
impl<W: Write> IcoEncoder<W> {
    /// Create a new encoder that writes its output to ```w```.
    pub fn new(w: W) -> IcoEncoder<W> {
        IcoEncoder { w }
    }

    /// Takes some [`IcoFrame`]s and encodes them into an ICO.
    ///
    /// `images` is a list of images, usually ordered by dimension, which
    /// must be between 1 and 65535 (inclusive) in length.
    pub fn encode_images(mut self, images: &[IcoFrame<'_>]) -> ImageResult<()> {
        // The image count is stored in a u16, and an empty ICO is invalid.
        if images.is_empty() || images.len() > usize::from(u16::MAX) {
            return Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::Generic(format!(
                    "the number of images must be `1..=u16::MAX`, instead {} images were provided",
                    images.len(),
                )),
            )));
        }

        // File layout: ICONDIR header, one DIRENTRY per image, then the
        // payloads back to back. The first payload therefore starts right
        // after the directory table.
        write_icondir(&mut self.w, images.len() as u16)?;
        let mut data_offset = ICO_ICONDIR_SIZE + ICO_DIRENTRY_SIZE * (images.len() as u32);
        for frame in images {
            let data_size = frame.encoded_image.len() as u32;
            write_direntry(
                &mut self.w,
                frame.width,
                frame.height,
                frame.color_type,
                data_offset,
                data_size,
            )?;
            data_offset += data_size;
        }
        for frame in images {
            self.w.write_all(&frame.encoded_image)?;
        }
        Ok(())
    }
}
impl<W: Write> ImageEncoder for IcoEncoder<W> {
    /// Write an ICO image with the specified width, height, and color type.
    ///
    /// For color types with 16-bit per channel or larger, the contents of `buf` should be in
    /// native endian.
    ///
    /// WARNING: In image 0.23.14 and earlier this method erroneously expected buf to be in big endian.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        // Caller contract: `buf` must be exactly the size implied by the
        // dimensions and color type; a mismatch is a programming error.
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        // Single-image ICO: encode the pixels as PNG and wrap them in one entry.
        let image = IcoFrame::as_png(buf, width, height, color_type)?;
        self.encode_images(&[image])
    }
}
fn write_icondir<W: Write>(w: &mut W, num_images: u16) -> io::Result<()> {
// Reserved field (must be zero):
w.write_u16::<LittleEndian>(0)?;
// Image type (ICO or CUR):
w.write_u16::<LittleEndian>(ICO_IMAGE_TYPE)?;
// Number of images in the file:
w.write_u16::<LittleEndian>(num_images)?;
Ok(())
}
fn write_direntry<W: Write>(
w: &mut W,
width: u8,
height: u8,
color: ExtendedColorType,
data_start: u32,
data_size: u32,
) -> io::Result<()> {
// Image dimensions:
w.write_u8(width)?;
w.write_u8(height)?;
// Number of colors in palette (or zero for no palette):
w.write_u8(0)?;
// Reserved field (must be zero):
w.write_u8(0)?;
// Color planes:
w.write_u16::<LittleEndian>(0)?;
// Bits per pixel:
w.write_u16::<LittleEndian>(color.bits_per_pixel())?;
// Image data size, in bytes:
w.write_u32::<LittleEndian>(data_size)?;
// Image data offset, in bytes:
w.write_u32::<LittleEndian>(data_start)?;
Ok(())
}

14
vendor/image/src/codecs/ico/mod.rs vendored Normal file
View File

@@ -0,0 +1,14 @@
//! Decoding and Encoding of ICO files
//!
//! A decoder and encoder for ICO (Windows Icon) image container files.
//!
//! # Related Links
//! * <https://msdn.microsoft.com/en-us/library/ms997538.aspx>
//! * <https://en.wikipedia.org/wiki/ICO_%28file_format%29>
pub use self::decoder::IcoDecoder;
#[allow(deprecated)]
pub use self::encoder::{IcoEncoder, IcoFrame};
mod decoder;
mod encoder;

210
vendor/image/src/codecs/jpeg/decoder.rs vendored Normal file
View File

@@ -0,0 +1,210 @@
use std::io::{BufRead, Seek};
use std::marker::PhantomData;
use crate::color::ColorType;
use crate::error::{
DecodingError, ImageError, ImageResult, LimitError, UnsupportedError, UnsupportedErrorKind,
};
use crate::metadata::Orientation;
use crate::{ImageDecoder, ImageFormat, Limits};
type ZuneColorSpace = zune_core::colorspace::ColorSpace;
/// JPEG decoder
pub struct JpegDecoder<R> {
    // The entire JPEG stream, buffered up front; zune-jpeg decodes from
    // an in-memory slice.
    input: Vec<u8>,
    // Color space reported by the headers, before conversion to a
    // color space that `image` supports.
    orig_color_space: ZuneColorSpace,
    // JPEG dimensions are limited to 65535x65535, so u16 suffices.
    width: u16,
    height: u16,
    // Resource limits applied when the pixels are actually decoded.
    limits: Limits,
    // Cached EXIF orientation; populated lazily by `exif_metadata`.
    orientation: Option<Orientation>,
    // For API compatibility with the previous jpeg_decoder wrapper.
    // Can be removed later, which would be an API break.
    phantom: PhantomData<R>,
}
impl<R: BufRead + Seek> JpegDecoder<R> {
    /// Create a new decoder that decodes from the stream ```r```
    pub fn new(mut r: R) -> ImageResult<JpegDecoder<R>> {
        // Buffer the whole stream: zune-jpeg operates on byte slices.
        let mut input = Vec::new();
        r.read_to_end(&mut input)?;

        let options = zune_core::options::DecoderOptions::default()
            .set_strict_mode(false)
            .set_max_width(usize::MAX)
            .set_max_height(usize::MAX);
        let mut decoder = zune_jpeg::JpegDecoder::new_with_options(input.as_slice(), options);
        decoder.decode_headers().map_err(ImageError::from_jpeg)?;

        // Header decoding succeeded, so these accessors cannot fail: they
        // only return `None` when called before `decode_headers`.
        let (width, height) = decoder.dimensions().unwrap();
        let orig_color_space = decoder.get_output_colorspace().unwrap();

        Ok(JpegDecoder {
            input,
            orig_color_space,
            // JPEG can only express dimensions up to 65535x65535, so
            // these conversions cannot fail.
            width: width.try_into().unwrap(),
            height: height.try_into().unwrap(),
            // Limits are disabled by default in the constructor for all decoders
            limits: Limits::no_limits(),
            orientation: None,
            phantom: PhantomData,
        })
    }
}
impl<R: BufRead + Seek> ImageDecoder for JpegDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        (u32::from(self.width), u32::from(self.height))
    }

    fn color_type(&self) -> ColorType {
        ColorType::from_jpeg(self.orig_color_space)
    }

    fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
        // Re-parse just the headers; the ICC profile lives there.
        let opts = zune_core::options::DecoderOptions::default()
            .set_strict_mode(false)
            .set_max_width(usize::MAX)
            .set_max_height(usize::MAX);
        let mut header_decoder = zune_jpeg::JpegDecoder::new_with_options(&self.input, opts);
        header_decoder.decode_headers().map_err(ImageError::from_jpeg)?;
        Ok(header_decoder.icc_profile())
    }

    fn exif_metadata(&mut self) -> ImageResult<Option<Vec<u8>>> {
        // Re-parse just the headers; the EXIF chunk lives there.
        let opts = zune_core::options::DecoderOptions::default()
            .set_strict_mode(false)
            .set_max_width(usize::MAX)
            .set_max_height(usize::MAX);
        let mut header_decoder = zune_jpeg::JpegDecoder::new_with_options(&self.input, opts);
        header_decoder.decode_headers().map_err(ImageError::from_jpeg)?;
        let exif = header_decoder.exif().cloned();
        // Cache the orientation as a side effect so `orientation()` does
        // not have to parse the headers a second time.
        self.orientation = Some(
            exif.as_deref()
                .and_then(|chunk| Orientation::from_exif_chunk(chunk))
                .unwrap_or(Orientation::NoTransforms),
        );
        Ok(exif)
    }

    fn orientation(&mut self) -> ImageResult<Orientation> {
        // `exif_metadata` caches the orientation; trigger it on first use.
        match self.orientation {
            Some(orientation) => Ok(orientation),
            None => {
                self.exif_metadata()?;
                Ok(self.orientation.unwrap())
            }
        }
    }

    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
        // The output buffer must match the size advertised by the headers.
        let advertised_len = self.total_bytes();
        let actual_len = buf.len() as u64;
        if actual_len != advertised_len {
            return Err(ImageError::Decoding(DecodingError::new(
                ImageFormat::Jpeg.into(),
                format!(
                    "Length of the decoded data {actual_len} \
                    doesn't match the advertised dimensions of the image \
                    that imply length {advertised_len}"
                ),
            )));
        }

        new_zune_decoder(&self.input, self.orig_color_space, self.limits)
            .decode_into(buf)
            .map_err(ImageError::from_jpeg)?;
        Ok(())
    }

    fn set_limits(&mut self, limits: Limits) -> ImageResult<()> {
        // Dimension limits can be checked eagerly, since the dimensions
        // are already known from the headers.
        limits.check_support(&crate::LimitSupport::default())?;
        let (width, height) = self.dimensions();
        limits.check_dimensions(width, height)?;
        self.limits = limits;
        Ok(())
    }

    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
impl ColorType {
    // Maps a zune-jpeg color space onto the `ColorType` the decoded
    // pixels will have. As of zune-jpeg 0.3.13 the output is always
    // 8-bit, but support for 16-bit JPEG might be added in the future.
    fn from_jpeg(colorspace: ZuneColorSpace) -> ColorType {
        use zune_core::colorspace::ColorSpace as Cs;
        match to_supported_color_space(colorspace) {
            Cs::Luma => ColorType::L8,
            Cs::LumaA => ColorType::La8,
            Cs::RGB => ColorType::Rgb8,
            Cs::RGBA => ColorType::Rgba8,
            // `to_supported_color_space` never returns anything else.
            _ => unreachable!(),
        }
    }
}
fn to_supported_color_space(orig: ZuneColorSpace) -> ZuneColorSpace {
use zune_core::colorspace::ColorSpace::*;
match orig {
RGB | RGBA | Luma | LumaA => orig,
// the rest is not supported by `image` so it will be converted to RGB during decoding
_ => RGB,
}
}
// Builds a zune-jpeg decoder configured to output a supported color
// space and to honor the caller's dimension limits.
fn new_zune_decoder(
    input: &[u8],
    orig_color_space: ZuneColorSpace,
    limits: Limits,
) -> zune_jpeg::JpegDecoder<&[u8]> {
    // u32 -> usize never truncates, so the casts below are lossless.
    let max_width = limits.max_image_width.map_or(usize::MAX, |w| w as usize);
    let max_height = limits.max_image_height.map_or(usize::MAX, |h| h as usize);
    let options = zune_core::options::DecoderOptions::default()
        .jpeg_set_out_colorspace(to_supported_color_space(orig_color_space))
        .set_strict_mode(false)
        .set_max_width(max_width)
        .set_max_height(max_height);
    zune_jpeg::JpegDecoder::new_with_options(input, options)
}
impl ImageError {
    // Translates a zune-jpeg decode error into the matching `ImageError`
    // variant so callers see the crate's uniform error types.
    fn from_jpeg(err: zune_jpeg::errors::DecodeErrors) -> ImageError {
        use zune_jpeg::errors::DecodeErrors::*;
        match err {
            // Features the decoder recognizes but cannot handle.
            Unsupported(desc) => ImageError::Unsupported(UnsupportedError::from_format_and_kind(
                ImageFormat::Jpeg.into(),
                UnsupportedErrorKind::GenericFeature(format!("{desc:?}")),
            )),
            // The image exceeds the configured dimension limits.
            LargeDimensions(_) => ImageError::Limits(LimitError::from_kind(
                crate::error::LimitErrorKind::DimensionError,
            )),
            // Everything else is treated as malformed data.
            err => ImageError::Decoding(DecodingError::new(ImageFormat::Jpeg.into(), err)),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::{fs, io::Cursor};
    // Regression test: the EXIF orientation tag of this sample image must
    // be surfaced through the `orientation()` accessor.
    #[test]
    fn test_exif_orientation() {
        let data = fs::read("tests/images/jpg/portrait_2.jpg").unwrap();
        let mut decoder = JpegDecoder::new(Cursor::new(data)).unwrap();
        assert_eq!(decoder.orientation().unwrap(), Orientation::FlipHorizontal);
    }
}

1213
vendor/image/src/codecs/jpeg/encoder.rs vendored Normal file

File diff suppressed because it is too large Load Diff

63
vendor/image/src/codecs/jpeg/entropy.rs vendored Normal file
View File

@@ -0,0 +1,63 @@
/// Given an array containing the number of codes of each code length,
/// this function generates the huffman code sizes and their respective
/// code values as specified by the JPEG spec.
///
/// Returns `(huffsize, huffcode)`: for each symbol index, its code length
/// in bits and the code value itself. Entries past the last code are zero.
const fn derive_codes_and_sizes(bits: &[u8; 16]) -> ([u8; 256], [u16; 256]) {
    let mut huffsize = [0u8; 256];
    let mut huffcode = [0u16; 256];
    let mut k = 0;
    // Annex C.2
    // Figure C.1
    // Generate table of individual code lengths
    // (`while` loops because `for` is not allowed in a const fn).
    let mut i = 0;
    while i < 16 {
        let mut j = 0;
        while j < bits[i as usize] {
            huffsize[k] = i + 1;
            k += 1;
            j += 1;
        }
        i += 1;
    }
    // Zero terminator marks the end of the code-length table.
    huffsize[k] = 0;
    // Annex C.2
    // Figure C.2
    // Generate table of huffman codes: codes of equal length are
    // consecutive integers; moving to a longer length left-shifts the
    // running code by the length difference.
    k = 0;
    let mut code = 0u16;
    let mut size = huffsize[0];
    while huffsize[k] != 0 {
        huffcode[k] = code;
        code += 1;
        k += 1;
        if huffsize[k] == size {
            continue;
        }
        // FIXME there is something wrong with this code
        let diff = huffsize[k].wrapping_sub(size);
        code = if diff < 16 { code << diff as usize } else { 0 };
        size = size.wrapping_add(diff);
    }
    (huffsize, huffcode)
}
/// Builds a Huffman lookup table mapping each symbol value to its
/// `(code_length_in_bits, code)` pair, from the JPEG `BITS` array and the
/// list of symbol values (`HUFFVAL`).
///
/// Unused symbols keep the sentinel `(17, 0)`: 17 is longer than any
/// legal JPEG code (max 16 bits), so it cannot collide with a real code.
pub(crate) const fn build_huff_lut_const(bits: &[u8; 16], huffval: &[u8]) -> [(u8, u16); 256] {
    let mut lut = [(17u8, 0u16); 256];
    let (huffsize, huffcode) = derive_codes_and_sizes(bits);
    // `while` loop because `for` is not allowed in a const fn.
    let mut i = 0;
    while i < huffval.len() {
        lut[huffval[i] as usize] = (huffsize[i], huffcode[i]);
        i += 1;
    }
    lut
}

15
vendor/image/src/codecs/jpeg/mod.rs vendored Normal file
View File

@@ -0,0 +1,15 @@
//! Decoding and Encoding of JPEG Images
//!
//! JPEG (Joint Photographic Experts Group) is an image format that supports lossy compression.
//! This module implements the Baseline JPEG standard.
//!
//! # Related Links
//! * <http://www.w3.org/Graphics/JPEG/itu-t81.pdf> - The JPEG specification
pub use self::decoder::JpegDecoder;
pub use self::encoder::{JpegEncoder, PixelDensity, PixelDensityUnit};
mod decoder;
mod encoder;
mod entropy;
mod transform;

View File

@@ -0,0 +1,196 @@
/*
fdct is a Rust translation of jfdctint.c from the
Independent JPEG Group's libjpeg version 9a
obtained from http://www.ijg.org/files/jpegsr9a.zip
It comes with the following conditions of distribution and use:
In plain English:
1. We don't promise that this software works. (But if you find any bugs,
please let us know!)
2. You can use this software for whatever you want. You don't have to pay us.
3. You may not pretend that you wrote this software. If you use it in a
program, you must acknowledge somewhere in your documentation that
you've used the IJG code.
In legalese:
The authors make NO WARRANTY or representation, either express or implied,
with respect to this software, its quality, accuracy, merchantability, or
fitness for a particular purpose. This software is provided "AS IS", and you,
its user, assume the entire risk as to its quality and accuracy.
This software is copyright (C) 1991-2014, Thomas G. Lane, Guido Vollbeding.
All Rights Reserved except as specified below.
Permission is hereby granted to use, copy, modify, and distribute this
software (or portions thereof) for any purpose, without fee, subject to these
conditions:
(1) If any part of the source code for this software is distributed, then this
README file must be included, with this copyright and no-warranty notice
unaltered; and any additions, deletions, or changes to the original files
must be clearly indicated in accompanying documentation.
(2) If only executable code is distributed, then the accompanying
documentation must state that "this software is based in part on the work of
the Independent JPEG Group".
(3) Permission for use of this software is granted only if the user accepts
full responsibility for any undesirable consequences; the authors accept
NO LIABILITY for damages of any kind.
These conditions apply to any software derived from or based on the IJG code,
not just to the unmodified library. If you use our work, you ought to
acknowledge us.
Permission is NOT granted for the use of any IJG author's name or company name
in advertising or publicity relating to this software or products derived from
it. This software may be referred to only as "the Independent JPEG Group's
software".
We specifically permit and encourage the use of this software as the basis of
commercial products, provided that all warranty or liability claims are
assumed by the product vendor.
*/
// Fixed-point scaling used by the integer DCT: trigonometric constants
// are represented as FIX(x) = round(x * 2^CONST_BITS).
static CONST_BITS: i32 = 13;
// Extra scaling applied during pass 1 and removed during pass 2, to keep
// intermediate precision between the two passes.
static PASS1_BITS: i32 = 2;
// FIX_a_bcd... = round(a.bcd... * 2^13), e.g. 0.298631336 * 8192 = 2446.
static FIX_0_298631336: i32 = 2446;
static FIX_0_390180644: i32 = 3196;
static FIX_0_541196100: i32 = 4433;
static FIX_0_765366865: i32 = 6270;
static FIX_0_899976223: i32 = 7373;
static FIX_1_175875602: i32 = 9633;
static FIX_1_501321110: i32 = 12_299;
static FIX_1_847759065: i32 = 15_137;
static FIX_1_961570560: i32 = 16_069;
static FIX_2_053119869: i32 = 16_819;
static FIX_2_562915447: i32 = 20_995;
static FIX_3_072711026: i32 = 25_172;
/// Forward discrete cosine transform of one 8x8 block of samples.
///
/// Integer (fixed-point) implementation translated from libjpeg's
/// `jfdctint.c` (see the license notice above). The output coefficients
/// are scaled by an overall factor of 8 relative to a true DCT, and the
/// level shift (centering unsigned samples around zero) is folded into
/// the DC term of pass 1.
pub(crate) fn fdct(samples: &[u8; 64], coeffs: &mut [i32; 64]) {
    // Pass 1: process rows.
    // Results are scaled by sqrt(8) compared to a true DCT
    // furthermore we scale the results by 2**PASS1_BITS
    for y in 0usize..8 {
        let y0 = y * 8;
        // Even part
        // Butterfly sums of mirrored sample pairs.
        let t0 = i32::from(samples[y0]) + i32::from(samples[y0 + 7]);
        let t1 = i32::from(samples[y0 + 1]) + i32::from(samples[y0 + 6]);
        let t2 = i32::from(samples[y0 + 2]) + i32::from(samples[y0 + 5]);
        let t3 = i32::from(samples[y0 + 3]) + i32::from(samples[y0 + 4]);
        let t10 = t0 + t3;
        let t12 = t0 - t3;
        let t11 = t1 + t2;
        let t13 = t1 - t2;
        // Butterfly differences, reused by the odd part below.
        let t0 = i32::from(samples[y0]) - i32::from(samples[y0 + 7]);
        let t1 = i32::from(samples[y0 + 1]) - i32::from(samples[y0 + 6]);
        let t2 = i32::from(samples[y0 + 2]) - i32::from(samples[y0 + 5]);
        let t3 = i32::from(samples[y0 + 3]) - i32::from(samples[y0 + 4]);
        // Apply unsigned -> signed conversion
        // (8 * 128 is the level shift for the whole row, folded into DC).
        coeffs[y0] = (t10 + t11 - 8 * 128) << PASS1_BITS as usize;
        coeffs[y0 + 4] = (t10 - t11) << PASS1_BITS as usize;
        let mut z1 = (t12 + t13) * FIX_0_541196100;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;
        coeffs[y0 + 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS - PASS1_BITS) as usize;
        // Odd part
        let t12 = t0 + t2;
        let t13 = t1 + t3;
        let mut z1 = (t12 + t13) * FIX_1_175875602;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;
        let mut t12 = t12 * (-FIX_0_390180644);
        let mut t13 = t13 * (-FIX_1_961570560);
        t12 += z1;
        t13 += z1;
        let z1 = (t0 + t3) * (-FIX_0_899976223);
        let mut t0 = t0 * FIX_1_501321110;
        let mut t3 = t3 * FIX_0_298631336;
        t0 += z1 + t12;
        t3 += z1 + t13;
        let z1 = (t1 + t2) * (-FIX_2_562915447);
        let mut t1 = t1 * FIX_3_072711026;
        let mut t2 = t2 * FIX_2_053119869;
        t1 += z1 + t13;
        t2 += z1 + t12;
        coeffs[y0 + 1] = t0 >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 3] = t1 >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 5] = t2 >> (CONST_BITS - PASS1_BITS) as usize;
        coeffs[y0 + 7] = t3 >> (CONST_BITS - PASS1_BITS) as usize;
    }
    // Pass 2: process columns
    // We remove the PASS1_BITS scaling but leave the results scaled up an
    // overall factor of 8
    for x in (0usize..8).rev() {
        // Even part
        let t0 = coeffs[x] + coeffs[x + 8 * 7];
        let t1 = coeffs[x + 8] + coeffs[x + 8 * 6];
        let t2 = coeffs[x + 8 * 2] + coeffs[x + 8 * 5];
        let t3 = coeffs[x + 8 * 3] + coeffs[x + 8 * 4];
        // Add fudge factor here for final descale
        let t10 = t0 + t3 + (1 << (PASS1_BITS - 1) as usize);
        let t12 = t0 - t3;
        let t11 = t1 + t2;
        let t13 = t1 - t2;
        let t0 = coeffs[x] - coeffs[x + 8 * 7];
        let t1 = coeffs[x + 8] - coeffs[x + 8 * 6];
        let t2 = coeffs[x + 8 * 2] - coeffs[x + 8 * 5];
        let t3 = coeffs[x + 8 * 3] - coeffs[x + 8 * 4];
        coeffs[x] = (t10 + t11) >> PASS1_BITS as usize;
        coeffs[x + 8 * 4] = (t10 - t11) >> PASS1_BITS as usize;
        let mut z1 = (t12 + t13) * FIX_0_541196100;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS + PASS1_BITS - 1) as usize;
        coeffs[x + 8 * 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS + PASS1_BITS) as usize;
        // Odd part
        let t12 = t0 + t2;
        let t13 = t1 + t3;
        let mut z1 = (t12 + t13) * FIX_1_175875602;
        // Add fudge factor here for final descale
        z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as usize;
        let mut t12 = t12 * (-FIX_0_390180644);
        let mut t13 = t13 * (-FIX_1_961570560);
        t12 += z1;
        t13 += z1;
        let z1 = (t0 + t3) * (-FIX_0_899976223);
        let mut t0 = t0 * FIX_1_501321110;
        let mut t3 = t3 * FIX_0_298631336;
        t0 += z1 + t12;
        t3 += z1 + t13;
        let z1 = (t1 + t2) * (-FIX_2_562915447);
        let mut t1 = t1 * FIX_3_072711026;
        let mut t2 = t2 * FIX_2_053119869;
        t1 += z1 + t13;
        t2 += z1 + t12;
        coeffs[x + 8] = t0 >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 3] = t1 >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 5] = t2 >> (CONST_BITS + PASS1_BITS) as usize;
        coeffs[x + 8 * 7] = t3 >> (CONST_BITS + PASS1_BITS) as usize;
    }
}

544
vendor/image/src/codecs/openexr.rs vendored Normal file
View File

@@ -0,0 +1,544 @@
//! Decoding of OpenEXR (.exr) Images
//!
//! OpenEXR is an image format that is widely used, especially in VFX,
//! because it supports lossless and lossy compression for float data.
//!
//! This decoder only supports RGB and RGBA images.
//! If an image does not contain alpha information,
//! it is defaulted to `1.0` (no transparency).
//!
//! # Related Links
//! * <https://www.openexr.com/documentation.html> - The OpenEXR reference.
//!
//!
//! Current limitations (July 2021):
//! - only pixel type `Rgba32F` and `Rgba16F` are supported
//! - only non-deep rgb/rgba files supported, no conversion from/to YCbCr or similar
//! - only the first non-deep rgb layer is used
//! - only the largest mip map level is used
//! - pixels outside display window are lost
//! - meta data is lost
//! - dwaa/dwab compressed images not supported yet by the exr library
//! - (chroma) subsampling not supported yet by the exr library
use exr::prelude::*;
use crate::error::{DecodingError, ImageFormatHint, UnsupportedError, UnsupportedErrorKind};
use crate::{
ColorType, ExtendedColorType, ImageDecoder, ImageEncoder, ImageError, ImageFormat, ImageResult,
};
use std::io::{BufRead, Seek, Write};
/// An OpenEXR decoder. Immediately reads the meta data from the file.
#[derive(Debug)]
pub struct OpenExrDecoder<R> {
    // Underlying exrs reader, opened once; chunk data is pulled from it
    // lazily when `read_image` runs.
    exr_reader: exr::block::reader::Reader<R>,
    // select a header that is rgb and not deep
    header_index: usize,
    // decode either rgb or rgba.
    // can be specified to include or discard alpha channels.
    // if none, the alpha channel will only be allocated where the file contains data for it.
    alpha_preference: Option<bool>,
    // whether the selected layer stores an "A" channel
    alpha_present_in_file: bool,
}
impl<R: BufRead + Seek> OpenExrDecoder<R> {
    /// Create a decoder. Consumes the first few bytes of the source to extract image dimensions.
    /// Assumes the reader is buffered. In most cases,
    /// you should wrap your reader in a `BufReader` for best performance.
    /// Loads an alpha channel if the file has alpha samples.
    /// Use `with_alpha_preference` if you want to load or not load alpha unconditionally.
    pub fn new(source: R) -> ImageResult<Self> {
        Self::with_alpha_preference(source, None)
    }
    /// Create a decoder. Consumes the first few bytes of the source to extract image dimensions.
    /// Assumes the reader is buffered. In most cases,
    /// you should wrap your reader in a `BufReader` for best performance.
    /// If alpha preference is specified, an alpha channel will
    /// always be present or always be not present in the returned image.
    /// If alpha preference is none, the alpha channel will only be returned if it is found in the file.
    pub fn with_alpha_preference(source: R, alpha_preference: Option<bool>) -> ImageResult<Self> {
        // read meta data, then wait for further instructions, keeping the file open and ready
        let exr_reader = exr::block::read(source, false).map_err(to_image_err)?;
        // Pick the first header whose layer is flat (non-deep) and has
        // all three of the R, G, B channels.
        let header_index = exr_reader
            .headers()
            .iter()
            .position(|header| {
                // check if r/g/b exists in the channels
                let has_rgb = ["R", "G", "B"]
                    .iter()
                    .all(|&required| // alpha will be optional
                        header.channels.find_index_of_channel(&Text::from(required)).is_some());
                // we currently dont support deep images, or images with other color spaces than rgb
                !header.deep && has_rgb
            })
            .ok_or_else(|| {
                ImageError::Decoding(DecodingError::new(
                    ImageFormatHint::Exact(ImageFormat::OpenExr),
                    "image does not contain non-deep rgb channels",
                ))
            })?;
        // Record whether the chosen layer also carries an alpha channel.
        let has_alpha = exr_reader.headers()[header_index]
            .channels
            .find_index_of_channel(&Text::from("A"))
            .is_some();
        Ok(Self {
            alpha_preference,
            exr_reader,
            header_index,
            alpha_present_in_file: has_alpha,
        })
    }
    // does not leak exrs-specific meta data into public api, just does it for this module
    fn selected_exr_header(&self) -> &exr::meta::header::Header {
        &self.exr_reader.meta_data().headers[self.header_index]
    }
}
impl<R: BufRead + Seek> ImageDecoder for OpenExrDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        // The reported size is the display window, not the data window.
        let size = self
            .selected_exr_header()
            .shared_attributes
            .display_window
            .size;
        (size.width() as u32, size.height() as u32)
    }
    fn color_type(&self) -> ColorType {
        // The caller's preference wins; otherwise mirror the file contents.
        let returns_alpha = self.alpha_preference.unwrap_or(self.alpha_present_in_file);
        if returns_alpha {
            ColorType::Rgba32F
        } else {
            ColorType::Rgb32F
        }
    }
    fn original_color_type(&self) -> ExtendedColorType {
        if self.alpha_present_in_file {
            ExtendedColorType::Rgba32F
        } else {
            ExtendedColorType::Rgb32F
        }
    }
    // reads with or without alpha, depending on `self.alpha_preference` and `self.alpha_present_in_file`
    fn read_image(self, unaligned_bytes: &mut [u8]) -> ImageResult<()> {
        let _blocks_in_header = self.selected_exr_header().chunk_count as u64;
        let channel_count = self.color_type().channel_count() as usize;
        let display_window = self.selected_exr_header().shared_attributes.display_window;
        // Offset translating data-window coordinates into display-window
        // coordinates for the per-pixel callback below.
        let data_window_offset =
            self.selected_exr_header().own_attributes.layer_position - display_window.position;
        {
            // check whether the buffer is large enough for the dimensions of the file
            let (width, height) = self.dimensions();
            let bytes_per_pixel = self.color_type().bytes_per_pixel() as usize;
            let expected_byte_count = (width as usize)
                .checked_mul(height as usize)
                .and_then(|size| size.checked_mul(bytes_per_pixel));
            // if the width and height does not match the length of the bytes, the arguments are invalid
            let has_invalid_size_or_overflowed = expected_byte_count
                .map(|expected_byte_count| unaligned_bytes.len() != expected_byte_count)
                // otherwise, size calculation overflowed, is bigger than memory,
                // therefore data is too small, so it is invalid.
                .unwrap_or(true);
            assert!(
                !has_invalid_size_or_overflowed,
                "byte buffer not large enough for the specified dimensions and f32 pixels"
            );
        }
        // Decode into a temporary f32 buffer; the first closure allocates
        // it, the second writes each decoded pixel into place.
        let result = read()
            .no_deep_data()
            .largest_resolution_level()
            .rgba_channels(
                move |_size, _channels| vec![0_f32; display_window.size.area() * channel_count],
                move |buffer, index_in_data_window, (r, g, b, a_or_1): (f32, f32, f32, f32)| {
                    let index_in_display_window =
                        index_in_data_window.to_i32() + data_window_offset;
                    // only keep pixels inside the data window
                    // TODO filter chunks based on this
                    if index_in_display_window.x() >= 0
                        && index_in_display_window.y() >= 0
                        && index_in_display_window.x() < display_window.size.width() as i32
                        && index_in_display_window.y() < display_window.size.height() as i32
                    {
                        let index_in_display_window =
                            index_in_display_window.to_usize("index bug").unwrap();
                        let first_f32_index =
                            index_in_display_window.flat_index_for_size(display_window.size);
                        buffer[first_f32_index * channel_count
                            ..(first_f32_index + 1) * channel_count]
                            .copy_from_slice(&[r, g, b, a_or_1][0..channel_count]);
                        // TODO white point chromaticities + srgb/linear conversion?
                    }
                },
            )
            .first_valid_layer() // TODO select exact layer by self.header_index?
            .all_attributes()
            .from_chunks(self.exr_reader)
            .map_err(to_image_err)?;
        // TODO this copy is strictly not necessary, but the exr api is a little too simple for reading into a borrowed target slice
        // this cast is safe and works with any alignment, as bytes are copied, and not f32 values.
        // note: buffer slice length is checked in the beginning of this function and will be correct at this point
        unaligned_bytes.copy_from_slice(bytemuck::cast_slice(
            result.layer_data.channel_data.pixels.as_slice(),
        ));
        Ok(())
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
/// Write a raw byte buffer of pixels,
/// returning an Error if it has an invalid length.
///
/// Assumes the writer is buffered. In most cases,
/// you should wrap your writer in a `BufWriter` for best performance.
// private. access via `OpenExrEncoder`
fn write_buffer(
    mut buffered_write: impl Write + Seek,
    unaligned_bytes: &[u8],
    width: u32,
    height: u32,
    color_type: ExtendedColorType,
) -> ImageResult<()> {
    let width = width as usize;
    let height = height as usize;
    // 12 bytes for Rgb32F, 16 for Rgba32F (the only supported types below).
    let bytes_per_pixel = color_type.bits_per_pixel() as usize / 8;
    match color_type {
        ExtendedColorType::Rgb32F => {
            // The closure is called per pixel position and reads the three
            // f32 samples out of the (possibly unaligned) source buffer.
            Image // TODO compression method zip??
            ::from_channels(
                (width, height),
                SpecificChannels::rgb(|pixel: Vec2<usize>| {
                    let pixel_index = pixel.flat_index_for_size(Vec2(width, height));
                    let start_byte = pixel_index * bytes_per_pixel;
                    let [r, g, b]: [f32; 3] = bytemuck::pod_read_unaligned(
                        &unaligned_bytes[start_byte..start_byte + bytes_per_pixel],
                    );
                    (r, g, b)
                }),
            )
            .write()
            // .on_progress(|progress| todo!())
            .to_buffered(&mut buffered_write)
            .map_err(to_image_err)?;
        }
        ExtendedColorType::Rgba32F => {
            // Same as above, but with four samples per pixel.
            Image // TODO compression method zip??
            ::from_channels(
                (width, height),
                SpecificChannels::rgba(|pixel: Vec2<usize>| {
                    let pixel_index = pixel.flat_index_for_size(Vec2(width, height));
                    let start_byte = pixel_index * bytes_per_pixel;
                    let [r, g, b, a]: [f32; 4] = bytemuck::pod_read_unaligned(
                        &unaligned_bytes[start_byte..start_byte + bytes_per_pixel],
                    );
                    (r, g, b, a)
                }),
            )
            .write()
            // .on_progress(|progress| todo!())
            .to_buffered(&mut buffered_write)
            .map_err(to_image_err)?;
        }
        // TODO other color types and channel types
        unsupported_color_type => {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::OpenExr.into(),
                    UnsupportedErrorKind::Color(unsupported_color_type),
                ),
            ))
        }
    }
    Ok(())
}
// TODO is this struct and trait actually used anywhere?
/// A thin wrapper that implements `ImageEncoder` for OpenEXR images. Will behave like `image::codecs::openexr::write_buffer`.
#[derive(Debug)]
pub struct OpenExrEncoder<W>(W); // newtype over the destination writer
impl<W> OpenExrEncoder<W> {
/// Create an `ImageEncoder`. Does not write anything yet. Writing later will behave like `image::codecs::openexr::write_buffer`.
// use constructor, not public field, for future backwards-compatibility
pub fn new(write: W) -> Self {
Self(write)
}
}
impl<W> ImageEncoder for OpenExrEncoder<W>
where
    W: Write + Seek,
{
    /// Writes the complete image.
    ///
    /// Assumes the writer is buffered. In most cases, you should wrap your writer in a `BufWriter`
    /// for best performance.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        // Caller contract: `buf` must be exactly the size implied by the
        // dimensions and color type; a mismatch is a programming error.
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        write_buffer(self.0, buf, width, height, color_type)
    }
}
// Converts an exrs error into an `image` decoding error, preserving the
// original error text.
fn to_image_err(exr_error: Error) -> ImageError {
    let detail = exr_error.to_string();
    ImageError::Decoding(DecodingError::new(
        ImageFormatHint::Exact(ImageFormat::OpenExr),
        detail,
    ))
}
#[cfg(test)]
mod test {
    use super::*;
    use std::fs::File;
    use std::io::{BufReader, Cursor};
    use std::path::{Path, PathBuf};
    use crate::error::{LimitError, LimitErrorKind};
    use crate::images::buffer::{Rgb32FImage, Rgba32FImage};
    use crate::io::free_functions::decoder_to_vec;
    use crate::{DynamicImage, ImageBuffer, Rgb, Rgba};
    /// Directory containing the EXR test fixtures, relative to the crate root.
    const BASE_PATH: &[&str] = &[".", "tests", "images", "exr"];
    /// Write an `Rgb32FImage`.
    /// Assumes the writer is buffered. In most cases,
    /// you should wrap your writer in a `BufWriter` for best performance.
    fn write_rgb_image(write: impl Write + Seek, image: &Rgb32FImage) -> ImageResult<()> {
        write_buffer(
            write,
            bytemuck::cast_slice(image.as_raw().as_slice()),
            image.width(),
            image.height(),
            ExtendedColorType::Rgb32F,
        )
    }
    /// Write an `Rgba32FImage`.
    /// Assumes the writer is buffered. In most cases,
    /// you should wrap your writer in a `BufWriter` for best performance.
    fn write_rgba_image(write: impl Write + Seek, image: &Rgba32FImage) -> ImageResult<()> {
        write_buffer(
            write,
            bytemuck::cast_slice(image.as_raw().as_slice()),
            image.width(),
            image.height(),
            ExtendedColorType::Rgba32F,
        )
    }
    /// Read the file from the specified path into an `Rgba32FImage`.
    fn read_as_rgba_image_from_file(path: impl AsRef<Path>) -> ImageResult<Rgba32FImage> {
        read_as_rgba_image(BufReader::new(File::open(path)?))
    }
    /// Read the file from the specified path into an `Rgb32FImage`.
    fn read_as_rgb_image_from_file(path: impl AsRef<Path>) -> ImageResult<Rgb32FImage> {
        read_as_rgb_image(BufReader::new(File::open(path)?))
    }
    /// Read the file from the specified path into an `Rgb32FImage`.
    fn read_as_rgb_image(read: impl BufRead + Seek) -> ImageResult<Rgb32FImage> {
        let decoder = OpenExrDecoder::with_alpha_preference(read, Some(false))?;
        let (width, height) = decoder.dimensions();
        let buffer: Vec<f32> = decoder_to_vec(decoder)?;
        ImageBuffer::from_raw(width, height, buffer)
            // this should be the only reason for the "from raw" call to fail,
            // even though such a large allocation would probably cause an error much earlier
            .ok_or_else(|| {
                ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
            })
    }
    /// Read the file from the specified path into an `Rgba32FImage`.
    fn read_as_rgba_image(read: impl BufRead + Seek) -> ImageResult<Rgba32FImage> {
        let decoder = OpenExrDecoder::with_alpha_preference(read, Some(true))?;
        let (width, height) = decoder.dimensions();
        let buffer: Vec<f32> = decoder_to_vec(decoder)?;
        ImageBuffer::from_raw(width, height, buffer)
            // this should be the only reason for the "from raw" call to fail,
            // even though such a large allocation would probably cause an error much earlier
            .ok_or_else(|| {
                ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
            })
    }
    #[test]
    fn compare_exr_hdr() {
        if cfg!(not(feature = "hdr")) {
            eprintln!("warning: to run all the openexr tests, activate the hdr feature flag");
        }
        #[cfg(feature = "hdr")]
        {
            use crate::codecs::hdr::HdrDecoder;
            let folder = BASE_PATH.iter().collect::<PathBuf>();
            let reference_path = folder.join("overexposed gradient.hdr");
            let exr_path =
                folder.join("overexposed gradient - data window equals display window.exr");
            let hdr_decoder =
                HdrDecoder::new(BufReader::new(File::open(reference_path).unwrap())).unwrap();
            let hdr: Rgb32FImage = match DynamicImage::from_decoder(hdr_decoder).unwrap() {
                DynamicImage::ImageRgb32F(image) => image,
                _ => panic!("expected rgb32f image"),
            };
            let exr_pixels: Rgb32FImage = read_as_rgb_image_from_file(exr_path).unwrap();
            assert_eq!(exr_pixels.dimensions(), hdr.dimensions());
            for (expected, found) in hdr.pixels().zip(exr_pixels.pixels()) {
                for (expected, found) in expected.0.iter().zip(found.0.iter()) {
                    // the large tolerance seems to be caused by
                    // the RGBE u8x4 pixel quantization of the hdr image format
                    assert!(
                        (expected - found).abs() < 0.1,
                        "expected {expected}, found {found}"
                    );
                }
            }
        }
    }
    #[test]
    fn roundtrip_rgba() {
        let mut next_random = vec![1.0, 0.0, -1.0, -3.15, 27.0, 11.0, 31.0]
            .into_iter()
            .cycle();
        let mut next_random = move || next_random.next().unwrap();
        let generated_image: Rgba32FImage = ImageBuffer::from_fn(9, 31, |_x, _y| {
            Rgba([next_random(), next_random(), next_random(), next_random()])
        });
        let mut bytes = vec![];
        write_rgba_image(Cursor::new(&mut bytes), &generated_image).unwrap();
        let decoded_image = read_as_rgba_image(Cursor::new(bytes)).unwrap();
        // `assert_eq!`, not `debug_assert_eq!`: debug assertions are compiled
        // out in release builds, so the roundtrip comparison must not depend
        // on them (`cargo test --release` would otherwise verify nothing).
        // f32 EXR channels are lossless, so exact equality is expected.
        assert_eq!(generated_image, decoded_image);
    }
    #[test]
    fn roundtrip_rgb() {
        let mut next_random = vec![1.0, 0.0, -1.0, -3.15, 27.0, 11.0, 31.0]
            .into_iter()
            .cycle();
        let mut next_random = move || next_random.next().unwrap();
        let generated_image: Rgb32FImage = ImageBuffer::from_fn(9, 31, |_x, _y| {
            Rgb([next_random(), next_random(), next_random()])
        });
        let mut bytes = vec![];
        write_rgb_image(Cursor::new(&mut bytes), &generated_image).unwrap();
        let decoded_image = read_as_rgb_image(Cursor::new(bytes)).unwrap();
        // See roundtrip_rgba: must remain a hard assert in release builds.
        assert_eq!(generated_image, decoded_image);
    }
    #[test]
    fn compare_rgba_rgb() {
        let exr_path = BASE_PATH
            .iter()
            .collect::<PathBuf>()
            .join("overexposed gradient - data window equals display window.exr");
        let rgb: Rgb32FImage = read_as_rgb_image_from_file(&exr_path).unwrap();
        let rgba: Rgba32FImage = read_as_rgba_image_from_file(&exr_path).unwrap();
        assert_eq!(rgba.dimensions(), rgb.dimensions());
        for (Rgb(rgb), Rgba(rgba)) in rgb.pixels().zip(rgba.pixels()) {
            assert_eq!(rgb, &rgba[..3]);
        }
    }
    #[test]
    fn compare_cropped() {
        // like in photoshop, exr images may have layers placed anywhere in a canvas.
        // we don't want to load the pixels from the layer, but we want to load the pixels from the canvas.
        // a layer might be smaller than the canvas, in that case the canvas should be transparent black
        // where no layer was covering it. a layer might also be larger than the canvas,
        // these pixels should be discarded.
        //
        // in this test we want to make sure that an
        // auto-cropped image will be reproduced to the original.
        let exr_path = BASE_PATH.iter().collect::<PathBuf>();
        let original = exr_path.join("cropping - uncropped original.exr");
        let cropped = exr_path.join("cropping - data window differs display window.exr");
        // smoke-check that the exr files are actually not the same
        {
            let original_exr = read_first_flat_layer_from_file(&original).unwrap();
            let cropped_exr = read_first_flat_layer_from_file(&cropped).unwrap();
            assert_eq!(
                original_exr.attributes.display_window,
                cropped_exr.attributes.display_window
            );
            assert_ne!(
                original_exr.layer_data.attributes.layer_position,
                cropped_exr.layer_data.attributes.layer_position
            );
            assert_ne!(original_exr.layer_data.size, cropped_exr.layer_data.size);
        }
        // check that they result in the same image
        let original: Rgba32FImage = read_as_rgba_image_from_file(&original).unwrap();
        let cropped: Rgba32FImage = read_as_rgba_image_from_file(&cropped).unwrap();
        assert_eq!(original.dimensions(), cropped.dimensions());
        // the following is not a simple assert_eq, as in case of an error,
        // the whole image would be printed to the console, which takes forever
        assert!(original.pixels().zip(cropped.pixels()).all(|(a, b)| a == b));
    }
}

157
vendor/image/src/codecs/pcx.rs vendored Normal file
View File

@@ -0,0 +1,157 @@
//! Decoding and Encoding of PCX Images
//!
//! PCX (PiCture eXchange) Format is an obsolete image format from the 1980s.
//!
//! # Related Links
//! * <https://en.wikipedia.org/wiki/PCX> - The PCX format on Wikipedia
extern crate pcx;
use std::io::{self, BufRead, Cursor, Read, Seek};
use std::iter;
use std::marker::PhantomData;
use std::mem;
use crate::color::{ColorType, ExtendedColorType};
use crate::error::{ImageError, ImageResult};
use crate::image::ImageDecoder;
/// Decoder for PCX images.
pub struct PCXDecoder<R>
where
    R: BufRead + Seek,
{
    // Cached (width, height) in pixels, read from the PCX header at construction.
    dimensions: (u32, u32),
    // Row-oriented reader from the `pcx` crate that performs the actual decoding.
    inner: pcx::Reader<R>,
}
impl<R> PCXDecoder<R>
where
    R: BufRead + Seek,
{
    /// Create a new `PCXDecoder`.
    ///
    /// Parses the PCX header eagerly so that the image dimensions are
    /// available immediately; fails if the header cannot be read.
    pub fn new(r: R) -> Result<PCXDecoder<R>, ImageError> {
        let reader = pcx::Reader::new(r).map_err(ImageError::from_pcx_decode)?;
        let (width, height) = (u32::from(reader.width()), u32::from(reader.height()));
        Ok(PCXDecoder {
            dimensions: (width, height),
            inner: reader,
        })
    }
}
impl ImageError {
    /// Convert an error reported by the `pcx` crate into an `ImageError`.
    // NOTE(review): all failures surface as `ImageError::IoError`, including
    // ones caused by malformed PCX data — presumably because the `pcx` crate
    // only exposes `io::Error` here; confirm whether a `Decoding` variant
    // would be more accurate for format errors.
    fn from_pcx_decode(err: io::Error) -> ImageError {
        ImageError::IoError(err)
    }
}
/// Wrapper struct around a `Cursor<Vec<u8>>`
///
/// Deprecated and marked `dead_code`: apparently kept only for backwards
/// compatibility of the public API. The `PhantomData<R>` preserves the
/// original generic signature without actually storing an `R`.
#[allow(dead_code)]
#[deprecated]
pub struct PCXReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
#[allow(deprecated)]
impl<R> Read for PCXReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        // Fast path: nothing has been consumed yet and the caller's buffer is
        // empty, so hand over our backing vector wholesale instead of copying
        // it byte by byte.
        if self.0.position() == 0 && buf.is_empty() {
            *buf = mem::take(self.0.get_mut());
            Ok(buf.len())
        } else {
            // Slow path: delegate to the cursor's regular copying read.
            self.0.read_to_end(buf)
        }
    }
}
impl<R: BufRead + Seek> ImageDecoder for PCXDecoder<R> {
    /// Width and height in pixels, as cached from the PCX header.
    fn dimensions(&self) -> (u32, u32) {
        self.dimensions
    }
    /// All PCX images are decoded to 8-bit RGB.
    fn color_type(&self) -> ColorType {
        ColorType::Rgb8
    }
    /// Reports the color layout of the file itself, before conversion to RGB8.
    fn original_color_type(&self) -> ExtendedColorType {
        if self.inner.is_paletted() {
            return ExtendedColorType::Unknown(self.inner.header.bit_depth);
        }
        match (
            self.inner.header.number_of_color_planes,
            self.inner.header.bit_depth,
        ) {
            (1, 1) => ExtendedColorType::L1,
            (1, 2) => ExtendedColorType::L2,
            (1, 4) => ExtendedColorType::L4,
            (1, 8) => ExtendedColorType::L8,
            (3, 1) => ExtendedColorType::Rgb1,
            (3, 2) => ExtendedColorType::Rgb2,
            (3, 4) => ExtendedColorType::Rgb4,
            (3, 8) => ExtendedColorType::Rgb8,
            (4, 1) => ExtendedColorType::Rgba1,
            (4, 2) => ExtendedColorType::Rgba2,
            (4, 4) => ExtendedColorType::Rgba4,
            (4, 8) => ExtendedColorType::Rgba8,
            // NOTE(review): a header with any other plane/depth combination
            // would panic here — TODO confirm the `pcx` crate rejects such
            // files before this point.
            (_, _) => unreachable!(),
        }
    }
    /// Decode the whole image into `buf` as tightly-packed RGB8 rows.
    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        let height = self.inner.height() as usize;
        let width = self.inner.width() as usize;
        match self.inner.palette_length() {
            // No palette to interpret, so we can just write directly to buf
            None => {
                for i in 0..height {
                    let offset = i * 3 * width;
                    self.inner
                        .next_row_rgb(&mut buf[offset..offset + (width * 3)])
                        .map_err(ImageError::from_pcx_decode)?;
                }
            }
            // We need to convert from the palette colours to RGB values inline,
            // but the pcx crate can't give us the palette first. Work around it
            // by taking the paletted image into a buffer, then converting it to
            // RGB8 after.
            Some(palette_length) => {
                // `vec![0; n]` instead of `iter::repeat(0).take(n).collect()`:
                // same zero-filled buffer, simpler and uses the optimized
                // zeroed-allocation path.
                let mut pal_buf: Vec<u8> = vec![0; height * width];
                for i in 0..height {
                    let offset = i * width;
                    self.inner
                        .next_row_paletted(&mut pal_buf[offset..offset + width])
                        .map_err(ImageError::from_pcx_decode)?;
                }
                let mut palette: Vec<u8> = vec![0; 3 * palette_length as usize];
                self.inner
                    .read_palette(&mut palette[..])
                    .map_err(ImageError::from_pcx_decode)?;
                // Expand each 1-byte palette index into its RGB triple.
                for i in 0..height {
                    for j in 0..width {
                        let pixel = pal_buf[i * width + j] as usize;
                        let offset = i * width * 3 + j * 3;
                        buf[offset] = palette[pixel * 3];
                        buf[offset + 1] = palette[pixel * 3 + 1];
                        buf[offset + 2] = palette[pixel * 3 + 2];
                    }
                }
            }
        }
        Ok(())
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}

793
vendor/image/src/codecs/png.rs vendored Normal file
View File

@@ -0,0 +1,793 @@
//! Decoding and Encoding of PNG Images
//!
//! PNG (Portable Network Graphics) is an image format that supports lossless compression.
//!
//! # Related Links
//! * <http://www.w3.org/TR/PNG/> - The PNG Specification
use std::borrow::Cow;
use std::io::{BufRead, Seek, Write};
use png::{BlendOp, DisposeOp};
use crate::animation::{Delay, Frame, Frames, Ratio};
use crate::color::{Blend, ColorType, ExtendedColorType};
use crate::error::{
DecodingError, ImageError, ImageResult, LimitError, LimitErrorKind, ParameterError,
ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
};
use crate::utils::vec_try_with_capacity;
use crate::{
AnimationDecoder, DynamicImage, GenericImage, GenericImageView, ImageBuffer, ImageDecoder,
ImageEncoder, ImageFormat, Limits, Luma, LumaA, Rgb, Rgba, RgbaImage,
};
// http://www.w3.org/TR/PNG-Structure.html
// The first eight bytes of a PNG file always contain the following (decimal) values:
// (equivalently the byte string b"\x89PNG\r\n\x1a\n")
pub(crate) const PNG_SIGNATURE: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10];
/// PNG decoder
pub struct PngDecoder<R: BufRead + Seek> {
    // Output color type after the EXPAND transformation has been applied,
    // fixed at construction time (see `with_limits`).
    color_type: ColorType,
    // Underlying `png` crate reader, already past the header.
    reader: png::Reader<R>,
    // Resource limits applied to decoding allocations.
    limits: Limits,
}
impl<R: BufRead + Seek> PngDecoder<R> {
    /// Creates a new decoder that decodes from the stream ```r```
    pub fn new(r: R) -> ImageResult<PngDecoder<R>> {
        Self::with_limits(r, Limits::no_limits())
    }
    /// Creates a new decoder that decodes from the stream ```r``` with the given limits.
    pub fn with_limits(r: R, limits: Limits) -> ImageResult<PngDecoder<R>> {
        limits.check_support(&crate::LimitSupport::default())?;
        // Forward the allocation limit to the png crate; saturate to
        // usize::MAX when the configured u64 limit does not fit in usize.
        let max_bytes = usize::try_from(limits.max_alloc.unwrap_or(u64::MAX)).unwrap_or(usize::MAX);
        let mut decoder = png::Decoder::new_with_limits(r, png::Limits { bytes: max_bytes });
        let info = decoder.read_header_info().map_err(ImageError::from_png)?;
        limits.check_dimensions(info.width, info.height)?;
        // By default the PNG decoder will scale 16 bpc to 8 bpc, so custom
        // transformations must be set. EXPAND preserves the default behavior
        // expanding bpc < 8 to 8 bpc.
        // Note: transformations must be configured before `read_info`.
        decoder.set_transformations(png::Transformations::EXPAND);
        let reader = decoder.read_info().map_err(ImageError::from_png)?;
        let (color_type, bits) = reader.output_color_type();
        // Map the post-transformation output color type onto this crate's
        // `ColorType`; every combination not representable here is rejected
        // with an `Unsupported` error rather than silently converted.
        let color_type = match (color_type, bits) {
            (png::ColorType::Grayscale, png::BitDepth::Eight) => ColorType::L8,
            (png::ColorType::Grayscale, png::BitDepth::Sixteen) => ColorType::L16,
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight) => ColorType::La8,
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen) => ColorType::La16,
            (png::ColorType::Rgb, png::BitDepth::Eight) => ColorType::Rgb8,
            (png::ColorType::Rgb, png::BitDepth::Sixteen) => ColorType::Rgb16,
            (png::ColorType::Rgba, png::BitDepth::Eight) => ColorType::Rgba8,
            (png::ColorType::Rgba, png::BitDepth::Sixteen) => ColorType::Rgba16,
            (png::ColorType::Grayscale, png::BitDepth::One) => {
                return Err(unsupported_color(ExtendedColorType::L1))
            }
            (png::ColorType::GrayscaleAlpha, png::BitDepth::One) => {
                return Err(unsupported_color(ExtendedColorType::La1))
            }
            (png::ColorType::Rgb, png::BitDepth::One) => {
                return Err(unsupported_color(ExtendedColorType::Rgb1))
            }
            (png::ColorType::Rgba, png::BitDepth::One) => {
                return Err(unsupported_color(ExtendedColorType::Rgba1))
            }
            (png::ColorType::Grayscale, png::BitDepth::Two) => {
                return Err(unsupported_color(ExtendedColorType::L2))
            }
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Two) => {
                return Err(unsupported_color(ExtendedColorType::La2))
            }
            (png::ColorType::Rgb, png::BitDepth::Two) => {
                return Err(unsupported_color(ExtendedColorType::Rgb2))
            }
            (png::ColorType::Rgba, png::BitDepth::Two) => {
                return Err(unsupported_color(ExtendedColorType::Rgba2))
            }
            (png::ColorType::Grayscale, png::BitDepth::Four) => {
                return Err(unsupported_color(ExtendedColorType::L4))
            }
            (png::ColorType::GrayscaleAlpha, png::BitDepth::Four) => {
                return Err(unsupported_color(ExtendedColorType::La4))
            }
            (png::ColorType::Rgb, png::BitDepth::Four) => {
                return Err(unsupported_color(ExtendedColorType::Rgb4))
            }
            (png::ColorType::Rgba, png::BitDepth::Four) => {
                return Err(unsupported_color(ExtendedColorType::Rgba4))
            }
            (png::ColorType::Indexed, bits) => {
                return Err(unsupported_color(ExtendedColorType::Unknown(bits as u8)))
            }
        };
        Ok(PngDecoder {
            color_type,
            reader,
            limits,
        })
    }
    /// Returns the gamma value of the image or None if no gamma value is indicated.
    ///
    /// If an sRGB chunk is present this method returns a gamma value of 0.45455 and ignores the
    /// value in the gAMA chunk. This is the recommended behavior according to the PNG standard:
    ///
    /// > When the sRGB chunk is present, [...] decoders that recognize the sRGB chunk but are not
    /// > capable of colour management are recommended to ignore the gAMA and cHRM chunks, and use
    /// > the values given above as if they had appeared in gAMA and cHRM chunks.
    pub fn gamma_value(&self) -> ImageResult<Option<f64>> {
        // `into_scaled` yields gamma * 100_000, so divide back out.
        Ok(self
            .reader
            .info()
            .source_gamma
            .map(|x| f64::from(x.into_scaled()) / 100_000.0))
    }
    /// Turn this into an iterator over the animation frames.
    ///
    /// Reading the complete animation requires more memory than reading the data from the IDAT
    /// framemultiple frame buffers need to be reserved at the same time. We further do not
    /// support compositing 16-bit colors. In any case this would be lossy as the interface of
    /// animation decoders does not support 16-bit colors.
    ///
    /// If something is not supported or a limit is violated then the decoding step that requires
    /// them will fail and an error will be returned instead of the frame. No further frames will
    /// be returned.
    pub fn apng(self) -> ImageResult<ApngDecoder<R>> {
        Ok(ApngDecoder::new(self))
    }
    /// Returns if the image contains an animation.
    ///
    /// Note that the file itself decides if the default image is considered to be part of the
    /// animation. When it is not the common interpretation is to use it as a thumbnail.
    ///
    /// If a non-animated image is converted into an `ApngDecoder` then its iterator is empty.
    pub fn is_apng(&self) -> ImageResult<bool> {
        // The acTL chunk is only present in animated PNGs.
        Ok(self.reader.info().animation_control.is_some())
    }
}
/// Build the `ImageError` returned for PNG color formats that this decoder
/// cannot represent.
fn unsupported_color(ect: ExtendedColorType) -> ImageError {
    let kind = UnsupportedErrorKind::Color(ect);
    let err = UnsupportedError::from_format_and_kind(ImageFormat::Png.into(), kind);
    ImageError::Unsupported(err)
}
impl<R: BufRead + Seek> ImageDecoder for PngDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        self.reader.info().size()
    }
    fn color_type(&self) -> ColorType {
        self.color_type
    }
    /// Returns the embedded ICC profile, if present, as raw bytes.
    fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
        Ok(self.reader.info().icc_profile.as_ref().map(|x| x.to_vec()))
    }
    /// Returns the raw EXIF chunk contents, if present.
    fn exif_metadata(&mut self) -> ImageResult<Option<Vec<u8>>> {
        Ok(self
            .reader
            .info()
            .exif_metadata
            .as_ref()
            .map(|x| x.to_vec()))
    }
    /// Decode the (first) frame into `buf`, converting 16-bit samples from the
    /// file's big-endian order to native endianness.
    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        use byteorder_lite::{BigEndian, ByteOrder, NativeEndian};
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        self.reader.next_frame(buf).map_err(ImageError::from_png)?;
        // PNG images are big endian. For 16 bit per channel and larger types,
        // the buffer may need to be reordered to native endianness per the
        // contract of `read_image`.
        // TODO: assumes equal channel bit depth.
        let bpc = self.color_type().bytes_per_pixel() / self.color_type().channel_count();
        match bpc {
            1 => (), // No reordering necessary for u8
            // Read big endian, re-write native order; a no-op on big-endian targets.
            2 => buf.chunks_exact_mut(2).for_each(|c| {
                let v = BigEndian::read_u16(c);
                NativeEndian::write_u16(c, v);
            }),
            // Only 1- and 2-byte channels can be produced by the color types
            // accepted in `with_limits`.
            _ => unreachable!(),
        }
        Ok(())
    }
    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
    /// Replace the active limits, re-validating the image dimensions against them.
    fn set_limits(&mut self, limits: Limits) -> ImageResult<()> {
        limits.check_support(&crate::LimitSupport::default())?;
        let info = self.reader.info();
        limits.check_dimensions(info.width, info.height)?;
        self.limits = limits;
        // TODO: add `png::Reader::change_limits()` and call it here
        // to also constrain the internal buffer allocations in the PNG crate
        Ok(())
    }
}
/// An [`AnimationDecoder`] adapter of [`PngDecoder`].
///
/// See [`PngDecoder::apng`] for more information.
///
/// [`AnimationDecoder`]: ../trait.AnimationDecoder.html
/// [`PngDecoder`]: struct.PngDecoder.html
/// [`PngDecoder::apng`]: struct.PngDecoder.html#method.apng
pub struct ApngDecoder<R: BufRead + Seek> {
    inner: PngDecoder<R>,
    /// The current output buffer.
    current: Option<RgbaImage>,
    /// The previous output buffer, used for dispose op previous.
    previous: Option<RgbaImage>,
    /// The dispose op of the current frame.
    dispose: DisposeOp,
    /// The region (x, y, width, height) to dispose of the previous frame.
    dispose_region: Option<(u32, u32, u32, u32)>,
    /// The number of images still expected to be loadable.
    remaining: u32,
    /// The next (first) image is the thumbnail.
    has_thumbnail: bool,
}
impl<R: BufRead + Seek> ApngDecoder<R> {
    /// Wrap a `PngDecoder`, reading the animation control chunk to learn how
    /// many frames to expect and whether the IDAT image is a mere thumbnail.
    fn new(inner: PngDecoder<R>) -> Self {
        let info = inner.reader.info();
        let remaining = match info.animation_control() {
            // The expected number of fcTL in the remaining image.
            Some(actl) => actl.num_frames,
            None => 0,
        };
        // If the IDAT has no fcTL then it is not part of the animation counted by
        // num_frames. All following fdAT chunks must be preceded by an fcTL
        let has_thumbnail = info.frame_control.is_none();
        ApngDecoder {
            inner,
            current: None,
            previous: None,
            dispose: DisposeOp::Background,
            dispose_region: None,
            remaining,
            has_thumbnail,
        }
    }
    // TODO: thumbnail(&mut self) -> Option<impl ImageDecoder<'_>>
    /// Decode one subframe and overlay it on the canvas.
    ///
    /// Returns `Ok(None)` once all frames have been produced. On error, the
    /// frame counter is left at 0 so no further frames are attempted.
    fn mix_next_frame(&mut self) -> Result<Option<&RgbaImage>, ImageError> {
        // The iterator always produces RGBA8 images
        const COLOR_TYPE: ColorType = ColorType::Rgba8;
        // Allocate the buffers, honoring the memory limits
        let (width, height) = self.inner.dimensions();
        {
            let limits = &mut self.inner.limits;
            if self.previous.is_none() {
                limits.reserve_buffer(width, height, COLOR_TYPE)?;
                self.previous = Some(RgbaImage::new(width, height));
            }
            if self.current.is_none() {
                limits.reserve_buffer(width, height, COLOR_TYPE)?;
                self.current = Some(RgbaImage::new(width, height));
            }
        }
        // Remove this image from remaining.
        self.remaining = match self.remaining.checked_sub(1) {
            None => return Ok(None),
            Some(next) => next,
        };
        // Shorten ourselves to 0 in case of error.
        let remaining = self.remaining;
        self.remaining = 0;
        // Skip the thumbnail that is not part of the animation.
        if self.has_thumbnail {
            // Clone the limits so that our one-off allocation that's destroyed after this scope doesn't persist
            let mut limits = self.inner.limits.clone();
            let buffer_size = self.inner.reader.output_buffer_size().ok_or_else(|| {
                ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
            })?;
            limits.reserve_usize(buffer_size)?;
            let mut buffer = vec![0; buffer_size];
            // TODO: add `png::Reader::change_limits()` and call it here
            // to also constrain the internal buffer allocations in the PNG crate
            self.inner
                .reader
                .next_frame(&mut buffer)
                .map_err(ImageError::from_png)?;
            self.has_thumbnail = false;
        }
        self.animatable_color_type()?;
        // We've initialized them earlier in this function
        let previous = self.previous.as_mut().unwrap();
        let current = self.current.as_mut().unwrap();
        // Dispose of the previous frame.
        match self.dispose {
            DisposeOp::None => {
                previous.clone_from(current);
            }
            DisposeOp::Background => {
                previous.clone_from(current);
                if let Some((px, py, width, height)) = self.dispose_region {
                    let mut region_current = current.sub_image(px, py, width, height);
                    // FIXME: This is a workaround for the fact that `pixels_mut` is not implemented
                    let pixels: Vec<_> = region_current.pixels().collect();
                    for (x, y, _) in &pixels {
                        region_current.put_pixel(*x, *y, Rgba::from([0, 0, 0, 0]));
                    }
                } else {
                    // The first frame is always a background frame.
                    current.pixels_mut().for_each(|pixel| {
                        *pixel = Rgba::from([0, 0, 0, 0]);
                    });
                }
            }
            DisposeOp::Previous => {
                let (px, py, width, height) = self
                    .dispose_region
                    .expect("The first frame must not set dispose=Previous");
                let region_previous = previous.sub_image(px, py, width, height);
                current
                    .copy_from(&region_previous.to_image(), px, py)
                    .unwrap();
            }
        }
        // The allocations from now on are not going to persist,
        // and will be destroyed at the end of the scope.
        // Clone the limits so that any changes to them die with the allocations.
        let mut limits = self.inner.limits.clone();
        // Read next frame data.
        let raw_frame_size = self.inner.reader.output_buffer_size().ok_or_else(|| {
            ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
        })?;
        limits.reserve_usize(raw_frame_size)?;
        let mut buffer = vec![0; raw_frame_size];
        // TODO: add `png::Reader::change_limits()` and call it here
        // to also constrain the internal buffer allocations in the PNG crate
        self.inner
            .reader
            .next_frame(&mut buffer)
            .map_err(ImageError::from_png)?;
        let info = self.inner.reader.info();
        // Find out how to interpret the decoded frame.
        let (width, height, px, py, blend);
        match info.frame_control() {
            None => {
                width = info.width;
                height = info.height;
                px = 0;
                py = 0;
                blend = BlendOp::Source;
            }
            Some(fc) => {
                width = fc.width;
                height = fc.height;
                px = fc.x_offset;
                py = fc.y_offset;
                blend = fc.blend_op;
                self.dispose = fc.dispose_op;
            }
        }
        self.dispose_region = Some((px, py, width, height));
        // Turn the data into an rgba image proper.
        limits.reserve_buffer(width, height, COLOR_TYPE)?;
        let source = match self.inner.color_type {
            ColorType::L8 => {
                let image = ImageBuffer::<Luma<_>, _>::from_raw(width, height, buffer).unwrap();
                DynamicImage::ImageLuma8(image).into_rgba8()
            }
            ColorType::La8 => {
                let image = ImageBuffer::<LumaA<_>, _>::from_raw(width, height, buffer).unwrap();
                DynamicImage::ImageLumaA8(image).into_rgba8()
            }
            ColorType::Rgb8 => {
                let image = ImageBuffer::<Rgb<_>, _>::from_raw(width, height, buffer).unwrap();
                DynamicImage::ImageRgb8(image).into_rgba8()
            }
            ColorType::Rgba8 => ImageBuffer::<Rgba<_>, _>::from_raw(width, height, buffer).unwrap(),
            ColorType::L16 | ColorType::Rgb16 | ColorType::La16 | ColorType::Rgba16 => {
                // TODO: to enable remove restriction in `animatable_color_type` method.
                // Unreachable because `animatable_color_type` was checked above.
                unreachable!("16-bit apng not yet supported")
            }
            _ => unreachable!("Invalid png color"),
        };
        // We've converted the raw frame to RGBA8 and disposed of the original allocation
        limits.free_usize(raw_frame_size);
        match blend {
            BlendOp::Source => {
                current
                    .copy_from(&source, px, py)
                    .expect("Invalid png image not detected in png");
            }
            BlendOp::Over => {
                // TODO: investigate speed, speed-ups, and bounds-checks.
                for (x, y, p) in source.enumerate_pixels() {
                    current.get_pixel_mut(x + px, y + py).blend(p);
                }
            }
        }
        // Ok, we can proceed with actually remaining images.
        self.remaining = remaining;
        // Return composited output buffer.
        Ok(Some(self.current.as_ref().unwrap()))
    }
    /// Check that the decoded color type is one the animation path can composite.
    fn animatable_color_type(&self) -> Result<(), ImageError> {
        match self.inner.color_type {
            ColorType::L8 | ColorType::Rgb8 | ColorType::La8 | ColorType::Rgba8 => Ok(()),
            // TODO: do not handle multi-byte colors. Remember to implement it in `mix_next_frame`.
            ColorType::L16 | ColorType::Rgb16 | ColorType::La16 | ColorType::Rgba16 => {
                Err(unsupported_color(self.inner.color_type.into()))
            }
            _ => unreachable!("{:?} not a valid png color", self.inner.color_type),
        }
    }
}
impl<'a, R: BufRead + Seek + 'a> AnimationDecoder<'a> for ApngDecoder<R> {
    fn into_frames(self) -> Frames<'a> {
        // Adapter turning the pull-based `mix_next_frame` into an `Iterator`.
        struct FrameIterator<R: BufRead + Seek>(ApngDecoder<R>);
        impl<R: BufRead + Seek> Iterator for FrameIterator<R> {
            type Item = ImageResult<Frame>;
            fn next(&mut self) -> Option<Self::Item> {
                let image = match self.0.mix_next_frame() {
                    Ok(Some(image)) => image.clone(),
                    Ok(None) => return None,
                    Err(err) => return Some(Err(err)),
                };
                let info = self.0.inner.reader.info();
                let fc = info.frame_control().unwrap();
                // PNG frame delays are rational numbers of seconds
                // (delay_num / delay_den); convert to milliseconds here.
                let num = u32::from(fc.delay_num) * 1_000u32;
                let denom = match fc.delay_den {
                    // The standard dictates to replace by 100 when the denominator is 0.
                    0 => 100,
                    d => u32::from(d),
                };
                let delay = Delay::from_ratio(Ratio::new(num, denom));
                // Offsets are always (0, 0): frames were already composited
                // onto the full canvas by `mix_next_frame`.
                Some(Ok(Frame::from_parts(image, 0, 0, delay)))
            }
        }
        Frames::new(Box::new(FrameIterator(self)))
    }
}
/// PNG encoder
pub struct PngEncoder<W: Write> {
    // Destination the encoded PNG stream is written to.
    w: W,
    // Requested compression level hint.
    compression: CompressionType,
    // Requested scanline filter strategy.
    filter: FilterType,
    // Raw ICC profile bytes to embed; empty means none.
    icc_profile: Vec<u8>,
    // Raw EXIF payload to embed; empty means none.
    exif_metadata: Vec<u8>,
}
/// Compression level of a PNG encoder. The default setting is `Fast`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[non_exhaustive]
#[derive(Default)]
pub enum CompressionType {
    /// Default compression level
    ///
    /// Note: despite the name, this is *not* what `CompressionType::default()`
    /// returns — the `Default` trait implementation yields `Fast`.
    Default,
    /// Fast, minimal compression
    #[default]
    Fast,
    /// High compression level
    Best,
}
/// Filter algorithms used to process image data to improve compression.
///
/// The default filter is `Adaptive`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[non_exhaustive]
#[derive(Default)]
pub enum FilterType {
    /// No processing done, best used for low bit depth grayscale or data with a
    /// low color count
    NoFilter,
    /// Filters based on previous pixel in the same scanline
    Sub,
    /// Filters based on the scanline above
    Up,
    /// Filters based on the average of the left and above neighbor pixels
    /// (per the PNG specification's Average filter)
    Avg,
    /// Algorithm that takes into account the left, upper left, and above pixels
    Paeth,
    /// Uses a heuristic to select one of the preceding filters for each
    /// scanline rather than one filter for the entire image
    #[default]
    Adaptive,
}
impl<W: Write> PngEncoder<W> {
    /// Create a new encoder that writes its output to ```w```
    pub fn new(w: W) -> PngEncoder<W> {
        PngEncoder {
            w,
            compression: CompressionType::default(),
            filter: FilterType::default(),
            icc_profile: Vec::new(),
            exif_metadata: Vec::new(),
        }
    }
    /// Create a new encoder that writes its output to `w` with `CompressionType` `compression` and
    /// `FilterType` `filter`.
    ///
    /// It is best to view the options as a _hint_ to the implementation on the smallest or fastest
    /// option for encoding a particular image. That is, using options that map directly to a PNG
    /// image parameter will use this parameter where possible. But variants that have no direct
    /// mapping may be interpreted differently in minor versions. The exact output is expressly
    /// __not__ part of the SemVer stability guarantee.
    ///
    /// Note that it is not optimal to use a single filter type, so an adaptive
    /// filter type is selected as the default. The filter which best minimizes
    /// file size may change with the type of compression used.
    pub fn new_with_quality(
        w: W,
        compression: CompressionType,
        filter: FilterType,
    ) -> PngEncoder<W> {
        PngEncoder {
            w,
            compression,
            filter,
            icc_profile: Vec::new(),
            exif_metadata: Vec::new(),
        }
    }
    /// Shared encoding path: maps this crate's color/compression/filter
    /// settings onto the `png` crate's types and writes `data` as one image.
    fn encode_inner(
        self,
        data: &[u8],
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        // Only 8- and 16-bit L/La/Rgb/Rgba layouts can be written losslessly
        // as PNG; everything else is rejected.
        let (ct, bits) = match color {
            ExtendedColorType::L8 => (png::ColorType::Grayscale, png::BitDepth::Eight),
            ExtendedColorType::L16 => (png::ColorType::Grayscale, png::BitDepth::Sixteen),
            ExtendedColorType::La8 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Eight),
            ExtendedColorType::La16 => (png::ColorType::GrayscaleAlpha, png::BitDepth::Sixteen),
            ExtendedColorType::Rgb8 => (png::ColorType::Rgb, png::BitDepth::Eight),
            ExtendedColorType::Rgb16 => (png::ColorType::Rgb, png::BitDepth::Sixteen),
            ExtendedColorType::Rgba8 => (png::ColorType::Rgba, png::BitDepth::Eight),
            ExtendedColorType::Rgba16 => (png::ColorType::Rgba, png::BitDepth::Sixteen),
            _ => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Png.into(),
                        UnsupportedErrorKind::Color(color),
                    ),
                ))
            }
        };
        // The wildcard arm covers `Fast` and any future variants of the
        // #[non_exhaustive] enum.
        let comp = match self.compression {
            CompressionType::Default => png::Compression::Balanced,
            CompressionType::Best => png::Compression::High,
            _ => png::Compression::Fast,
        };
        let filter = match self.filter {
            FilterType::NoFilter => png::Filter::NoFilter,
            FilterType::Sub => png::Filter::Sub,
            FilterType::Up => png::Filter::Up,
            FilterType::Avg => png::Filter::Avg,
            FilterType::Paeth => png::Filter::Paeth,
            FilterType::Adaptive => png::Filter::Adaptive,
        };
        let mut info = png::Info::with_size(width, height);
        // Only embed metadata chunks that were actually provided.
        if !self.icc_profile.is_empty() {
            info.icc_profile = Some(Cow::Borrowed(&self.icc_profile));
        }
        if !self.exif_metadata.is_empty() {
            info.exif_metadata = Some(Cow::Borrowed(&self.exif_metadata));
        }
        let mut encoder =
            png::Encoder::with_info(self.w, info).map_err(|e| ImageError::IoError(e.into()))?;
        encoder.set_color(ct);
        encoder.set_depth(bits);
        encoder.set_compression(comp);
        encoder.set_filter(filter);
        let mut writer = encoder
            .write_header()
            .map_err(|e| ImageError::IoError(e.into()))?;
        writer
            .write_image_data(data)
            .map_err(|e| ImageError::IoError(e.into()))
    }
}
impl<W: Write> ImageEncoder for PngEncoder<W> {
    /// Write a PNG image with the specified width, height, and color type.
    ///
    /// For color types with 16-bit per channel or larger, the contents of `buf` should be in
    /// native endian. `PngEncoder` will automatically convert to big endian as required by the
    /// underlying PNG format.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        use ExtendedColorType::*;
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        // PNG images are big endian. For 16 bit per channel and larger types,
        // the buffer may need to be reordered to big endian per the
        // contract of `write_image`.
        // TODO: assumes equal channel bit depth.
        match color_type {
            L8 | La8 | Rgb8 | Rgba8 => {
                // No reordering necessary for u8
                self.encode_inner(buf, width, height, color_type)
            }
            L16 | La16 | Rgb16 | Rgba16 => {
                // Because the buffer is immutable and the PNG encoder does not
                // yet take Write/Read traits, create a temporary buffer for
                // big endian reordering.
                let mut reordered;
                let buf = if cfg!(target_endian = "little") {
                    reordered = vec_try_with_capacity(buf.len())?;
                    reordered.extend(buf.chunks_exact(2).flat_map(|le| [le[1], le[0]]));
                    &reordered
                } else {
                    buf
                };
                self.encode_inner(buf, width, height, color_type)
            }
            // Bug fix: the unsupported-color error previously reported
            // `ImageFormat::Hdr` (copy-paste from the HDR codec); this is the
            // PNG encoder, consistent with `encode_inner` above.
            _ => Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Png.into(),
                    UnsupportedErrorKind::Color(color_type),
                ),
            )),
        }
    }
    /// Store an ICC profile to embed; always accepted by the PNG encoder.
    fn set_icc_profile(&mut self, icc_profile: Vec<u8>) -> Result<(), UnsupportedError> {
        self.icc_profile = icc_profile;
        Ok(())
    }
    /// Store an EXIF payload to embed; always accepted by the PNG encoder.
    fn set_exif_metadata(&mut self, exif: Vec<u8>) -> Result<(), UnsupportedError> {
        self.exif_metadata = exif;
        Ok(())
    }
}
impl ImageError {
    /// Translate a `png` crate decoding error into the crate-level `ImageError`.
    fn from_png(err: png::DecodingError) -> ImageError {
        match err {
            png::DecodingError::IoError(err) => ImageError::IoError(err),
            // The input image was not a valid PNG.
            err @ png::DecodingError::Format(_) => {
                ImageError::Decoding(DecodingError::new(ImageFormat::Png.into(), err))
            }
            // Parameter is used when:
            // - The decoder is polled for more animation frames despite being done (or not being animated
            //   in the first place).
            // - The output buffer does not have the required size.
            err @ png::DecodingError::Parameter(_) => ImageError::Parameter(
                ParameterError::from_kind(ParameterErrorKind::Generic(err.to_string())),
            ),
            png::DecodingError::LimitsExceeded => {
                ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::io::free_functions::decoder_to_vec;
    use std::io::{BufReader, Cursor, Read};

    /// Decoding a known image must yield exactly width * height * channels bytes.
    #[test]
    fn ensure_no_decoder_off_by_one() {
        let reader = BufReader::new(
            std::fs::File::open("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png")
                .unwrap(),
        );
        let decoder = PngDecoder::new(reader).expect("Unable to read PNG file (does it exist?)");

        assert_eq!((2000, 1000), decoder.dimensions());
        assert_eq!(
            ColorType::Rgb8,
            decoder.color_type(),
            "Image MUST have the Rgb8 format"
        );

        let decoded: Vec<u8> = decoder_to_vec(decoder)
            .expect("Unable to read file")
            .bytes()
            .map(|x| x.expect("Unable to read byte"))
            .collect();

        // 2000 pixels wide * 1000 high * 3 channels
        assert_eq!(6_000_000, decoded.len());
    }

    /// A failed decode must expose the inner `png` error through `source()`.
    #[test]
    fn underlying_error() {
        use std::error::Error;

        let mut corrupted =
            std::fs::read("tests/images/png/bugfixes/debug_triangle_corners_widescreen.png")
                .unwrap();
        // Destroy the PNG signature so decoding fails.
        corrupted[0] = 0;

        let error = PngDecoder::new(Cursor::new(&corrupted)).err().unwrap();
        let _ = error
            .source()
            .unwrap()
            .downcast_ref::<png::DecodingError>()
            .expect("Caused by a png error");
    }

    /// Regression test for issue #1663: an unsupported color type must yield
    /// an error rather than a panic.
    #[test]
    fn encode_bad_color_type() {
        let image = DynamicImage::new_rgb32f(1, 1);
        let mut target = Cursor::new(vec![]);
        let _ = image.write_to(&mut target, ImageFormat::Png);
    }
}

126
vendor/image/src/codecs/pnm/autobreak.rs vendored Normal file
View File

@@ -0,0 +1,126 @@
//! Insert line breaks between written buffers when they would overflow the line length.
use std::io;
// The pnm standard says to insert line breaks after 70 characters. Assumes that no line breaks
// are actually written. We have to be careful to fully commit buffers or not commit them at all,
// otherwise we might insert a newline in the middle of a token.
pub(crate) struct AutoBreak<W: io::Write> {
    /// The underlying writer that receives the committed lines.
    wrapped: W,
    /// Maximum number of bytes per output line before a break is inserted.
    line_capacity: usize,
    /// Bytes buffered for the current line, committed whole to `wrapped`.
    line: Vec<u8>,
    /// A line break was queued by the previous write and is still pending.
    has_newline: bool,
    panicked: bool, // see https://github.com/rust-lang/rust/issues/30888
}
impl<W: io::Write> AutoBreak<W> {
    /// Create a line-breaking writer with the given maximum line length.
    ///
    /// Fails when the line buffer (capacity plus one byte for the newline)
    /// cannot be allocated.
    pub(crate) fn new(writer: W, line_capacity: usize) -> io::Result<Self> {
        let mut line = Vec::new();
        line.try_reserve_exact(line_capacity + 1)?;
        Ok(AutoBreak {
            wrapped: writer,
            line_capacity,
            line,
            has_newline: false,
            panicked: false,
        })
    }

    /// Write the buffered line to the wrapped writer, retrying partial writes.
    fn flush_buf(&mut self) -> io::Result<()> {
        // from BufWriter
        let mut written = 0;
        let len = self.line.len();
        let mut ret = Ok(());
        while written < len {
            // `panicked` guards the Drop impl: if the inner write panics we
            // must not touch the writer again during unwinding.
            self.panicked = true;
            let r = self.wrapped.write(&self.line[written..]);
            self.panicked = false;
            match r {
                Ok(0) => {
                    // A zero-length write makes no progress; report it as an error.
                    ret = Err(io::Error::new(
                        io::ErrorKind::WriteZero,
                        "failed to write the buffered data",
                    ));
                    break;
                }
                Ok(n) => written += n,
                // Interrupted writes are transient; retry.
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => {
                    ret = Err(e);
                    break;
                }
            }
        }
        // Drop whatever was successfully committed; keep the rest buffered.
        if written > 0 {
            self.line.drain(..written);
        }
        ret
    }
}
impl<W: io::Write> io::Write for AutoBreak<W> {
    /// Buffer `buffer` as one indivisible token, inserting a line break first
    /// when appending it would overflow the line capacity.
    fn write(&mut self, buffer: &[u8]) -> io::Result<usize> {
        // A newline queued by the previous overflow is still pending; commit it first.
        if self.has_newline {
            self.flush()?;
            self.has_newline = false;
        }
        // Buffers are committed whole (never split), so a newline can never
        // land in the middle of a token.
        if !self.line.is_empty() && self.line.len() + buffer.len() > self.line_capacity {
            self.line.push(b'\n');
            self.has_newline = true;
            self.flush()?;
            self.has_newline = false;
        }
        self.line.extend_from_slice(buffer);
        Ok(buffer.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        self.flush_buf()?;
        self.wrapped.flush()
    }
}
impl<W: io::Write> Drop for AutoBreak<W> {
    /// Flush any buffered line data on drop, unless a previous write panicked.
    fn drop(&mut self) {
        if self.panicked {
            // A write to the inner writer panicked earlier; do not touch it
            // again during unwinding.
            return;
        }
        // Errors are intentionally ignored here; the internal writer is
        // flushed automatically by its own Drop.
        let _r = self.flush_buf();
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    /// Writes that exactly fill the line get a break between them.
    #[test]
    fn test_aligned_writes() {
        let mut output = Vec::new();
        let mut writer = AutoBreak::new(&mut output, 10).unwrap();
        writer.write_all(b"0123456789").unwrap();
        writer.write_all(b"0123456789").unwrap();
        drop(writer);
        assert_eq!(output.as_slice(), b"0123456789\n0123456789");
    }

    /// Oversized tokens are committed whole; breaks only land between tokens.
    #[test]
    fn test_greater_writes() {
        let mut output = Vec::new();
        let mut writer = AutoBreak::new(&mut output, 10).unwrap();
        writer.write_all(b"012").unwrap();
        writer.write_all(b"345").unwrap();
        writer.write_all(b"0123456789").unwrap();
        writer.write_all(b"012345678910").unwrap();
        writer.write_all(b"_").unwrap();
        drop(writer);
        assert_eq!(output.as_slice(), b"012345\n0123456789\n012345678910\n_");
    }
}

1465
vendor/image/src/codecs/pnm/decoder.rs vendored Normal file

File diff suppressed because it is too large Load Diff

770
vendor/image/src/codecs/pnm/encoder.rs vendored Normal file
View File

@@ -0,0 +1,770 @@
//! Encoding of PNM Images
use crate::utils::vec_try_with_capacity;
use std::fmt;
use std::io;
use std::io::Write;
use super::AutoBreak;
use super::{ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader};
use super::{HeaderRecord, PnmHeader, PnmSubtype, SampleEncoding};
use crate::color::ExtendedColorType;
use crate::error::{
ImageError, ImageResult, ParameterError, ParameterErrorKind, UnsupportedError,
UnsupportedErrorKind,
};
use crate::{ImageEncoder, ImageFormat};
use byteorder_lite::{BigEndian, WriteBytesExt};
/// How the encoder decides on a header for each image.
enum HeaderStrategy {
    /// Generate a fitting header per image (the default).
    Dynamic,
    /// Fix the magic number/encoding, generate the rest dynamically.
    Subtype(PnmSubtype),
    /// Use exactly this header, validated against each image.
    Chosen(PnmHeader),
}

/// A borrowed sample buffer of either 8-bit or 16-bit samples.
#[derive(Clone, Copy)]
pub enum FlatSamples<'a> {
    U8(&'a [u8]),
    U16(&'a [u16]),
}

/// Encodes images to any of the `pnm` image formats.
pub struct PnmEncoder<W: Write> {
    writer: W,
    header: HeaderStrategy,
}

/// Encapsulate the checking system in the type system. None of the fields are actually accessed
/// but requiring them forces us to validly construct the struct anyways.
struct CheckedImageBuffer<'a> {
    _image: FlatSamples<'a>,
    _width: u32,
    _height: u32,
    _color: ExtendedColorType,
}

// Check the header against the buffer. Each struct produces the next after a check.
struct UncheckedHeader<'a> {
    header: &'a PnmHeader,
}

// Header whose dimensions were verified against the image dimensions.
struct CheckedDimensions<'a> {
    unchecked: UncheckedHeader<'a>,
    width: u32,
    height: u32,
}

// Header whose color compatibility was additionally verified.
struct CheckedHeaderColor<'a> {
    dimensions: CheckedDimensions<'a>,
    color: ExtendedColorType,
}

// Fully validated header plus the encoding chosen for the sample body.
struct CheckedHeader<'a> {
    color: CheckedHeaderColor<'a>,
    encoding: TupleEncoding<'a>,
    _image: CheckedImageBuffer<'a>,
}

/// How the sample body is serialized after the header.
enum TupleEncoding<'a> {
    /// Bit-packed bitmap samples, eight pixels per byte (binary `pbm`).
    PbmBits {
        samples: FlatSamples<'a>,
        width: u32,
    },
    /// Whitespace-separated decimal ascii samples.
    Ascii {
        samples: FlatSamples<'a>,
    },
    /// Raw big-endian binary samples.
    Bytes {
        samples: FlatSamples<'a>,
    },
}
impl<W: Write> PnmEncoder<W> {
    /// Create new `PnmEncoder` from the `writer`.
    ///
    /// The encoded images will have some `pnm` format. If more control over the image type is
    /// required, use either one of `with_subtype` or `with_header`. For more information on the
    /// behaviour, see `with_dynamic_header`.
    pub fn new(writer: W) -> Self {
        PnmEncoder {
            writer,
            header: HeaderStrategy::Dynamic,
        }
    }

    /// Encode a specific pnm subtype image.
    ///
    /// The magic number and encoding type will be chosen as provided while the rest of the header
    /// data will be generated dynamically. Trying to encode incompatible images (e.g. encoding an
    /// RGB image as Graymap) will result in an error.
    ///
    /// This will overwrite the effect of earlier calls to `with_header` and `with_dynamic_header`.
    pub fn with_subtype(self, subtype: PnmSubtype) -> Self {
        PnmEncoder {
            writer: self.writer,
            header: HeaderStrategy::Subtype(subtype),
        }
    }

    /// Enforce the use of a chosen header.
    ///
    /// While this option gives the most control over the actual written data, the encoding process
    /// will error in case the header data and image parameters do not agree. It is the users
    /// obligation to ensure that the width and height are set accordingly, for example.
    ///
    /// Choose this option if you want a lossless decoding/encoding round trip.
    ///
    /// This will overwrite the effect of earlier calls to `with_subtype` and `with_dynamic_header`.
    pub fn with_header(self, header: PnmHeader) -> Self {
        PnmEncoder {
            writer: self.writer,
            header: HeaderStrategy::Chosen(header),
        }
    }

    /// Create the header dynamically for each image.
    ///
    /// This is the default option upon creation of the encoder. With this, most images should be
    /// encodable but the specific format chosen is out of the users control. The pnm subtype is
    /// chosen arbitrarily by the library.
    ///
    /// This will overwrite the effect of earlier calls to `with_subtype` and `with_header`.
    pub fn with_dynamic_header(self) -> Self {
        PnmEncoder {
            writer: self.writer,
            header: HeaderStrategy::Dynamic,
        }
    }

    /// Encode an image whose samples are represented as a sequence of `u8` or `u16` data.
    ///
    /// If `image` is a slice of `u8`, the samples will be interpreted based on the chosen `color` option.
    /// Color types of 16-bit precision means that the bytes are reinterpreted as 16-bit samples,
    /// otherwise they are treated as 8-bit samples.
    /// If `image` is a slice of `u16`, the samples will be interpreted as 16-bit samples directly.
    ///
    /// Some `pnm` subtypes are incompatible with some color options, a chosen header most
    /// certainly with any deviation from the original decoded image.
    pub fn encode<'s, S>(
        &mut self,
        image: S,
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()>
    where
        S: Into<FlatSamples<'s>>,
    {
        let image = image.into();
        // adapt samples so that they are aligned even in 16-bit samples,
        // required due to the narrowing of the image buffer to &[u8]
        // on dynamic image writing
        let image = match (image, color) {
            (
                FlatSamples::U8(samples),
                ExtendedColorType::L16
                | ExtendedColorType::La16
                | ExtendedColorType::Rgb16
                | ExtendedColorType::Rgba16,
            ) => {
                match bytemuck::try_cast_slice(samples) {
                    // proceed with aligned 16-bit samples
                    Ok(samples) => FlatSamples::U16(samples),
                    Err(_e) => {
                        // reallocation is required
                        // NOTE(review): `chunks(2)` panics at `chunk[1]` for an
                        // odd-length buffer — presumably callers always pass an
                        // even byte count for 16-bit color types; confirm, or
                        // consider `chunks_exact(2)`.
                        let new_samples: Vec<u16> = samples
                            .chunks(2)
                            .map(|chunk| u16::from_ne_bytes([chunk[0], chunk[1]]))
                            .collect();
                        let image = FlatSamples::U16(&new_samples);
                        // make a separate encoding path,
                        // because the image buffer lifetime has changed
                        return self.encode_impl(image, width, height, color);
                    }
                }
            }
            // should not be necessary for any other case
            _ => image,
        };
        self.encode_impl(image, width, height, color)
    }

    /// Encode an image whose samples are already interpreted correctly.
    fn encode_impl(
        &mut self,
        samples: FlatSamples<'_>,
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        // Dispatch on the configured header strategy; all paths end in
        // `write_with_header` which validates header vs. image parameters.
        match self.header {
            HeaderStrategy::Dynamic => self.write_dynamic_header(samples, width, height, color),
            HeaderStrategy::Subtype(subtype) => {
                self.write_subtyped_header(subtype, samples, width, height, color)
            }
            HeaderStrategy::Chosen(ref header) => {
                Self::write_with_header(&mut self.writer, header, samples, width, height, color)
            }
        }
    }

    /// Choose any valid pnm format that the image can be expressed in and write its header.
    ///
    /// Returns how the body should be written if successful.
    fn write_dynamic_header(
        &mut self,
        image: FlatSamples,
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        let depth = u32::from(color.channel_count());
        // Dynamic headers always use the `pam` (P7) arbitrary map format.
        let (maxval, tupltype) = match color {
            ExtendedColorType::L1 => (1, ArbitraryTuplType::BlackAndWhite),
            ExtendedColorType::L8 => (0xff, ArbitraryTuplType::Grayscale),
            ExtendedColorType::L16 => (0xffff, ArbitraryTuplType::Grayscale),
            ExtendedColorType::La1 => (1, ArbitraryTuplType::BlackAndWhiteAlpha),
            ExtendedColorType::La8 => (0xff, ArbitraryTuplType::GrayscaleAlpha),
            ExtendedColorType::La16 => (0xffff, ArbitraryTuplType::GrayscaleAlpha),
            ExtendedColorType::Rgb8 => (0xff, ArbitraryTuplType::RGB),
            ExtendedColorType::Rgb16 => (0xffff, ArbitraryTuplType::RGB),
            ExtendedColorType::Rgba8 => (0xff, ArbitraryTuplType::RGBAlpha),
            ExtendedColorType::Rgba16 => (0xffff, ArbitraryTuplType::RGBAlpha),
            _ => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Pnm.into(),
                        UnsupportedErrorKind::Color(color),
                    ),
                ))
            }
        };

        let header = PnmHeader {
            decoded: HeaderRecord::Arbitrary(ArbitraryHeader {
                width,
                height,
                depth,
                maxval,
                tupltype: Some(tupltype),
            }),
            encoded: None,
        };

        Self::write_with_header(&mut self.writer, &header, image, width, height, color)
    }

    /// Try to encode the image with the chosen format, give its corresponding pixel encoding type.
    fn write_subtyped_header(
        &mut self,
        subtype: PnmSubtype,
        image: FlatSamples,
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        let header = match (subtype, color) {
            // P7 carries full color information, defer to the dynamic path.
            (PnmSubtype::ArbitraryMap, color) => {
                return self.write_dynamic_header(image, width, height, color)
            }
            (PnmSubtype::Pixmap(encoding), ExtendedColorType::Rgb8) => PnmHeader {
                decoded: HeaderRecord::Pixmap(PixmapHeader {
                    encoding,
                    width,
                    height,
                    maxval: 255,
                }),
                encoded: None,
            },
            (PnmSubtype::Graymap(encoding), ExtendedColorType::L8) => PnmHeader {
                decoded: HeaderRecord::Graymap(GraymapHeader {
                    encoding,
                    width,
                    height,
                    maxwhite: 255,
                }),
                encoded: None,
            },
            (PnmSubtype::Bitmap(encoding), ExtendedColorType::L8 | ExtendedColorType::L1) => {
                PnmHeader {
                    decoded: HeaderRecord::Bitmap(BitmapHeader {
                        encoding,
                        height,
                        width,
                    }),
                    encoded: None,
                }
            }
            // Any other subtype/color pairing is incompatible.
            (_, _) => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Pnm.into(),
                        UnsupportedErrorKind::Color(color),
                    ),
                ))
            }
        };

        Self::write_with_header(&mut self.writer, &header, image, width, height, color)
    }

    /// Try to encode the image with the chosen header, checking if values are correct.
    ///
    /// Returns how the body should be written if successful.
    fn write_with_header(
        writer: &mut dyn Write,
        header: &PnmHeader,
        image: FlatSamples,
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<()> {
        let unchecked = UncheckedHeader { header };

        // Type-state validation chain: dimensions -> color -> sample values,
        // then header write followed by the body.
        unchecked
            .check_header_dimensions(width, height)?
            .check_header_color(color)?
            .check_sample_values(image)?
            .write_header(writer)?
            .write_image(writer)
    }
}
impl<W: Write> ImageEncoder for PnmEncoder<W> {
    /// Write the sample buffer as a pnm image of the given dimensions.
    ///
    /// # Panics
    ///
    /// Panics when `buf.len()` does not match the byte count implied by
    /// `width`, `height` and `color_type`.
    #[track_caller]
    fn write_image(
        mut self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        self.encode(buf, width, height, color_type)
    }
}
impl<'a> CheckedImageBuffer<'a> {
    /// Validate that `image` holds exactly `channels * width * height` samples.
    ///
    /// Returns a dimension-mismatch parameter error when the buffer length does
    /// not match, including the case where the product overflows `usize` (no
    /// valid length exists then).
    fn check(
        image: FlatSamples<'a>,
        width: u32,
        height: u32,
        color: ExtendedColorType,
    ) -> ImageResult<CheckedImageBuffer<'a>> {
        let expected_len = (color.channel_count() as usize)
            .checked_mul(width as usize)
            .and_then(|per_row_total| per_row_total.checked_mul(height as usize));
        match expected_len {
            Some(len) if len == image.len() => Ok(CheckedImageBuffer {
                _image: image,
                _width: width,
                _height: height,
                _color: color,
            }),
            // Image buffer does not correspond to size and colour.
            _ => Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::DimensionMismatch,
            ))),
        }
    }
}
impl<'a> UncheckedHeader<'a> {
    /// Ensure the chosen header agrees with the actual image dimensions.
    fn check_header_dimensions(
        self,
        width: u32,
        height: u32,
    ) -> ImageResult<CheckedDimensions<'a>> {
        let dimensions_match =
            self.header.width() == width && self.header.height() == height;
        if dimensions_match {
            Ok(CheckedDimensions {
                unchecked: self,
                width,
                height,
            })
        } else {
            // Chosen header does not match the image dimensions.
            Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::DimensionMismatch,
            )))
        }
    }
}
impl<'a> CheckedDimensions<'a> {
    // Check color compatibility with the header. This will only error when we are certain that
    // the combination is bogus (e.g. combining Pixmap and Palette) but allows uncertain
    // combinations (basically a ArbitraryTuplType::Custom with any color of fitting depth).
    fn check_header_color(self, color: ExtendedColorType) -> ImageResult<CheckedHeaderColor<'a>> {
        let components = u32::from(color.channel_count());

        match *self.unchecked.header {
            // PBM: only single-channel luma is representable.
            PnmHeader {
                decoded: HeaderRecord::Bitmap(_),
                ..
            } => match color {
                ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (),
                _ => {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(
                            "PBM format only support luma color types".to_owned(),
                        ),
                    )))
                }
            },
            // PGM: likewise luma only.
            PnmHeader {
                decoded: HeaderRecord::Graymap(_),
                ..
            } => match color {
                ExtendedColorType::L1 | ExtendedColorType::L8 | ExtendedColorType::L16 => (),
                _ => {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(
                            "PGM format only support luma color types".to_owned(),
                        ),
                    )))
                }
            },
            // PPM: exactly 8-bit RGB.
            PnmHeader {
                decoded: HeaderRecord::Pixmap(_),
                ..
            } => match color {
                ExtendedColorType::Rgb8 => (),
                _ => {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(
                            "PPM format only support ExtendedColorType::Rgb8".to_owned(),
                        ),
                    )))
                }
            },
            // PAM: match the declared tuple type, or fall back to a pure
            // channel-count comparison for None/Custom tuple types.
            PnmHeader {
                decoded:
                    HeaderRecord::Arbitrary(ArbitraryHeader {
                        depth,
                        ref tupltype,
                        ..
                    }),
                ..
            } => match (tupltype, color) {
                (&Some(ArbitraryTuplType::BlackAndWhite), ExtendedColorType::L1) => (),
                (&Some(ArbitraryTuplType::BlackAndWhiteAlpha), ExtendedColorType::La8) => (),

                (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L1) => (),
                (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L8) => (),
                (&Some(ArbitraryTuplType::Grayscale), ExtendedColorType::L16) => (),
                (&Some(ArbitraryTuplType::GrayscaleAlpha), ExtendedColorType::La8) => (),

                (&Some(ArbitraryTuplType::RGB), ExtendedColorType::Rgb8) => (),
                (&Some(ArbitraryTuplType::RGB), ExtendedColorType::Rgb16) => (),
                (&Some(ArbitraryTuplType::RGBAlpha), ExtendedColorType::Rgba8) => (),
                (&Some(ArbitraryTuplType::RGBAlpha), ExtendedColorType::Rgba16) => (),

                (&None, _) if depth == components => (),
                (&Some(ArbitraryTuplType::Custom(_)), _) if depth == components => (),
                // NOTE: this guard must stay before the final catch-all so the
                // more specific depth-mismatch message is preferred.
                _ if depth != components => {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(format!(
                            "Depth mismatch: header {depth} vs. color {components}"
                        )),
                    )))
                }
                _ => {
                    return Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(
                            "Invalid color type for selected PAM color type".to_owned(),
                        ),
                    )))
                }
            },
        }

        Ok(CheckedHeaderColor {
            dimensions: self,
            color,
        })
    }
}
impl<'a> CheckedHeaderColor<'a> {
    /// Verify that every sample fits below the header's declared maximum and
    /// pick the body encoding; the final step of the validation chain.
    fn check_sample_values(self, image: FlatSamples<'a>) -> ImageResult<CheckedHeader<'a>> {
        // The maximum sample value the chosen header permits.
        let header_maxval = match self.dimensions.unchecked.header.decoded {
            HeaderRecord::Bitmap(_) => 1,
            HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite,
            HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval,
            HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval,
        };

        // We trust the image color bit count to be correct at least.
        let max_sample = match self.color {
            ExtendedColorType::Unknown(n) if n <= 16 => (1 << n) - 1,
            ExtendedColorType::L1 => 1,
            ExtendedColorType::L8
            | ExtendedColorType::La8
            | ExtendedColorType::Rgb8
            | ExtendedColorType::Rgba8
            | ExtendedColorType::Bgr8
            | ExtendedColorType::Bgra8 => 0xff,
            ExtendedColorType::L16
            | ExtendedColorType::La16
            | ExtendedColorType::Rgb16
            | ExtendedColorType::Rgba16 => 0xffff,
            _ => {
                // Unsupported target color type.
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Pnm.into(),
                        UnsupportedErrorKind::Color(self.color),
                    ),
                ));
            }
        };

        // Avoid the performance heavy check if possible, e.g. if the header has been chosen by us.
        if header_maxval < max_sample && !image.all_smaller(header_maxval) {
            // Sample value greater than allowed for chosen header.
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Pnm.into(),
                    UnsupportedErrorKind::GenericFeature(
                        "Sample value greater than allowed for chosen header".to_owned(),
                    ),
                ),
            ));
        }

        let encoding = image.encoding_for(&self.dimensions.unchecked.header.decoded);

        let image = CheckedImageBuffer::check(
            image,
            self.dimensions.width,
            self.dimensions.height,
            self.color,
        )?;

        Ok(CheckedHeader {
            color: self,
            encoding,
            _image: image,
        })
    }
}
impl<'a> CheckedHeader<'a> {
    /// Write the fully validated header and return how the body must be encoded.
    fn write_header(self, writer: &mut dyn Write) -> ImageResult<TupleEncoding<'a>> {
        self.header().write(writer)?;
        Ok(self.encoding)
    }

    /// The validated header, reached through the validation chain.
    fn header(&self) -> &PnmHeader {
        self.color.dimensions.unchecked.header
    }
}
// Thin wrapper bundling the serialization routines for sample bodies.
struct SampleWriter<'a>(&'a mut dyn Write);

impl SampleWriter<'_> {
    /// Write samples as decimal ascii separated by spaces, breaking lines at
    /// 70 characters via `AutoBreak` as the pnm standard recommends.
    fn write_samples_ascii<V>(self, samples: V) -> io::Result<()>
    where
        V: Iterator,
        V::Item: fmt::Display,
    {
        let mut auto_break_writer = AutoBreak::new(self.0, 70)?;
        for value in samples {
            write!(auto_break_writer, "{value} ")?;
        }
        auto_break_writer.flush()
    }

    /// Bit-pack bitmap samples, eight pixels per byte, one scanline at a time
    /// (each scanline is padded up to a full byte).
    fn write_pbm_bits<V>(self, samples: &[V], width: u32) -> io::Result<()>
    /* Default gives 0 for all primitives. TODO: replace this with `Zeroable` once it hits stable */
    where
        V: Default + Eq + Copy,
    {
        // The length of an encoded scanline
        // NOTE(review): `width == 0` would underflow here — presumably ruled
        // out by earlier dimension/buffer checks; confirm.
        let line_width = (width - 1) / 8 + 1;

        // We'll be writing single bytes, so buffer
        let mut line_buffer = vec_try_with_capacity(line_width as usize)?;

        for line in samples.chunks(width as usize) {
            for byte_bits in line.chunks(8) {
                let mut byte = 0u8;
                for i in 0..8 {
                    // Black pixels are encoded as 1s
                    if let Some(&v) = byte_bits.get(i) {
                        if v == V::default() {
                            byte |= 1u8 << (7 - i);
                        }
                    }
                }
                line_buffer.push(byte);
            }
            self.0.write_all(line_buffer.as_slice())?;
            line_buffer.clear();
        }

        self.0.flush()
    }
}
impl<'a> FlatSamples<'a> {
    /// Number of samples in the buffer, regardless of sample width.
    fn len(&self) -> usize {
        match *self {
            FlatSamples::U8(arr) => arr.len(),
            FlatSamples::U16(arr) => arr.len(),
        }
    }

    /// Whether all samples are within `0..=max_val`.
    fn all_smaller(&self, max_val: u32) -> bool {
        match *self {
            FlatSamples::U8(arr) => arr.iter().all(|&val| u32::from(val) <= max_val),
            FlatSamples::U16(arr) => arr.iter().all(|&val| u32::from(val) <= max_val),
        }
    }

    /// Choose how the sample body must be serialized for the given header.
    fn encoding_for(&self, header: &HeaderRecord) -> TupleEncoding<'a> {
        match *header {
            // Binary bitmaps are bit-packed, eight pixels per byte.
            HeaderRecord::Bitmap(BitmapHeader {
                encoding: SampleEncoding::Binary,
                width,
                ..
            }) => TupleEncoding::PbmBits {
                samples: *self,
                width,
            },

            HeaderRecord::Bitmap(BitmapHeader {
                encoding: SampleEncoding::Ascii,
                ..
            }) => TupleEncoding::Ascii { samples: *self },

            // `pam` bodies are always raw binary samples.
            HeaderRecord::Arbitrary(_) => TupleEncoding::Bytes { samples: *self },

            HeaderRecord::Graymap(GraymapHeader {
                encoding: SampleEncoding::Ascii,
                ..
            })
            | HeaderRecord::Pixmap(PixmapHeader {
                encoding: SampleEncoding::Ascii,
                ..
            }) => TupleEncoding::Ascii { samples: *self },

            HeaderRecord::Graymap(GraymapHeader {
                encoding: SampleEncoding::Binary,
                ..
            })
            | HeaderRecord::Pixmap(PixmapHeader {
                encoding: SampleEncoding::Binary,
                ..
            }) => TupleEncoding::Bytes { samples: *self },
        }
    }
}
impl<'a> From<&'a [u8]> for FlatSamples<'a> {
fn from(samples: &'a [u8]) -> Self {
FlatSamples::U8(samples)
}
}
impl<'a> From<&'a [u16]> for FlatSamples<'a> {
fn from(samples: &'a [u16]) -> Self {
FlatSamples::U16(samples)
}
}
impl TupleEncoding<'_> {
    /// Serialize the sample body according to the chosen encoding.
    fn write_image(&self, writer: &mut dyn Write) -> ImageResult<()> {
        match *self {
            TupleEncoding::PbmBits {
                samples: FlatSamples::U8(samples),
                width,
            } => SampleWriter(writer)
                .write_pbm_bits(samples, width)
                .map_err(ImageError::IoError),
            TupleEncoding::PbmBits {
                samples: FlatSamples::U16(samples),
                width,
            } => SampleWriter(writer)
                .write_pbm_bits(samples, width)
                .map_err(ImageError::IoError),

            TupleEncoding::Bytes {
                samples: FlatSamples::U8(samples),
            } => writer.write_all(samples).map_err(ImageError::IoError),
            // 16-bit binary samples are written in big endian as the formats require.
            TupleEncoding::Bytes {
                samples: FlatSamples::U16(samples),
            } => samples.iter().try_for_each(|&sample| {
                writer
                    .write_u16::<BigEndian>(sample)
                    .map_err(ImageError::IoError)
            }),

            TupleEncoding::Ascii {
                samples: FlatSamples::U8(samples),
            } => SampleWriter(writer)
                .write_samples_ascii(samples.iter())
                .map_err(ImageError::IoError),
            TupleEncoding::Ascii {
                samples: FlatSamples::U16(samples),
            } => SampleWriter(writer)
                .write_samples_ascii(samples.iter())
                .map_err(ImageError::IoError),
        }
    }
}
#[test]
fn pbm_allows_black() {
    // A freshly created luma image is all zeroes, i.e. fully black.
    let image = crate::DynamicImage::new_luma8(50, 50);
    let mut output = vec![];
    let encoder =
        PnmEncoder::new(&mut output).with_subtype(PnmSubtype::Bitmap(SampleEncoding::Ascii));
    image
        .write_with_encoder(encoder)
        .expect("all-zeroes is a black image");
}
#[test]
fn pbm_allows_white() {
    // An image filled with sample value 1 is fully white in PBM terms and
    // must encode without error.
    let imgbuf =
        crate::DynamicImage::ImageLuma8(crate::ImageBuffer::from_pixel(50, 50, crate::Luma([1])));
    let mut buffer = vec![];
    let encoder =
        PnmEncoder::new(&mut buffer).with_subtype(PnmSubtype::Bitmap(SampleEncoding::Ascii));
    imgbuf
        .write_with_encoder(encoder)
        // The buffer is all ones, not zeroes; the previous message was
        // copy-pasted from `pbm_allows_black`.
        .expect("all-ones is a white image");
}
#[test]
fn pbm_verifies_pixels() {
    // Sample value 255 exceeds the PBM maximum of 1, so encoding must fail.
    let image =
        crate::DynamicImage::ImageLuma8(crate::ImageBuffer::from_pixel(50, 50, crate::Luma([255])));
    let mut output = vec![];
    let encoder =
        PnmEncoder::new(&mut output).with_subtype(PnmSubtype::Bitmap(SampleEncoding::Ascii));
    image
        .write_with_encoder(encoder)
        .expect_err("failed to catch violating samples");
}

366
vendor/image/src/codecs/pnm/header.rs vendored Normal file
View File

@@ -0,0 +1,366 @@
use std::{fmt, io};
/// The kind of encoding used to store sample values
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum SampleEncoding {
    /// Samples are unsigned binary integers in big endian
    Binary,
    /// Samples are encoded as decimal ascii strings separated by whitespace
    Ascii,
}

/// Denotes the category of the magic number
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum PnmSubtype {
    /// Magic numbers P1 and P4 (`pbm`, 1-bit samples)
    Bitmap(SampleEncoding),
    /// Magic numbers P2 and P5 (`pgm`, grayscale samples)
    Graymap(SampleEncoding),
    /// Magic numbers P3 and P6 (`ppm`, RGB samples)
    Pixmap(SampleEncoding),
    /// Magic number P7 (`pam`, always binary encoded)
    ArbitraryMap,
}
/// Stores the complete header data of a file.
///
/// Internally, provides mechanisms for lossless reencoding. After reading a file with the decoder
/// it is possible to recover the header and construct an encoder. Using the encoder on the just
/// loaded image should result in a byte copy of the original file (for single image pnms without
/// additional trailing data).
#[derive(Clone)]
pub struct PnmHeader {
    // Parsed representation of the header fields.
    pub(crate) decoded: HeaderRecord,
    // Header text following the magic number as originally encountered, if
    // any; `write` replays it verbatim for lossless round trips.
    pub(crate) encoded: Option<Vec<u8>>,
}

// One header variant per pnm magic-number category.
#[derive(Clone)]
pub(crate) enum HeaderRecord {
    Bitmap(BitmapHeader),
    Graymap(GraymapHeader),
    Pixmap(PixmapHeader),
    Arbitrary(ArbitraryHeader),
}
/// Header produced by a `pbm` file ("Portable Bit Map")
///
/// Carries no maximum sample value: pbm samples are single bits.
#[derive(Clone, Copy, Debug)]
pub struct BitmapHeader {
    /// Binary or Ascii encoded file
    pub encoding: SampleEncoding,
    /// Height of the image file
    pub height: u32,
    /// Width of the image file
    pub width: u32,
}

/// Header produced by a `pgm` file ("Portable Gray Map")
#[derive(Clone, Copy, Debug)]
pub struct GraymapHeader {
    /// Binary or Ascii encoded file
    pub encoding: SampleEncoding,
    /// Height of the image file
    pub height: u32,
    /// Width of the image file
    pub width: u32,
    /// Maximum sample value within the image
    pub maxwhite: u32,
}

/// Header produced by a `ppm` file ("Portable Pixel Map")
#[derive(Clone, Copy, Debug)]
pub struct PixmapHeader {
    /// Binary or Ascii encoded file
    pub encoding: SampleEncoding,
    /// Height of the image file
    pub height: u32,
    /// Width of the image file
    pub width: u32,
    /// Maximum sample value within the image
    pub maxval: u32,
}

/// Header produced by a `pam` file ("Portable Arbitrary Map")
#[derive(Clone, Debug)]
pub struct ArbitraryHeader {
    /// Height of the image file
    pub height: u32,
    /// Width of the image file
    pub width: u32,
    /// Number of color channels
    pub depth: u32,
    /// Maximum sample value within the image
    pub maxval: u32,
    /// Color interpretation of image pixels
    pub tupltype: Option<ArbitraryTuplType>,
}

/// Standardized tuple type specifiers in the header of a `pam`.
#[derive(Clone, Debug)]
pub enum ArbitraryTuplType {
    /// Pixels are either black (0) or white (1)
    BlackAndWhite,
    /// Pixels are either black (0) or white (1) and a second alpha channel
    BlackAndWhiteAlpha,
    /// Pixels represent the amount of white
    Grayscale,
    /// Grayscale with an additional alpha channel
    GrayscaleAlpha,
    /// Three channels: Red, Green, Blue
    RGB,
    /// Four channels: Red, Green, Blue, Alpha
    RGBAlpha,
    /// An image format which is not standardized
    Custom(String),
}
impl ArbitraryTuplType {
pub(crate) fn name(&self) -> &str {
match self {
ArbitraryTuplType::BlackAndWhite => "BLACKANDWHITE",
ArbitraryTuplType::BlackAndWhiteAlpha => "BLACKANDWHITE_ALPHA",
ArbitraryTuplType::Grayscale => "GRAYSCALE",
ArbitraryTuplType::GrayscaleAlpha => "GRAYSCALE_ALPHA",
ArbitraryTuplType::RGB => "RGB",
ArbitraryTuplType::RGBAlpha => "RGB_ALPHA",
ArbitraryTuplType::Custom(custom) => custom,
}
}
}
impl PnmSubtype {
/// Get the two magic constant bytes corresponding to this format subtype.
#[must_use]
pub fn magic_constant(self) -> &'static [u8; 2] {
match self {
PnmSubtype::Bitmap(SampleEncoding::Ascii) => b"P1",
PnmSubtype::Graymap(SampleEncoding::Ascii) => b"P2",
PnmSubtype::Pixmap(SampleEncoding::Ascii) => b"P3",
PnmSubtype::Bitmap(SampleEncoding::Binary) => b"P4",
PnmSubtype::Graymap(SampleEncoding::Binary) => b"P5",
PnmSubtype::Pixmap(SampleEncoding::Binary) => b"P6",
PnmSubtype::ArbitraryMap => b"P7",
}
}
/// Whether samples are stored as binary or as decimal ascii
#[must_use]
pub fn sample_encoding(self) -> SampleEncoding {
match self {
PnmSubtype::ArbitraryMap => SampleEncoding::Binary,
PnmSubtype::Bitmap(enc) => enc,
PnmSubtype::Graymap(enc) => enc,
PnmSubtype::Pixmap(enc) => enc,
}
}
}
impl PnmHeader {
    /// Retrieve the format subtype from which the header was created.
    #[must_use]
    pub fn subtype(&self) -> PnmSubtype {
        match self.decoded {
            HeaderRecord::Bitmap(BitmapHeader { encoding, .. }) => PnmSubtype::Bitmap(encoding),
            HeaderRecord::Graymap(GraymapHeader { encoding, .. }) => PnmSubtype::Graymap(encoding),
            HeaderRecord::Pixmap(PixmapHeader { encoding, .. }) => PnmSubtype::Pixmap(encoding),
            HeaderRecord::Arbitrary(ArbitraryHeader { .. }) => PnmSubtype::ArbitraryMap,
        }
    }

    /// The width of the image this header is for.
    #[must_use]
    pub fn width(&self) -> u32 {
        match self.decoded {
            HeaderRecord::Bitmap(BitmapHeader { width, .. }) => width,
            HeaderRecord::Graymap(GraymapHeader { width, .. }) => width,
            HeaderRecord::Pixmap(PixmapHeader { width, .. }) => width,
            HeaderRecord::Arbitrary(ArbitraryHeader { width, .. }) => width,
        }
    }

    /// The height of the image this header is for.
    #[must_use]
    pub fn height(&self) -> u32 {
        match self.decoded {
            HeaderRecord::Bitmap(BitmapHeader { height, .. }) => height,
            HeaderRecord::Graymap(GraymapHeader { height, .. }) => height,
            HeaderRecord::Pixmap(PixmapHeader { height, .. }) => height,
            HeaderRecord::Arbitrary(ArbitraryHeader { height, .. }) => height,
        }
    }

    /// The biggest value a sample can have. In other words, the colour resolution.
    #[must_use]
    pub fn maximal_sample(&self) -> u32 {
        match self.decoded {
            // Bitmap samples are single bits, so the maximum is always 1.
            HeaderRecord::Bitmap(BitmapHeader { .. }) => 1,
            HeaderRecord::Graymap(GraymapHeader { maxwhite, .. }) => maxwhite,
            HeaderRecord::Pixmap(PixmapHeader { maxval, .. }) => maxval,
            HeaderRecord::Arbitrary(ArbitraryHeader { maxval, .. }) => maxval,
        }
    }

    /// Retrieve the underlying bitmap header if any
    #[must_use]
    pub fn as_bitmap(&self) -> Option<&BitmapHeader> {
        match self.decoded {
            HeaderRecord::Bitmap(ref bitmap) => Some(bitmap),
            _ => None,
        }
    }

    /// Retrieve the underlying graymap header if any
    #[must_use]
    pub fn as_graymap(&self) -> Option<&GraymapHeader> {
        match self.decoded {
            HeaderRecord::Graymap(ref graymap) => Some(graymap),
            _ => None,
        }
    }

    /// Retrieve the underlying pixmap header if any
    #[must_use]
    pub fn as_pixmap(&self) -> Option<&PixmapHeader> {
        match self.decoded {
            HeaderRecord::Pixmap(ref pixmap) => Some(pixmap),
            _ => None,
        }
    }

    /// Retrieve the underlying arbitrary header if any
    #[must_use]
    pub fn as_arbitrary(&self) -> Option<&ArbitraryHeader> {
        match self.decoded {
            HeaderRecord::Arbitrary(ref arbitrary) => Some(arbitrary),
            _ => None,
        }
    }

    /// Write the header back into a binary stream
    pub fn write(&self, writer: &mut dyn io::Write) -> io::Result<()> {
        writer.write_all(self.subtype().magic_constant())?;
        match *self {
            // Prefer replaying the originally encoded header bytes, enabling
            // byte-exact round trips; this arm must stay first.
            PnmHeader {
                encoded: Some(ref content),
                ..
            } => writer.write_all(content),
            PnmHeader {
                decoded:
                    HeaderRecord::Bitmap(BitmapHeader {
                        encoding: _encoding,
                        width,
                        height,
                    }),
                ..
            } => writeln!(writer, "\n{width} {height}"),
            PnmHeader {
                decoded:
                    HeaderRecord::Graymap(GraymapHeader {
                        encoding: _encoding,
                        width,
                        height,
                        maxwhite,
                    }),
                ..
            } => writeln!(writer, "\n{width} {height} {maxwhite}"),
            PnmHeader {
                decoded:
                    HeaderRecord::Pixmap(PixmapHeader {
                        encoding: _encoding,
                        width,
                        height,
                        maxval,
                    }),
                ..
            } => writeln!(writer, "\n{width} {height} {maxval}"),
            PnmHeader {
                decoded:
                    HeaderRecord::Arbitrary(ArbitraryHeader {
                        width,
                        height,
                        depth,
                        maxval,
                        ref tupltype,
                    }),
                ..
            } => {
                // Helper emitting the optional TUPLTYPE line (nothing for None).
                struct TupltypeWriter<'a>(&'a Option<ArbitraryTuplType>);
                impl fmt::Display for TupltypeWriter<'_> {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        match self.0 {
                            Some(tt) => writeln!(f, "TUPLTYPE {}", tt.name()),
                            None => Ok(()),
                        }
                    }
                }
                writeln!(
                    writer,
                    "\nWIDTH {}\nHEIGHT {}\nDEPTH {}\nMAXVAL {}\n{}ENDHDR",
                    width,
                    height,
                    depth,
                    maxval,
                    TupltypeWriter(tupltype)
                )
            }
        }
    }
}
impl From<BitmapHeader> for PnmHeader {
    /// Wrap a PBM header in a `PnmHeader` without cached raw header bytes.
    fn from(header: BitmapHeader) -> Self {
        let decoded = HeaderRecord::Bitmap(header);
        Self { decoded, encoded: None }
    }
}
impl From<GraymapHeader> for PnmHeader {
    /// Wrap a PGM header in a `PnmHeader` without cached raw header bytes.
    fn from(header: GraymapHeader) -> Self {
        let decoded = HeaderRecord::Graymap(header);
        Self { decoded, encoded: None }
    }
}
impl From<PixmapHeader> for PnmHeader {
    /// Wrap a PPM header in a `PnmHeader` without cached raw header bytes.
    fn from(header: PixmapHeader) -> Self {
        let decoded = HeaderRecord::Pixmap(header);
        Self { decoded, encoded: None }
    }
}
impl From<ArbitraryHeader> for PnmHeader {
    /// Wrap a PAM header in a `PnmHeader` without cached raw header bytes.
    fn from(header: ArbitraryHeader) -> Self {
        let decoded = HeaderRecord::Arbitrary(header);
        Self { decoded, encoded: None }
    }
}

191
vendor/image/src/codecs/pnm/mod.rs vendored Normal file
View File

@@ -0,0 +1,191 @@
//! Decoding of netpbm image formats (pbm, pgm, ppm and pam).
//!
//! The formats pbm, pgm and ppm are fully supported. Only the official subformats
//! (`BLACKANDWHITE`, `GRAYSCALE`, `RGB`, `BLACKANDWHITE_ALPHA`, `GRAYSCALE_ALPHA`,
//! and `RGB_ALPHA`) of pam are supported; custom tuple types have no clear
//! interpretation as an image and will be rejected.
use self::autobreak::AutoBreak;
pub use self::decoder::PnmDecoder;
pub use self::encoder::PnmEncoder;
use self::header::HeaderRecord;
pub use self::header::{
ArbitraryHeader, ArbitraryTuplType, BitmapHeader, GraymapHeader, PixmapHeader,
};
pub use self::header::{PnmHeader, PnmSubtype, SampleEncoding};
mod autobreak;
mod decoder;
mod encoder;
mod header;
#[cfg(test)]
mod tests {
    use super::*;
    use crate::ExtendedColorType;
    use crate::ImageDecoder as _;
    use byteorder_lite::{ByteOrder, NativeEndian};

    /// Encode `buffer` with default settings, decode the result again, and
    /// assert that header fields, color type, and pixel bytes all round-trip.
    fn execute_roundtrip_default(buffer: &[u8], width: u32, height: u32, color: ExtendedColorType) {
        let mut encoded_buffer = Vec::new();
        {
            let mut encoder = PnmEncoder::new(&mut encoded_buffer);
            encoder
                .encode(buffer, width, height, color)
                .expect("Failed to encode the image buffer");
        }
        let (header, loaded_color, loaded_image) = {
            let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
            let color_type = decoder.color_type();
            let mut image = vec![0; decoder.total_bytes() as usize];
            decoder
                .read_image(&mut image)
                .expect("Failed to decode the image");
            // `read_image` consumes the decoder, so build a second one just
            // to recover the parsed header.
            let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
            (header, color_type, image)
        };
        assert_eq!(header.width(), width);
        assert_eq!(header.height(), height);
        assert_eq!(ExtendedColorType::from(loaded_color), color);
        assert_eq!(loaded_image.as_slice(), buffer);
    }

    /// Same round-trip as `execute_roundtrip_default`, but forcing a specific
    /// PNM subtype on the encoder and asserting it survives the round-trip.
    fn execute_roundtrip_with_subtype(
        buffer: &[u8],
        width: u32,
        height: u32,
        color: ExtendedColorType,
        subtype: PnmSubtype,
    ) {
        let mut encoded_buffer = Vec::new();
        {
            let mut encoder = PnmEncoder::new(&mut encoded_buffer).with_subtype(subtype);
            encoder
                .encode(buffer, width, height, color)
                .expect("Failed to encode the image buffer");
        }
        let (header, loaded_color, loaded_image) = {
            let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
            let color_type = decoder.color_type();
            let mut image = vec![0; decoder.total_bytes() as usize];
            decoder
                .read_image(&mut image)
                .expect("Failed to decode the image");
            let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
            (header, color_type, image)
        };
        assert_eq!(header.width(), width);
        assert_eq!(header.height(), height);
        assert_eq!(header.subtype(), subtype);
        assert_eq!(ExtendedColorType::from(loaded_color), color);
        assert_eq!(loaded_image.as_slice(), buffer);
    }

    /// Round-trip for 16-bit samples; the decoded bytes are compared against
    /// the input reinterpreted in native endianness.
    fn execute_roundtrip_u16(buffer: &[u16], width: u32, height: u32, color: ExtendedColorType) {
        let mut encoded_buffer = Vec::new();
        {
            let mut encoder = PnmEncoder::new(&mut encoded_buffer);
            encoder
                .encode(buffer, width, height, color)
                .expect("Failed to encode the image buffer");
        }
        let (header, loaded_color, loaded_image) = {
            let decoder = PnmDecoder::new(&encoded_buffer[..]).unwrap();
            let color_type = decoder.color_type();
            let mut image = vec![0; decoder.total_bytes() as usize];
            decoder
                .read_image(&mut image)
                .expect("Failed to decode the image");
            let (_, header) = PnmDecoder::new(&encoded_buffer[..]).unwrap().into_inner();
            (header, color_type, image)
        };
        let mut buffer_u8 = vec![0; buffer.len() * 2];
        NativeEndian::write_u16_into(buffer, &mut buffer_u8[..]);
        assert_eq!(header.width(), width);
        assert_eq!(header.height(), height);
        assert_eq!(ExtendedColorType::from(loaded_color), color);
        assert_eq!(loaded_image, buffer_u8);
    }

    #[test]
    fn roundtrip_gray() {
        #[rustfmt::skip]
        let buf: [u8; 16] = [
            0, 0, 0, 255,
            255, 255, 255, 255,
            255, 0, 255, 0,
            255, 0, 0, 0,
        ];
        execute_roundtrip_default(&buf, 4, 4, ExtendedColorType::L8);
        execute_roundtrip_with_subtype(&buf, 4, 4, ExtendedColorType::L8, PnmSubtype::ArbitraryMap);
        execute_roundtrip_with_subtype(
            &buf,
            4,
            4,
            ExtendedColorType::L8,
            PnmSubtype::Graymap(SampleEncoding::Ascii),
        );
        execute_roundtrip_with_subtype(
            &buf,
            4,
            4,
            ExtendedColorType::L8,
            PnmSubtype::Graymap(SampleEncoding::Binary),
        );
    }

    #[test]
    fn roundtrip_rgb() {
        #[rustfmt::skip]
        let buf: [u8; 27] = [
            0, 0, 0,
            0, 0, 255,
            0, 255, 0,
            0, 255, 255,
            255, 0, 0,
            255, 0, 255,
            255, 255, 0,
            255, 255, 255,
            255, 255, 255,
        ];
        execute_roundtrip_default(&buf, 3, 3, ExtendedColorType::Rgb8);
        execute_roundtrip_with_subtype(
            &buf,
            3,
            3,
            ExtendedColorType::Rgb8,
            PnmSubtype::ArbitraryMap,
        );
        execute_roundtrip_with_subtype(
            &buf,
            3,
            3,
            ExtendedColorType::Rgb8,
            PnmSubtype::Pixmap(SampleEncoding::Binary),
        );
        execute_roundtrip_with_subtype(
            &buf,
            3,
            3,
            ExtendedColorType::Rgb8,
            PnmSubtype::Pixmap(SampleEncoding::Ascii),
        );
    }

    #[test]
    fn roundtrip_u16() {
        let buf: [u16; 6] = [0, 1, 0xFFFF, 0x1234, 0x3412, 0xBEAF];
        execute_roundtrip_u16(&buf, 6, 1, ExtendedColorType::L16);
    }
}

120
vendor/image/src/codecs/qoi.rs vendored Normal file
View File

@@ -0,0 +1,120 @@
//! Decoding and encoding of QOI images
use crate::error::{DecodingError, EncodingError, UnsupportedError, UnsupportedErrorKind};
use crate::{
ColorType, ExtendedColorType, ImageDecoder, ImageEncoder, ImageError, ImageFormat, ImageResult,
};
use std::io::{Read, Write};
/// QOI decoder
pub struct QoiDecoder<R> {
    // Underlying decoder from the `qoi` crate; the header is parsed eagerly
    // in `QoiDecoder::new`, so dimension/color queries never fail.
    decoder: qoi::Decoder<R>,
}
impl<R> QoiDecoder<R>
where
    R: Read,
{
    /// Creates a new decoder that decodes from the stream ```reader```
    ///
    /// Reads and validates the QOI header immediately; any `qoi` crate error
    /// is converted to an `ImageError::Decoding`.
    pub fn new(reader: R) -> ImageResult<Self> {
        qoi::Decoder::from_stream(reader)
            .map(|decoder| Self { decoder })
            .map_err(decoding_error)
    }
}
impl<R: Read> ImageDecoder for QoiDecoder<R> {
    /// Width and height taken from the already-parsed QOI header.
    fn dimensions(&self) -> (u32, u32) {
        let header = self.decoder.header();
        (header.width, header.height)
    }

    /// QOI stores either 3-channel RGB or 4-channel RGBA pixels.
    fn color_type(&self) -> ColorType {
        if matches!(self.decoder.header().channels, qoi::Channels::Rgb) {
            ColorType::Rgb8
        } else {
            ColorType::Rgba8
        }
    }

    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        self.decoder
            .decode_to_buf(buf)
            .map(|_| ())
            .map_err(decoding_error)
    }

    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
fn decoding_error(error: qoi::Error) -> ImageError {
ImageError::Decoding(DecodingError::new(ImageFormat::Qoi.into(), error))
}
fn encoding_error(error: qoi::Error) -> ImageError {
ImageError::Encoding(EncodingError::new(ImageFormat::Qoi.into(), error))
}
/// QOI encoder
pub struct QoiEncoder<W> {
    // Sink the encoded QOI stream is written to in `write_image`.
    writer: W,
}
impl<W: Write> QoiEncoder<W> {
/// Creates a new encoder that writes its output to ```writer```
pub fn new(writer: W) -> Self {
Self { writer }
}
}
impl<W: Write> ImageEncoder for QoiEncoder<W> {
    /// Encode `buf` as a QOI stream and write it to the inner writer.
    ///
    /// Only `Rgb8` and `Rgba8` inputs are supported; anything else yields an
    /// `ImageError::Unsupported`. Panics if `buf` does not have exactly the
    /// byte length implied by `width`, `height` and `color_type`.
    #[track_caller]
    fn write_image(
        mut self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        if !matches!(
            color_type,
            ExtendedColorType::Rgba8 | ExtendedColorType::Rgb8
        ) {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Qoi.into(),
                    UnsupportedErrorKind::Color(color_type),
                ),
            ));
        }
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        // Encode data in QOI
        // NOTE(review): `qoi::encode_to_vec` appears to infer the channel
        // count from `buf.len()` relative to the dimensions — confirm against
        // the qoi crate documentation.
        let data = qoi::encode_to_vec(buf, width, height).map_err(encoding_error)?;
        // Write data to buffer
        self.writer.write_all(&data[..])?;
        self.writer.flush()?;
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;

    /// Smoke test: the bundled sample file decodes to a 5x5 RGBA image.
    #[test]
    fn decode_test_image() {
        let decoder = QoiDecoder::new(File::open("tests/images/qoi/basic-test.qoi").unwrap())
            .expect("Unable to read QOI file");
        assert_eq!((5, 5), decoder.dimensions());
        assert_eq!(ColorType::Rgba8, decoder.color_type());
    }
}

403
vendor/image/src/codecs/tga/decoder.rs vendored Normal file
View File

@@ -0,0 +1,403 @@
use super::header::{Header, ImageType, ALPHA_BIT_MASK};
use crate::error::DecodingError;
use crate::io::ReadExt;
use crate::utils::vec_try_with_capacity;
use crate::{
color::{ColorType, ExtendedColorType},
error::{ImageError, ImageResult, UnsupportedError, UnsupportedErrorKind},
ImageDecoder, ImageFormat,
};
use byteorder_lite::ReadBytesExt;
use std::io::{self, Read};
/// Palette read from a TGA color-map section.
struct ColorMap {
    /// sizes in bytes
    start_offset: usize,
    entry_size: usize,
    bytes: Vec<u8>,
}

impl ColorMap {
    /// Get one entry from the color map
    ///
    /// Returns `None` when the computed byte range falls outside the stored
    /// palette bytes.
    // TODO: Should we actually be *subtracting* start_offset from the index here?
    pub(crate) fn get(&self, index: usize) -> Option<&[u8]> {
        let start = self.start_offset + index * self.entry_size;
        let end = start + self.entry_size;
        self.bytes.get(start..end)
    }
}
/// The representation of a TGA decoder
pub struct TgaDecoder<R> {
    // Source stream, positioned just past the header/ID/color-map sections
    // after `TgaDecoder::new` returns.
    r: R,
    // Image dimensions in pixels.
    width: usize,
    height: usize,
    // The number of bytes in the raw input data for each pixel. If a color map is used, this is the
    // number of bytes for each color map index.
    raw_bytes_per_pixel: usize,
    image_type: ImageType,
    // Color type of the decoded output (after any color-map expansion and
    // BGR->RGB fixup).
    color_type: ColorType,
    // Set only when the stored format differs from the decoded one (e.g. the
    // alpha-only A8 case, which is emitted as L8).
    original_color_type: Option<ExtendedColorType>,
    header: Header,
    // Present only for color-mapped image types.
    color_map: Option<ColorMap>,
}
/// Pixel ordering of a TGA image, derived from bits 4 and 5 of the image
/// descriptor byte.
#[derive(Copy, Clone, Debug, PartialOrd, PartialEq)]
enum TgaOrientation {
    TopLeft,
    TopRight,
    BottomRight,
    BottomLeft,
}

impl TgaOrientation {
    /// Decode the orientation from the image descriptor byte.
    ///
    /// Bit 4 set means pixels run right -> left within a row; bit 5 set means
    /// rows run top -> bottom.
    /// Sources:
    /// https://en.wikipedia.org/wiki/Truevision_TGA ; Image specification (field 5)
    fn from_image_desc_byte(value: u8) -> Self {
        let right_to_left = value & (1u8 << 4) != 0;
        let top_to_bottom = value & (1u8 << 5) != 0;
        match (right_to_left, top_to_bottom) {
            (false, false) => TgaOrientation::BottomLeft,
            (false, true) => TgaOrientation::TopLeft,
            (true, false) => TgaOrientation::BottomRight,
            (true, true) => TgaOrientation::TopRight,
        }
    }
}
impl<R: Read> TgaDecoder<R> {
    /// Create a new decoder that decodes from the stream `r`
    ///
    /// Parses and validates the header, skips the image ID, reads the color
    /// map (if present), and determines the output `ColorType`. On return the
    /// stream is positioned at the start of the pixel data.
    pub fn new(mut r: R) -> ImageResult<TgaDecoder<R>> {
        // Read header
        let header = Header::from_reader(&mut r)?;
        let image_type = ImageType::new(header.image_type);
        let width = header.image_width as usize;
        let height = header.image_height as usize;
        // Round bit depth up to whole bytes (e.g. 15-bit -> 2 bytes).
        let raw_bytes_per_pixel = (header.pixel_depth as usize).div_ceil(8);
        let num_alpha_bits = header.image_desc & ALPHA_BIT_MASK;
        // Validate header
        if ![8, 16, 24, 32].contains(&header.pixel_depth) || ![0, 8].contains(&num_alpha_bits) {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Tga.into(),
                    UnsupportedErrorKind::Color(ExtendedColorType::Unknown(header.pixel_depth)),
                ),
            ));
        }
        if image_type.is_color_mapped() {
            if header.map_type != 1 {
                return Err(ImageError::Decoding(DecodingError::new(
                    ImageFormat::Tga.into(),
                    "Color map type must be 1 for color mapped images",
                )));
            } else if ![8, 16].contains(&header.pixel_depth) {
                return Err(ImageError::Decoding(DecodingError::new(
                    ImageFormat::Tga.into(),
                    "Color map must use 1 or 2 byte indexes",
                )));
            } else if header.pixel_depth > header.map_entry_size {
                // `read_image` expands indices in place inside the output
                // buffer, which is only safe when indices are not wider than
                // the entries they map to.
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Tga.into(),
                        UnsupportedErrorKind::GenericFeature(
                            "Indices larger than pixel values".into(),
                        ),
                    ),
                ));
            }
        }
        // TODO: validate the rest of the fields in the header.

        // Read image ID (and ignore it)
        // `id_length` is a u8, so 256 bytes of scratch always suffice.
        let mut tmp = [0u8; 256];
        r.read_exact(&mut tmp[0..header.id_length as usize])?;

        // Read color map
        let mut color_map = None;
        if header.map_type == 1 {
            let entry_size = (header.map_entry_size as usize).div_ceil(8);
            if ![2, 3, 4].contains(&entry_size) {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Tga.into(),
                        UnsupportedErrorKind::GenericFeature(
                            "Unsupported color map entry size".into(),
                        ),
                    ),
                ));
            }
            let mut bytes = Vec::new();
            r.read_exact_vec(&mut bytes, entry_size * header.map_length as usize)?;
            // Color maps are technically allowed in non-color-mapped images, so check that we
            // actually need the color map before storing it.
            if image_type.is_color_mapped() {
                color_map = Some(ColorMap {
                    entry_size,
                    start_offset: header.map_origin as usize,
                    bytes,
                });
            }
        }

        // Compute output pixel depth: for color-mapped images the output
        // depth comes from the map entries, not from the index size.
        let total_pixel_bits = if header.map_type == 1 {
            header.map_entry_size
        } else {
            header.pixel_depth
        };
        let num_other_bits = total_pixel_bits
            .checked_sub(num_alpha_bits)
            .ok_or_else(|| {
                ImageError::Decoding(DecodingError::new(
                    ImageFormat::Tga.into(),
                    "More alpha bits than pixel bits",
                ))
            })?;

        // Determine color type
        let color_type;
        let mut original_color_type = None;
        match (num_alpha_bits, num_other_bits, image_type.is_color()) {
            // really, the encoding is BGR and BGRA, this is fixed up with
            // `TgaDecoder::reverse_encoding`.
            (0, 32, true) => color_type = ColorType::Rgba8,
            (8, 24, true) => color_type = ColorType::Rgba8,
            (0, 24, true) => color_type = ColorType::Rgb8,
            (8, 8, false) => color_type = ColorType::La8,
            (0, 8, false) => color_type = ColorType::L8,
            (8, 0, false) => {
                // alpha-only image is treated as L8
                color_type = ColorType::L8;
                original_color_type = Some(ExtendedColorType::A8);
            }
            _ => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Tga.into(),
                        UnsupportedErrorKind::Color(ExtendedColorType::Unknown(header.pixel_depth)),
                    ),
                ))
            }
        }
        Ok(TgaDecoder {
            r,
            width,
            height,
            raw_bytes_per_pixel,
            image_type,
            color_type,
            original_color_type,
            header,
            color_map,
        })
    }

    /// Reads run-length-encoded data until `buf` is filled.
    ///
    /// Each packet starts with a count byte: high bit set means "repeat the
    /// next pixel `count` times", clear means "`count` literal pixels follow".
    fn read_encoded_data(&mut self, buf: &mut [u8]) -> io::Result<()> {
        assert!(self.raw_bytes_per_pixel <= 4);
        let mut repeat_buf = [0; 4];
        let repeat_buf = &mut repeat_buf[..self.raw_bytes_per_pixel];

        let mut index = 0;
        while index < buf.len() {
            let run_packet = self.r.read_u8()?;
            // If the highest bit in `run_packet` is set, then we repeat pixels
            //
            // Note: the TGA format adds 1 to both counts because having a count
            // of 0 would be pointless.
            if (run_packet & 0x80) != 0 {
                // high bit set, so we will repeat the data
                let repeat_count = ((run_packet & !0x80) + 1) as usize;
                self.r.read_exact(repeat_buf)?;
                // `chunks_exact_mut` caps writes at the end of `buf`, so an
                // overlong run cannot write out of bounds.
                for chunk in buf[index..]
                    .chunks_exact_mut(self.raw_bytes_per_pixel)
                    .take(repeat_count)
                {
                    chunk.copy_from_slice(repeat_buf);
                }
                index += repeat_count * self.raw_bytes_per_pixel;
            } else {
                // not set, so `run_packet+1` is the number of non-encoded pixels
                let num_raw_bytes =
                    ((run_packet + 1) as usize * self.raw_bytes_per_pixel).min(buf.len() - index);
                self.r.read_exact(&mut buf[index..][..num_raw_bytes])?;
                index += num_raw_bytes;
            }
        }

        Ok(())
    }

    /// Expands indices into its mapped color
    ///
    /// `input` holds 1- or 2-byte palette indices; each resolved entry of
    /// `color_map.entry_size` bytes is written to `output`.
    fn expand_color_map(
        &self,
        input: &[u8],
        output: &mut [u8],
        color_map: &ColorMap,
    ) -> ImageResult<()> {
        if self.raw_bytes_per_pixel == 1 {
            for (&index, chunk) in input
                .iter()
                .zip(output.chunks_exact_mut(color_map.entry_size))
            {
                if let Some(color) = color_map.get(index as usize) {
                    chunk.copy_from_slice(color);
                } else {
                    return Err(ImageError::Decoding(DecodingError::new(
                        ImageFormat::Tga.into(),
                        "Invalid color map index",
                    )));
                }
            }
        } else if self.raw_bytes_per_pixel == 2 {
            // Two-byte indices are little-endian.
            for (index, chunk) in input
                .chunks_exact(2)
                .zip(output.chunks_exact_mut(color_map.entry_size))
            {
                let index = u16::from_le_bytes(index.try_into().unwrap());
                if let Some(color) = color_map.get(index as usize) {
                    chunk.copy_from_slice(color);
                } else {
                    return Err(ImageError::Decoding(DecodingError::new(
                        ImageFormat::Tga.into(),
                        "Invalid color map index",
                    )));
                }
            }
        } else {
            unreachable!("Supported bytes_per_pixel values are checked in TgaDecoder::new");
        }
        Ok(())
    }

    /// Reverse from BGR encoding to RGB encoding
    ///
    /// TGA files are stored in the BGRA encoding. This function swaps
    /// the blue and red bytes in the `pixels` array.
    fn reverse_encoding_in_output(&mut self, pixels: &mut [u8]) {
        // We only need to reverse the encoding of color images
        match self.color_type {
            ColorType::Rgb8 | ColorType::Rgba8 => {
                for chunk in pixels.chunks_mut(self.color_type.bytes_per_pixel().into()) {
                    chunk.swap(0, 2);
                }
            }
            _ => {}
        }
    }

    /// Change image orientation depending on the flags set
    ///
    /// Operates on the raw (pre-color-map-expansion) pixel data, hence the
    /// use of `raw_bytes_per_pixel` for the row stride.
    fn fixup_orientation(&mut self, pixels: &mut [u8]) {
        let orientation = TgaOrientation::from_image_desc_byte(self.header.image_desc);
        // Flip image if bottom->top direction
        if (orientation == TgaOrientation::BottomLeft || orientation == TgaOrientation::BottomRight)
            && self.height > 1
        {
            let row_stride = self.width * self.raw_bytes_per_pixel;
            // Split at the middle row and swap mirrored rows; an odd middle
            // row stays in place.
            let (left_part, right_part) = pixels.split_at_mut(self.height / 2 * row_stride);
            for (src, dst) in left_part
                .chunks_exact_mut(row_stride)
                .zip(right_part.chunks_exact_mut(row_stride).rev())
            {
                for (src, dst) in src.iter_mut().zip(dst.iter_mut()) {
                    std::mem::swap(src, dst);
                }
            }
        }
        // Flop image if right->left direction
        if (orientation == TgaOrientation::BottomRight || orientation == TgaOrientation::TopRight)
            && self.width > 1
        {
            for row in pixels.chunks_exact_mut(self.width * self.raw_bytes_per_pixel) {
                let (left_part, right_part) =
                    row.split_at_mut(self.width / 2 * self.raw_bytes_per_pixel);
                for (src, dst) in left_part
                    .chunks_exact_mut(self.raw_bytes_per_pixel)
                    .zip(right_part.chunks_exact_mut(self.raw_bytes_per_pixel).rev())
                {
                    for (src, dst) in src.iter_mut().zip(dst.iter_mut()) {
                        std::mem::swap(dst, src);
                    }
                }
            }
        }
    }
}
impl<R: Read> ImageDecoder for TgaDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        // Stored as usize internally but sourced from u16 header fields, so
        // the casts cannot truncate.
        (self.width as u32, self.height as u32)
    }

    fn color_type(&self) -> ColorType {
        self.color_type
    }

    fn original_color_type(&self) -> ExtendedColorType {
        // Only the alpha-only (A8-as-L8) case stores an override.
        self.original_color_type
            .unwrap_or_else(|| self.color_type().into())
    }

    /// Decode the whole image into `buf`, which must be exactly
    /// `total_bytes()` long.
    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));

        // Decode the raw data
        //
        // We have already checked in `TgaDecoder::new` that the indices take less space than the
        // pixels they encode, so it is safe to read the raw data into `buf`.
        let num_raw_bytes = self.width * self.height * self.raw_bytes_per_pixel;
        if self.image_type.is_encoded() {
            self.read_encoded_data(&mut buf[..num_raw_bytes])?;
        } else {
            self.r.read_exact(&mut buf[..num_raw_bytes])?;
        }
        // Orientation fixup happens on the raw data, before expansion.
        self.fixup_orientation(buf);

        // Expand the indices using the color map if necessary
        if let Some(ref color_map) = self.color_map {
            // This allocation could be avoided by expanding each row (or block of pixels) as it is
            // read, or by doing the color map expansion in-place. But those may be more effort than
            // it is worth.
            let mut rawbuf = vec_try_with_capacity(num_raw_bytes)?;
            rawbuf.extend_from_slice(&buf[..num_raw_bytes]);
            self.expand_color_map(&rawbuf, buf, color_map)?;
        }
        self.reverse_encoding_in_output(buf);
        Ok(())
    }

    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}

521
vendor/image/src/codecs/tga/encoder.rs vendored Normal file
View File

@@ -0,0 +1,521 @@
use super::header::Header;
use crate::{codecs::tga::header::ImageType, error::EncodingError, utils::vec_try_with_capacity};
use crate::{DynamicImage, ExtendedColorType, ImageEncoder, ImageError, ImageFormat, ImageResult};
use std::{error, fmt, io::Write};
/// Errors that can occur during encoding and saving of a TGA image.
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
enum EncoderError {
    /// Invalid TGA width.
    WidthInvalid(u32),
    /// Invalid TGA height.
    HeightInvalid(u32),
}

impl fmt::Display for EncoderError {
    /// Human-readable message; surfaced through `ImageError::Encoding`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            EncoderError::WidthInvalid(s) => write!(f, "Invalid TGA width: {s}"),
            EncoderError::HeightInvalid(s) => write!(f, "Invalid TGA height: {s}"),
        }
    }
}
impl From<EncoderError> for ImageError {
    /// Wrap a TGA encoder error in the crate-wide error type.
    fn from(e: EncoderError) -> ImageError {
        let encoding = EncodingError::new(ImageFormat::Tga.into(), e);
        ImageError::Encoding(encoding)
    }
}
impl error::Error for EncoderError {}
/// TGA encoder.
pub struct TgaEncoder<W: Write> {
    // Sink the encoded TGA stream is written to.
    writer: W,

    /// Run-length encoding
    // Enabled by default; cleared by `disable_rle`.
    use_rle: bool,
}
// Maximum pixels per RLE/raw packet: the packet header stores the count minus
// one in 7 bits, so 128 is the largest representable run.
const MAX_RUN_LENGTH: u8 = 128;

// Which kind of packet the run-length encoder is currently accumulating.
#[derive(Debug, Eq, PartialEq)]
enum PacketType {
    Raw,
    Rle,
}
impl<W: Write> TgaEncoder<W> {
    /// Create a new encoder that writes its output to ```w```.
    pub fn new(w: W) -> TgaEncoder<W> {
        TgaEncoder {
            writer: w,
            use_rle: true,
        }
    }

    /// Disables run-length encoding
    pub fn disable_rle(mut self) -> TgaEncoder<W> {
        self.use_rle = false;
        self
    }

    /// Writes a raw packet to the writer
    ///
    /// `pixels` contains `counter` literal pixels (1 <= counter <= 128).
    fn write_raw_packet(&mut self, pixels: &[u8], counter: u8) -> ImageResult<()> {
        // Set high bit = 0 and store counter - 1 (because 0 would be useless)
        // The counter fills 7 bits max, so the high bit is set to 0 implicitly
        let header = counter - 1;
        self.writer.write_all(&[header])?;
        self.writer.write_all(pixels)?;
        Ok(())
    }

    /// Writes a run-length encoded packet to the writer
    ///
    /// `pixel` is a single pixel value to be repeated `counter` times.
    fn write_rle_encoded_packet(&mut self, pixel: &[u8], counter: u8) -> ImageResult<()> {
        // Set high bit = 1 and store counter - 1 (because 0 would be useless)
        let header = 0x80 | (counter - 1);
        self.writer.write_all(&[header])?;
        self.writer.write_all(pixel)?;
        Ok(())
    }

    /// Writes the run-length encoded buffer to the writer
    ///
    /// Walks the image pixel by pixel, switching between RLE packets (for
    /// runs of identical pixels) and raw packets (for runs of differing
    /// pixels), flushing whenever the packet type changes or a packet reaches
    /// `MAX_RUN_LENGTH` pixels.
    fn run_length_encode(
        &mut self,
        image: &[u8],
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        use PacketType::*;
        let bytes_per_pixel = color_type.bits_per_pixel() / 8;
        let capacity_in_bytes = usize::from(MAX_RUN_LENGTH) * usize::from(bytes_per_pixel);
        // Buffer to temporarily store pixels
        // so we can choose whether to use RLE or not when we need to
        let mut buf = vec_try_with_capacity(capacity_in_bytes)?;
        let mut counter = 0;
        let mut prev_pixel = None;
        let mut packet_type = Rle;
        for pixel in image.chunks(usize::from(bytes_per_pixel)) {
            // Make sure we are not at the first pixel
            if let Some(prev) = prev_pixel {
                if pixel == prev {
                    // A repeat begins: flush any pending raw packet first.
                    if packet_type == Raw && counter > 0 {
                        self.write_raw_packet(&buf, counter)?;
                        counter = 0;
                        buf.clear();
                    }
                    packet_type = Rle;
                } else if packet_type == Rle && counter > 0 {
                    // The run ended: flush it as an RLE packet of `prev`.
                    self.write_rle_encoded_packet(prev, counter)?;
                    counter = 0;
                    packet_type = Raw;
                    buf.clear();
                }
            }
            counter += 1;
            buf.extend_from_slice(pixel);
            debug_assert!(buf.len() <= capacity_in_bytes);
            // Saturate at the 7-bit packet limit.
            if counter == MAX_RUN_LENGTH {
                match packet_type {
                    Rle => self.write_rle_encoded_packet(prev_pixel.unwrap(), counter),
                    Raw => self.write_raw_packet(&buf, counter),
                }?;
                counter = 0;
                packet_type = Rle;
                buf.clear();
            }
            prev_pixel = Some(pixel);
        }
        // Flush whatever remains after the last pixel.
        if counter > 0 {
            match packet_type {
                Rle => self.write_rle_encoded_packet(prev_pixel.unwrap(), counter),
                Raw => self.write_raw_packet(&buf, counter),
            }?;
        }
        Ok(())
    }

    /// Encodes the image ```buf``` that has dimensions ```width```
    /// and ```height``` and ```ColorType``` ```color_type```.
    ///
    /// The dimensions of the image must be between 0 and 65535 (inclusive) or
    /// an error will be returned.
    ///
    /// # Panics
    ///
    /// Panics if `width * height * color_type.bytes_per_pixel() != data.len()`.
    #[track_caller]
    pub fn encode(
        mut self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        // Validate dimensions.
        let width = u16::try_from(width)
            .map_err(|_| ImageError::from(EncoderError::WidthInvalid(width)))?;
        let height = u16::try_from(height)
            .map_err(|_| ImageError::from(EncoderError::HeightInvalid(height)))?;
        // Write out TGA header.
        let header = Header::from_pixel_info(color_type, width, height, self.use_rle)?;
        header.write_to(&mut self.writer)?;
        let image_type = ImageType::new(header.image_type);
        match image_type {
            //TODO: support RunColorMap, and change match to image_type.is_encoded()
            ImageType::RunTrueColor | ImageType::RunGrayScale => {
                // Write run-length encoded image data
                match color_type {
                    ExtendedColorType::Rgb8 | ExtendedColorType::Rgba8 => {
                        // TGA stores color pixels as BGR(A); swap the red and
                        // blue channels in a copy before encoding.
                        let mut image = Vec::from(buf);
                        for pixel in image.chunks_mut(usize::from(color_type.bits_per_pixel() / 8))
                        {
                            pixel.swap(0, 2);
                        }
                        self.run_length_encode(&image, color_type)?;
                    }
                    _ => {
                        self.run_length_encode(buf, color_type)?;
                    }
                }
            }
            _ => {
                // Write uncompressed image data
                match color_type {
                    ExtendedColorType::Rgb8 | ExtendedColorType::Rgba8 => {
                        // Same RGB -> BGR channel swap as the RLE path.
                        let mut image = Vec::from(buf);
                        for pixel in image.chunks_mut(usize::from(color_type.bits_per_pixel() / 8))
                        {
                            pixel.swap(0, 2);
                        }
                        self.writer.write_all(&image)?;
                    }
                    _ => {
                        self.writer.write_all(buf)?;
                    }
                }
            }
        }
        Ok(())
    }
}
impl<W: Write> ImageEncoder for TgaEncoder<W> {
    /// Thin trait adapter around [`TgaEncoder::encode`]; see that method for
    /// supported dimensions and panics.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        self.encode(buf, width, height, color_type)
    }

    // TGA only stores 8-bit channels, so higher-bit-depth dynamic images are
    // converted down before encoding.
    fn make_compatible_img(
        &self,
        _: crate::io::encoder::MethodSealedToImage,
        img: &DynamicImage,
    ) -> Option<DynamicImage> {
        crate::io::encoder::dynimage_conversion_8bit(img)
    }
}
#[cfg(test)]
mod tests {
use super::{EncoderError, TgaEncoder};
use crate::{codecs::tga::TgaDecoder, ExtendedColorType, ImageDecoder, ImageError};
use std::{error::Error, io::Cursor};
#[test]
fn test_image_width_too_large() {
// TGA cannot encode images larger than 65,535×65,535
// create a 65,536×1 8-bit black image buffer
let size = usize::from(u16::MAX) + 1;
let dimension = size as u32;
let img = vec![0u8; size];
// Try to encode an image that is too large
let mut encoded = Vec::new();
let encoder = TgaEncoder::new(&mut encoded);
let result = encoder.encode(&img, dimension, 1, ExtendedColorType::L8);
match result {
Err(ImageError::Encoding(err)) => {
let err = err
.source()
.unwrap()
.downcast_ref::<EncoderError>()
.unwrap();
assert_eq!(*err, EncoderError::WidthInvalid(dimension));
}
other => panic!(
"Encoding an image that is too wide should return a InvalidWidth \
it returned {other:?} instead"
),
}
}
#[test]
fn test_image_height_too_large() {
// TGA cannot encode images larger than 65,535×65,535
// create a 65,536×1 8-bit black image buffer
let size = usize::from(u16::MAX) + 1;
let dimension = size as u32;
let img = vec![0u8; size];
// Try to encode an image that is too large
let mut encoded = Vec::new();
let encoder = TgaEncoder::new(&mut encoded);
let result = encoder.encode(&img, 1, dimension, ExtendedColorType::L8);
match result {
Err(ImageError::Encoding(err)) => {
let err = err
.source()
.unwrap()
.downcast_ref::<EncoderError>()
.unwrap();
assert_eq!(*err, EncoderError::HeightInvalid(dimension));
}
other => panic!(
"Encoding an image that is too tall should return a InvalidHeight \
it returned {other:?} instead"
),
}
}
#[test]
fn test_compression_diff() {
let image = [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2];
let uncompressed_bytes = {
let mut encoded_data = Vec::new();
let encoder = TgaEncoder::new(&mut encoded_data).disable_rle();
encoder
.encode(&image, 5, 1, ExtendedColorType::Rgb8)
.expect("could not encode image");
encoded_data
};
let compressed_bytes = {
let mut encoded_data = Vec::new();
let encoder = TgaEncoder::new(&mut encoded_data);
encoder
.encode(&image, 5, 1, ExtendedColorType::Rgb8)
.expect("could not encode image");
encoded_data
};
assert!(uncompressed_bytes.len() > compressed_bytes.len());
}
mod compressed {
use super::*;
fn round_trip_image(
image: &[u8],
width: u32,
height: u32,
c: ExtendedColorType,
) -> Vec<u8> {
let mut encoded_data = Vec::new();
{
let encoder = TgaEncoder::new(&mut encoded_data);
encoder
.encode(image, width, height, c)
.expect("could not encode image");
}
let decoder = TgaDecoder::new(Cursor::new(&encoded_data)).expect("failed to decode");
let mut buf = vec![0; decoder.total_bytes() as usize];
decoder.read_image(&mut buf).expect("failed to decode");
buf
}
#[test]
fn mixed_packets() {
let image = [
255, 255, 255, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255,
];
let decoded = round_trip_image(&image, 5, 1, ExtendedColorType::Rgb8);
assert_eq!(decoded.len(), image.len());
assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_gray() {
let image = [0, 1, 2];
let decoded = round_trip_image(&image, 3, 1, ExtendedColorType::L8);
assert_eq!(decoded.len(), image.len());
assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_graya() {
    // 1x3 luma+alpha image: two bytes per pixel.
    let image = [0, 1, 2, 3, 4, 5];
    let decoded = round_trip_image(&image, 1, 3, ExtendedColorType::La8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_single_pixel_rgb() {
    // Smallest possible RGB image: a single pixel.
    let image = [0, 1, 2];
    let decoded = round_trip_image(&image, 1, 1, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_three_pixel_rgb() {
    // A row of three identical pixels, compressible as one run.
    let image = [0, 1, 2, 0, 1, 2, 0, 1, 2];
    let decoded = round_trip_image(&image, 3, 1, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_3px_rgb() {
    let image = [0; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel
    let decoded = round_trip_image(&image, 3, 3, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_different() {
    // All pixels differ, so a single run cannot cover the row.
    let image = [0, 1, 2, 0, 1, 3, 0, 1, 4];
    let decoded = round_trip_image(&image, 3, 1, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_different_2() {
    // A run of equal pixels followed by a differing one.
    let image = [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 4];
    let decoded = round_trip_image(&image, 4, 1, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_different_3() {
    // A run, one differing pixel, then another pixel matching the run.
    let image = [0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 4, 0, 1, 2];
    let decoded = round_trip_image(&image, 5, 1, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
#[test]
fn round_trip_bw() {
    // This example demonstrates the run-length counter being saturated
    // It should never overflow and can be 128 max
    // Uses an on-disk fixture with long black/white runs.
    let image = crate::open("tests/images/tga/encoding/black_white.tga").unwrap();
    let (width, height) = (image.width(), image.height());
    let image = image.as_rgb8().unwrap().to_vec();
    let decoded = round_trip_image(&image, width, height, ExtendedColorType::Rgb8);
    assert_eq!(decoded.len(), image.len());
    assert_eq!(decoded.as_slice(), image);
}
}
mod uncompressed {
    use super::*;

    /// Encode `image` without RLE compression, decode it again, and return
    /// the resulting pixel bytes.
    fn round_trip_image(
        image: &[u8],
        width: u32,
        height: u32,
        c: ExtendedColorType,
    ) -> Vec<u8> {
        let mut encoded = Vec::new();
        TgaEncoder::new(&mut encoded)
            .disable_rle()
            .encode(image, width, height, c)
            .expect("could not encode image");

        let decoder = TgaDecoder::new(Cursor::new(&encoded)).expect("failed to decode");
        let mut pixels = vec![0; decoder.total_bytes() as usize];
        decoder.read_image(&mut pixels).expect("failed to decode");
        pixels
    }

    #[test]
    fn round_trip_single_pixel_rgb() {
        let pixels = [0, 1, 2];
        let decoded = round_trip_image(&pixels, 1, 1, ExtendedColorType::Rgb8);
        assert_eq!(decoded.len(), pixels.len());
        assert_eq!(decoded.as_slice(), pixels);
    }

    #[test]
    fn round_trip_single_pixel_rgba() {
        let pixels = [0, 1, 2, 3];
        let decoded = round_trip_image(&pixels, 1, 1, ExtendedColorType::Rgba8);
        assert_eq!(decoded.len(), pixels.len());
        assert_eq!(decoded.as_slice(), pixels);
    }

    #[test]
    fn round_trip_gray() {
        let pixels = [0, 1, 2];
        let decoded = round_trip_image(&pixels, 3, 1, ExtendedColorType::L8);
        assert_eq!(decoded.len(), pixels.len());
        assert_eq!(decoded.as_slice(), pixels);
    }

    #[test]
    fn round_trip_graya() {
        let pixels = [0, 1, 2, 3, 4, 5];
        let decoded = round_trip_image(&pixels, 1, 3, ExtendedColorType::La8);
        assert_eq!(decoded.len(), pixels.len());
        assert_eq!(decoded.as_slice(), pixels);
    }

    #[test]
    fn round_trip_3px_rgb() {
        let pixels = [0; 3 * 3 * 3]; // 3x3 pixels, 3 bytes per pixel
        let decoded = round_trip_image(&pixels, 3, 3, ExtendedColorType::Rgb8);
        assert_eq!(decoded.len(), pixels.len());
        assert_eq!(decoded.as_slice(), pixels);
    }
}
}

156
vendor/image/src/codecs/tga/header.rs vendored Normal file
View File

@@ -0,0 +1,156 @@
use crate::error::{UnsupportedError, UnsupportedErrorKind};
use crate::{ExtendedColorType, ImageError, ImageFormat, ImageResult};
use byteorder_lite::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::{Read, Write};
/// Low nibble of the TGA image-descriptor byte: number of alpha bits per pixel.
pub(crate) const ALPHA_BIT_MASK: u8 = 0b1111;
/// Bit 5 of the image-descriptor byte: set when the origin is the upper-left corner.
pub(crate) const SCREEN_ORIGIN_BIT_MASK: u8 = 0b10_0000;
/// TGA image type codes, as stored in byte 2 of the file header.
///
/// The discriminants are the on-disk values, so a variant can be written out
/// directly with `as u8`.
pub(crate) enum ImageType {
    NoImageData = 0,
    /// Uncompressed images.
    RawColorMap = 1,
    RawTrueColor = 2,
    RawGrayScale = 3,
    /// Run length encoded images.
    RunColorMap = 9,
    RunTrueColor = 10,
    RunGrayScale = 11,
    Unknown,
}

impl ImageType {
    /// Map the raw type byte from a TGA header onto a known image type.
    /// Unrecognized values become `Unknown`.
    pub(crate) fn new(img_type: u8) -> ImageType {
        use ImageType::*;

        match img_type {
            0 => NoImageData,
            1 => RawColorMap,
            2 => RawTrueColor,
            3 => RawGrayScale,
            9 => RunColorMap,
            10 => RunTrueColor,
            11 => RunGrayScale,
            _ => Unknown,
        }
    }

    /// Check if the image format uses colors as opposed to gray scale.
    pub(crate) fn is_color(&self) -> bool {
        // Everything except the gray-scale, empty and unknown types is color.
        !matches!(
            self,
            ImageType::NoImageData
                | ImageType::RawGrayScale
                | ImageType::RunGrayScale
                | ImageType::Unknown
        )
    }

    /// Does the image use a color map.
    pub(crate) fn is_color_mapped(&self) -> bool {
        match self {
            ImageType::RawColorMap | ImageType::RunColorMap => true,
            _ => false,
        }
    }

    /// Is the image run length encoded.
    pub(crate) fn is_encoded(&self) -> bool {
        match self {
            ImageType::RunColorMap | ImageType::RunTrueColor | ImageType::RunGrayScale => true,
            _ => false,
        }
    }
}
/// Header used by TGA image files.
///
/// This is the fixed 18-byte structure at the start of every TGA file; all
/// multi-byte fields are little-endian on disk (see `from_reader`/`write_to`).
#[derive(Debug, Default)]
pub(crate) struct Header {
    pub(crate) id_length: u8,      // length of ID string
    pub(crate) map_type: u8,       // color map type
    pub(crate) image_type: u8,     // image type code (see `ImageType`)
    pub(crate) map_origin: u16,    // starting index of map
    pub(crate) map_length: u16,    // length of map
    pub(crate) map_entry_size: u8, // size of map entries in bits
    pub(crate) x_origin: u16,      // x-origin of image
    pub(crate) y_origin: u16,      // y-origin of image
    pub(crate) image_width: u16,   // width of image
    pub(crate) image_height: u16,  // height of image
    pub(crate) pixel_depth: u8,    // bits per pixel
    pub(crate) image_desc: u8,     // image descriptor (alpha bits + origin flag)
}
impl Header {
    /// Load the header with values from pixel information.
    ///
    /// For a zero-area image the all-zero default header is returned (image
    /// type 0, "no image data"). Returns an `Unsupported` error for color
    /// types other than `L8`, `La8`, `Rgb8` and `Rgba8`.
    pub(crate) fn from_pixel_info(
        color_type: ExtendedColorType,
        width: u16,
        height: u16,
        use_rle: bool,
    ) -> ImageResult<Self> {
        let mut header = Self::default();

        if width > 0 && height > 0 {
            // Split the pixel depth into alpha bits and remaining channel
            // bits, and pick the raw or run-length-encoded image type.
            let (num_alpha_bits, other_channel_bits, image_type) = match (color_type, use_rle) {
                (ExtendedColorType::Rgba8, true) => (8, 24, ImageType::RunTrueColor),
                (ExtendedColorType::Rgb8, true) => (0, 24, ImageType::RunTrueColor),
                (ExtendedColorType::La8, true) => (8, 8, ImageType::RunGrayScale),
                (ExtendedColorType::L8, true) => (0, 8, ImageType::RunGrayScale),
                (ExtendedColorType::Rgba8, false) => (8, 24, ImageType::RawTrueColor),
                (ExtendedColorType::Rgb8, false) => (0, 24, ImageType::RawTrueColor),
                (ExtendedColorType::La8, false) => (8, 8, ImageType::RawGrayScale),
                (ExtendedColorType::L8, false) => (0, 8, ImageType::RawGrayScale),
                _ => {
                    return Err(ImageError::Unsupported(
                        UnsupportedError::from_format_and_kind(
                            ImageFormat::Tga.into(),
                            UnsupportedErrorKind::Color(color_type),
                        ),
                    ))
                }
            };

            header.image_type = image_type as u8;
            header.image_width = width;
            header.image_height = height;
            header.pixel_depth = num_alpha_bits + other_channel_bits;
            // Descriptor byte: low nibble holds the alpha bit count,
            // bit 5 marks the top-left origin.
            header.image_desc = num_alpha_bits & ALPHA_BIT_MASK;
            header.image_desc |= SCREEN_ORIGIN_BIT_MASK; // Upper left origin.
        }

        Ok(header)
    }

    /// Load the header with values from the reader.
    ///
    /// Reads the fixed 18-byte layout field by field; multi-byte integers
    /// are little-endian.
    pub(crate) fn from_reader(r: &mut dyn Read) -> ImageResult<Self> {
        Ok(Self {
            id_length: r.read_u8()?,
            map_type: r.read_u8()?,
            image_type: r.read_u8()?,
            map_origin: r.read_u16::<LittleEndian>()?,
            map_length: r.read_u16::<LittleEndian>()?,
            map_entry_size: r.read_u8()?,
            x_origin: r.read_u16::<LittleEndian>()?,
            y_origin: r.read_u16::<LittleEndian>()?,
            image_width: r.read_u16::<LittleEndian>()?,
            image_height: r.read_u16::<LittleEndian>()?,
            pixel_depth: r.read_u8()?,
            image_desc: r.read_u8()?,
        })
    }

    /// Write out the header values.
    ///
    /// Mirrors `from_reader`: same field order, little-endian integers.
    pub(crate) fn write_to(&self, w: &mut dyn Write) -> ImageResult<()> {
        w.write_u8(self.id_length)?;
        w.write_u8(self.map_type)?;
        w.write_u8(self.image_type)?;
        w.write_u16::<LittleEndian>(self.map_origin)?;
        w.write_u16::<LittleEndian>(self.map_length)?;
        w.write_u8(self.map_entry_size)?;
        w.write_u16::<LittleEndian>(self.x_origin)?;
        w.write_u16::<LittleEndian>(self.y_origin)?;
        w.write_u16::<LittleEndian>(self.image_width)?;
        w.write_u16::<LittleEndian>(self.image_height)?;
        w.write_u8(self.pixel_depth)?;
        w.write_u8(self.image_desc)?;
        Ok(())
    }
}

17
vendor/image/src/codecs/tga/mod.rs vendored Normal file
View File

@@ -0,0 +1,17 @@
//! Decoding of TGA Images
//!
//! # Related Links
//! <http://googlesites.inequation.org/tgautilities>
/// A decoder for TGA images
///
/// Currently this decoder does not support 8, 15 and 16 bit color images.
pub use self::decoder::TgaDecoder;
//TODO add 8, 15, 16 bit color support
pub use self::encoder::TgaEncoder;
mod decoder;
mod encoder;
mod header;

492
vendor/image/src/codecs/tiff.rs vendored Normal file
View File

@@ -0,0 +1,492 @@
//! Decoding and Encoding of TIFF Images
//!
//! TIFF (Tagged Image File Format) is a versatile image format that supports
//! lossless and lossy compression.
//!
//! # Related Links
//! * <http://partners.adobe.com/public/developer/tiff/index.html> - The TIFF specification
use std::io::{self, BufRead, Cursor, Read, Seek, Write};
use std::marker::PhantomData;
use std::mem;
use tiff::decoder::{Decoder, DecodingResult};
use tiff::tags::Tag;
use crate::color::{ColorType, ExtendedColorType};
use crate::error::{
DecodingError, EncodingError, ImageError, ImageResult, LimitError, LimitErrorKind,
ParameterError, ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
};
use crate::metadata::Orientation;
use crate::{utils, ImageDecoder, ImageEncoder, ImageFormat};
/// Decoder for TIFF images.
pub struct TiffDecoder<R>
where
    R: BufRead + Seek,
{
    // Image dimensions (width, height) read from the first directory.
    dimensions: (u32, u32),
    // In-memory color type of the decoded output.
    color_type: ColorType,
    // On-disk representation (e.g. `L1`, `Cmyk8`) where it differs from `color_type`.
    original_color_type: ExtendedColorType,
    // We only use an Option here so we can call with_limits on the decoder without moving.
    inner: Option<Decoder<R>>,
}
impl<R> TiffDecoder<R>
where
    R: BufRead + Seek,
{
    /// Create a new `TiffDecoder`.
    ///
    /// Eagerly reads the first image directory: dimensions, sample format,
    /// planar configuration and color type are all validated here, so
    /// construction fails early for files this wrapper cannot decode.
    pub fn new(r: R) -> Result<TiffDecoder<R>, ImageError> {
        let mut inner = Decoder::new(r).map_err(ImageError::from_tiff_decode)?;
        let dimensions = inner.dimensions().map_err(ImageError::from_tiff_decode)?;
        let tiff_color_type = inner.colortype().map_err(ImageError::from_tiff_decode)?;
        // Reject sample formats other than unsigned integer and 32-bit float
        // (see `check_sample_format`).
        match inner.find_tag_unsigned_vec::<u16>(Tag::SampleFormat) {
            Ok(Some(sample_formats)) => {
                for format in sample_formats {
                    check_sample_format(format, tiff_color_type)?;
                }
            }
            Ok(None) => { /* assume UInt format */ }
            Err(other) => return Err(ImageError::from_tiff_decode(other)),
        }

        // A missing or unreadable PlanarConfiguration tag yields 0 here,
        // which passes the chunky-layout check below.
        let planar_config = inner
            .find_tag(Tag::PlanarConfiguration)
            .map(|res| res.and_then(|r| r.into_u16().ok()).unwrap_or_default())
            .unwrap_or_default();

        // Decode not supported for non Chunky Planar Configuration
        if planar_config > 1 {
            Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormat::Tiff.into(),
                    UnsupportedErrorKind::GenericFeature(String::from("PlanarConfiguration = 2")),
                ),
            ))?;
        }

        // Map the tiff crate's color type onto ours. 1-bit gray is expanded
        // to L8 and CMYK is converted to RGB while reading (see `read_image`).
        let color_type = match tiff_color_type {
            tiff::ColorType::Gray(1) => ColorType::L8,
            tiff::ColorType::Gray(8) => ColorType::L8,
            tiff::ColorType::Gray(16) => ColorType::L16,
            tiff::ColorType::GrayA(8) => ColorType::La8,
            tiff::ColorType::GrayA(16) => ColorType::La16,
            tiff::ColorType::RGB(8) => ColorType::Rgb8,
            tiff::ColorType::RGB(16) => ColorType::Rgb16,
            tiff::ColorType::RGBA(8) => ColorType::Rgba8,
            tiff::ColorType::RGBA(16) => ColorType::Rgba16,
            tiff::ColorType::CMYK(8) => ColorType::Rgb8,
            tiff::ColorType::RGB(32) => ColorType::Rgb32F,
            tiff::ColorType::RGBA(32) => ColorType::Rgba32F,
            // Everything else is reported as an unknown color type with an
            // approximate total bit count.
            tiff::ColorType::Palette(n) | tiff::ColorType::Gray(n) => {
                return Err(err_unknown_color_type(n))
            }
            tiff::ColorType::GrayA(n) => return Err(err_unknown_color_type(n.saturating_mul(2))),
            tiff::ColorType::RGB(n) => return Err(err_unknown_color_type(n.saturating_mul(3))),
            tiff::ColorType::YCbCr(n) => return Err(err_unknown_color_type(n.saturating_mul(3))),
            tiff::ColorType::RGBA(n) | tiff::ColorType::CMYK(n) => {
                return Err(err_unknown_color_type(n.saturating_mul(4)))
            }
            tiff::ColorType::Multiband {
                bit_depth,
                num_samples,
            } => {
                return Err(err_unknown_color_type(
                    bit_depth.saturating_mul(num_samples.min(255) as u8),
                ))
            }
            _ => return Err(err_unknown_color_type(0)),
        };

        // Remember the on-disk representation where it differs from the
        // in-memory `color_type` chosen above.
        let original_color_type = match tiff_color_type {
            tiff::ColorType::Gray(1) => ExtendedColorType::L1,
            tiff::ColorType::CMYK(8) => ExtendedColorType::Cmyk8,
            _ => color_type.into(),
        };

        Ok(TiffDecoder {
            dimensions,
            color_type,
            original_color_type,
            inner: Some(inner),
        })
    }

    // The buffer can be larger for CMYK than the RGB output
    fn total_bytes_buffer(&self) -> u64 {
        let dimensions = self.dimensions();
        let total_pixels = u64::from(dimensions.0) * u64::from(dimensions.1);
        // NOTE(review): 16 bytes/pixel for Cmyk8 is more than the 4 u8
        // samples stored on disk — presumably deliberate headroom for the
        // tiff crate's intermediate buffers; confirm upstream.
        let bytes_per_pixel = if self.original_color_type == ExtendedColorType::Cmyk8 {
            16
        } else {
            u64::from(self.color_type().bytes_per_pixel())
        };
        total_pixels.saturating_mul(bytes_per_pixel)
    }
}
/// Verify that a TIFF `SampleFormat` value is one this decoder can handle
/// for the given color type.
///
/// Accepted combinations are unsigned integers of at most 16 bits and
/// 32-bit IEEE floats; everything else yields an `Unsupported` error.
fn check_sample_format(sample_format: u16, color_type: tiff::ColorType) -> Result<(), ImageError> {
    use tiff::{tags::SampleFormat, ColorType};

    // Shared constructor for the various "can't handle this" errors below.
    let unsupported = |message: String| {
        ImageError::Unsupported(UnsupportedError::from_format_and_kind(
            ImageFormat::Tiff.into(),
            UnsupportedErrorKind::GenericFeature(message),
        ))
    };

    // Per-sample bit depth for the color layouts we understand at all.
    let num_bits = match color_type {
        ColorType::CMYK(k)
        | ColorType::Gray(k)
        | ColorType::RGB(k)
        | ColorType::RGBA(k)
        | ColorType::GrayA(k) => k,
        ColorType::Palette(k) | ColorType::YCbCr(k) => {
            return Err(unsupported(format!(
                "Unhandled TIFF color type {color_type:?} for {k} bits",
            )))
        }
        _ => {
            return Err(unsupported(format!(
                "Unhandled TIFF color type {color_type:?}",
            )))
        }
    };

    match SampleFormat::from_u16(sample_format) {
        Some(SampleFormat::Uint) if num_bits <= 16 => Ok(()),
        Some(SampleFormat::IEEEFP) if num_bits == 32 => Ok(()),
        _ => Err(unsupported(format!(
            "Unhandled TIFF sample format {sample_format:?} for {num_bits} bits",
        ))),
    }
}
/// Build the `Unsupported` error used for TIFF color layouts that have no
/// `ColorType` equivalent; `value` is the approximate total bits per pixel.
fn err_unknown_color_type(value: u8) -> ImageError {
    let kind = UnsupportedErrorKind::Color(ExtendedColorType::Unknown(value));
    ImageError::Unsupported(UnsupportedError::from_format_and_kind(
        ImageFormat::Tiff.into(),
        kind,
    ))
}
impl ImageError {
    /// Map a `tiff` crate error raised while decoding onto the matching
    /// `ImageError` variant: I/O passes through, format/usage problems
    /// become `Decoding`, and exceeded limits become `Limits`.
    fn from_tiff_decode(err: tiff::TiffError) -> ImageError {
        match err {
            tiff::TiffError::IoError(err) => ImageError::IoError(err),
            err @ (tiff::TiffError::FormatError(_)
            | tiff::TiffError::IntSizeError
            | tiff::TiffError::UsageError(_)) => {
                ImageError::Decoding(DecodingError::new(ImageFormat::Tiff.into(), err))
            }
            tiff::TiffError::UnsupportedError(desc) => {
                ImageError::Unsupported(UnsupportedError::from_format_and_kind(
                    ImageFormat::Tiff.into(),
                    UnsupportedErrorKind::GenericFeature(desc.to_string()),
                ))
            }
            tiff::TiffError::LimitsExceeded => {
                ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
            }
        }
    }

    /// Same mapping as `from_tiff_decode`, but format/usage problems are
    /// classified as `Encoding` failures instead.
    fn from_tiff_encode(err: tiff::TiffError) -> ImageError {
        match err {
            tiff::TiffError::IoError(err) => ImageError::IoError(err),
            err @ (tiff::TiffError::FormatError(_)
            | tiff::TiffError::IntSizeError
            | tiff::TiffError::UsageError(_)) => {
                ImageError::Encoding(EncodingError::new(ImageFormat::Tiff.into(), err))
            }
            tiff::TiffError::UnsupportedError(desc) => {
                ImageError::Unsupported(UnsupportedError::from_format_and_kind(
                    ImageFormat::Tiff.into(),
                    UnsupportedErrorKind::GenericFeature(desc.to_string()),
                ))
            }
            tiff::TiffError::LimitsExceeded => {
                ImageError::Limits(LimitError::from_kind(LimitErrorKind::InsufficientMemory))
            }
        }
    }
}
/// Wrapper struct around a `Cursor<Vec<u8>>`
///
/// Deprecated; `#[allow(dead_code)]` because the crate itself no longer
/// uses it.
#[allow(dead_code)]
#[deprecated]
pub struct TiffReader<R>(Cursor<Vec<u8>>, PhantomData<R>);
#[allow(deprecated)]
impl<R> Read for TiffReader<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        self.0.read(buf)
    }
    // Fast path: if nothing has been read yet and `buf` is empty, hand over
    // the underlying vector via `mem::swap` instead of copying it.
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
        if self.0.position() == 0 && buf.is_empty() {
            mem::swap(buf, self.0.get_mut());
            Ok(buf.len())
        } else {
            self.0.read_to_end(buf)
        }
    }
}
impl<R: BufRead + Seek> ImageDecoder for TiffDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        self.dimensions
    }

    fn color_type(&self) -> ColorType {
        self.color_type
    }

    fn original_color_type(&self) -> ExtendedColorType {
        self.original_color_type
    }

    // Tag 34675 holds the embedded ICC profile, matching this method's name.
    fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
        if let Some(decoder) = &mut self.inner {
            Ok(decoder.get_tag_u8_vec(Tag::Unknown(34675)).ok())
        } else {
            Ok(None)
        }
    }

    // Missing, unreadable or out-of-range Orientation values all fall back
    // to `NoTransforms`.
    fn orientation(&mut self) -> ImageResult<Orientation> {
        if let Some(decoder) = &mut self.inner {
            Ok(decoder
                .find_tag(Tag::Orientation)
                .map_err(ImageError::from_tiff_decode)?
                .and_then(|v| Orientation::from_exif(v.into_u16().ok()?.min(255) as u8))
                .unwrap_or(Orientation::NoTransforms))
        } else {
            Ok(Orientation::NoTransforms)
        }
    }

    fn set_limits(&mut self, limits: crate::Limits) -> ImageResult<()> {
        limits.check_support(&crate::LimitSupport::default())?;
        let (width, height) = self.dimensions();
        limits.check_dimensions(width, height)?;
        let max_alloc = limits.max_alloc.unwrap_or(u64::MAX);
        // Split the allocation budget: the decode buffer itself gets up to
        // `total_bytes_buffer()`, the remainder goes to intermediate buffers.
        let max_intermediate_alloc = max_alloc.saturating_sub(self.total_bytes_buffer());

        let mut tiff_limits: tiff::decoder::Limits = Default::default();
        tiff_limits.decoding_buffer_size =
            usize::try_from(max_alloc - max_intermediate_alloc).unwrap_or(usize::MAX);
        tiff_limits.intermediate_buffer_size =
            usize::try_from(max_intermediate_alloc).unwrap_or(usize::MAX);
        tiff_limits.ifd_value_size = tiff_limits.intermediate_buffer_size;
        // Take the decoder out of the Option so `with_limits` can consume it.
        self.inner = Some(self.inner.take().unwrap().with_limits(tiff_limits));

        Ok(())
    }

    fn read_image(self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        match self
            .inner
            .unwrap()
            .read_image()
            .map_err(ImageError::from_tiff_decode)?
        {
            // CMYK on disk: convert each 4-sample pixel to RGB on the fly.
            DecodingResult::U8(v) if self.original_color_type == ExtendedColorType::Cmyk8 => {
                let mut out_cur = Cursor::new(buf);
                for cmyk in v.chunks_exact(4) {
                    out_cur.write_all(&cmyk_to_rgb(cmyk))?;
                }
            }
            // 1-bit gray: expand each bit-packed row to one byte per pixel.
            DecodingResult::U8(v) if self.original_color_type == ExtendedColorType::L1 => {
                let width = self.dimensions.0;
                let row_bytes = width.div_ceil(8);
                for (in_row, out_row) in v
                    .chunks_exact(row_bytes as usize)
                    .zip(buf.chunks_exact_mut(width as usize))
                {
                    out_row.copy_from_slice(&utils::expand_bits(1, width, in_row));
                }
            }
            // All remaining variants are copied through as native-endian bytes.
            DecodingResult::U8(v) => {
                buf.copy_from_slice(&v);
            }
            DecodingResult::U16(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::U32(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::U64(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::I8(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::I16(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::I32(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::I64(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::F32(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            DecodingResult::F64(v) => {
                buf.copy_from_slice(bytemuck::cast_slice(&v));
            }
            // `new` never accepts a color type that decodes to f16 samples.
            DecodingResult::F16(_) => unreachable!(),
        }
        Ok(())
    }

    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }
}
/// Encoder for TIFF images.
pub struct TiffEncoder<W> {
    // Destination writer; consumed when `encode` runs.
    w: W,
}
/// Convert one CMYK pixel (the first four bytes of `cmyk`) to RGB.
///
/// Each channel is inverted and scaled by the remaining brightness after the
/// key (black) component is applied.
fn cmyk_to_rgb(cmyk: &[u8]) -> [u8; 3] {
    // Fraction of brightness left once the black channel is accounted for.
    let key_factor = 1. - f32::from(cmyk[3]) / 255.;
    let to_rgb = |channel: u8| ((255. - f32::from(channel)) * key_factor) as u8;
    [to_rgb(cmyk[0]), to_rgb(cmyk[1]), to_rgb(cmyk[2])]
}
/// Convert a slice of sample bytes to its semantic type, being a `Pod`.
///
/// Borrows when the input is already aligned for `P`; otherwise copies into
/// an aligned `Vec`. Returns a `Parameter` error when the byte count is not
/// a whole number of `P` elements.
fn u8_slice_as_pod<P: bytemuck::Pod>(buf: &[u8]) -> ImageResult<std::borrow::Cow<'_, [P]>> {
    bytemuck::try_cast_slice(buf)
        .map(std::borrow::Cow::Borrowed)
        .or_else(|err| {
            match err {
                bytemuck::PodCastError::TargetAlignmentGreaterAndInputNotAligned => {
                    // If the buffer is not aligned for a native slice, copy the buffer into a Vec,
                    // aligning it in the process. This is only done if the element count can be
                    // represented exactly.
                    let vec = bytemuck::allocation::pod_collect_to_vec(buf);
                    Ok(std::borrow::Cow::Owned(vec))
                }
                /* only expecting: bytemuck::PodCastError::OutputSliceWouldHaveSlop */
                _ => {
                    // `bytemuck::PodCastError` of bytemuck-1.2.0 does not implement `Error` and
                    // `Display` trait.
                    // See <https://github.com/Lokathor/bytemuck/issues/22>.
                    Err(ImageError::Parameter(ParameterError::from_kind(
                        ParameterErrorKind::Generic(format!(
                            "Casting samples to their representation failed: {err:?}",
                        )),
                    )))
                }
            }
        })
}
impl<W: Write + Seek> TiffEncoder<W> {
    /// Create a new encoder that writes its output to `w`
    pub fn new(w: W) -> TiffEncoder<W> {
        TiffEncoder { w }
    }

    /// Encodes the image `image` that has dimensions `width` and `height` and `ColorType` `c`.
    ///
    /// 16-bit types assume the buffer is native endian.
    ///
    /// Returns an `Unsupported` error for color types other than the 8/16-bit
    /// gray/RGB/RGBA variants and the 32-bit float RGB(A) variants below.
    ///
    /// # Panics
    ///
    /// Panics if `width * height * color_type.bytes_per_pixel() != data.len()`.
    #[track_caller]
    pub fn encode(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        use tiff::encoder::colortype::{
            Gray16, Gray8, RGB32Float, RGBA32Float, RGB16, RGB8, RGBA16, RGBA8,
        };
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        let mut encoder =
            tiff::encoder::TiffEncoder::new(self.w).map_err(ImageError::from_tiff_encode)?;
        // Multi-byte sample types are re-viewed as their native slice type
        // via `u8_slice_as_pod` before being handed to the tiff crate.
        match color_type {
            ExtendedColorType::L8 => encoder.write_image::<Gray8>(width, height, buf),
            ExtendedColorType::Rgb8 => encoder.write_image::<RGB8>(width, height, buf),
            ExtendedColorType::Rgba8 => encoder.write_image::<RGBA8>(width, height, buf),
            ExtendedColorType::L16 => {
                encoder.write_image::<Gray16>(width, height, u8_slice_as_pod::<u16>(buf)?.as_ref())
            }
            ExtendedColorType::Rgb16 => {
                encoder.write_image::<RGB16>(width, height, u8_slice_as_pod::<u16>(buf)?.as_ref())
            }
            ExtendedColorType::Rgba16 => {
                encoder.write_image::<RGBA16>(width, height, u8_slice_as_pod::<u16>(buf)?.as_ref())
            }
            ExtendedColorType::Rgb32F => encoder.write_image::<RGB32Float>(
                width,
                height,
                u8_slice_as_pod::<f32>(buf)?.as_ref(),
            ),
            ExtendedColorType::Rgba32F => encoder.write_image::<RGBA32Float>(
                width,
                height,
                u8_slice_as_pod::<f32>(buf)?.as_ref(),
            ),
            _ => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::Tiff.into(),
                        UnsupportedErrorKind::Color(color_type),
                    ),
                ))
            }
        }
        .map_err(ImageError::from_tiff_encode)?;

        Ok(())
    }
}
impl<W: Write + Seek> ImageEncoder for TiffEncoder<W> {
    // Thin delegation to `TiffEncoder::encode`; same panics and errors apply.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        self.encode(buf, width, height, color_type)
    }
}

172
vendor/image/src/codecs/webp/decoder.rs vendored Normal file
View File

@@ -0,0 +1,172 @@
use std::io::{BufRead, Read, Seek};
use crate::buffer::ConvertBuffer;
use crate::error::{DecodingError, ImageError, ImageResult};
use crate::metadata::Orientation;
use crate::{
AnimationDecoder, ColorType, Delay, Frame, Frames, ImageDecoder, ImageFormat, RgbImage, Rgba,
RgbaImage,
};
/// WebP Image format decoder.
///
/// Supports both lossless and lossy WebP images.
pub struct WebPDecoder<R> {
    // Underlying decoder from the `image-webp` crate.
    inner: image_webp::WebPDecoder<R>,
    // Cached EXIF orientation; populated lazily by `exif_metadata`.
    orientation: Option<Orientation>,
}
impl<R: BufRead + Seek> WebPDecoder<R> {
    /// Create a new `WebPDecoder` from the Reader `r`.
    ///
    /// Fails if the inner decoder cannot parse the stream's headers.
    pub fn new(r: R) -> ImageResult<Self> {
        Ok(Self {
            inner: image_webp::WebPDecoder::new(r).map_err(ImageError::from_webp_decode)?,
            orientation: None,
        })
    }

    /// Returns true if the image as described by the bitstream is animated.
    pub fn has_animation(&self) -> bool {
        self.inner.is_animated()
    }

    /// Sets the background color if the image is an extended and animated webp.
    ///
    /// Any rejection by the inner decoder is surfaced as a decoding error.
    pub fn set_background_color(&mut self, color: Rgba<u8>) -> ImageResult<()> {
        self.inner
            .set_background_color(color.0)
            .map_err(ImageError::from_webp_decode)
    }
}
impl<R: BufRead + Seek> ImageDecoder for WebPDecoder<R> {
    fn dimensions(&self) -> (u32, u32) {
        self.inner.dimensions()
    }

    // RGBA when the bitstream carries an alpha channel, RGB otherwise.
    fn color_type(&self) -> ColorType {
        if self.inner.has_alpha() {
            ColorType::Rgba8
        } else {
            ColorType::Rgb8
        }
    }

    fn read_image(mut self, buf: &mut [u8]) -> ImageResult<()> {
        assert_eq!(u64::try_from(buf.len()), Ok(self.total_bytes()));
        self.inner
            .read_image(buf)
            .map_err(ImageError::from_webp_decode)
    }

    fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
        (*self).read_image(buf)
    }

    fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
        self.inner
            .icc_profile()
            .map_err(ImageError::from_webp_decode)
    }

    // Also caches the orientation parsed from the EXIF chunk so that
    // `orientation` below does not have to re-read it.
    fn exif_metadata(&mut self) -> ImageResult<Option<Vec<u8>>> {
        let exif = self
            .inner
            .exif_metadata()
            .map_err(ImageError::from_webp_decode)?;
        self.orientation = Some(
            exif.as_ref()
                .and_then(|exif| Orientation::from_exif_chunk(exif))
                .unwrap_or(Orientation::NoTransforms),
        );
        Ok(exif)
    }

    fn orientation(&mut self) -> ImageResult<Orientation> {
        // `exif_metadata` caches the orientation, so call it if `orientation` hasn't been set yet.
        if self.orientation.is_none() {
            let _ = self.exif_metadata()?;
        }
        Ok(self.orientation.unwrap())
    }
}
impl<'a, R: 'a + BufRead + Seek> AnimationDecoder<'a> for WebPDecoder<R> {
    /// Turn the decoder into a lazy frame iterator; each `next()` call
    /// decodes one frame.
    fn into_frames(self) -> Frames<'a> {
        // Adapter that turns the inner decoder's per-frame reads into `Frame`s.
        struct FramesInner<R: Read + Seek> {
            // The owned decoder, consumed frame by frame.
            decoder: WebPDecoder<R>,
            // Index of the next frame to decode.
            current: u32,
        }
        impl<R: BufRead + Seek> Iterator for FramesInner<R> {
            type Item = ImageResult<Frame>;

            fn next(&mut self) -> Option<Self::Item> {
                if self.current == self.decoder.inner.num_frames() {
                    return None;
                }
                self.current += 1;

                let (width, height) = self.decoder.inner.dimensions();
                // Frames without alpha are decoded as RGB and converted to
                // RGBA, since `Frame` stores RGBA buffers.
                let (img, delay) = if self.decoder.inner.has_alpha() {
                    let mut img = RgbaImage::new(width, height);
                    match self.decoder.inner.read_frame(&mut img) {
                        Ok(delay) => (img, delay),
                        Err(image_webp::DecodingError::NoMoreFrames) => return None,
                        Err(e) => return Some(Err(ImageError::from_webp_decode(e))),
                    }
                } else {
                    let mut img = RgbImage::new(width, height);
                    match self.decoder.inner.read_frame(&mut img) {
                        Ok(delay) => (img.convert(), delay),
                        Err(image_webp::DecodingError::NoMoreFrames) => return None,
                        Err(e) => return Some(Err(ImageError::from_webp_decode(e))),
                    }
                };

                // Zero offsets; `delay` is interpreted as milliseconds.
                Some(Ok(Frame::from_parts(
                    img,
                    0,
                    0,
                    Delay::from_numer_denom_ms(delay, 1),
                )))
            }
        }

        Frames::new(Box::new(FramesInner {
            decoder: self,
            current: 0,
        }))
    }
}
impl ImageError {
    /// Translate a low-level `image-webp` decoding error into an
    /// `ImageError`: I/O failures pass through unchanged, everything else is
    /// wrapped as a WebP decoding error.
    fn from_webp_decode(e: image_webp::DecodingError) -> Self {
        match e {
            image_webp::DecodingError::IoError(io_error) => Self::IoError(io_error),
            other => Self::Decoding(DecodingError::new(ImageFormat::WebP.into(), other)),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Regression test: a RIFF header advertising a chunk size that would
    // overflow must not panic inside `WebPDecoder::new`. The decode result
    // itself is irrelevant, hence the ignored return value.
    #[test]
    fn add_with_overflow_size() {
        let bytes = vec![
            0x52, 0x49, 0x46, 0x46, 0xaf, 0x37, 0x80, 0x47, 0x57, 0x45, 0x42, 0x50, 0x6c, 0x64,
            0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xfb, 0x7e, 0x73, 0x00, 0x06, 0x00, 0x00, 0x00,
            0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
            0x40, 0xfb, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
            0x00, 0x00, 0x00, 0x00, 0x62, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49,
            0x49, 0x54, 0x55, 0x50, 0x4c, 0x54, 0x59, 0x50, 0x45, 0x33, 0x37, 0x44, 0x4d, 0x46,
        ];

        let data = std::io::Cursor::new(bytes);

        let _ = WebPDecoder::new(data);
    }
}

148
vendor/image/src/codecs/webp/encoder.rs vendored Normal file
View File

@@ -0,0 +1,148 @@
//! Encoding of WebP images.
use std::io::Write;
use crate::error::{EncodingError, UnsupportedError, UnsupportedErrorKind};
use crate::{DynamicImage, ExtendedColorType, ImageEncoder, ImageError, ImageFormat, ImageResult};
/// WebP Encoder.
///
/// ### Limitations
///
/// Right now only **lossless** encoding is supported.
///
/// If you need **lossy** encoding, you'll have to use `libwebp`.
/// Example code for encoding a [`DynamicImage`](crate::DynamicImage) with `libwebp`
/// via the [`webp`](https://docs.rs/webp/latest/webp/) crate can be found
/// [here](https://github.com/jaredforth/webp/blob/main/examples/convert.rs).
///
/// ### Compression ratio
///
/// This encoder reaches compression ratios higher than PNG at a fraction of the encoding time.
/// However, it does not reach the full potential of lossless WebP for reducing file size.
///
/// If you need an even higher compression ratio at the cost of much slower encoding,
/// please encode the image with `libwebp` as outlined above.
pub struct WebPEncoder<W> {
    // Underlying lossless (VP8L) encoder from the `image-webp` crate.
    inner: image_webp::WebPEncoder<W>,
}
impl<W: Write> WebPEncoder<W> {
    /// Create a new encoder that writes its output to `w`.
    ///
    /// Uses "VP8L" lossless encoding.
    pub fn new_lossless(w: W) -> Self {
        Self {
            inner: image_webp::WebPEncoder::new(w),
        }
    }

    /// Encode image data with the indicated color type.
    ///
    /// The encoder requires image data be Rgb8 or Rgba8.
    /// (L8 and La8 are also accepted — see the match below.)
    ///
    /// # Panics
    ///
    /// Panics if `width * height * color.bytes_per_pixel() != data.len()`.
    #[track_caller]
    pub fn encode(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        let expected_buffer_len = color_type.buffer_size(width, height);
        assert_eq!(
            expected_buffer_len,
            buf.len() as u64,
            "Invalid buffer length: expected {expected_buffer_len} got {} for {width}x{height} image",
            buf.len(),
        );
        // Map onto image-webp's color enum; all other layouts are rejected.
        let color_type = match color_type {
            ExtendedColorType::L8 => image_webp::ColorType::L8,
            ExtendedColorType::La8 => image_webp::ColorType::La8,
            ExtendedColorType::Rgb8 => image_webp::ColorType::Rgb8,
            ExtendedColorType::Rgba8 => image_webp::ColorType::Rgba8,
            _ => {
                return Err(ImageError::Unsupported(
                    UnsupportedError::from_format_and_kind(
                        ImageFormat::WebP.into(),
                        UnsupportedErrorKind::Color(color_type),
                    ),
                ))
            }
        };
        self.inner
            .encode(buf, width, height, color_type)
            .map_err(ImageError::from_webp_encode)
    }
}
impl<W: Write> ImageEncoder for WebPEncoder<W> {
    // Delegates to `WebPEncoder::encode`; same panics and errors apply.
    #[track_caller]
    fn write_image(
        self,
        buf: &[u8],
        width: u32,
        height: u32,
        color_type: ExtendedColorType,
    ) -> ImageResult<()> {
        self.encode(buf, width, height, color_type)
    }

    // ICC and EXIF chunks are always accepted and forwarded to the inner encoder.
    fn set_icc_profile(&mut self, icc_profile: Vec<u8>) -> Result<(), UnsupportedError> {
        self.inner.set_icc_profile(icc_profile);
        Ok(())
    }

    fn set_exif_metadata(&mut self, exif: Vec<u8>) -> Result<(), UnsupportedError> {
        self.inner.set_exif_metadata(exif);
        Ok(())
    }

    // Images in unsupported sample depths are converted down to 8-bit first.
    fn make_compatible_img(
        &self,
        _: crate::io::encoder::MethodSealedToImage,
        img: &DynamicImage,
    ) -> Option<DynamicImage> {
        crate::io::encoder::dynimage_conversion_8bit(img)
    }
}
impl ImageError {
    /// Translate a low-level `image-webp` encoding error into an
    /// `ImageError`: I/O failures pass through unchanged, everything else is
    /// wrapped as a WebP encoding error.
    fn from_webp_encode(e: image_webp::EncodingError) -> Self {
        match e {
            image_webp::EncodingError::IoError(io_error) => Self::IoError(io_error),
            other => Self::Encoding(EncodingError::new(ImageFormat::WebP.into(), other)),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{ImageEncoder, RgbaImage};

    // Lossless round trip: encoding and re-decoding must reproduce the
    // pixel data exactly.
    #[test]
    fn write_webp() {
        let img = RgbaImage::from_raw(10, 6, (0..240).collect()).unwrap();

        let mut output = Vec::new();
        super::WebPEncoder::new_lossless(&mut output)
            .write_image(
                img.inner_pixels(),
                img.width(),
                img.height(),
                crate::ExtendedColorType::Rgba8,
            )
            .unwrap();

        let img2 = crate::load_from_memory_with_format(&output, crate::ImageFormat::WebP)
            .unwrap()
            .to_rgba8();

        assert_eq!(img, img2);
    }
}

7
vendor/image/src/codecs/webp/mod.rs vendored Normal file
View File

@@ -0,0 +1,7 @@
//! Decoding and Encoding of WebP Images
mod decoder;
mod encoder;
pub use self::decoder::WebPDecoder;
pub use self::encoder::WebPEncoder;

1104
vendor/image/src/color.rs vendored Normal file

File diff suppressed because it is too large Load Diff

594
vendor/image/src/error.rs vendored Normal file
View File

@@ -0,0 +1,594 @@
//! Contains detailed error representation.
//!
//! See the main [`ImageError`] which contains a variant for each specialized error type. The
//! subtypes used in each variant are opaque by design. They can be roughly inspected through their
//! respective `kind` methods which work similar to `std::io::Error::kind`.
//!
//! The error interface makes it possible to inspect the error of an underlying decoder or encoder,
//! through the `Error::source` method. Note that this is not part of the stable interface and you
//! may not rely on a particular error value for a particular operation. This means mainly that
//! `image` does not promise to remain on a particular version of its underlying decoders but if
//! you ensure to use the same version of the dependency (or at least of the error type) through
//! external means then you could inspect the error type in slightly more detail.
//!
//! [`ImageError`]: enum.ImageError.html
use std::collections::TryReserveError;
use std::error::Error;
use std::{fmt, io};
use crate::color::ExtendedColorType;
use crate::{metadata::Cicp, ImageFormat};
/// The generic error type for image operations.
///
/// This high level enum allows, by variant matching, a rough separation of concerns between
/// underlying IO, the caller, format specifications, and the `image` implementation.
///
/// The payload of each variant is opaque; use its `kind` method (or
/// `Error::source`) for closer inspection.
#[derive(Debug)]
pub enum ImageError {
    /// An error was encountered while decoding.
    ///
    /// This means that the input data did not conform to the specification of some image format,
    /// or that no format could be determined, or that it did not match format specific
    /// requirements set by the caller.
    Decoding(DecodingError),

    /// An error was encountered while encoding.
    ///
    /// The input image can not be encoded with the chosen format, for example because the
    /// specification has no representation for its color space or because a necessary conversion
    /// is ambiguous. In some cases it might also happen that the dimensions can not be used with
    /// the format.
    Encoding(EncodingError),

    /// An error was encountered in input arguments.
    ///
    /// This is a catch-all case for strictly internal operations such as scaling, conversions,
    /// etc. that involve no external format specifications.
    Parameter(ParameterError),

    /// Completing the operation would have required more resources than allowed.
    ///
    /// Errors of this type are limits set by the user or environment, *not* inherent in a specific
    /// format or operation that was executed.
    Limits(LimitError),

    /// An operation can not be completed by the chosen abstraction.
    ///
    /// This means that it might be possible for the operation to succeed in general but
    /// * it requires a disabled feature,
    /// * the implementation does not yet exist, or
    /// * no abstraction for a lower level could be found.
    Unsupported(UnsupportedError),

    /// An error occurred while interacting with the environment
    /// (reading from or writing to the underlying stream).
    IoError(io::Error),
}
/// The implementation for an operation was not provided.
///
/// See the variant [`Unsupported`] for more documentation.
///
/// [`Unsupported`]: enum.ImageError.html#variant.Unsupported
#[derive(Debug)]
pub struct UnsupportedError {
    // Best-effort hint at which format the operation concerned; may be `Unknown`.
    format: ImageFormatHint,
    // What exactly was unsupported (color type, layout, format, …).
    kind: UnsupportedErrorKind,
}
/// Details what feature is not supported.
#[derive(Clone, Debug, Hash, PartialEq)]
#[non_exhaustive]
pub enum UnsupportedErrorKind {
    /// The required color type can not be handled.
    Color(ExtendedColorType),
    /// Dealing with an intricate layout is not implemented for an algorithm.
    ColorLayout(ExtendedColorType),
    /// The colors or transfer function of the CICP are not supported.
    ColorspaceCicp(Cicp),
    /// An image format is not supported.
    Format(ImageFormatHint),
    /// Some feature specified by string.
    /// This is discouraged and is likely to get deprecated (but not removed).
    GenericFeature(String),
}
/// An error was encountered while encoding an image.
///
/// This is used as an opaque representation for the [`ImageError::Encoding`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Encoding`]: enum.ImageError.html#variant.Encoding
#[derive(Debug)]
pub struct EncodingError {
    format: ImageFormatHint,
    // The optional underlying encoder error; surfaced via `Error::source`.
    underlying: Option<Box<dyn Error + Send + Sync>>,
}
/// An error was encountered in input arguments.
///
/// This is used as an opaque representation for the [`ImageError::Parameter`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Parameter`]: enum.ImageError.html#variant.Parameter
#[derive(Debug)]
pub struct ParameterError {
    kind: ParameterErrorKind,
    // The optional underlying cause; surfaced via `Error::source`.
    underlying: Option<Box<dyn Error + Send + Sync>>,
}
/// Details how a parameter is malformed.
#[derive(Clone, Debug, Hash, PartialEq)]
#[non_exhaustive]
pub enum ParameterErrorKind {
    /// The dimensions passed are wrong.
    DimensionMismatch,
    /// An operation was repeated after it had already emitted an error that could not be cloned.
    FailedAlready,
    /// The cicp is required to be RGB-like but had other matrix transforms or narrow range.
    RgbCicpRequired(Cicp),
    /// A string describing the parameter.
    /// This is discouraged and is likely to get deprecated (but not removed).
    Generic(String),
    /// The end of the image has been reached.
    NoMoreData,
    /// An operation expected a concrete color space but another was found.
    CicpMismatch {
        /// The cicp that was expected.
        expected: Cicp,
        /// The cicp that was found.
        found: Cicp,
    },
}
/// An error was encountered while decoding an image.
///
/// This is used as an opaque representation for the [`ImageError::Decoding`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Decoding`]: enum.ImageError.html#variant.Decoding
#[derive(Debug)]
pub struct DecodingError {
    format: ImageFormatHint,
    // The optional underlying decoder error; surfaced via `Error::source`.
    underlying: Option<Box<dyn Error + Send + Sync>>,
}
/// Completing the operation would have required more resources than allowed.
///
/// This is used as an opaque representation for the [`ImageError::Limits`] variant. See its
/// documentation for more information.
///
/// [`ImageError::Limits`]: enum.ImageError.html#variant.Limits
#[derive(Debug)]
pub struct LimitError {
    kind: LimitErrorKind,
    // do we need an underlying error?
}
/// Indicates the limit that prevented an operation from completing.
///
/// Note that this enumeration is not exhaustive and may in the future be extended to provide more
/// detailed information or to incorporate other resource types.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
#[non_exhaustive]
#[allow(missing_copy_implementations)] // Might be non-Copy in the future.
pub enum LimitErrorKind {
    /// The resulting image exceed dimension limits in either direction.
    DimensionError,
    /// The operation would have performed an allocation larger than allowed.
    InsufficientMemory,
    /// The specified strict limits are not supported for this operation.
    Unsupported {
        /// The given limits
        limits: crate::Limits,
        /// The supported strict limits
        supported: crate::LimitSupport,
    },
}
/// A best effort representation for image formats.
#[derive(Clone, Debug, Hash, PartialEq)]
#[non_exhaustive]
pub enum ImageFormatHint {
    /// The format is known exactly.
    Exact(ImageFormat),
    /// The format can be identified by a name.
    Name(String),
    /// A common path extension for the format is known.
    PathExtension(std::path::PathBuf),
    /// The format is not known or could not be determined.
    Unknown,
}
impl UnsupportedError {
/// Create an `UnsupportedError` for an image with details on the unsupported feature.
///
/// If the operation was not connected to a particular image format then the hint may be
/// `Unknown`.
#[must_use]
pub fn from_format_and_kind(format: ImageFormatHint, kind: UnsupportedErrorKind) -> Self {
UnsupportedError { format, kind }
}
/// Returns the corresponding `UnsupportedErrorKind` of the error.
#[must_use]
pub fn kind(&self) -> UnsupportedErrorKind {
self.kind.clone()
}
/// Returns the image format associated with this error.
#[must_use]
pub fn format_hint(&self) -> ImageFormatHint {
self.format.clone()
}
}
impl DecodingError {
/// Create a `DecodingError` that stems from an arbitrary error of an underlying decoder.
pub fn new(format: ImageFormatHint, err: impl Into<Box<dyn Error + Send + Sync>>) -> Self {
DecodingError {
format,
underlying: Some(err.into()),
}
}
/// Create a `DecodingError` for an image format.
///
/// The error will not contain any further information but is very easy to create.
#[must_use]
pub fn from_format_hint(format: ImageFormatHint) -> Self {
DecodingError {
format,
underlying: None,
}
}
/// Returns the image format associated with this error.
#[must_use]
pub fn format_hint(&self) -> ImageFormatHint {
self.format.clone()
}
}
impl EncodingError {
/// Create an `EncodingError` that stems from an arbitrary error of an underlying encoder.
pub fn new(format: ImageFormatHint, err: impl Into<Box<dyn Error + Send + Sync>>) -> Self {
EncodingError {
format,
underlying: Some(err.into()),
}
}
/// Create an `EncodingError` for an image format.
///
/// The error will not contain any further information but is very easy to create.
#[must_use]
pub fn from_format_hint(format: ImageFormatHint) -> Self {
EncodingError {
format,
underlying: None,
}
}
/// Return the image format associated with this error.
#[must_use]
pub fn format_hint(&self) -> ImageFormatHint {
self.format.clone()
}
}
impl ParameterError {
/// Construct a `ParameterError` directly from a corresponding kind.
#[must_use]
pub fn from_kind(kind: ParameterErrorKind) -> Self {
ParameterError {
kind,
underlying: None,
}
}
/// Returns the corresponding `ParameterErrorKind` of the error.
#[must_use]
pub fn kind(&self) -> ParameterErrorKind {
self.kind.clone()
}
}
impl LimitError {
/// Construct a generic `LimitError` directly from a corresponding kind.
#[must_use]
pub fn from_kind(kind: LimitErrorKind) -> Self {
LimitError { kind }
}
/// Returns the corresponding `LimitErrorKind` of the error.
#[must_use]
pub fn kind(&self) -> LimitErrorKind {
self.kind.clone()
}
}
impl From<LimitErrorKind> for LimitError {
fn from(kind: LimitErrorKind) -> Self {
Self { kind }
}
}
impl From<io::Error> for ImageError {
fn from(err: io::Error) -> ImageError {
ImageError::IoError(err)
}
}
impl From<TryReserveError> for ImageError {
fn from(_: TryReserveError) -> ImageError {
ImageError::Limits(LimitErrorKind::InsufficientMemory.into())
}
}
impl From<ImageFormat> for ImageFormatHint {
fn from(format: ImageFormat) -> Self {
ImageFormatHint::Exact(format)
}
}
impl From<&'_ std::path::Path> for ImageFormatHint {
    // Derive the hint from the file extension; paths without one stay `Unknown`.
    fn from(path: &'_ std::path::Path) -> Self {
        path.extension()
            .map(|ext| ImageFormatHint::PathExtension(ext.into()))
            .unwrap_or(ImageFormatHint::Unknown)
    }
}
impl From<ImageFormatHint> for UnsupportedError {
fn from(hint: ImageFormatHint) -> Self {
UnsupportedError {
format: hint.clone(),
kind: UnsupportedErrorKind::Format(hint),
}
}
}
/// Result of an image decoding/encoding process, with [`ImageError`] as the error type.
pub type ImageResult<T> = Result<T, ImageError>;
impl fmt::Display for ImageError {
    // Delegate formatting to the wrapped error value of each variant.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match self {
            ImageError::IoError(err) => err.fmt(fmt),
            ImageError::Decoding(err) => err.fmt(fmt),
            ImageError::Encoding(err) => err.fmt(fmt),
            ImageError::Parameter(err) => err.fmt(fmt),
            ImageError::Limits(err) => err.fmt(fmt),
            ImageError::Unsupported(err) => err.fmt(fmt),
        }
    }
}
impl Error for ImageError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        // Forward to the inner error's own `source` rather than returning the
        // inner error itself: the inner error's message already makes up this
        // error's `Display` output (see the `Display` impl above), so
        // returning it here would repeat it in rendered error chains.
        match self {
            ImageError::IoError(err) => err.source(),
            ImageError::Decoding(err) => err.source(),
            ImageError::Encoding(err) => err.source(),
            ImageError::Parameter(err) => err.source(),
            ImageError::Limits(err) => err.source(),
            ImageError::Unsupported(err) => err.source(),
        }
    }
}
impl fmt::Display for UnsupportedError {
    // The message is specialized on the error kind and, within `Format`, on
    // how precisely the format is known.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match &self.kind {
            UnsupportedErrorKind::Format(ImageFormatHint::Unknown) => {
                write!(fmt, "The image format could not be determined",)
            }
            UnsupportedErrorKind::Format(format @ ImageFormatHint::PathExtension(_)) => write!(
                fmt,
                "The file extension {format} was not recognized as an image format",
            ),
            UnsupportedErrorKind::Format(format) => {
                write!(fmt, "The image format {format} is not supported",)
            }
            UnsupportedErrorKind::Color(color) => write!(
                fmt,
                "The encoder or decoder for {} does not support the color type `{:?}`",
                self.format, color,
            ),
            UnsupportedErrorKind::ColorLayout(layout) => write!(
                fmt,
                "Converting with the texel memory layout {layout:?} is not supported",
            ),
            UnsupportedErrorKind::ColorspaceCicp(color) => write!(
                fmt,
                "The colorimetric interpretation of a CICP color space is not supported for `{color:?}`",
            ),
            // Generic features mention the format only when one is known.
            UnsupportedErrorKind::GenericFeature(message) => match &self.format {
                ImageFormatHint::Unknown => write!(
                    fmt,
                    "The decoder does not support the format feature {message}",
                ),
                other => write!(
                    fmt,
                    "The decoder for {other} does not support the format features {message}",
                ),
            },
        }
    }
}
// No underlying cause is stored, so the default `source` (None) suffices.
impl Error for UnsupportedError {}
impl fmt::Display for ParameterError {
    // Writes the kind-specific message, then appends the underlying error
    // (if any) on a new line.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match &self.kind {
            ParameterErrorKind::DimensionMismatch => write!(
                fmt,
                "The Image's dimensions are either too \
                 small or too large"
            ),
            // Fixed grammar: previously read "The end the image stream".
            ParameterErrorKind::FailedAlready => write!(
                fmt,
                "The end of the image stream has been reached due to a previous error"
            ),
            ParameterErrorKind::RgbCicpRequired(cicp) => {
                write!(fmt, "The CICP {cicp:?} can not be used for RGB images",)
            }
            ParameterErrorKind::Generic(message) => {
                write!(fmt, "The parameter is malformed: {message}",)
            }
            ParameterErrorKind::NoMoreData => write!(fmt, "The end of the image has been reached",),
            ParameterErrorKind::CicpMismatch { expected, found } => {
                write!(
                    fmt,
                    "The color space {found:?} does not match the expected {expected:?}",
                )
            }
        }?;

        if let Some(underlying) = &self.underlying {
            write!(fmt, "\n{underlying}")?;
        }

        Ok(())
    }
}
impl Error for ParameterError {
    // Expose the stored underlying error, if any, as the source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        let underlying = self.underlying.as_ref()?;
        Some(&**underlying)
    }
}
impl fmt::Display for EncodingError {
    // Mention the underlying encoder error on a second line when present.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        if let Some(underlying) = &self.underlying {
            write!(fmt, "Format error encoding {}:\n{}", self.format, underlying)
        } else {
            write!(fmt, "Format error encoding {}", self.format)
        }
    }
}
impl Error for EncodingError {
    // Expose the stored underlying error, if any, as the source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        let underlying = self.underlying.as_ref()?;
        Some(&**underlying)
    }
}
impl fmt::Display for DecodingError {
    // Include the underlying decoder error when present; without one, only
    // mention the format (and omit even that when it is unknown).
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        if let Some(underlying) = &self.underlying {
            write!(fmt, "Format error decoding {}: {}", self.format, underlying)
        } else if matches!(self.format, ImageFormatHint::Unknown) {
            write!(fmt, "Format error")
        } else {
            write!(fmt, "Format error decoding {}", self.format)
        }
    }
}
impl Error for DecodingError {
    // Expose the stored underlying error, if any, as the source.
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        let underlying = self.underlying.as_ref()?;
        Some(&**underlying)
    }
}
impl fmt::Display for LimitError {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        match &self.kind {
            LimitErrorKind::InsufficientMemory => write!(fmt, "Memory limit exceeded"),
            LimitErrorKind::DimensionError => write!(fmt, "Image size exceeds limit"),
            LimitErrorKind::Unsupported { limits, .. } => {
                // Fixed typo ("opertation") and actually print the offending
                // limits, which the trailing colon previously announced but
                // never wrote. `Limits` is `Debug` via the derive on
                // `LimitErrorKind`.
                write!(
                    fmt,
                    "The following strict limits are specified but not supported by the operation: {limits:?}"
                )
            }
        }
    }
}
// No underlying cause is stored, so the default `source` (None) suffices.
impl Error for LimitError {}
impl fmt::Display for ImageFormatHint {
fn fmt(&self, fmt: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
ImageFormatHint::Exact(format) => write!(fmt, "{format:?}"),
ImageFormatHint::Name(name) => write!(fmt, "`{name}`"),
ImageFormatHint::PathExtension(ext) => write!(fmt, "`.{ext:?}`"),
ImageFormatHint::Unknown => write!(fmt, "`Unknown`"),
}
}
}
/// Converting [`ExtendedColorType`] to [`ColorType`][`crate::ColorType`] failed.
///
/// This type is convertible to [`ImageError`] as [`ImageError::Unsupported`].
#[derive(Clone)]
#[allow(missing_copy_implementations)]
pub struct TryFromExtendedColorError {
    // The extended color type that had no `ColorType` equivalent.
    pub(crate) was: ExtendedColorType,
}
impl fmt::Debug for TryFromExtendedColorError {
    // Debug deliberately mirrors the Display rendering of this error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}
impl fmt::Display for TryFromExtendedColorError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "The pixel layout {:?} is not supported as a buffer ColorType",
            self.was
        )
    }
}
// No underlying cause is stored, so the default `source` (None) suffices.
impl Error for TryFromExtendedColorError {}
impl From<TryFromExtendedColorError> for ImageError {
    // The failed conversion surfaces as an unsupported-color error without a
    // format hint.
    fn from(err: TryFromExtendedColorError) -> ImageError {
        let unsupported = UnsupportedError::from_format_and_kind(
            ImageFormatHint::Unknown,
            UnsupportedErrorKind::Color(err.was),
        );
        ImageError::Unsupported(unsupported)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::mem::size_of;

    #[allow(dead_code)]
    // This will fail to compile if the size of this type is large: when
    // `size_of::<ImageError>() >= 200` the index becomes 1, which is out of
    // bounds for the one-element array and rejected at const evaluation.
    const ASSERT_SMALLISH: usize = [0][(size_of::<ImageError>() >= 200) as usize];

    #[test]
    fn test_send_sync_stability() {
        // Compile-time check that `ImageError` stays `Send + Sync`.
        fn assert_send_sync<T: Send + Sync>() {}

        assert_send_sync::<ImageError>();
    }
}

139
vendor/image/src/hooks.rs vendored Normal file
View File

@@ -0,0 +1,139 @@
//! This module provides a way to register decoding hooks for image formats not directly supported
//! by this crate.
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::{BufRead, BufReader, Read, Seek},
sync::RwLock,
};
use crate::{ImageDecoder, ImageResult};
// Helper trait combining `Read + Seek` so both can be stored behind a single
// trait object; blanket-implemented for every qualifying type.
pub(crate) trait ReadSeek: Read + Seek {}
impl<T: Read + Seek> ReadSeek for T {}
// Registry of decoding hooks keyed by file extension; `None` until the first
// registration creates the map.
pub(crate) static DECODING_HOOKS: RwLock<Option<HashMap<OsString, DecodingHook>>> =
    RwLock::new(None);

// A registered detection entry: (signature bytes, mask bytes, extension).
// An empty mask means no masking is applied (see `register_format_detection_hook`).
pub(crate) type DetectionHook = (&'static [u8], &'static [u8], OsString);

// Registry of signature-based format detection entries.
pub(crate) static GUESS_FORMAT_HOOKS: RwLock<Vec<DetectionHook>> = RwLock::new(Vec::new());
/// A wrapper around a type-erased trait object that implements `Read` and `Seek`.
pub struct GenericReader<'a>(pub(crate) BufReader<Box<dyn ReadSeek + 'a>>);
impl Read for GenericReader<'_> {
    // Forward every overridable `Read` method to the inner `BufReader` so its
    // specialized implementations are preserved.
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        self.0.read(buf)
    }
    fn read_vectored(&mut self, bufs: &mut [std::io::IoSliceMut<'_>]) -> std::io::Result<usize> {
        self.0.read_vectored(bufs)
    }
    fn read_to_end(&mut self, buf: &mut Vec<u8>) -> std::io::Result<usize> {
        self.0.read_to_end(buf)
    }
    fn read_to_string(&mut self, buf: &mut String) -> std::io::Result<usize> {
        self.0.read_to_string(buf)
    }
    fn read_exact(&mut self, buf: &mut [u8]) -> std::io::Result<()> {
        self.0.read_exact(buf)
    }
}
impl BufRead for GenericReader<'_> {
    // Forward every overridable `BufRead` method to the inner `BufReader`.
    fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
        self.0.fill_buf()
    }
    fn consume(&mut self, amt: usize) {
        self.0.consume(amt)
    }
    fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> std::io::Result<usize> {
        self.0.read_until(byte, buf)
    }
    fn read_line(&mut self, buf: &mut String) -> std::io::Result<usize> {
        self.0.read_line(buf)
    }
}
impl Seek for GenericReader<'_> {
    // Forward the `Seek` methods to the inner `BufReader`.
    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
        self.0.seek(pos)
    }
    fn rewind(&mut self) -> std::io::Result<()> {
        self.0.rewind()
    }
    fn stream_position(&mut self) -> std::io::Result<u64> {
        self.0.stream_position()
    }
    // TODO: Add `seek_relative` once MSRV is at least 1.80.0
}
/// A function to produce an `ImageDecoder` for a given image format.
// The higher-ranked lifetime lets the hook accept a reader borrowing data of
// any lifetime and return a decoder tied to that same lifetime.
pub type DecodingHook =
    Box<dyn for<'a> Fn(GenericReader<'a>) -> ImageResult<Box<dyn ImageDecoder + 'a>> + Send + Sync>;
/// Registers a new decoding hook, returning `false` if one already exists for the given format.
pub fn register_decoding_hook(extension: OsString, hook: DecodingHook) -> bool {
    let mut hooks = DECODING_HOOKS.write().unwrap();
    // Lazily create the map on first registration instead of testing
    // `is_none` and unwrapping afterwards.
    let hooks = hooks.get_or_insert_with(HashMap::new);
    match hooks.entry(extension) {
        std::collections::hash_map::Entry::Vacant(entry) => {
            entry.insert(hook);
            true
        }
        // An existing hook is never overwritten.
        std::collections::hash_map::Entry::Occupied(_) => false,
    }
}
/// Returns whether a decoding hook has been registered for the given format.
pub fn decoding_hook_registered(extension: &OsStr) -> bool {
    // `map_or` instead of `map(..).unwrap_or(..)` (clippy::map_unwrap_or);
    // an uninitialized registry means nothing is registered.
    DECODING_HOOKS
        .read()
        .unwrap()
        .as_ref()
        .map_or(false, |hooks| hooks.contains_key(extension))
}
/// Registers a format detection hook.
///
/// The signature field holds the magic bytes from the start of the file that must be matched to
/// detect the format. The mask field is optional and can be used to specify which bytes in the
/// signature should be ignored during the detection.
///
/// # Examples
///
/// ## Using the mask to ignore some bytes
///
/// ```
/// # use image::hooks::register_format_detection_hook;
/// // WebP signature is 'riff' followed by 4 bytes of length and then by 'webp'.
/// // This requires a mask to ignore the length.
/// register_format_detection_hook("webp".into(),
/// &[b'r', b'i', b'f', b'f', 0, 0, 0, 0, b'w', b'e', b'b', b'p'],
/// Some(&[0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff]),
/// );
/// ```
///
/// ## Multiple signatures
///
/// ```
/// # use image::hooks::register_format_detection_hook;
/// // JPEG XL has two different signatures: https://en.wikipedia.org/wiki/JPEG_XL
/// // This function should be called twice to register them both.
/// register_format_detection_hook("jxl".into(), &[0xff, 0x0a], None);
/// register_format_detection_hook("jxl".into(),
/// &[0x00, 0x00, 0x00, 0x0c, 0x4a, 0x58, 0x4c, 0x20, 0x0d, 0x0a, 0x87, 0x0a], None,
/// );
/// ```
///
pub fn register_format_detection_hook(
extension: OsString,
signature: &'static [u8],
mask: Option<&'static [u8]>,
) {
GUESS_FORMAT_HOOKS
.write()
.unwrap()
.push((signature, mask.unwrap_or(&[]), extension));
}

408
vendor/image/src/imageops/affine.rs vendored Normal file
View File

@@ -0,0 +1,408 @@
//! Functions for performing affine transformations.
use crate::error::{ImageError, ParameterError, ParameterErrorKind};
use crate::traits::Pixel;
use crate::{GenericImage, GenericImageView, ImageBuffer};
/// Rotate an image 90 degrees clockwise.
pub fn rotate90<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
where
    I::Pixel: 'static,
{
    // The rotated output has transposed dimensions.
    let (w, h) = image.dimensions();
    let mut rotated = image.buffer_with_dimensions(h, w);
    // Dimensions match by construction, so the fallible call cannot fail.
    let _ = rotate90_in(image, &mut rotated);
    rotated
}
/// Rotate an image 180 degrees clockwise.
pub fn rotate180<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
where
    I::Pixel: 'static,
{
    // A 180° rotation keeps the dimensions unchanged.
    let (w, h) = image.dimensions();
    let mut rotated = image.buffer_with_dimensions(w, h);
    // Dimensions match by construction, so the fallible call cannot fail.
    let _ = rotate180_in(image, &mut rotated);
    rotated
}
/// Rotate an image 270 degrees clockwise.
pub fn rotate270<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
where
    I::Pixel: 'static,
{
    // The rotated output has transposed dimensions.
    let (w, h) = image.dimensions();
    let mut rotated = image.buffer_with_dimensions(h, w);
    // Dimensions match by construction, so the fallible call cannot fail.
    let _ = rotate270_in(image, &mut rotated);
    rotated
}
/// Rotate an image 90 degrees clockwise and put the result into the destination [`ImageBuffer`].
pub fn rotate90_in<I, Container>(
    image: &I,
    destination: &mut ImageBuffer<I::Pixel, Container>,
) -> crate::ImageResult<()>
where
    I: GenericImageView,
    I::Pixel: 'static,
    Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
{
    // A 90° clockwise rotation maps (x, y) -> (h - 1 - y, x), so the
    // destination must have transposed dimensions.
    let (src_w, src_h) = image.dimensions();
    let (dst_w, dst_h) = destination.dimensions();
    if (src_w, src_h) != (dst_h, dst_w) {
        return Err(ImageError::Parameter(ParameterError::from_kind(
            ParameterErrorKind::DimensionMismatch,
        )));
    }
    for (x, y, pixel) in image.pixels() {
        destination.put_pixel(src_h - 1 - y, x, pixel);
    }
    Ok(())
}
/// Rotate an image 180 degrees clockwise and put the result into the destination [`ImageBuffer`].
pub fn rotate180_in<I, Container>(
    image: &I,
    destination: &mut ImageBuffer<I::Pixel, Container>,
) -> crate::ImageResult<()>
where
    I: GenericImageView,
    I::Pixel: 'static,
    Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
{
    // A 180° rotation maps (x, y) -> (w - 1 - x, h - 1 - y); the dimensions
    // stay the same.
    let (src_w, src_h) = image.dimensions();
    if (src_w, src_h) != destination.dimensions() {
        return Err(ImageError::Parameter(ParameterError::from_kind(
            ParameterErrorKind::DimensionMismatch,
        )));
    }
    for (x, y, pixel) in image.pixels() {
        destination.put_pixel(src_w - 1 - x, src_h - 1 - y, pixel);
    }
    Ok(())
}
/// Rotate an image 270 degrees clockwise and put the result into the destination [`ImageBuffer`].
pub fn rotate270_in<I, Container>(
    image: &I,
    destination: &mut ImageBuffer<I::Pixel, Container>,
) -> crate::ImageResult<()>
where
    I: GenericImageView,
    I::Pixel: 'static,
    Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
{
    // A 270° clockwise rotation maps (x, y) -> (y, w - 1 - x), so the
    // destination must have transposed dimensions.
    let (src_w, src_h) = image.dimensions();
    let (dst_w, dst_h) = destination.dimensions();
    if (src_w, src_h) != (dst_h, dst_w) {
        return Err(ImageError::Parameter(ParameterError::from_kind(
            ParameterErrorKind::DimensionMismatch,
        )));
    }
    for (x, y, pixel) in image.pixels() {
        destination.put_pixel(y, src_w - 1 - x, pixel);
    }
    Ok(())
}
/// Flip an image horizontally
pub fn flip_horizontal<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
where
    I::Pixel: 'static,
{
    // A same-sized output buffer; dimensions match, so the call cannot fail.
    let mut flipped = image.buffer_like();
    let _ = flip_horizontal_in(image, &mut flipped);
    flipped
}
/// Flip an image vertically
pub fn flip_vertical<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<I::Pixel, Vec<<I::Pixel as Pixel>::Subpixel>>
where
    I::Pixel: 'static,
{
    // A same-sized output buffer; dimensions match, so the call cannot fail.
    let mut flipped = image.buffer_like();
    let _ = flip_vertical_in(image, &mut flipped);
    flipped
}
/// Flip an image horizontally and put the result into the destination [`ImageBuffer`].
pub fn flip_horizontal_in<I, Container>(
    image: &I,
    destination: &mut ImageBuffer<I::Pixel, Container>,
) -> crate::ImageResult<()>
where
    I: GenericImageView,
    I::Pixel: 'static,
    Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
{
    // Mirroring across the vertical axis keeps the dimensions unchanged.
    let (width, height) = image.dimensions();
    if (width, height) != destination.dimensions() {
        return Err(ImageError::Parameter(ParameterError::from_kind(
            ParameterErrorKind::DimensionMismatch,
        )));
    }
    for (x, y, pixel) in image.pixels() {
        destination.put_pixel(width - x - 1, y, pixel);
    }
    Ok(())
}
/// Flip an image vertically and put the result into the destination [`ImageBuffer`].
pub fn flip_vertical_in<I, Container>(
    image: &I,
    destination: &mut ImageBuffer<I::Pixel, Container>,
) -> crate::ImageResult<()>
where
    I: GenericImageView,
    I::Pixel: 'static,
    Container: std::ops::DerefMut<Target = [<I::Pixel as Pixel>::Subpixel]>,
{
    // Mirroring across the horizontal axis keeps the dimensions unchanged.
    let (width, height) = image.dimensions();
    if (width, height) != destination.dimensions() {
        return Err(ImageError::Parameter(ParameterError::from_kind(
            ParameterErrorKind::DimensionMismatch,
        )));
    }
    for (x, y, pixel) in image.pixels() {
        destination.put_pixel(x, height - 1 - y, pixel);
    }
    Ok(())
}
/// Rotate an image 180 degrees clockwise in place.
pub fn rotate180_in_place<I: GenericImage>(image: &mut I) {
    let (width, height) = image.dimensions();

    // Swap every pixel in the top half with its 180°-rotated counterpart
    // (w - 1 - x, h - 1 - y) in the bottom half.
    for y in 0..height / 2 {
        for x in 0..width {
            let p = image.get_pixel(x, y);
            let x2 = width - x - 1;
            let y2 = height - y - 1;
            let p2 = image.get_pixel(x2, y2);
            image.put_pixel(x, y, p2);
            image.put_pixel(x2, y2, p);
        }
    }

    if height % 2 != 0 {
        // Odd height: the middle row rotates onto itself, so mirror only its
        // left half against its right half (a middle pixel, if any, is fixed).
        let middle = height / 2;

        for x in 0..width / 2 {
            let p = image.get_pixel(x, middle);
            let x2 = width - x - 1;

            let p2 = image.get_pixel(x2, middle);
            image.put_pixel(x, middle, p2);
            image.put_pixel(x2, middle, p);
        }
    }
}
/// Flip an image horizontally in place.
pub fn flip_horizontal_in_place<I: GenericImage>(image: &mut I) {
    let (width, height) = image.dimensions();
    // Swap each pixel with its mirror across the vertical centre line; a
    // possible middle column maps onto itself and needs no work.
    for row in 0..height {
        for col in 0..width / 2 {
            let mirror = width - col - 1;
            let left = image.get_pixel(col, row);
            let right = image.get_pixel(mirror, row);
            image.put_pixel(col, row, right);
            image.put_pixel(mirror, row, left);
        }
    }
}
/// Flip an image vertically in place.
pub fn flip_vertical_in_place<I: GenericImage>(image: &mut I) {
    let (width, height) = image.dimensions();
    // Swap each pixel with its mirror across the horizontal centre line; a
    // possible middle row maps onto itself and needs no work.
    for row in 0..height / 2 {
        for col in 0..width {
            let mirror = height - row - 1;
            let top = image.get_pixel(col, row);
            let bottom = image.get_pixel(col, mirror);
            image.put_pixel(col, row, bottom);
            image.put_pixel(col, mirror, top);
        }
    }
}
#[cfg(test)]
mod test {
    use super::{
        flip_horizontal, flip_horizontal_in_place, flip_vertical, flip_vertical_in_place,
        rotate180, rotate180_in_place, rotate270, rotate90,
    };
    use crate::traits::Pixel;
    use crate::{GenericImage, GrayImage, ImageBuffer};

    // Compares two images and panics with the first few differing pixels when
    // they do not match exactly (dimensions first, then pixel values).
    macro_rules! assert_pixels_eq {
        ($actual:expr, $expected:expr) => {{
            let actual_dim = $actual.dimensions();
            let expected_dim = $expected.dimensions();

            if actual_dim != expected_dim {
                panic!(
                    "dimensions do not match. \
                     actual: {:?}, expected: {:?}",
                    actual_dim, expected_dim
                )
            }

            let diffs = pixel_diffs($actual, $expected);

            if !diffs.is_empty() {
                let mut err = "".to_string();

                // Report at most five differences to keep the message short.
                let diff_messages = diffs
                    .iter()
                    .take(5)
                    .map(|d| format!("\nactual: {:?}, expected {:?} ", d.0, d.1))
                    .collect::<Vec<_>>()
                    .join("");

                err.push_str(&diff_messages);
                panic!("pixels do not match. {:?}", err)
            }
        }};
    }

    #[test]
    fn test_rotate90() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(2, 3, vec![10u8, 0u8, 11u8, 1u8, 12u8, 2u8]).unwrap();
        assert_pixels_eq!(&rotate90(&image), &expected);
    }

    #[test]
    fn test_rotate180() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![12u8, 11u8, 10u8, 2u8, 1u8, 0u8]).unwrap();
        assert_pixels_eq!(&rotate180(&image), &expected);
    }

    #[test]
    fn test_rotate270() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(2, 3, vec![2u8, 12u8, 1u8, 11u8, 0u8, 10u8]).unwrap();
        assert_pixels_eq!(&rotate270(&image), &expected);
    }

    #[test]
    fn test_rotate180_in_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![12u8, 11u8, 10u8, 2u8, 1u8, 0u8]).unwrap();
        rotate180_in_place(&mut image);
        assert_pixels_eq!(&image, &expected);
    }

    #[test]
    fn test_flip_horizontal() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![2u8, 1u8, 0u8, 12u8, 11u8, 10u8]).unwrap();
        assert_pixels_eq!(&flip_horizontal(&image), &expected);
    }

    #[test]
    fn test_flip_vertical() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 0u8, 1u8, 2u8]).unwrap();
        assert_pixels_eq!(&flip_vertical(&image), &expected);
    }

    #[test]
    fn test_flip_horizontal_in_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![2u8, 1u8, 0u8, 12u8, 11u8, 10u8]).unwrap();
        flip_horizontal_in_place(&mut image);
        assert_pixels_eq!(&image, &expected);
    }

    #[test]
    fn test_flip_vertical_in_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 0u8, 1u8, 2u8]).unwrap();
        flip_vertical_in_place(&mut image);
        assert_pixels_eq!(&image, &expected);
    }

    // Collects all (x, y, pixel) pairs where the two images disagree.
    #[allow(clippy::type_complexity)]
    fn pixel_diffs<I, J, P>(left: &I, right: &J) -> Vec<((u32, u32, P), (u32, u32, P))>
    where
        I: GenericImage<Pixel = P>,
        J: GenericImage<Pixel = P>,
        P: Pixel + Eq,
    {
        left.pixels()
            .zip(right.pixels())
            .filter(|&(p, q)| p != q)
            .collect::<Vec<_>>()
    }
}

650
vendor/image/src/imageops/colorops.rs vendored Normal file
View File

@@ -0,0 +1,650 @@
//! Functions for altering and converting the color of pixelbufs
use num_traits::NumCast;
use crate::color::{FromColor, IntoColor, Luma, LumaA};
use crate::metadata::{CicpColorPrimaries, CicpTransferCharacteristics};
use crate::traits::{Pixel, Primitive};
use crate::utils::clamp;
use crate::{GenericImage, GenericImageView, ImageBuffer};
// Shorthand for the subpixel (channel) type of an image view's pixel type.
type Subpixel<I> = <<I as GenericImageView>::Pixel as Pixel>::Subpixel;
/// Convert the supplied image to grayscale. Alpha channel is discarded.
pub fn grayscale<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<Luma<Subpixel<I>>, Vec<Subpixel<I>>> {
    // Convenience wrapper keeping the source subpixel type for the output.
    grayscale_with_type(image)
}
/// Convert the supplied image to grayscale. Alpha channel is preserved.
pub fn grayscale_alpha<I: GenericImageView>(
    image: &I,
) -> ImageBuffer<LumaA<Subpixel<I>>, Vec<Subpixel<I>>> {
    // Convenience wrapper keeping the source subpixel type for the output.
    grayscale_with_type_alpha(image)
}
/// Convert the supplied image to a grayscale image with the specified pixel type. Alpha channel is discarded.
pub fn grayscale_with_type<NewPixel, I: GenericImageView>(
    image: &I,
) -> ImageBuffer<NewPixel, Vec<NewPixel::Subpixel>>
where
    NewPixel: Pixel + FromColor<Luma<Subpixel<I>>>,
{
    let (width, height) = image.dimensions();
    let mut out = ImageBuffer::new(width, height);
    // Carry over the view's color space metadata; the empty (0x0) buffer is
    // created only as a vehicle for it — NOTE(review): assumes
    // `buffer_with_dimensions` attaches the view's color space, confirm.
    out.copy_color_space_from(&image.buffer_with_dimensions(0, 0));

    for (x, y, pixel) in image.pixels() {
        let grayscale = pixel.to_luma();
        let new_pixel = grayscale.into_color(); // no-op for luma->luma
        out.put_pixel(x, y, new_pixel);
    }

    out
}
/// Convert the supplied image to a grayscale image with the specified pixel type. Alpha channel is preserved.
pub fn grayscale_with_type_alpha<NewPixel, I: GenericImageView>(
    image: &I,
) -> ImageBuffer<NewPixel, Vec<NewPixel::Subpixel>>
where
    NewPixel: Pixel + FromColor<LumaA<Subpixel<I>>>,
{
    let (width, height) = image.dimensions();
    let mut out = ImageBuffer::new(width, height);
    // Carry over the view's color space metadata; the empty (0x0) buffer is
    // created only as a vehicle for it — NOTE(review): assumes
    // `buffer_with_dimensions` attaches the view's color space, confirm.
    out.copy_color_space_from(&image.buffer_with_dimensions(0, 0));

    for (x, y, pixel) in image.pixels() {
        let grayscale = pixel.to_luma_alpha();
        let new_pixel = grayscale.into_color(); // no-op for luma->luma
        out.put_pixel(x, y, new_pixel);
    }

    out
}
/// Invert each pixel within the supplied image.
/// This function operates in place.
pub fn invert<I: GenericImage>(image: &mut I) {
    // TODO find a way to use pixels?
    let (width, height) = image.dimensions();
    for row in 0..height {
        for col in 0..width {
            let mut px = image.get_pixel(col, row);
            px.invert();
            image.put_pixel(col, row, px);
        }
    }
}
/// Adjust the contrast of the supplied image.
/// `contrast` is the amount to adjust the contrast by; negative values
/// decrease the contrast and positive values increase it.
///
/// *[See also `contrast_in_place`.][contrast_in_place]*
pub fn contrast<I, P, S>(image: &I, contrast: f32) -> ImageBuffer<P, Vec<S>>
where
    I: GenericImageView<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + 'static,
{
    let mut out = image.buffer_like();

    let max: f32 = NumCast::from(S::DEFAULT_MAX_VALUE).unwrap();
    // Scale each channel's distance from mid-grey by ((100 + c) / 100)^2.
    let factor = ((100.0 + contrast) / 100.0).powi(2);

    for (x, y, pixel) in image.pixels() {
        let adjusted = pixel.map(|channel| {
            let value: f32 = NumCast::from(channel).unwrap();
            let scaled = ((value / max - 0.5) * factor + 0.5) * max;
            NumCast::from(clamp(scaled, 0.0, max)).unwrap()
        });
        out.put_pixel(x, y, adjusted);
    }

    out
}
/// Adjust the contrast of the supplied image in place.
/// `contrast` is the amount to adjust the contrast by.
/// Negative values decrease the contrast and positive values increase the contrast.
///
/// *[See also `contrast`.][contrast]*
pub fn contrast_in_place<I>(image: &mut I, contrast: f32)
where
    I: GenericImage,
{
    let (width, height) = image.dimensions();
    let max = <I::Pixel as Pixel>::Subpixel::DEFAULT_MAX_VALUE;
    let max: f32 = NumCast::from(max).unwrap();
    // Squaring the percentage makes small adjustments gentle and large ones steep.
    let percent = ((100.0 + contrast) / 100.0).powi(2);
    // TODO find a way to use pixels?
    for y in 0..height {
        for x in 0..width {
            let f = image.get_pixel(x, y).map(|b| {
                let c: f32 = NumCast::from(b).unwrap();
                // Scale each channel's distance from mid-gray, then clamp
                // back into the valid subpixel range.
                let d = ((c / max - 0.5) * percent + 0.5) * max;
                let e = clamp(d, 0.0, max);
                NumCast::from(e).unwrap()
            });
            image.put_pixel(x, y, f);
        }
    }
}
/// Brighten the supplied image.
/// `value` is the amount to brighten each pixel by.
/// Negative values decrease the brightness and positive values increase it.
///
/// *[See also `brighten_in_place`.][brighten_in_place]*
pub fn brighten<I, P, S>(image: &I, value: i32) -> ImageBuffer<P, Vec<S>>
where
    I: GenericImageView<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + 'static,
{
    let mut out = image.buffer_like();
    let max = S::DEFAULT_MAX_VALUE;
    let max: i32 = NumCast::from(max).unwrap();
    for (x, y, pixel) in image.pixels() {
        // Shift each color channel by `value`, clamping to the valid range;
        // the alpha channel is passed through unchanged.
        let e = pixel.map_with_alpha(
            |b| {
                let c: i32 = NumCast::from(b).unwrap();
                let d = clamp(c + value, 0, max);
                NumCast::from(d).unwrap()
            },
            |alpha| alpha,
        );
        out.put_pixel(x, y, e);
    }
    out
}
/// Brighten the supplied image in place.
/// `value` is the amount to brighten each pixel by.
/// Negative values decrease the brightness and positive values increase it.
///
/// *[See also `brighten`.][brighten]*
pub fn brighten_in_place<I>(image: &mut I, value: i32)
where
    I: GenericImage,
{
    let (width, height) = image.dimensions();
    let max = <I::Pixel as Pixel>::Subpixel::DEFAULT_MAX_VALUE;
    let max: i32 = NumCast::from(max).unwrap(); // TODO what does this do for f32? clamp at 1??
    // TODO find a way to use pixels?
    for y in 0..height {
        for x in 0..width {
            // Shift each color channel by `value`, clamping to the valid
            // range; the alpha channel is passed through unchanged.
            let e = image.get_pixel(x, y).map_with_alpha(
                |b| {
                    let c: i32 = NumCast::from(b).unwrap();
                    let d = clamp(c + value, 0, max);
                    NumCast::from(d).unwrap()
                },
                |alpha| alpha,
            );
            image.put_pixel(x, y, e);
        }
    }
}
/// Hue rotate the supplied image.
/// `value` is the degrees to rotate each pixel by.
/// 0 and 360 do nothing, the rest rotates by the given degree value.
/// just like the css webkit filter hue-rotate(180)
///
/// *[See also `huerotate_in_place`.][huerotate_in_place]*
pub fn huerotate<I, P, S>(image: &I, value: i32) -> ImageBuffer<P, Vec<S>>
where
    I: GenericImageView<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + 'static,
{
    let mut out = image.buffer_like();
    let angle: f64 = NumCast::from(value).unwrap();
    let cosv = angle.to_radians().cos();
    let sinv = angle.to_radians().sin();
    // 3x3 hue-rotation matrix (row-major) using the 0.213/0.715/0.072 luma
    // weights, matching the CSS hue-rotate filter referenced in the doc above.
    let matrix: [f64; 9] = [
        // Reds
        0.213 + cosv * 0.787 - sinv * 0.213,
        0.715 - cosv * 0.715 - sinv * 0.715,
        0.072 - cosv * 0.072 + sinv * 0.928,
        // Greens
        0.213 - cosv * 0.213 + sinv * 0.143,
        0.715 + cosv * 0.285 + sinv * 0.140,
        0.072 - cosv * 0.072 - sinv * 0.283,
        // Blues
        0.213 - cosv * 0.213 - sinv * 0.787,
        0.715 - cosv * 0.715 + sinv * 0.715,
        0.072 + cosv * 0.928 + sinv * 0.072,
    ];
    for (x, y, pixel) in out.enumerate_pixels_mut() {
        let p = image.get_pixel(x, y);
        #[allow(deprecated)]
        let (k1, k2, k3, k4) = p.channels4();
        let vec: (f64, f64, f64, f64) = (
            NumCast::from(k1).unwrap(),
            NumCast::from(k2).unwrap(),
            NumCast::from(k3).unwrap(),
            NumCast::from(k4).unwrap(),
        );
        let r = vec.0;
        let g = vec.1;
        let b = vec.2;
        // Apply the matrix to the RGB channels; alpha (vec.3) passes through.
        let new_r = matrix[0] * r + matrix[1] * g + matrix[2] * b;
        let new_g = matrix[3] * r + matrix[4] * g + matrix[5] * b;
        let new_b = matrix[6] * r + matrix[7] * g + matrix[8] * b;
        // NOTE(review): the clamp ceiling is hard-coded to 255, which assumes
        // 8-bit subpixels; wider subpixel types would be clipped — TODO confirm.
        let max = 255f64;
        #[allow(deprecated)]
        let outpixel = Pixel::from_channels(
            NumCast::from(clamp(new_r, 0.0, max)).unwrap(),
            NumCast::from(clamp(new_g, 0.0, max)).unwrap(),
            NumCast::from(clamp(new_b, 0.0, max)).unwrap(),
            NumCast::from(clamp(vec.3, 0.0, max)).unwrap(),
        );
        *pixel = outpixel;
    }
    out
}
/// Hue rotate the supplied image in place.
///
/// `value` is the degrees to rotate each pixel by.
/// 0 and 360 do nothing, the rest rotates by the given degree value.
/// just like the css webkit filter hue-rotate(180)
///
/// *[See also `huerotate`.][huerotate]*
pub fn huerotate_in_place<I>(image: &mut I, value: i32)
where
    I: GenericImage,
{
    let (width, height) = image.dimensions();
    let angle: f64 = NumCast::from(value).unwrap();
    let cosv = angle.to_radians().cos();
    let sinv = angle.to_radians().sin();
    // 3x3 hue-rotation matrix (row-major) using the 0.213/0.715/0.072 luma
    // weights, matching the CSS hue-rotate filter referenced in the doc above.
    let matrix: [f64; 9] = [
        // Reds
        0.213 + cosv * 0.787 - sinv * 0.213,
        0.715 - cosv * 0.715 - sinv * 0.715,
        0.072 - cosv * 0.072 + sinv * 0.928,
        // Greens
        0.213 - cosv * 0.213 + sinv * 0.143,
        0.715 + cosv * 0.285 + sinv * 0.140,
        0.072 - cosv * 0.072 - sinv * 0.283,
        // Blues
        0.213 - cosv * 0.213 - sinv * 0.787,
        0.715 - cosv * 0.715 + sinv * 0.715,
        0.072 + cosv * 0.928 + sinv * 0.072,
    ];
    // TODO find a way to use pixels?
    for y in 0..height {
        for x in 0..width {
            let pixel = image.get_pixel(x, y);
            #[allow(deprecated)]
            let (k1, k2, k3, k4) = pixel.channels4();
            let vec: (f64, f64, f64, f64) = (
                NumCast::from(k1).unwrap(),
                NumCast::from(k2).unwrap(),
                NumCast::from(k3).unwrap(),
                NumCast::from(k4).unwrap(),
            );
            let r = vec.0;
            let g = vec.1;
            let b = vec.2;
            // Apply the matrix to the RGB channels; alpha (vec.3) passes through.
            let new_r = matrix[0] * r + matrix[1] * g + matrix[2] * b;
            let new_g = matrix[3] * r + matrix[4] * g + matrix[5] * b;
            let new_b = matrix[6] * r + matrix[7] * g + matrix[8] * b;
            // NOTE(review): clamp ceiling hard-coded to 255 — assumes 8-bit
            // subpixels; wider types would be clipped — TODO confirm.
            let max = 255f64;
            #[allow(deprecated)]
            let outpixel = Pixel::from_channels(
                NumCast::from(clamp(new_r, 0.0, max)).unwrap(),
                NumCast::from(clamp(new_g, 0.0, max)).unwrap(),
                NumCast::from(clamp(new_b, 0.0, max)).unwrap(),
                NumCast::from(clamp(vec.3, 0.0, max)).unwrap(),
            );
            image.put_pixel(x, y, outpixel);
        }
    }
}
/// A color map
pub trait ColorMap {
    /// The color type on which the map operates.
    type Color;
    /// Returns the index of the closest match of `color`
    /// in the color map.
    fn index_of(&self, color: &Self::Color) -> usize;
    /// Looks up color by index in the color map. If `idx` is out of range for the color map, or
    /// `ColorMap` doesn't implement `lookup` `None` is returned.
    fn lookup(&self, index: usize) -> Option<Self::Color> {
        // Default implementation: no reverse lookup available.
        let _ = index;
        None
    }
    /// Determine if this implementation of `ColorMap` overrides the default `lookup`.
    fn has_lookup(&self) -> bool {
        false
    }
    /// Maps `color` to the closest color in the color map.
    fn map_color(&self, color: &mut Self::Color);
}
/// A bi-level color map
///
/// # Examples
/// ```
/// use image::imageops::colorops::{index_colors, BiLevel, ColorMap};
/// use image::{ImageBuffer, Luma};
///
/// let (w, h) = (16, 16);
/// // Create an image with a smooth horizontal gradient from black (0) to white (255).
/// let gray = ImageBuffer::from_fn(w, h, |x, y| -> Luma<u8> { [(255 * x / w) as u8].into() });
/// // Mapping the gray image through the `BiLevel` filter should map gray pixels less than half
/// // intensity (127) to black (0), and anything greater to white (255).
/// let cmap = BiLevel;
/// let palletized = index_colors(&gray, &cmap);
/// let mapped = ImageBuffer::from_fn(w, h, |x, y| {
///     let p = palletized.get_pixel(x, y);
///     cmap.lookup(p.0[0] as usize)
///         .expect("indexed color out-of-range")
/// });
/// // Create an black and white image of expected output.
/// let bw = ImageBuffer::from_fn(w, h, |x, y| -> Luma<u8> {
///     if x <= (w / 2) {
///         [0].into()
///     } else {
///         [255].into()
///     }
/// });
/// assert_eq!(mapped, bw);
/// ```
#[derive(Clone, Copy)]
pub struct BiLevel;
impl ColorMap for BiLevel {
    type Color = Luma<u8>;
    #[inline(always)]
    fn index_of(&self, color: &Luma<u8>) -> usize {
        // Threshold at mid-gray: >127 maps to index 1 (white), else 0 (black).
        let luma = color.0;
        if luma[0] > 127 {
            1
        } else {
            0
        }
    }
    #[inline(always)]
    fn lookup(&self, idx: usize) -> Option<Self::Color> {
        match idx {
            0 => Some([0].into()),
            1 => Some([255].into()),
            _ => None,
        }
    }
    /// Indicate `BiLevel` implements `lookup`.
    fn has_lookup(&self) -> bool {
        true
    }
    #[inline(always)]
    fn map_color(&self, color: &mut Luma<u8>) {
        // index 0 -> 0x00, index 1 -> 0xFF
        let new_color = 0xFF * self.index_of(color) as u8;
        let luma = &mut color.0;
        luma[0] = new_color;
    }
}
#[cfg(feature = "color_quant")]
impl ColorMap for color_quant::NeuQuant {
type Color = crate::color::Rgba<u8>;
#[inline(always)]
fn index_of(&self, color: &Self::Color) -> usize {
self.index_of(color.channels())
}
#[inline(always)]
fn lookup(&self, idx: usize) -> Option<Self::Color> {
self.lookup(idx).map(|p| p.into())
}
/// Indicate NeuQuant implements `lookup`.
fn has_lookup(&self) -> bool {
true
}
#[inline(always)]
fn map_color(&self, color: &mut Self::Color) {
self.map_pixel(color.channels_mut());
}
}
/// Floyd-Steinberg error diffusion
///
/// Adds `error * factor / 16` to each channel of `pixel`, saturating the
/// result into the `u8` range instead of wrapping.
fn diffuse_err<P: Pixel<Subpixel = u8>>(pixel: &mut P, error: [i16; 3], factor: i16) {
    for (&err_component, channel) in error.iter().zip(pixel.channels_mut().iter_mut()) {
        let adjusted = i16::from(*channel) + err_component * factor / 16;
        *channel = adjusted.clamp(0, 0xFF) as u8;
    }
}
// Quantizes the pixel at ($x, $y) through `$map` in place and records the
// per-channel quantization error (old - new) into the `$err` array so it can
// be diffused to neighboring pixels.
macro_rules! do_dithering(
    ($map:expr, $image:expr, $err:expr, $x:expr, $y:expr) => (
        {
            let old_pixel = $image[($x, $y)];
            let new_pixel = $image.get_pixel_mut($x, $y);
            $map.map_color(new_pixel);
            for ((e, &old), &new) in $err.iter_mut()
                .zip(old_pixel.channels().iter())
                .zip(new_pixel.channels().iter())
            {
                *e = <i16 as From<_>>::from(old) - <i16 as From<_>>::from(new)
            }
        }
    )
);
/// Reduces the colors of the image using the supplied `color_map` while applying
/// Floyd-Steinberg dithering to improve the visual conception
///
/// Each quantization error is distributed to the right, lower-left, lower, and
/// lower-right neighbors with the classic 7/16, 3/16, 5/16, 1/16 weights.
/// Edge rows/columns are handled by dedicated branches that skip out-of-bounds
/// neighbors.
///
/// NOTE(review): the index arithmetic (`x + 1`, `width - 1`, …) appears to
/// assume `width >= 2` and `height >= 1`; a 1-pixel-wide image would index out
/// of bounds — TODO confirm callers sanitize dimensions.
pub fn dither<Pix, Map>(image: &mut ImageBuffer<Pix, Vec<u8>>, color_map: &Map)
where
    Map: ColorMap<Color = Pix> + ?Sized,
    Pix: Pixel<Subpixel = u8> + 'static,
{
    let (width, height) = image.dimensions();
    let mut err: [i16; 3] = [0; 3];
    // All rows except the last: diffuse error rightwards and downwards.
    for y in 0..height - 1 {
        // First column: no lower-left neighbor.
        let x = 0;
        do_dithering!(color_map, image, err, x, y);
        diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
        diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
        diffuse_err(image.get_pixel_mut(x + 1, y + 1), err, 1);
        // Interior columns: all four neighbors exist.
        for x in 1..width - 1 {
            do_dithering!(color_map, image, err, x, y);
            diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
            diffuse_err(image.get_pixel_mut(x - 1, y + 1), err, 3);
            diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
            diffuse_err(image.get_pixel_mut(x + 1, y + 1), err, 1);
        }
        // Last column: no right-hand neighbors.
        let x = width - 1;
        do_dithering!(color_map, image, err, x, y);
        diffuse_err(image.get_pixel_mut(x - 1, y + 1), err, 3);
        diffuse_err(image.get_pixel_mut(x, y + 1), err, 5);
    }
    // Last row: only rightward diffusion remains.
    let y = height - 1;
    let x = 0;
    do_dithering!(color_map, image, err, x, y);
    diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
    for x in 1..width - 1 {
        do_dithering!(color_map, image, err, x, y);
        diffuse_err(image.get_pixel_mut(x + 1, y), err, 7);
    }
    // Bottom-right corner: nowhere left to diffuse.
    let x = width - 1;
    do_dithering!(color_map, image, err, x, y);
}
/// Reduces the colors using the supplied `color_map` and returns an image of the indices
///
/// The returned buffer holds palette indices (one per pixel), not colors, so
/// its color-space metadata is deliberately left unspecified.
pub fn index_colors<Pix, Map>(
    image: &ImageBuffer<Pix, Vec<u8>>,
    color_map: &Map,
) -> ImageBuffer<Luma<u8>, Vec<u8>>
where
    Map: ColorMap<Color = Pix> + ?Sized,
    Pix: Pixel<Subpixel = u8> + 'static,
{
    // Special case, we do *not* want to copy the color space here.
    let mut indices = ImageBuffer::new(image.width(), image.height());
    indices.set_rgb_primaries(CicpColorPrimaries::Unspecified);
    indices.set_transfer_function(CicpTransferCharacteristics::Unspecified);
    for (pixel, idx) in image.pixels().zip(indices.pixels_mut()) {
        // NOTE: indices wider than u8 would be truncated by this cast.
        *idx = Luma([color_map.index_of(pixel) as u8]);
    }
    indices
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::GrayImage;
    // Asserts two images have identical dimensions and identical pixels,
    // reporting up to the first five differing pixels on failure.
    macro_rules! assert_pixels_eq {
        ($actual:expr, $expected:expr) => {{
            let actual_dim = $actual.dimensions();
            let expected_dim = $expected.dimensions();
            if actual_dim != expected_dim {
                panic!(
                    "dimensions do not match. \
                 actual: {:?}, expected: {:?}",
                    actual_dim, expected_dim
                )
            }
            let diffs = pixel_diffs($actual, $expected);
            if !diffs.is_empty() {
                let mut err = "".to_string();
                let diff_messages = diffs
                    .iter()
                    .take(5)
                    .map(|d| format!("\nactual: {:?}, expected {:?} ", d.0, d.1))
                    .collect::<Vec<_>>()
                    .join("");
                err.push_str(&diff_messages);
                panic!("pixels do not match. {:?}", err)
            }
        }};
    }
    #[test]
    fn test_dither() {
        // A uniform mid-gray (127) image dithers to a checkerboard.
        let mut image = ImageBuffer::from_raw(2, 2, vec![127, 127, 127, 127]).unwrap();
        let cmap = BiLevel;
        dither(&mut image, &cmap);
        assert_eq!(&*image, &[0, 0xFF, 0xFF, 0]);
        assert_eq!(index_colors(&image, &cmap).into_raw(), vec![0, 1, 1, 0]);
    }
    #[test]
    fn test_grayscale() {
        // Grayscaling an already-gray image is the identity.
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        assert_pixels_eq!(&grayscale(&image), &expected);
    }
    #[test]
    fn test_invert() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![255u8, 254u8, 253u8, 245u8, 244u8, 243u8]).unwrap();
        invert(&mut image);
        assert_pixels_eq!(&image, &expected);
    }
    #[test]
    fn test_brighten() {
        let image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 20u8, 21u8, 22u8]).unwrap();
        assert_pixels_eq!(&brighten(&image, 10), &expected);
    }
    #[test]
    fn test_brighten_place() {
        let mut image: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![0u8, 1u8, 2u8, 10u8, 11u8, 12u8]).unwrap();
        let expected: GrayImage =
            ImageBuffer::from_raw(3, 2, vec![10u8, 11u8, 12u8, 20u8, 21u8, 22u8]).unwrap();
        brighten_in_place(&mut image, 10);
        assert_pixels_eq!(&image, &expected);
    }
    // Collects every (x, y, pixel) pair where the two images disagree.
    #[allow(clippy::type_complexity)]
    fn pixel_diffs<I, J, P>(left: &I, right: &J) -> Vec<((u32, u32, P), (u32, u32, P))>
    where
        I: GenericImage<Pixel = P>,
        J: GenericImage<Pixel = P>,
        P: Pixel + Eq,
    {
        left.pixels()
            .zip(right.pixels())
            .filter(|&(p, q)| p != q)
            .collect::<Vec<_>>()
    }
}

573
vendor/image/src/imageops/fast_blur.rs vendored Normal file
View File

@@ -0,0 +1,573 @@
use num_traits::Bounded;
use crate::imageops::filter_1d::{SafeAdd, SafeMul};
use crate::{ImageBuffer, Pixel, Primitive};
/// Approximation of Gaussian blur.
///
/// # Arguments
///
/// * `image_buffer` - source image.
/// * `sigma` - value controls image flattening level.
///
/// This method assumes alpha pre-multiplication for images that contain non-constant alpha.
///
/// This method typically assumes that the input is scene-linear light.
/// If it is not, color distortion may occur.
///
/// Source: Kovesi, P.: Fast Almost-Gaussian Filtering The Australian Pattern
/// Recognition Society Conference: DICTA 2010. December 2010. Sydney.
#[must_use]
pub fn fast_blur<P: Pixel>(
    input_buffer: &ImageBuffer<P, Vec<P::Subpixel>>,
    sigma: f32,
) -> ImageBuffer<P, Vec<P::Subpixel>> {
    let (width, height) = input_buffer.dimensions();
    // Degenerate inputs: nothing to blur.
    if width == 0 || height == 0 {
        return input_buffer.clone();
    }
    // Three successive box blurs approximate a Gaussian (per the Kovesi paper).
    let num_passes = 3;
    let boxes = boxes_for_gauss(sigma, num_passes);
    if boxes.is_empty() {
        return input_buffer.clone();
    }
    let samples = input_buffer.as_flat_samples().samples;
    // Guard against width * height * channels overflowing usize.
    let destination_size = match (width as usize)
        .safe_mul(height as usize)
        .and_then(|x| x.safe_mul(P::CHANNEL_COUNT as usize))
    {
        Ok(s) => s,
        Err(_) => panic!("Width and height and channels count exceeded pointer size"),
    };
    let first_box = boxes[0];
    // Ping-pong buffers: horizontal pass writes `transient`, vertical writes `dst`.
    let mut transient = vec![P::Subpixel::min_value(); destination_size];
    let mut dst = vec![P::Subpixel::min_value(); destination_size];
    // If destination_size isn't failed this one must not fail either
    let stride = width as usize * P::CHANNEL_COUNT as usize;
    // bound + radius + 1 must fit in a pointer size
    test_radius_size(width as usize, first_box);
    test_radius_size(height as usize, first_box);
    box_blur_horizontal_pass_strategy::<P, P::Subpixel>(
        samples,
        stride,
        &mut transient,
        stride,
        width,
        first_box,
    );
    box_blur_vertical_pass_strategy::<P, P::Subpixel>(
        &transient, stride, &mut dst, stride, width, height, first_box,
    );
    // Remaining passes read from `dst` and round-trip through `transient`.
    for &box_container in boxes.iter().skip(1) {
        // bound + radius + 1 must fit in a pointer size
        test_radius_size(width as usize, box_container);
        test_radius_size(height as usize, box_container);
        box_blur_horizontal_pass_strategy::<P, P::Subpixel>(
            &dst,
            stride,
            &mut transient,
            stride,
            width,
            box_container,
        );
        box_blur_vertical_pass_strategy::<P, P::Subpixel>(
            &transient,
            stride,
            &mut dst,
            stride,
            width,
            height,
            box_container,
        );
    }
    let mut buffer = ImageBuffer::from_raw(width, height, dst).unwrap();
    // Blurring does not change the color space; copy metadata from the source.
    buffer.copy_color_space_from(input_buffer);
    buffer
}
/// Panics unless `bound + radius` fits in a `usize`; downstream index
/// arithmetic relies on this invariant.
#[inline]
fn test_radius_size(bound: usize, radius: usize) {
    if bound.safe_add(radius).is_err() {
        panic!("Radius overflowed maximum possible size")
    }
}
/// Computes the radii of `n` successive box blurs whose composition
/// approximates a Gaussian blur with standard deviation `sigma`, following
/// Kovesi's "Fast Almost-Gaussian Filtering" (DICTA 2010).
fn boxes_for_gauss(sigma: f32, n: usize) -> Vec<usize> {
    // Ideal box width for n passes; rounded down to the nearest odd value.
    let w_ideal = f32::sqrt((12.0 * sigma.powi(2) / (n as f32)) + 1.0);
    let mut w_l = w_ideal.floor();
    if w_l % 2.0 == 0.0 {
        w_l -= 1.0;
    }
    let w_u = w_l + 2.0;
    // Number of passes (m) that use the smaller width; the rest use w_u.
    let m_ideal = 0.25 * (n as f32) * (w_l + 3.0) - 3.0 * sigma.powi(2) * (w_l + 1.0).recip();
    let m = f32::round(m_ideal) as usize;
    (0..n)
        .map(|i| if i < m { w_l as usize } else { w_u as usize })
        // Convert box width to an odd radius expected by the blur passes.
        .map(|i| ceil_to_odd(i.saturating_sub(1) / 2))
        .collect::<Vec<_>>()
}
/// Rounds `x` up to the nearest odd number (odd inputs are unchanged).
#[inline]
fn ceil_to_odd(x: usize) -> usize {
    // Setting the lowest bit bumps even values up by one and leaves odd
    // values untouched — identical to `if x % 2 == 0 { x + 1 } else { x }`.
    x | 1
}
/// Multiplies `v * w`, clamps the product into the subpixel range of `T`, and
/// converts to `T` — rounding first for integer subpixel types but not for
/// float types (where truncating precision would be lossy).
#[inline]
#[allow(clippy::manual_clamp)]
fn rounding_saturating_mul<T: Primitive>(v: f32, w: f32) -> T {
    // T::DEFAULT_MAX_VALUE is equal to 1.0 only in cases where storage type if `f32/f64`,
    // that means it should be safe to round here.
    if T::DEFAULT_MAX_VALUE.to_f32().unwrap() != 1.0 {
        // Integer storage: round to nearest before converting.
        T::from(
            (v * w)
                .round()
                .min(T::DEFAULT_MAX_VALUE.to_f32().unwrap())
                .max(T::DEFAULT_MIN_VALUE.to_f32().unwrap()),
        )
        .unwrap()
    } else {
        // Float storage: keep full precision, clamp only.
        T::from(
            (v * w)
                .min(T::DEFAULT_MAX_VALUE.to_f32().unwrap())
                .max(T::DEFAULT_MIN_VALUE.to_f32().unwrap()),
        )
        .unwrap()
    }
}
/// Dispatches the horizontal box-blur pass to the monomorphized
/// implementation for the pixel type's channel count (1..=4).
fn box_blur_horizontal_pass_strategy<T, P: Primitive>(
    src: &[P],
    src_stride: usize,
    dst: &mut [P],
    dst_stride: usize,
    width: u32,
    radius: usize,
) where
    T: Pixel,
{
    match T::CHANNEL_COUNT {
        1 => box_blur_horizontal_pass_impl::<P, 1>(src, src_stride, dst, dst_stride, width, radius),
        2 => box_blur_horizontal_pass_impl::<P, 2>(src, src_stride, dst, dst_stride, width, radius),
        3 => box_blur_horizontal_pass_impl::<P, 3>(src, src_stride, dst, dst_stride, width, radius),
        4 => box_blur_horizontal_pass_impl::<P, 4>(src, src_stride, dst, dst_stride, width, radius),
        _ => unimplemented!("More than 4 channels is not yet implemented"),
    }
}
/// Dispatches the vertical box-blur pass to the monomorphized
/// implementation for the pixel type's channel count (1..=4).
fn box_blur_vertical_pass_strategy<T: Pixel, P: Primitive>(
    src: &[P],
    src_stride: usize,
    dst: &mut [P],
    dst_stride: usize,
    width: u32,
    height: u32,
    radius: usize,
) {
    match T::CHANNEL_COUNT {
        1 => box_blur_vertical_pass_impl::<P, 1>(
            src, src_stride, dst, dst_stride, width, height, radius,
        ),
        2 => box_blur_vertical_pass_impl::<P, 2>(
            src, src_stride, dst, dst_stride, width, height, radius,
        ),
        3 => box_blur_vertical_pass_impl::<P, 3>(
            src, src_stride, dst, dst_stride, width, height, radius,
        ),
        4 => box_blur_vertical_pass_impl::<P, 4>(
            src, src_stride, dst, dst_stride, width, height, radius,
        ),
        _ => unimplemented!("More than 4 channels is not yet implemented"),
    }
}
/// Row-wise sliding-window box blur over interleaved samples with `CN`
/// channels per pixel. Edges are handled by replicating the boundary pixel.
fn box_blur_horizontal_pass_impl<T, const CN: usize>(
    src: &[T],
    src_stride: usize,
    dst: &mut [T],
    dst_stride: usize,
    width: u32,
    radius: usize,
) where
    T: Primitive,
{
    assert!(width > 0, "Width must be sanitized before this method");
    test_radius_size(width as usize, radius);
    let kernel_size = radius * 2 + 1;
    // Multiplicity of the (replicated) edge pixel when seeding the window.
    let edge_count = ((kernel_size / 2) + 1) as f32;
    let half_kernel = kernel_size / 2;
    // Normalization factor: 1 / kernel width.
    let weight = 1f32 / (radius * 2 + 1) as f32;
    let width_bound = width as usize - 1;
    // Horizontal blurring consists from 4 phases
    // 1 - Fill initial sliding window
    // 2 - Blur dangerous leading zone where clamping is required
    // 3 - Blur *normal* zone where clamping is not required
    // 4 - Blur dangerous trailing zone where clamping is required
    for (dst, src) in dst
        .chunks_exact_mut(dst_stride)
        .zip(src.chunks_exact(src_stride))
    {
        // weight0..weight3 are the running channel sums of the sliding window.
        let mut weight1: f32 = 0.;
        let mut weight2: f32 = 0.;
        let mut weight3: f32 = 0.;
        let chunk0 = &src[..CN];
        // replicate edge
        let mut weight0 = chunk0[0].to_f32().unwrap() * edge_count;
        if CN > 1 {
            weight1 = chunk0[1].to_f32().unwrap() * edge_count;
        }
        if CN > 2 {
            weight2 = chunk0[2].to_f32().unwrap() * edge_count;
        }
        if CN == 4 {
            weight3 = chunk0[3].to_f32().unwrap() * edge_count;
        }
        // Phase 1: accumulate the right half of the initial window.
        for x in 1..=half_kernel {
            let px = x.min(width_bound) * CN;
            let chunk0 = &src[px..px + CN];
            weight0 += chunk0[0].to_f32().unwrap();
            if CN > 1 {
                weight1 += chunk0[1].to_f32().unwrap();
            }
            if CN > 2 {
                weight2 += chunk0[2].to_f32().unwrap();
            }
            if CN == 4 {
                weight3 += chunk0[3].to_f32().unwrap();
            }
        }
        // Phase 2: leading zone — neighbor indices must be clamped.
        for x in 0..half_kernel.min(width as usize) {
            let next = (x + half_kernel + 1).min(width_bound) * CN;
            let previous = (x as i64 - half_kernel as i64).max(0) as usize * CN;
            let dst_chunk = &mut dst[x * CN..x * CN + CN];
            dst_chunk[0] = rounding_saturating_mul(weight0, weight);
            if CN > 1 {
                dst_chunk[1] = rounding_saturating_mul(weight1, weight);
            }
            if CN > 2 {
                dst_chunk[2] = rounding_saturating_mul(weight2, weight);
            }
            if CN == 4 {
                dst_chunk[3] = rounding_saturating_mul(weight3, weight);
            }
            // Slide the window: add the incoming pixel, drop the outgoing one.
            let next_chunk = &src[next..next + CN];
            let previous_chunk = &src[previous..previous + CN];
            weight0 += next_chunk[0].to_f32().unwrap();
            if CN > 1 {
                weight1 += next_chunk[1].to_f32().unwrap();
            }
            if CN > 2 {
                weight2 += next_chunk[2].to_f32().unwrap();
            }
            if CN == 4 {
                weight3 += next_chunk[3].to_f32().unwrap();
            }
            weight0 -= previous_chunk[0].to_f32().unwrap();
            if CN > 1 {
                weight1 -= previous_chunk[1].to_f32().unwrap();
            }
            if CN > 2 {
                weight2 -= previous_chunk[2].to_f32().unwrap();
            }
            if CN == 4 {
                weight3 -= previous_chunk[3].to_f32().unwrap();
            }
        }
        let max_x_before_clamping = width_bound.saturating_sub(half_kernel + 1);
        let row_length = src.len();
        let mut last_processed_item = half_kernel;
        // Phase 3: interior zone — no clamping needed, so iterate with three
        // lock-stepped chunk iterators (dst, outgoing src, incoming src).
        if ((half_kernel * 2 + 1) * CN < row_length) && ((max_x_before_clamping * CN) < row_length)
        {
            let data_section = src;
            let advanced_kernel_part = &data_section[(half_kernel * 2 + 1) * CN..];
            let section_length = max_x_before_clamping - half_kernel;
            let dst = &mut dst[half_kernel * CN..(half_kernel * CN + section_length * CN)];
            for ((dst, src_previous), src_next) in dst
                .chunks_exact_mut(CN)
                .zip(data_section.chunks_exact(CN))
                .zip(advanced_kernel_part.chunks_exact(CN))
            {
                let dst_chunk = &mut dst[..CN];
                dst_chunk[0] = rounding_saturating_mul(weight0, weight);
                if CN > 1 {
                    dst_chunk[1] = rounding_saturating_mul(weight1, weight);
                }
                if CN > 2 {
                    dst_chunk[2] = rounding_saturating_mul(weight2, weight);
                }
                if CN == 4 {
                    dst_chunk[3] = rounding_saturating_mul(weight3, weight);
                }
                weight0 += src_next[0].to_f32().unwrap();
                if CN > 1 {
                    weight1 += src_next[1].to_f32().unwrap();
                }
                if CN > 2 {
                    weight2 += src_next[2].to_f32().unwrap();
                }
                if CN == 4 {
                    weight3 += src_next[3].to_f32().unwrap();
                }
                weight0 -= src_previous[0].to_f32().unwrap();
                if CN > 1 {
                    weight1 -= src_previous[1].to_f32().unwrap();
                }
                if CN > 2 {
                    weight2 -= src_previous[2].to_f32().unwrap();
                }
                if CN == 4 {
                    weight3 -= src_previous[3].to_f32().unwrap();
                }
            }
            last_processed_item = max_x_before_clamping;
        }
        // Phase 4: trailing zone — neighbor indices must be clamped again.
        for x in last_processed_item..width as usize {
            let next = (x + half_kernel + 1).min(width_bound) * CN;
            let previous = (x as i64 - half_kernel as i64).max(0) as usize * CN;
            let dst_chunk = &mut dst[x * CN..x * CN + CN];
            dst_chunk[0] = rounding_saturating_mul(weight0, weight);
            if CN > 1 {
                dst_chunk[1] = rounding_saturating_mul(weight1, weight);
            }
            if CN > 2 {
                dst_chunk[2] = rounding_saturating_mul(weight2, weight);
            }
            if CN == 4 {
                dst_chunk[3] = rounding_saturating_mul(weight3, weight);
            }
            let next_chunk = &src[next..next + CN];
            let previous_chunk = &src[previous..previous + CN];
            weight0 += next_chunk[0].to_f32().unwrap();
            if CN > 1 {
                weight1 += next_chunk[1].to_f32().unwrap();
            }
            if CN > 2 {
                weight2 += next_chunk[2].to_f32().unwrap();
            }
            if CN == 4 {
                weight3 += next_chunk[3].to_f32().unwrap();
            }
            weight0 -= previous_chunk[0].to_f32().unwrap();
            if CN > 1 {
                weight1 -= previous_chunk[1].to_f32().unwrap();
            }
            if CN > 2 {
                weight2 -= previous_chunk[2].to_f32().unwrap();
            }
            if CN == 4 {
                weight3 -= previous_chunk[3].to_f32().unwrap();
            }
        }
    }
}
/// Column-wise sliding-window box blur. Instead of summing each column
/// independently, a row-sized accumulator buffer carries all per-column
/// running sums at once, keeping the inner loop cache-friendly.
fn box_blur_vertical_pass_impl<T: Primitive, const CN: usize>(
    src: &[T],
    src_stride: usize,
    dst: &mut [T],
    dst_stride: usize,
    width: u32,
    height: u32,
    radius: usize,
) {
    assert!(width > 0, "Width must be sanitized before this method");
    assert!(height > 0, "Height must be sanitized before this method");
    test_radius_size(width as usize, radius);
    let kernel_size = radius * 2 + 1;
    // Multiplicity of the (replicated) top-edge row when seeding the window.
    let edge_count = ((kernel_size / 2) + 1) as f32;
    let half_kernel = kernel_size / 2;
    // Normalization factor: 1 / kernel height.
    let weight = 1f32 / (radius * 2 + 1) as f32;
    let buf_size = width as usize * CN;
    let buf_cap = buf_size;
    let height_bound = height as usize - 1;
    // Instead of summing each column separately we use here transient buffer that
    // averages columns in row manner.
    // So, we make the initial buffer at the top edge
    // and then doing blur by averaging the whole row ( which is in buffer )
    // and subtracting and adding next and previous rows in horizontal manner.
    let mut buffer = vec![0f32; buf_cap];
    // Seed every column's running sum from the top edge (replicated) plus the
    // first `half_kernel` rows, clamped at the bottom boundary.
    for (x, (v, bf)) in src.iter().zip(buffer.iter_mut()).enumerate() {
        let mut w = v.to_f32().unwrap() * edge_count;
        for y in 1..=half_kernel {
            let y_src_shift = y.min(height_bound) * src_stride;
            w += src[y_src_shift + x].to_f32().unwrap();
        }
        *bf = w;
    }
    for (dst, y) in dst.chunks_exact_mut(dst_stride).zip(0..height as usize) {
        // Rows entering and leaving the vertical window, clamped at the edges.
        let next = (y + half_kernel + 1).min(height_bound) * src_stride;
        let previous = (y as i64 - half_kernel as i64).max(0) as usize * src_stride;
        let next_row = &src[next..next + width as usize * CN];
        let previous_row = &src[previous..previous + width as usize * CN];
        for (((src_next, src_previous), buffer), dst) in next_row
            .iter()
            .zip(previous_row.iter())
            .zip(buffer.iter_mut())
            .zip(dst.iter_mut())
        {
            // Emit the normalized sum, then slide the window one row down.
            let mut weight0 = *buffer;
            *dst = rounding_saturating_mul(weight0, weight);
            weight0 += src_next.to_f32().unwrap();
            weight0 -= src_previous.to_f32().unwrap();
            *buffer = weight0;
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{DynamicImage, GrayAlphaImage, GrayImage, RgbImage, RgbaImage};
    use std::time::{SystemTime, UNIX_EPOCH};
    // Minimal linear-congruential PRNG so the test needs no external crates.
    struct Rng {
        state: u64,
    }
    impl Rng {
        fn new(seed: u64) -> Self {
            Self { state: seed }
        }
        fn next_u32(&mut self) -> u32 {
            // LCG step (Knuth's MMIX multiplier); high bits are the output.
            self.state = self.state.wrapping_mul(6364136223846793005).wrapping_add(1);
            (self.state >> 32) as u32
        }
        fn next_u8(&mut self) -> u8 {
            (self.next_u32() % 256) as u8
        }
        fn next_f32_in_range(&mut self, a: f32, b: f32) -> f32 {
            let u = self.next_u32();
            let unit = (u as f32) / (u32::MAX as f32 + 1.0);
            a + (b - a) * unit
        }
    }
    // Property test: blurring a constant-color image of any size/channel
    // count must leave every sample unchanged.
    #[test]
    fn test_box_blur() {
        let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let mut rng = Rng::new((now.as_millis() & 0xffff_ffff_ffff_ffff) as u64);
        for _ in 0..35 {
            let width = rng.next_u8();
            let height = rng.next_u8();
            let sigma = rng.next_f32_in_range(0., 100.);
            let px = rng.next_u8();
            let cn = rng.next_u8();
            if width == 0 || height == 0 || sigma <= 0. {
                continue;
            }
            match cn % 4 {
                0 => {
                    let vc = vec![px; width as usize * height as usize];
                    let image = DynamicImage::from(
                        GrayImage::from_vec(u32::from(width), u32::from(height), vc).unwrap(),
                    );
                    let res = image.fast_blur(sigma);
                    for clr in res.as_bytes() {
                        assert_eq!(*clr, px);
                    }
                }
                1 => {
                    let vc = vec![px; width as usize * height as usize * 2];
                    let image = DynamicImage::from(
                        GrayAlphaImage::from_vec(u32::from(width), u32::from(height), vc).unwrap(),
                    );
                    let res = image.fast_blur(sigma);
                    for clr in res.as_bytes() {
                        assert_eq!(*clr, px);
                    }
                }
                2 => {
                    let vc = vec![px; width as usize * height as usize * 3];
                    let image = DynamicImage::from(
                        RgbImage::from_vec(u32::from(width), u32::from(height), vc).unwrap(),
                    );
                    let res = image.fast_blur(sigma);
                    for clr in res.as_bytes() {
                        assert_eq!(*clr, px);
                    }
                }
                3 => {
                    let vc = vec![px; width as usize * height as usize * 4];
                    let image = DynamicImage::from(
                        RgbaImage::from_vec(u32::from(width), u32::from(height), vc).unwrap(),
                    );
                    let res = image.fast_blur(sigma);
                    for clr in res.as_bytes() {
                        assert_eq!(*clr, px);
                    }
                }
                _ => {}
            }
        }
    }
}

1000
vendor/image/src/imageops/filter_1d.rs vendored Normal file

File diff suppressed because it is too large Load Diff

572
vendor/image/src/imageops/mod.rs vendored Normal file
View File

@@ -0,0 +1,572 @@
//! Image Processing Functions
use std::cmp;
use crate::traits::{Lerp, Pixel, Primitive};
use crate::{GenericImage, GenericImageView, SubImage};
pub use self::sample::FilterType;
pub use self::sample::FilterType::{CatmullRom, Gaussian, Lanczos3, Nearest, Triangle};
/// Affine transformations
pub use self::affine::{
flip_horizontal, flip_horizontal_in, flip_horizontal_in_place, flip_vertical, flip_vertical_in,
flip_vertical_in_place, rotate180, rotate180_in, rotate180_in_place, rotate270, rotate270_in,
rotate90, rotate90_in,
};
pub use self::sample::{
blur, filter3x3, interpolate_bilinear, interpolate_nearest, resize, sample_bilinear,
sample_nearest, thumbnail, unsharpen,
};
/// Color operations
pub use self::colorops::{
brighten, contrast, dither, grayscale, grayscale_alpha, grayscale_with_type,
grayscale_with_type_alpha, huerotate, index_colors, invert, BiLevel, ColorMap,
};
mod affine;
// Public only because of Rust bug:
// https://github.com/rust-lang/rust/issues/18241
pub mod colorops;
mod fast_blur;
mod filter_1d;
mod sample;
pub use fast_blur::fast_blur;
pub(crate) use sample::gaussian_blur_dyn_image;
pub use sample::{blur_advanced, GaussianBlurParameters};
/// Return a mutable view into an image
/// The coordinates set the position of the top left corner of the crop.
///
/// Out-of-range coordinates and dimensions are clamped to the image bounds
/// by `crop_dimms`, so the returned view never exceeds the image.
pub fn crop<I: GenericImageView>(
    image: &mut I,
    x: u32,
    y: u32,
    width: u32,
    height: u32,
) -> SubImage<&mut I> {
    let (x, y, width, height) = crop_dimms(image, x, y, width, height);
    SubImage::new(image, x, y, width, height)
}
/// Return an immutable view into an image
/// The coordinates set the position of the top left corner of the crop.
///
/// Out-of-range coordinates and dimensions are clamped to the image bounds
/// by `crop_dimms`, so the returned view never exceeds the image.
pub fn crop_imm<I: GenericImageView>(
    image: &I,
    x: u32,
    y: u32,
    width: u32,
    height: u32,
) -> SubImage<&I> {
    let (x, y, width, height) = crop_dimms(image, x, y, width, height);
    SubImage::new(image, x, y, width, height)
}
fn crop_dimms<I: GenericImageView>(
image: &I,
x: u32,
y: u32,
width: u32,
height: u32,
) -> (u32, u32, u32, u32) {
let (iwidth, iheight) = image.dimensions();
let x = cmp::min(x, iwidth);
let y = cmp::min(y, iheight);
let height = cmp::min(height, iheight - y);
let width = cmp::min(width, iwidth - x);
(x, y, width, height)
}
/// Calculate the region that can be copied from top to bottom.
///
/// Given image size of bottom and top image, and a point at which we want to place the top image
/// onto the bottom image, how large can we be? Have to wary of the following issues:
/// * Top might be larger than bottom
/// * Overflows in the computation
/// * Coordinates could be completely out of bounds
///
/// The main idea is to make use of inequalities provided by the nature of `saturating_add` and
/// `saturating_sub`. These intrinsically validate that all resulting coordinates will be in bounds
/// for both images.
///
/// We want that all these coordinate accesses are safe:
/// 1. `bottom.get_pixel(x + [0..x_range), y + [0..y_range))`
/// 2. `top.get_pixel([0..x_range), [0..y_range))`
///
/// Proof that the function provides the necessary bounds for width. Note that all unaugmented math
/// operations are to be read in standard arithmetic, not integer arithmetic. Since no direct
/// integer arithmetic occurs in the implementation, this is unambiguous.
///
/// ```text
/// Three short notes/lemmata:
/// - Iff `(a - b) <= 0` then `a.saturating_sub(b) = 0`
/// - Iff `(a - b) >= 0` then `a.saturating_sub(b) = a - b`
/// - If `a <= c` then `a.saturating_sub(b) <= c.saturating_sub(b)`
///
/// 1.1 We show that if `bottom_width <= x`, then `x_range = 0` therefore `x + [0..x_range)` is empty.
///
/// x_range
///  = (top_width.saturating_add(x).min(bottom_width)).saturating_sub(x)
/// <= bottom_width.saturating_sub(x)
///
/// bottom_width <= x
/// <==> bottom_width - x <= 0
/// <==> bottom_width.saturating_sub(x) = 0
///  ==> x_range <= 0
///  ==> x_range = 0
///
/// 1.2 If `x < bottom_width` then `x + x_range < bottom_width`
///
/// x + x_range
/// <= x + bottom_width.saturating_sub(x)
///  = x + (bottom_width - x)
///  = bottom_width
///
/// 2. We show that `x_range <= top_width`
///
/// x_range
///  = (top_width.saturating_add(x).min(bottom_width)).saturating_sub(x)
/// <= top_width.saturating_add(x).saturating_sub(x)
/// <= (top_width + x).saturating_sub(x)
///  = top_width (due to `top_width >= 0` and `x >= 0`)
/// ```
///
/// Proof is the same for height.
#[must_use]
pub fn overlay_bounds(
    (bottom_width, bottom_height): (u32, u32),
    (top_width, top_height): (u32, u32),
    x: u32,
    y: u32,
) -> (u32, u32) {
    // Shared per-axis clamp: how much of the top image, placed at `offset`,
    // fits inside the bottom image along one axis.
    fn axis_range(top: u32, bottom: u32, offset: u32) -> u32 {
        top.saturating_add(offset) // Calculate max coordinate
            .min(bottom) // Restrict to lower width
            .saturating_sub(offset) // Determinate length from start `offset`
    }
    (
        axis_range(top_width, bottom_width, x),
        axis_range(top_height, bottom_height, y),
    )
}
/// Calculate the region that can be copied from top to bottom.
///
/// Given image size of bottom and top image, and a point at which we want to place the top image
/// onto the bottom image, how large can we be? Have to be wary of the following issues:
/// * Top might be larger than bottom
/// * Overflows in the computation
/// * Coordinates could be completely out of bounds
///
/// The returned value is of the form:
///
/// `(origin_bottom_x, origin_bottom_y, origin_top_x, origin_top_y, x_range, y_range)`
///
/// The main idea is to do computations on i64's and then clamp to image dimensions.
/// In particular, we want to ensure that all these coordinate accesses are safe:
/// 1. `bottom.get_pixel(origin_bottom_x + [0..x_range), origin_bottom_y + [0..y_range))`
/// 2. `top.get_pixel(origin_top_x + [0..x_range), origin_top_y + [0..y_range))`
fn overlay_bounds_ext(
    (bottom_width, bottom_height): (u32, u32),
    (top_width, top_height): (u32, u32),
    x: i64,
    y: i64,
) -> (u32, u32, u32, u32, u32, u32) {
    // Return a predictable value if the two images don't overlap at all.
    if x > i64::from(bottom_width)
        || y > i64::from(bottom_height)
        || x.saturating_add(i64::from(top_width)) <= 0
        || y.saturating_add(i64::from(top_height)) <= 0
    {
        return (0, 0, 0, 0, 0, 0);
    }
    // Find the maximum x and y coordinates in terms of the bottom image.
    let max_x = x.saturating_add(i64::from(top_width));
    let max_y = y.saturating_add(i64::from(top_height));
    // Clip the origin and maximum coordinates to the bounds of the bottom image.
    // Casting to a u32 is safe because both 0 and `bottom_{width,height}` fit
    // into 32-bits.
    let max_inbounds_x = max_x.clamp(0, i64::from(bottom_width)) as u32;
    let max_inbounds_y = max_y.clamp(0, i64::from(bottom_height)) as u32;
    let origin_bottom_x = x.clamp(0, i64::from(bottom_width)) as u32;
    let origin_bottom_y = y.clamp(0, i64::from(bottom_height)) as u32;
    // The range is the difference between the maximum inbounds coordinates and
    // the clipped origin. Unchecked subtraction is safe here because both are
    // always positive and `max_inbounds_{x,y}` >= `origin_{x,y}` due to
    // `top_{width,height}` being >= 0.
    let x_range = max_inbounds_x - origin_bottom_x;
    let y_range = max_inbounds_y - origin_bottom_y;
    // If x (or y) is negative, then the origin of the top image is shifted by -x (or -y).
    let origin_top_x = x.saturating_mul(-1).clamp(0, i64::from(top_width)) as u32;
    let origin_top_y = y.saturating_mul(-1).clamp(0, i64::from(top_height)) as u32;
    (
        origin_bottom_x,
        origin_bottom_y,
        origin_top_x,
        origin_top_y,
        x_range,
        y_range,
    )
}
/// Overlay an image at a given coordinate (x, y), alpha-blending each top
/// pixel onto the corresponding bottom pixel.
///
/// Placements that fall partially or fully outside `bottom` are clipped; an
/// entirely out-of-bounds placement copies nothing.
pub fn overlay<I, J>(bottom: &mut I, top: &J, x: i64, y: i64)
where
    I: GenericImage,
    J: GenericImageView<Pixel = I::Pixel>,
{
    // Compute the overlapping rectangle in both images' coordinate systems.
    let (bottom_x, bottom_y, top_x, top_y, range_width, range_height) =
        overlay_bounds_ext(bottom.dimensions(), top.dimensions(), x, y);
    for dy in 0..range_height {
        for dx in 0..range_width {
            let top_pixel = top.get_pixel(top_x + dx, top_y + dy);
            let mut blended = bottom.get_pixel(bottom_x + dx, bottom_y + dy);
            blended.blend(&top_pixel);
            bottom.put_pixel(bottom_x + dx, bottom_y + dy, blended);
        }
    }
}
/// Tile an image by repeating it multiple times
///
/// # Examples
/// ```no_run
/// use image::RgbaImage;
///
/// let mut img = RgbaImage::new(1920, 1080);
/// let tile = image::open("tile.png").unwrap();
///
/// image::imageops::tile(&mut img, &tile);
/// img.save("tiled_wallpaper.png").unwrap();
/// ```
pub fn tile<I, J>(bottom: &mut I, top: &J)
where
    I: GenericImage,
    J: GenericImageView<Pixel = I::Pixel>,
{
    // Step over the bottom image in strides of the tile size, overlaying one
    // copy of `top` at each anchor point.
    let step_x = top.width() as usize;
    let step_y = top.height() as usize;
    for anchor_x in (0..bottom.width()).step_by(step_x) {
        for anchor_y in (0..bottom.height()).step_by(step_y) {
            overlay(bottom, top, i64::from(anchor_x), i64::from(anchor_y));
        }
    }
}
/// Fill the image with a linear vertical gradient
///
/// This function assumes a linear color space.
///
/// # Examples
/// ```no_run
/// use image::{Rgba, RgbaImage, Pixel};
///
/// let mut img = RgbaImage::new(100, 100);
/// let start = Rgba::from_slice(&[0, 128, 0, 0]);
/// let end = Rgba::from_slice(&[255, 255, 255, 255]);
///
/// image::imageops::vertical_gradient(&mut img, start, end);
/// img.save("vertical_gradient.png").unwrap();
/// ```
pub fn vertical_gradient<S, P, I>(img: &mut I, start: &P, stop: &P)
where
    I: GenericImage<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + Lerp + 'static,
{
    for y in 0..img.height() {
        // Interpolation factor is y / (height - 1) so the first row is exactly
        // `start` and the last row exactly `stop`.
        // NOTE(review): for a 1-pixel-tall image `height - 1` is 0 and the
        // ratio becomes 0/0 — confirm callers never rely on that case.
        let pixel = start.map2(stop, |a, b| {
            let y = <S::Ratio as num_traits::NumCast>::from(y).unwrap();
            let height = <S::Ratio as num_traits::NumCast>::from(img.height() - 1).unwrap();
            S::lerp(a, b, y / height)
        });
        for x in 0..img.width() {
            img.put_pixel(x, y, pixel);
        }
    }
}
/// Fill the image with a linear horizontal gradient
///
/// This function assumes a linear color space.
///
/// # Examples
/// ```no_run
/// use image::{Rgba, RgbaImage, Pixel};
///
/// let mut img = RgbaImage::new(100, 100);
/// let start = Rgba::from_slice(&[0, 128, 0, 0]);
/// let end = Rgba::from_slice(&[255, 255, 255, 255]);
///
/// image::imageops::horizontal_gradient(&mut img, start, end);
/// img.save("horizontal_gradient.png").unwrap();
/// ```
pub fn horizontal_gradient<S, P, I>(img: &mut I, start: &P, stop: &P)
where
    I: GenericImage<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + Lerp + 'static,
{
    for x in 0..img.width() {
        // Interpolation factor is x / (width - 1) so the first column is
        // exactly `start` and the last column exactly `stop`.
        // NOTE(review): for a 1-pixel-wide image `width - 1` is 0 and the
        // ratio becomes 0/0 — confirm callers never rely on that case.
        let pixel = start.map2(stop, |a, b| {
            let x = <S::Ratio as num_traits::NumCast>::from(x).unwrap();
            let width = <S::Ratio as num_traits::NumCast>::from(img.width() - 1).unwrap();
            S::lerp(a, b, x / width)
        });
        for y in 0..img.height() {
            img.put_pixel(x, y, pixel);
        }
    }
}
/// Replace the contents of an image at a given coordinate (x, y).
///
/// Unlike [`overlay`], pixels are copied verbatim with no alpha blending.
/// Out-of-bounds portions of `top` are clipped away.
pub fn replace<I, J>(bottom: &mut I, top: &J, x: i64, y: i64)
where
    I: GenericImage,
    J: GenericImageView<Pixel = I::Pixel>,
{
    // Compute the overlapping rectangle in both images' coordinate systems.
    let (bottom_x, bottom_y, top_x, top_y, range_width, range_height) =
        overlay_bounds_ext(bottom.dimensions(), top.dimensions(), x, y);
    for dy in 0..range_height {
        for dx in 0..range_width {
            let pixel = top.get_pixel(top_x + dx, top_y + dy);
            bottom.put_pixel(bottom_x + dx, bottom_y + dy, pixel);
        }
    }
}
// Unit tests for the free-standing operations in this module: overlay bounds
// computation, overlay/replace compositing, gradients, and the blur helpers.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::color::Rgb;
    use crate::GrayAlphaImage;
    use crate::GrayImage;
    use crate::ImageBuffer;
    use crate::RgbImage;
    use crate::RgbaImage;
    #[test]
    fn test_overlay_bounds_ext() {
        // Exact overlap.
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), 0, 0),
            (0, 0, 0, 0, 10, 10)
        );
        // Shifted right by one: the copy region shrinks accordingly.
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), 1, 0),
            (1, 0, 0, 0, 9, 10)
        );
        // Placed entirely past the bottom edge: empty region.
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), 0, 11),
            (0, 0, 0, 0, 0, 0)
        );
        // Negative offset shifts the origin inside the top image instead.
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), -1, 0),
            (0, 0, 1, 0, 9, 10)
        );
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), -10, 0),
            (0, 0, 0, 0, 0, 0)
        );
        // Extreme offsets must not overflow the i64 arithmetic.
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), 1i64 << 50, 0),
            (0, 0, 0, 0, 0, 0)
        );
        assert_eq!(
            overlay_bounds_ext((10, 10), (10, 10), -(1i64 << 50), 0),
            (0, 0, 0, 0, 0, 0)
        );
        assert_eq!(
            overlay_bounds_ext((10, 10), (u32::MAX, 10), 10 - i64::from(u32::MAX), 0),
            (0, 0, u32::MAX - 10, 0, 10, 10)
        );
    }
    #[test]
    /// Test that images written into other images works
    fn test_image_in_image() {
        let mut target = ImageBuffer::new(32, 32);
        let source = ImageBuffer::from_pixel(16, 16, Rgb([255u8, 0, 0]));
        overlay(&mut target, &source, 0, 0);
        assert!(*target.get_pixel(0, 0) == Rgb([255u8, 0, 0]));
        assert!(*target.get_pixel(15, 0) == Rgb([255u8, 0, 0]));
        assert!(*target.get_pixel(16, 0) == Rgb([0u8, 0, 0]));
        assert!(*target.get_pixel(0, 15) == Rgb([255u8, 0, 0]));
        assert!(*target.get_pixel(0, 16) == Rgb([0u8, 0, 0]));
    }
    #[test]
    /// Test that images written outside of a frame doesn't blow up
    fn test_image_in_image_outside_of_bounds() {
        let mut target = ImageBuffer::new(32, 32);
        let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
        overlay(&mut target, &source, 1, 1);
        assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
        assert!(*target.get_pixel(1, 1) == Rgb([255u8, 0, 0]));
        assert!(*target.get_pixel(31, 31) == Rgb([255u8, 0, 0]));
    }
    #[test]
    /// Test that images written to coordinates out of the frame doesn't blow up
    /// (issue came up in #848)
    fn test_image_outside_image_no_wrap_around() {
        let mut target = ImageBuffer::new(32, 32);
        let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
        overlay(&mut target, &source, 33, 33);
        assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
        assert!(*target.get_pixel(1, 1) == Rgb([0, 0, 0]));
        assert!(*target.get_pixel(31, 31) == Rgb([0, 0, 0]));
    }
    #[test]
    /// Test that images written to coordinates with overflow works
    fn test_image_coordinate_overflow() {
        let mut target = ImageBuffer::new(16, 16);
        let source = ImageBuffer::from_pixel(32, 32, Rgb([255u8, 0, 0]));
        // Overflows to 'sane' coordinates but top is larger than bot.
        overlay(
            &mut target,
            &source,
            i64::from(u32::MAX - 31),
            i64::from(u32::MAX - 31),
        );
        assert!(*target.get_pixel(0, 0) == Rgb([0, 0, 0]));
        assert!(*target.get_pixel(1, 1) == Rgb([0, 0, 0]));
        assert!(*target.get_pixel(15, 15) == Rgb([0, 0, 0]));
    }
    use super::{horizontal_gradient, vertical_gradient};
    #[test]
    /// Test that horizontal gradients are correctly generated
    fn test_image_horizontal_gradient_limits() {
        let mut img = ImageBuffer::new(100, 1);
        let start = Rgb([0u8, 128, 0]);
        let end = Rgb([255u8, 255, 255]);
        horizontal_gradient(&mut img, &start, &end);
        // First and last columns must hit the endpoints exactly.
        assert_eq!(img.get_pixel(0, 0), &start);
        assert_eq!(img.get_pixel(img.width() - 1, 0), &end);
    }
    #[test]
    /// Test that vertical gradients are correctly generated
    fn test_image_vertical_gradient_limits() {
        let mut img = ImageBuffer::new(1, 100);
        let start = Rgb([0u8, 128, 0]);
        let end = Rgb([255u8, 255, 255]);
        vertical_gradient(&mut img, &start, &end);
        // First and last rows must hit the endpoints exactly.
        assert_eq!(img.get_pixel(0, 0), &start);
        assert_eq!(img.get_pixel(0, img.height() - 1), &end);
    }
    #[test]
    /// Test blur doesn't panic when passed 0.0
    fn test_blur_zero() {
        let image = RgbaImage::new(50, 50);
        let _ = blur(&image, 0.);
    }
    #[test]
    /// Test fast blur doesn't panic when passed 0.0
    fn test_fast_blur_zero() {
        let image = RgbaImage::new(50, 50);
        let _ = fast_blur(&image, 0.0);
    }
    #[test]
    /// Test fast blur doesn't panic when passed negative numbers
    fn test_fast_blur_negative() {
        let image = RgbaImage::new(50, 50);
        let _ = fast_blur(&image, -1.0);
    }
    #[test]
    /// Test fast blur doesn't panic when sigma produces boxes larger than the image
    fn test_fast_large_sigma() {
        let image = RgbaImage::new(1, 1);
        let _ = fast_blur(&image, 50.0);
    }
    #[test]
    /// Test blur doesn't panic when passed an empty image (any direction)
    fn test_fast_blur_empty() {
        let image = RgbaImage::new(0, 0);
        let _ = fast_blur(&image, 1.0);
        let image = RgbaImage::new(20, 0);
        let _ = fast_blur(&image, 1.0);
        let image = RgbaImage::new(0, 20);
        let _ = fast_blur(&image, 1.0);
    }
    #[test]
    /// Test fast blur works with 3 channels
    fn test_fast_blur_3_channels() {
        let image = RgbImage::new(50, 50);
        let _ = fast_blur(&image, 1.0);
    }
    #[test]
    /// Test fast blur works with 2 channels
    fn test_fast_blur_2_channels() {
        let image = GrayAlphaImage::new(50, 50);
        let _ = fast_blur(&image, 1.0);
    }
    #[test]
    /// Test fast blur works with 1 channel
    fn test_fast_blur_1_channels() {
        let image = GrayImage::new(50, 50);
        let _ = fast_blur(&image, 1.0);
    }
    #[test]
    #[cfg(feature = "tiff")]
    fn fast_blur_approximates_gaussian_blur_well() {
        let path = concat!(
            env!("CARGO_MANIFEST_DIR"),
            "/tests/images/tiff/testsuite/rgb-3c-16b.tiff"
        );
        let image = crate::open(path).unwrap();
        let image_blurred_gauss = image
            .blur_advanced(GaussianBlurParameters::new_from_sigma(50.0))
            .to_rgb8();
        let image_blurred_gauss_samples = image_blurred_gauss.as_flat_samples();
        let image_blurred_gauss_bytes = image_blurred_gauss_samples.as_slice();
        let image_blurred_fast = image.fast_blur(50.0).to_rgb8();
        let image_blurred_fast_samples = image_blurred_fast.as_flat_samples();
        let image_blurred_fast_bytes = image_blurred_fast_samples.as_slice();
        // Mean relative per-byte deviation between the exact Gaussian blur and
        // the box-filter approximation must stay under 5%.
        let error = image_blurred_gauss_bytes
            .iter()
            .zip(image_blurred_fast_bytes.iter())
            .map(|(a, b)| (f32::from(*a) - f32::from(*b)) / f32::from(*a))
            .sum::<f32>()
            / (image_blurred_gauss_bytes.len() as f32);
        assert!(error < 0.05);
    }
}

1868
vendor/image/src/imageops/sample.rs vendored Normal file

File diff suppressed because it is too large Load Diff

9
vendor/image/src/images.rs vendored Normal file
View File

@@ -0,0 +1,9 @@
//! An internal module for grouping all forms of image buffers.
pub(crate) mod buffer;
// Parallel (rayon-based) iterators over buffers; only compiled with the
// "rayon" feature enabled.
#[cfg(feature = "rayon")]
pub(crate) mod buffer_par;
pub(crate) mod dynimage;
pub(crate) mod generic_image;
// Public as we re-export the whole module including its documentation.
pub mod flat;
pub(crate) mod sub_image;

2214
vendor/image/src/images/buffer.rs vendored Normal file

File diff suppressed because it is too large Load Diff

496
vendor/image/src/images/buffer_par.rs vendored Normal file
View File

@@ -0,0 +1,496 @@
use rayon::iter::plumbing::*;
use rayon::iter::{IndexedParallelIterator, ParallelIterator};
use rayon::slice::{ChunksExact, ChunksExactMut, ParallelSlice, ParallelSliceMut};
use std::fmt;
use std::ops::{Deref, DerefMut};
use crate::traits::Pixel;
use crate::ImageBuffer;
/// Parallel iterator over pixel refs.
///
/// Created by [`ImageBuffer::par_pixels`]; yields one `&P` per pixel.
#[derive(Clone)]
pub struct PixelsPar<'a, P>
where
    P: Pixel + Sync + 'a,
    P::Subpixel: Sync + 'a,
{
    // Exact chunks of `CHANNEL_COUNT` subpixels; each chunk is one pixel.
    chunks: ChunksExact<'a, P::Subpixel>,
}
impl<'a, P> ParallelIterator for PixelsPar<'a, P>
where
    P: Pixel + Sync + 'a,
    P::Subpixel: Sync + 'a,
{
    type Item = &'a P;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        // Reinterpret each exact chunk of subpixels as one pixel reference and
        // delegate the actual parallel work to the chunk iterator.
        let pixel_iter = self.chunks.map(|channels| <P as Pixel>::from_slice(channels));
        pixel_iter.drive_unindexed(consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}
impl<'a, P> IndexedParallelIterator for PixelsPar<'a, P>
where
    P: Pixel + Sync + 'a,
    P::Subpixel: Sync + 'a,
{
    fn len(&self) -> usize {
        // One pixel per exact chunk.
        self.chunks.len()
    }

    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        let pixel_iter = self.chunks.map(|channels| <P as Pixel>::from_slice(channels));
        pixel_iter.drive(consumer)
    }

    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
        let pixel_iter = self.chunks.map(|channels| <P as Pixel>::from_slice(channels));
        pixel_iter.with_producer(callback)
    }
}
// Manual impl so that `P` itself need not be `Debug`; only the subpixel
// storage (`P::Subpixel: fmt::Debug`) is required.
impl<P> fmt::Debug for PixelsPar<'_, P>
where
    P: Pixel + Sync,
    P::Subpixel: Sync + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("PixelsPar")
            .field("chunks", &self.chunks)
            .finish()
    }
}
/// Parallel iterator over mutable pixel refs.
///
/// Created by [`ImageBuffer::par_pixels_mut`]; yields one `&mut P` per pixel.
/// Not `Clone`, since it holds exclusive access to the buffer.
pub struct PixelsMutPar<'a, P>
where
    P: Pixel + Send + Sync + 'a,
    P::Subpixel: Send + Sync + 'a,
{
    // Exact mutable chunks of `CHANNEL_COUNT` subpixels; each chunk is one pixel.
    chunks: ChunksExactMut<'a, P::Subpixel>,
}
impl<'a, P> ParallelIterator for PixelsMutPar<'a, P>
where
    P: Pixel + Send + Sync + 'a,
    P::Subpixel: Send + Sync + 'a,
{
    type Item = &'a mut P;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        // Reinterpret each exact chunk of subpixels as one mutable pixel
        // reference and delegate the parallel work to the chunk iterator.
        let pixel_iter = self
            .chunks
            .map(|channels| <P as Pixel>::from_slice_mut(channels));
        pixel_iter.drive_unindexed(consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}
impl<'a, P> IndexedParallelIterator for PixelsMutPar<'a, P>
where
    P: Pixel + Send + Sync + 'a,
    P::Subpixel: Send + Sync + 'a,
{
    fn len(&self) -> usize {
        // One pixel per exact chunk.
        self.chunks.len()
    }

    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        let pixel_iter = self
            .chunks
            .map(|channels| <P as Pixel>::from_slice_mut(channels));
        pixel_iter.drive(consumer)
    }

    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
        let pixel_iter = self
            .chunks
            .map(|channels| <P as Pixel>::from_slice_mut(channels));
        pixel_iter.with_producer(callback)
    }
}
// Manual impl so that `P` itself need not be `Debug`; only the subpixel
// storage (`P::Subpixel: fmt::Debug`) is required.
impl<P> fmt::Debug for PixelsMutPar<'_, P>
where
    P: Pixel + Send + Sync,
    P::Subpixel: Send + Sync + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("PixelsMutPar")
            .field("chunks", &self.chunks)
            .finish()
    }
}
/// Parallel iterator over pixel refs and their coordinates.
///
/// Created by [`ImageBuffer::par_enumerate_pixels`]; yields `(x, y, &P)`
/// tuples. `width` is the row length used to turn the linear pixel index
/// into `(x, y)` coordinates.
#[derive(Clone)]
pub struct EnumeratePixelsPar<'a, P>
where
    P: Pixel + Sync + 'a,
    P::Subpixel: Sync + 'a,
{
    pixels: PixelsPar<'a, P>,
    width: u32,
}
impl<'a, P> ParallelIterator for EnumeratePixelsPar<'a, P>
where
    P: Pixel + Sync + 'a,
    P::Subpixel: Sync + 'a,
{
    type Item = (u32, u32, &'a P);

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        // Translate the linear pixel index into (x, y) using the row width.
        let row = self.width as usize;
        self.pixels
            .enumerate()
            .map(move |(i, p)| ((i % row) as u32, (i / row) as u32, p))
            .drive_unindexed(consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}
impl<'a, P> IndexedParallelIterator for EnumeratePixelsPar<'a, P>
where
    P: Pixel + Sync + 'a,
    P::Subpixel: Sync + 'a,
{
    fn len(&self) -> usize {
        self.pixels.len()
    }

    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        // Translate the linear pixel index into (x, y) using the row width.
        let row = self.width as usize;
        self.pixels
            .enumerate()
            .map(move |(i, p)| ((i % row) as u32, (i / row) as u32, p))
            .drive(consumer)
    }

    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
        let row = self.width as usize;
        self.pixels
            .enumerate()
            .map(move |(i, p)| ((i % row) as u32, (i / row) as u32, p))
            .with_producer(callback)
    }
}
// Manual impl so that `P` itself need not be `Debug`; only the subpixel
// storage (`P::Subpixel: fmt::Debug`) is required.
impl<P> fmt::Debug for EnumeratePixelsPar<'_, P>
where
    P: Pixel + Sync,
    P::Subpixel: Sync + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumeratePixelsPar")
            .field("pixels", &self.pixels)
            .field("width", &self.width)
            .finish()
    }
}
/// Parallel iterator over mutable pixel refs and their coordinates.
///
/// Created by [`ImageBuffer::par_enumerate_pixels_mut`]; yields
/// `(x, y, &mut P)` tuples. `width` is the row length used to turn the
/// linear pixel index into `(x, y)` coordinates.
pub struct EnumeratePixelsMutPar<'a, P>
where
    P: Pixel + Send + Sync + 'a,
    P::Subpixel: Send + Sync + 'a,
{
    pixels: PixelsMutPar<'a, P>,
    width: u32,
}
impl<'a, P> ParallelIterator for EnumeratePixelsMutPar<'a, P>
where
    P: Pixel + Send + Sync + 'a,
    P::Subpixel: Send + Sync + 'a,
{
    type Item = (u32, u32, &'a mut P);

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
    where
        C: UnindexedConsumer<Self::Item>,
    {
        // Translate the linear pixel index into (x, y) using the row width.
        let row = self.width as usize;
        self.pixels
            .enumerate()
            .map(move |(i, p)| ((i % row) as u32, (i / row) as u32, p))
            .drive_unindexed(consumer)
    }

    fn opt_len(&self) -> Option<usize> {
        Some(self.len())
    }
}
impl<'a, P> IndexedParallelIterator for EnumeratePixelsMutPar<'a, P>
where
    P: Pixel + Send + Sync + 'a,
    P::Subpixel: Send + Sync + 'a,
{
    fn len(&self) -> usize {
        self.pixels.len()
    }

    fn drive<C: Consumer<Self::Item>>(self, consumer: C) -> C::Result {
        // Translate the linear pixel index into (x, y) using the row width.
        let row = self.width as usize;
        self.pixels
            .enumerate()
            .map(move |(i, p)| ((i % row) as u32, (i / row) as u32, p))
            .drive(consumer)
    }

    fn with_producer<CB: ProducerCallback<Self::Item>>(self, callback: CB) -> CB::Output {
        let row = self.width as usize;
        self.pixels
            .enumerate()
            .map(move |(i, p)| ((i % row) as u32, (i / row) as u32, p))
            .with_producer(callback)
    }
}
// Manual impl so that `P` itself need not be `Debug`; only the subpixel
// storage (`P::Subpixel: fmt::Debug`) is required.
impl<P> fmt::Debug for EnumeratePixelsMutPar<'_, P>
where
    P: Pixel + Send + Sync,
    P::Subpixel: Send + Sync + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EnumeratePixelsMutPar")
            .field("pixels", &self.pixels)
            .field("width", &self.width)
            .finish()
    }
}
impl<P, Container> ImageBuffer<P, Container>
where
    P: Pixel + Sync,
    P::Subpixel: Sync,
    Container: Deref<Target = [P::Subpixel]>,
{
    /// Returns a parallel iterator over the pixels of this image, usable with `rayon`.
    /// See [`pixels`] for more information.
    ///
    /// [`pixels`]: #method.pixels
    pub fn par_pixels(&self) -> PixelsPar<'_, P> {
        // Split the flat subpixel buffer into exact, pixel-sized chunks.
        let stride = <P as Pixel>::CHANNEL_COUNT as usize;
        PixelsPar {
            chunks: self.inner_pixels().par_chunks_exact(stride),
        }
    }

    /// Returns a parallel iterator over the pixels of this image and their coordinates, usable with `rayon`.
    /// See [`enumerate_pixels`] for more information.
    ///
    /// [`enumerate_pixels`]: #method.enumerate_pixels
    pub fn par_enumerate_pixels(&self) -> EnumeratePixelsPar<'_, P> {
        let width = self.width();
        EnumeratePixelsPar {
            pixels: self.par_pixels(),
            width,
        }
    }
}
impl<P, Container> ImageBuffer<P, Container>
where
    P: Pixel + Send + Sync,
    P::Subpixel: Send + Sync,
    Container: Deref<Target = [P::Subpixel]> + DerefMut,
{
    /// Returns a parallel iterator over the mutable pixels of this image, usable with `rayon`.
    /// See [`pixels_mut`] for more information.
    ///
    /// [`pixels_mut`]: #method.pixels_mut
    pub fn par_pixels_mut(&mut self) -> PixelsMutPar<'_, P> {
        // Split the flat subpixel buffer into exact, pixel-sized mutable chunks.
        let stride = <P as Pixel>::CHANNEL_COUNT as usize;
        PixelsMutPar {
            chunks: self.inner_pixels_mut().par_chunks_exact_mut(stride),
        }
    }

    /// Returns a parallel iterator over the mutable pixels of this image and their coordinates, usable with `rayon`.
    /// See [`enumerate_pixels_mut`] for more information.
    ///
    /// [`enumerate_pixels_mut`]: #method.enumerate_pixels_mut
    pub fn par_enumerate_pixels_mut(&mut self) -> EnumeratePixelsMutPar<'_, P> {
        // Read the width before taking the mutable borrow of the pixel data.
        let width = self.width();
        EnumeratePixelsMutPar {
            pixels: self.par_pixels_mut(),
            width,
        }
    }
}
impl<P> ImageBuffer<P, Vec<P::Subpixel>>
where
    P: Pixel + Send + Sync,
    P::Subpixel: Send + Sync,
{
    /// Constructs a new `ImageBuffer` by repeated application of the supplied function,
    /// utilizing multi-threading via `rayon`.
    ///
    /// The arguments to the function are the pixel's x and y coordinates.
    ///
    /// # Panics
    ///
    /// Panics when the resulting image is larger than the maximum size of a vector.
    pub fn from_par_fn<F>(width: u32, height: u32, f: F) -> ImageBuffer<P, Vec<P::Subpixel>>
    where
        F: Fn(u32, u32) -> P + Send + Sync,
    {
        // Allocate the buffer first, then fill every pixel in parallel from
        // the generator function.
        let mut image = ImageBuffer::new(width, height);
        image
            .par_enumerate_pixels_mut()
            .for_each(|(x, y, pixel)| *pixel = f(x, y));
        image
    }
}
// Tests checking that the parallel iterators report correct lengths for
// degenerate sizes and yield exactly the same items as the sequential ones.
#[cfg(test)]
mod test {
    use crate::{Rgb, RgbImage};
    use rayon::iter::{IndexedParallelIterator, ParallelIterator};
    // All four parallel iterators over a width x height image must report
    // exactly `len` items.
    fn test_width_height(width: u32, height: u32, len: usize) {
        let mut image = RgbImage::new(width, height);
        assert_eq!(image.par_enumerate_pixels_mut().len(), len);
        assert_eq!(image.par_enumerate_pixels().len(), len);
        assert_eq!(image.par_pixels_mut().len(), len);
        assert_eq!(image.par_pixels().len(), len);
    }
    #[test]
    fn zero_width_zero_height() {
        test_width_height(0, 0, 0);
    }
    #[test]
    fn zero_width_nonzero_height() {
        test_width_height(0, 2, 0);
    }
    #[test]
    fn nonzero_width_zero_height() {
        test_width_height(2, 0, 0);
    }
    #[test]
    fn iter_parity() {
        // Sequential and parallel iteration must produce identical sequences.
        let mut image1 = RgbImage::from_fn(17, 29, |x, y| {
            Rgb(std::array::from_fn(|i| {
                ((x + y * 98 + i as u32 * 27) % 255) as u8
            }))
        });
        let mut image2 = image1.clone();
        assert_eq!(
            image1.enumerate_pixels_mut().collect::<Vec<_>>(),
            image2.par_enumerate_pixels_mut().collect::<Vec<_>>()
        );
        assert_eq!(
            image1.enumerate_pixels().collect::<Vec<_>>(),
            image2.par_enumerate_pixels().collect::<Vec<_>>()
        );
        assert_eq!(
            image1.pixels_mut().collect::<Vec<_>>(),
            image2.par_pixels_mut().collect::<Vec<_>>()
        );
        assert_eq!(
            image1.pixels().collect::<Vec<_>>(),
            image2.par_pixels().collect::<Vec<_>>()
        );
    }
}
// Benchmarks comparing sequential and parallel buffer construction; requires
// the unstable `test::Bencher` via the "benchmarks" feature.
#[cfg(test)]
#[cfg(feature = "benchmarks")]
mod benchmarks {
    use crate::{Rgb, RgbImage};
    // Side length of the square benchmark image.
    const S: u32 = 1024;
    #[bench]
    fn creation(b: &mut test::Bencher) {
        let mut bytes = 0;
        b.iter(|| {
            let img = RgbImage::from_fn(S, S, |_, _| test::black_box(pixel_func()));
            bytes += img.as_raw().len() as u64;
        });
        b.bytes = bytes;
    }
    #[bench]
    fn creation_par(b: &mut test::Bencher) {
        let mut bytes = 0;
        b.iter(|| {
            let img = RgbImage::from_par_fn(S, S, |_, _| test::black_box(pixel_func()));
            bytes += img.as_raw().len() as u64;
        });
        b.bytes = bytes;
    }
    // Produce a pseudo-random pixel so the generator cannot be const-folded.
    fn pixel_func() -> Rgb<u8> {
        use std::collections::hash_map::RandomState;
        use std::hash::{BuildHasher, Hasher};
        Rgb(std::array::from_fn(|_| {
            RandomState::new().build_hasher().finish() as u8
        }))
    }
}

2246
vendor/image/src/images/dynimage.rs vendored Normal file

File diff suppressed because it is too large Load Diff

1716
vendor/image/src/images/flat.rs vendored Normal file

File diff suppressed because it is too large Load Diff

608
vendor/image/src/images/generic_image.rs vendored Normal file
View File

@@ -0,0 +1,608 @@
use crate::error::{ImageError, ImageResult, ParameterError, ParameterErrorKind};
use crate::math::Rect;
use crate::traits::Pixel;
use crate::{ImageBuffer, SubImage};
/// Trait to inspect an image.
///
/// ```
/// use image::{GenericImageView, Rgb, RgbImage};
///
/// let buffer = RgbImage::new(10, 10);
/// let image: &dyn GenericImageView<Pixel = Rgb<u8>> = &buffer;
/// ```
pub trait GenericImageView {
    /// The type of pixel.
    type Pixel: Pixel;
    /// The width and height of this image.
    fn dimensions(&self) -> (u32, u32);
    /// The width of this image.
    fn width(&self) -> u32 {
        let (w, _) = self.dimensions();
        w
    }
    /// The height of this image.
    fn height(&self) -> u32 {
        let (_, h) = self.dimensions();
        h
    }
    /// Returns true if this x, y coordinate is contained inside the image.
    fn in_bounds(&self, x: u32, y: u32) -> bool {
        let (width, height) = self.dimensions();
        x < width && y < height
    }
    /// Returns the pixel located at (x, y). Indexed from top left.
    ///
    /// # Panics
    ///
    /// Panics if `(x, y)` is out of bounds.
    fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel;
    /// Returns the pixel located at (x, y). Indexed from top left.
    ///
    /// This function can be implemented in a way that ignores bounds checking.
    /// # Safety
    ///
    /// The coordinates must be [`in_bounds`] of the image.
    ///
    /// [`in_bounds`]: #method.in_bounds
    unsafe fn unsafe_get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
        // Default: fall back to the bounds-checked accessor.
        self.get_pixel(x, y)
    }
    /// Returns an Iterator over the pixels of this image.
    /// The iterator yields the coordinates of each pixel
    /// along with their value
    fn pixels(&self) -> Pixels<'_, Self>
    where
        Self: Sized,
    {
        let (width, height) = self.dimensions();
        Pixels {
            image: self,
            x: 0,
            y: 0,
            width,
            height,
        }
    }
    /// Returns a subimage that is an immutable view into this image.
    /// You can use [`GenericImage::sub_image`] if you need a mutable view instead.
    /// The coordinates set the position of the top left corner of the view.
    ///
    /// # Panics
    ///
    /// Panics if the dimensions provided fall out of bounds.
    fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&Self>
    where
        Self: Sized,
    {
        // The additions are done in u64 so they cannot overflow for any u32 inputs.
        assert!(u64::from(x) + u64::from(width) <= u64::from(self.width()));
        assert!(u64::from(y) + u64::from(height) <= u64::from(self.height()));
        SubImage::new(self, x, y, width, height)
    }
    /// Returns a subimage that is an immutable view into this image so long as
    /// the provided coordinates and dimensions are within the bounds of this Image.
    ///
    /// This is the non-panicking counterpart of [`GenericImageView::view`];
    /// out-of-bounds requests yield a `DimensionMismatch` parameter error.
    fn try_view(
        &self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> Result<SubImage<&Self>, ImageError>
    where
        Self: Sized,
    {
        if u64::from(x) + u64::from(width) > u64::from(self.width())
            || u64::from(y) + u64::from(height) > u64::from(self.height())
        {
            Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::DimensionMismatch,
            )))
        } else {
            Ok(SubImage::new(self, x, y, width, height))
        }
    }
    /// Create an empty [`ImageBuffer`] with the same pixel type as this image.
    ///
    /// This should ensure metadata such as the color space are transferred without copying any of
    /// the pixel data. The idea is to prepare a buffer ready to be filled with a filtered or
    /// portion of the channel data from the current image without performing the work of copying
    /// the data into that buffer twice.
    ///
    /// The default implementation defers to [`GenericImageView::buffer_with_dimensions`].
    fn buffer_like(&self) -> ImageBuffer<Self::Pixel, Vec<<Self::Pixel as Pixel>::Subpixel>> {
        let (w, h) = self.dimensions();
        self.buffer_with_dimensions(w, h)
    }
    /// Create an empty [`ImageBuffer`] with different dimensions.
    ///
    /// See [`GenericImageView::buffer_like`].
    ///
    /// Uses for this are for instances preparing a buffer for only a portion of the image, or
    /// extracting the metadata to prepare a buffer of a different pixel type.
    fn buffer_with_dimensions(
        &self,
        width: u32,
        height: u32,
    ) -> ImageBuffer<Self::Pixel, Vec<<Self::Pixel as Pixel>::Subpixel>> {
        ImageBuffer::new(width, height)
    }
}
/// Immutable pixel iterator
///
/// Walks the image row by row, tracking the cursor position `(x, y)` within
/// the `width` x `height` extent captured at construction.
#[derive(Debug)]
pub struct Pixels<'a, I: ?Sized + 'a> {
    image: &'a I,
    // Current cursor position; `x` advances first, then wraps to the next row.
    x: u32,
    y: u32,
    width: u32,
    height: u32,
}
impl<I: GenericImageView> Iterator for Pixels<'_, I> {
    type Item = (u32, u32, I::Pixel);
    fn next(&mut self) -> Option<(u32, u32, I::Pixel)> {
        // Wrap to the start of the next row once the cursor passes the last
        // column. For a zero-width extent this advances one row per call
        // until `y` reaches `height`, so iteration still terminates.
        if self.x >= self.width {
            self.x = 0;
            self.y += 1;
        }
        if self.y >= self.height {
            None
        } else {
            let pixel = self.image.get_pixel(self.x, self.y);
            let p = (self.x, self.y, pixel);
            self.x += 1;
            Some(p)
        }
    }
}
// Manual impl: a derive would require `I: Clone`, but only the reference and
// the plain-integer cursor fields need to be copied.
impl<I: ?Sized> Clone for Pixels<'_, I> {
    fn clone(&self) -> Self {
        Pixels {
            image: self.image,
            x: self.x,
            y: self.y,
            width: self.width,
            height: self.height,
        }
    }
}
/// A trait for manipulating images.
pub trait GenericImage: GenericImageView {
/// Gets a reference to the mutable pixel at location `(x, y)`. Indexed from top left.
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
///
/// Panics for dynamic images (this method is deprecated and will be removed).
///
/// ## Known issues
///
/// This requires the buffer to contain a unique set of continuous channels in the exact order
/// and byte representation that the pixel type requires. This is somewhat restrictive.
///
/// TODO: Maybe use some kind of entry API? this would allow pixel type conversion on the fly
/// while still doing only one array lookup:
///
/// ```ignore
/// let px = image.pixel_entry_at(x,y);
/// px.set_from_rgba(rgba)
/// ```
#[deprecated(since = "0.24.0", note = "Use `get_pixel` and `put_pixel` instead.")]
fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel;
/// Put a pixel at location (x, y). Indexed from top left.
///
/// # Panics
///
/// Panics if `(x, y)` is out of bounds.
fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
/// Puts a pixel at location (x, y). Indexed from top left.
///
/// This function can be implemented in a way that ignores bounds checking.
/// # Safety
///
/// The coordinates must be [`in_bounds`] of the image.
///
/// [`in_bounds`]: traits.GenericImageView.html#method.in_bounds
unsafe fn unsafe_put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.put_pixel(x, y, pixel);
}
/// Put a pixel at location (x, y), taking into account alpha channels
#[deprecated(
since = "0.24.0",
note = "Use iterator `pixels_mut` to blend the pixels directly"
)]
fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel);
/// Copies all of the pixels from another image into this image.
///
/// The other image is copied with the top-left corner of the
/// other image placed at (x, y).
///
/// In order to copy only a piece of the other image, use [`GenericImageView::view`].
///
/// You can use [`FlatSamples`] to source pixels from an arbitrary regular raster of channel
/// values, for example from a foreign interface or a fixed image.
///
/// # Returns
/// Returns an error if the image is too large to be copied at the given position
///
/// [`GenericImageView::view`]: trait.GenericImageView.html#method.view
/// [`FlatSamples`]: flat/struct.FlatSamples.html
fn copy_from<O>(&mut self, other: &O, x: u32, y: u32) -> ImageResult<()>
where
    O: GenericImageView<Pixel = Self::Pixel>,
{
    // Do bounds checking here so we can use the non-bounds-checking
    // functions to copy pixels.
    //
    // The sums are computed in u64 so `other.width() + x` cannot overflow
    // u32 (which would wrap in release builds and wrongly pass the check,
    // or panic in debug builds). An oversized placement now reliably
    // returns the documented error instead. This mirrors the u64 widening
    // already used by `sub_image` below.
    if u64::from(self.width()) < u64::from(other.width()) + u64::from(x)
        || u64::from(self.height()) < u64::from(other.height()) + u64::from(y)
    {
        return Err(ImageError::Parameter(ParameterError::from_kind(
            ParameterErrorKind::DimensionMismatch,
        )));
    }
    for k in 0..other.height() {
        for i in 0..other.width() {
            let p = other.get_pixel(i, k);
            self.put_pixel(i + x, k + y, p);
        }
    }
    Ok(())
}
/// Copies all of the pixels from one part of this image to another part of this image.
///
/// The destination rectangle of the copy is specified with the top-left corner placed at (x, y).
///
/// # Returns
/// `true` if the copy was successful, `false` if the image could not
/// be copied due to size constraints.
///
/// # Panics
///
/// Panics if the source rectangle's top-left corner or the destination
/// top-left corner `(x, y)` lies outside the image bounds.
fn copy_within(&mut self, source: Rect, x: u32, y: u32) -> bool {
let Rect {
x: sx,
y: sy,
width,
height,
} = source;
let dx = x;
let dy = y;
// Corners must be in bounds; the subtraction-based size check below
// relies on this to avoid underflow.
assert!(sx < self.width() && dx < self.width());
assert!(sy < self.height() && dy < self.height());
// Reject rectangles that would extend past the right/bottom edge from
// either the source or destination corner (whichever is further along).
if self.width() - dx.max(sx) < width || self.height() - dy.max(sy) < height {
return false;
}
// since `.rev()` creates a new dype we would either have to go with dynamic dispatch for the ranges
// or have quite a lot of code bloat. A macro gives us static dispatch with less visible bloat.
macro_rules! copy_within_impl_ {
($xiter:expr, $yiter:expr) => {
for y in $yiter {
let sy = sy + y;
let dy = dy + y;
for x in $xiter {
let sx = sx + x;
let dx = dx + x;
let pixel = self.get_pixel(sx, sy);
self.put_pixel(dx, dy, pixel);
}
}
};
}
// check how target and source rectangles relate to each other so we dont overwrite data before we copied it.
// If the destination is to the right/below the source, iterate in
// reverse on that axis so overlapping pixels are read before they are
// overwritten.
match (sx < dx, sy < dy) {
(true, true) => copy_within_impl_!((0..width).rev(), (0..height).rev()),
(true, false) => copy_within_impl_!((0..width).rev(), 0..height),
(false, true) => copy_within_impl_!(0..width, (0..height).rev()),
(false, false) => copy_within_impl_!(0..width, 0..height),
}
true
}
/// Returns a mutable subimage that is a view into this image.
/// If you want an immutable subimage instead, use [`GenericImageView::view`]
/// The coordinates set the position of the top left corner of the `SubImage`.
///
/// # Panics
///
/// Panics if the requested rectangle extends past the image bounds.
fn sub_image(&mut self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&mut Self>
where
    Self: Sized,
{
    // Widen to u64 before adding so the bounds check itself cannot overflow.
    let right = u64::from(x) + u64::from(width);
    let bottom = u64::from(y) + u64::from(height);
    assert!(right <= u64::from(self.width()));
    assert!(bottom <= u64::from(self.height()));
    SubImage::new(self, x, y, width, height)
}
}
// Unit tests for the `GenericImage`/`GenericImageView` default methods:
// alpha blending, bounds queries, (nested) views and `copy_within`.
#[cfg(test)]
mod tests {
use super::{GenericImage, GenericImageView};
use crate::color::Rgba;
use crate::math::Rect;
use crate::{GrayImage, ImageBuffer};
#[test]
#[allow(deprecated)]
/// Test that alpha blending works as expected
fn test_image_alpha_blending() {
let mut target = ImageBuffer::new(1, 1);
target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
assert!(*target.get_pixel(0, 0) == Rgba([255, 0, 0, 255]));
// Fully opaque source replaces the destination outright.
target.blend_pixel(0, 0, Rgba([0, 255, 0, 255]));
assert!(*target.get_pixel(0, 0) == Rgba([0, 255, 0, 255]));
// Blending an alpha channel onto a solid background
target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
assert!(*target.get_pixel(0, 0) == Rgba([127, 127, 0, 255]));
// Blending two alpha channels
target.put_pixel(0, 0, Rgba([0, 255, 0, 127]));
target.blend_pixel(0, 0, Rgba([255, 0, 0, 127]));
assert!(*target.get_pixel(0, 0) == Rgba([169, 85, 0, 190]));
}
#[test]
fn test_in_bounds() {
// 2x2 image: all four pixels are in bounds, anything at index 2 is not.
let mut target = ImageBuffer::new(2, 2);
target.put_pixel(0, 0, Rgba([255u8, 0, 0, 255]));
assert!(target.in_bounds(0, 0));
assert!(target.in_bounds(1, 0));
assert!(target.in_bounds(0, 1));
assert!(target.in_bounds(1, 1));
assert!(!target.in_bounds(2, 0));
assert!(!target.in_bounds(0, 2));
assert!(!target.in_bounds(2, 2));
}
#[test]
fn test_can_subimage_clone_nonmut() {
let mut source = ImageBuffer::new(3, 3);
source.put_pixel(1, 1, Rgba([255u8, 0, 0, 255]));
// A non-mutable copy of the source image
let source = source.clone();
// Clone a view into non-mutable to a separate buffer
let cloned = source.view(1, 1, 1, 1).to_image();
assert!(cloned.get_pixel(0, 0) == source.get_pixel(1, 1));
}
#[test]
fn test_can_nest_views() {
let mut source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
{
// A sub-image of a sub-image writes through to the root buffer.
let mut sub1 = source.sub_image(0, 0, 2, 2);
let mut sub2 = sub1.sub_image(1, 1, 1, 1);
sub2.put_pixel(0, 0, Rgba([0, 0, 0, 0]));
}
assert_eq!(*source.get_pixel(1, 1), Rgba([0, 0, 0, 0]));
let view1 = source.view(0, 0, 2, 2);
assert_eq!(*source.get_pixel(1, 1), view1.get_pixel(1, 1));
let view2 = view1.view(1, 1, 1, 1);
assert_eq!(*source.get_pixel(1, 1), view2.get_pixel(0, 0));
}
#[test]
#[should_panic]
fn test_view_out_of_bounds() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(1, 1, 3, 3);
}
#[test]
#[should_panic]
fn test_view_coordinates_out_of_bounds() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(3, 3, 3, 3);
}
#[test]
#[should_panic]
fn test_view_width_out_of_bounds() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(1, 1, 3, 2);
}
#[test]
#[should_panic]
fn test_view_height_out_of_bounds() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(1, 1, 2, 3);
}
#[test]
#[should_panic]
fn test_view_x_out_of_bounds() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(3, 1, 3, 3);
}
#[test]
#[should_panic]
fn test_view_y_out_of_bounds() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(1, 3, 3, 3);
}
#[test]
fn test_view_in_bounds() {
// Edge cases that must NOT panic, including a zero-sized view at the corner.
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
source.view(0, 0, 3, 3);
source.view(1, 1, 2, 2);
source.view(2, 2, 0, 0);
}
#[test]
fn test_copy_sub_image() {
let source = ImageBuffer::from_pixel(3, 3, Rgba([255u8, 0, 0, 255]));
let view = source.view(0, 0, 3, 3);
// Views over shared references are Copy; using `view` after the move
// must still compile and work.
let _view2 = view;
view.to_image();
}
#[test]
fn test_generic_image_copy_within_oob() {
// Every rectangle here pokes past the 4x4 bounds in exactly one way
// and must be rejected (return false) rather than panic.
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, vec![0u8; 16]).unwrap();
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 0,
width: 5,
height: 4
},
0,
0
));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 0,
width: 4,
height: 5
},
0,
0
));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 1,
y: 0,
width: 4,
height: 4
},
0,
0
));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 0,
width: 4,
height: 4
},
1,
0
));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 1,
width: 4,
height: 4
},
0,
0
));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 0,
width: 4,
height: 4
},
0,
1
));
assert!(!image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 1,
y: 1,
width: 4,
height: 4
},
0,
0
));
}
#[test]
fn test_generic_image_copy_within_tl() {
// Copy towards the bottom-right (dest below/right of source): the
// overlapping region must be read before being overwritten.
let data = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let expected = [0, 1, 2, 3, 4, 0, 1, 2, 8, 4, 5, 6, 12, 8, 9, 10];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 0,
width: 3,
height: 3
},
1,
1
));
assert_eq!(&image.into_raw(), &expected);
}
#[test]
fn test_generic_image_copy_within_tr() {
// Source right of destination, destination below: mixed iteration order.
let data = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let expected = [0, 1, 2, 3, 1, 2, 3, 7, 5, 6, 7, 11, 9, 10, 11, 15];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 1,
y: 0,
width: 3,
height: 3
},
0,
1
));
assert_eq!(&image.into_raw(), &expected);
}
#[test]
fn test_generic_image_copy_within_bl() {
// Source below destination, destination right of source.
let data = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let expected = [0, 4, 5, 6, 4, 8, 9, 10, 8, 12, 13, 14, 12, 13, 14, 15];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 0,
y: 1,
width: 3,
height: 3
},
1,
0
));
assert_eq!(&image.into_raw(), &expected);
}
#[test]
fn test_generic_image_copy_within_br() {
// Copy towards the top-left: forward iteration on both axes is safe.
let data = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
let expected = [5, 6, 7, 3, 9, 10, 11, 7, 13, 14, 15, 11, 12, 13, 14, 15];
let mut image: GrayImage = ImageBuffer::from_raw(4, 4, Vec::from(&data[..])).unwrap();
assert!(image.sub_image(0, 0, 4, 4).copy_within(
Rect {
x: 1,
y: 1,
width: 3,
height: 3
},
0,
0
));
assert_eq!(&image.into_raw(), &expected);
}
}

263
vendor/image/src/images/sub_image.rs vendored Normal file
View File

@@ -0,0 +1,263 @@
use crate::{GenericImage, GenericImageView, ImageBuffer, Pixel};
use std::ops::{Deref, DerefMut};
/// A View into another image
///
/// Instances of this struct can be created using:
/// - [`GenericImage::sub_image`] to create a mutable view,
/// - [`GenericImageView::view`] to create an immutable view,
/// - [`SubImage::new`] to instantiate the struct directly.
///
/// Note that this does _not_ implement `GenericImage`, but it dereferences to one which allows you
/// to use it as if it did. See [Design Considerations](#Design-Considerations) below for details.
///
/// # Design Considerations
///
/// For reasons relating to coherence, this is not itself a `GenericImage` or a `GenericImageView`.
/// In short, we want to reserve the ability of adding traits implemented for _all_ generic images
/// but in a different manner for `SubImage`. This may be required to ensure that stacking
/// sub-images comes at no double indirect cost.
///
/// If, ultimately, this is not needed then a directly implementation of `GenericImage` can and
/// will get added. This inconvenience may alternatively get resolved if Rust allows some forms of
/// specialization, which might make this trick unnecessary and thus also allows for a direct
/// implementation.
#[derive(Copy, Clone)]
pub struct SubImage<I> {
// The `GenericImage{,View}` impls live on this private wrapper; callers
// reach them through the `Deref`/`DerefMut` impls further below.
inner: SubImageInner<I>,
}
/// The inner type of `SubImage` that implements `GenericImage{,View}`.
///
/// This type is _nominally_ `pub` but it is not exported from the crate. It should be regarded as
/// an existential type in any case.
#[derive(Copy, Clone)]
pub struct SubImageInner<I> {
// The wrapped image (owned value, `&I`, or `&mut I`).
image: I,
// Top-left corner of the view within `image`.
xoffset: u32,
yoffset: u32,
// Width and height of the view.
xstride: u32,
ystride: u32,
}
/// Alias to access Pixel behind a reference
type DerefPixel<I> = <<I as Deref>::Target as GenericImageView>::Pixel;
/// Alias to access Subpixel behind a reference
type DerefSubpixel<I> = <DerefPixel<I> as Pixel>::Subpixel;
impl<I> SubImage<I> {
    /// Construct a new subimage
    /// The coordinates set the position of the top left corner of the `SubImage`.
    pub fn new(image: I, x: u32, y: u32, width: u32, height: u32) -> SubImage<I> {
        let inner = SubImageInner {
            xoffset: x,
            yoffset: y,
            xstride: width,
            ystride: height,
            image,
        };
        SubImage { inner }
    }
    /// Change the coordinates of this subimage.
    pub fn change_bounds(&mut self, x: u32, y: u32, width: u32, height: u32) {
        let bounds = &mut self.inner;
        bounds.xoffset = x;
        bounds.yoffset = y;
        bounds.xstride = width;
        bounds.ystride = height;
    }
    /// The offsets of this subimage relative to the underlying image.
    pub fn offsets(&self) -> (u32, u32) {
        let SubImageInner {
            xoffset, yoffset, ..
        } = self.inner;
        (xoffset, yoffset)
    }
    /// Convert this subimage to an `ImageBuffer` by copying the viewed
    /// pixels into a freshly allocated buffer of the view's dimensions.
    pub fn to_image(&self) -> ImageBuffer<DerefPixel<I>, Vec<DerefSubpixel<I>>>
    where
        I: Deref,
        I::Target: GenericImageView + 'static,
    {
        let src = &*self.inner.image;
        let (view_w, view_h) = (self.inner.xstride, self.inner.ystride);
        let mut out = src.buffer_with_dimensions(view_w, view_h);
        for row in 0..view_h {
            for col in 0..view_w {
                let px = src.get_pixel(col + self.inner.xoffset, row + self.inner.yoffset);
                out.put_pixel(col, row, px);
            }
        }
        out
    }
}
/// Methods for readable images.
impl<I> SubImage<I>
where
    I: Deref,
    I::Target: GenericImageView,
{
/// Create a sub-view of the image.
///
/// The coordinates given are relative to the current view on the underlying image.
///
/// Note that this method is preferred to the one from `GenericImageView`. This is accessible
/// with the explicit method call syntax but it should rarely be needed due to causing an
/// extra level of indirection.
///
/// ```
/// use image::{GenericImageView, RgbImage, SubImage};
/// let buffer = RgbImage::new(10, 10);
///
/// let subimage: SubImage<&RgbImage> = buffer.view(0, 0, 10, 10);
/// let subview: SubImage<&RgbImage> = subimage.view(0, 0, 10, 10);
///
/// // Less efficient and NOT &RgbImage
/// let _: SubImage<&_> = GenericImageView::view(&*subimage, 0, 0, 10, 10);
/// ```
///
/// # Panics
///
/// Panics if the rectangle does not fit inside this view.
pub fn view(&self, x: u32, y: u32, width: u32, height: u32) -> SubImage<&I::Target> {
use crate::GenericImageView as _;
// u64 widening keeps the bounds check itself overflow-free.
assert!(u64::from(x) + u64::from(width) <= u64::from(self.inner.width()));
assert!(u64::from(y) + u64::from(height) <= u64::from(self.inner.height()));
// NOTE(review): given the asserts above, saturating_add looks like a
// belt-and-braces guard rather than expected behavior — confirm.
let x = self.inner.xoffset.saturating_add(x);
let y = self.inner.yoffset.saturating_add(y);
SubImage::new(&*self.inner.image, x, y, width, height)
}
/// Get a reference to the underlying image.
pub fn inner(&self) -> &I::Target {
&self.inner.image
}
}
/// Methods for mutable images.
impl<I> SubImage<I>
where
    I: DerefMut,
    I::Target: GenericImage,
{
/// Create a mutable sub-view of the image.
///
/// The coordinates given are relative to the current view on the underlying image.
///
/// # Panics
///
/// Panics if the rectangle does not fit inside this view.
pub fn sub_image(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
) -> SubImage<&mut I::Target> {
// Same bounds-check-then-translate pattern as the immutable `view`.
assert!(u64::from(x) + u64::from(width) <= u64::from(self.inner.width()));
assert!(u64::from(y) + u64::from(height) <= u64::from(self.inner.height()));
let x = self.inner.xoffset.saturating_add(x);
let y = self.inner.yoffset.saturating_add(y);
SubImage::new(&mut *self.inner.image, x, y, width, height)
}
/// Get a mutable reference to the underlying image.
pub fn inner_mut(&mut self) -> &mut I::Target {
&mut self.inner.image
}
}
// Dereferencing exposes `SubImageInner`'s `GenericImage{,View}` impls,
// which is how `SubImage` is used "as if" it implemented those traits.
impl<I> Deref for SubImage<I>
where
    I: Deref,
{
type Target = SubImageInner<I>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<I> DerefMut for SubImage<I>
where
    I: DerefMut,
{
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
#[allow(deprecated)]
impl<I> GenericImageView for SubImageInner<I>
where
    I: Deref,
    I::Target: GenericImageView,
{
type Pixel = DerefPixel<I>;
fn dimensions(&self) -> (u32, u32) {
// The view's own extent, not the underlying image's.
(self.xstride, self.ystride)
}
fn get_pixel(&self, x: u32, y: u32) -> Self::Pixel {
// Translate view-relative coordinates into the underlying image.
self.image.get_pixel(x + self.xoffset, y + self.yoffset)
}
/// Create a buffer with the (color) metadata of the underlying image.
fn buffer_with_dimensions(
&self,
width: u32,
height: u32,
) -> ImageBuffer<
<I::Target as GenericImageView>::Pixel,
Vec<<<I::Target as GenericImageView>::Pixel as Pixel>::Subpixel>,
> {
self.image.buffer_with_dimensions(width, height)
}
}
#[allow(deprecated)]
impl<I> GenericImage for SubImageInner<I>
where
    I: DerefMut,
    I::Target: GenericImage + Sized,
{
fn get_pixel_mut(&mut self, x: u32, y: u32) -> &mut Self::Pixel {
self.image.get_pixel_mut(x + self.xoffset, y + self.yoffset)
}
fn put_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
// Writes go straight through to the underlying image, offset-translated.
self.image
.put_pixel(x + self.xoffset, y + self.yoffset, pixel);
}
/// DEPRECATED: This method will be removed. Blend the pixel directly instead.
fn blend_pixel(&mut self, x: u32, y: u32, pixel: Self::Pixel) {
self.image
.blend_pixel(x + self.xoffset, y + self.yoffset, pixel);
}
}
// Tests that views preserve the source buffer's color-space metadata when
// converted back to a buffer.
#[cfg(test)]
mod tests {
use crate::{metadata::Cicp, GenericImageView, RgbaImage};
#[test]
fn preserves_color_space() {
let mut buffer = RgbaImage::new(16, 16);
buffer[(0, 0)] = crate::Rgba([0xff, 0, 0, 255]);
buffer.set_rgb_primaries(Cicp::DISPLAY_P3.primaries);
let view = buffer.view(0, 0, 16, 16);
let result = view.buffer_like();
assert_eq!(buffer.color_space(), result.color_space());
}
#[test]
fn deep_preserves_color_space() {
// Same as above but through a view of a view.
let mut buffer = RgbaImage::new(16, 16);
buffer[(0, 0)] = crate::Rgba([0xff, 0, 0, 255]);
buffer.set_rgb_primaries(Cicp::DISPLAY_P3.primaries);
let view = buffer.view(0, 0, 16, 16);
let view = view.view(0, 0, 16, 16);
let result = view.buffer_like();
assert_eq!(buffer.color_space(), result.color_space());
}
}

45
vendor/image/src/io.rs vendored Normal file
View File

@@ -0,0 +1,45 @@
//! Input and output of images.
use std::io;
use std::io::Read as _;
/// The decoder traits.
pub(crate) mod decoder;
/// The encoder traits.
pub(crate) mod encoder;
pub(crate) mod format;
pub(crate) mod free_functions;
pub(crate) mod image_reader_type;
pub(crate) mod limits;
// Deprecated aliases kept so code written against the old `image::io::*`
// paths keeps compiling.
#[deprecated(note = "this type has been moved and renamed to image::ImageReader")]
/// Deprecated re-export of `ImageReader` as `Reader`
pub type Reader<R> = ImageReader<R>;
#[deprecated(note = "this type has been moved to image::Limits")]
/// Deprecated re-export of `Limits`
pub type Limits = limits::Limits;
#[deprecated(note = "this type has been moved to image::LimitSupport")]
/// Deprecated re-export of `LimitSupport`
pub type LimitSupport = limits::LimitSupport;
pub(crate) use self::image_reader_type::ImageReader;
/// Adds `read_exact_vec`
pub(crate) trait ReadExt {
    /// Append exactly `len` bytes from the reader onto `vec`.
    ///
    /// On any failure (short read, I/O error, or failed allocation) the
    /// vector is restored to its original contents before the error is
    /// returned.
    fn read_exact_vec(&mut self, vec: &mut Vec<u8>, len: usize) -> io::Result<()>;
}
impl<R: io::Read> ReadExt for R {
    fn read_exact_vec(&mut self, vec: &mut Vec<u8>, len: usize) -> io::Result<()> {
        let rollback_len = vec.len();
        // Reserve up front so an allocation failure surfaces as an error
        // instead of aborting inside `read_to_end`.
        vec.try_reserve(len)?;
        let outcome = self.take(len as u64).read_to_end(vec);
        match outcome {
            Ok(read) if read == len => Ok(()),
            Ok(_) => {
                // Short read: undo the partial append before reporting.
                vec.truncate(rollback_len);
                Err(io::ErrorKind::UnexpectedEof.into())
            }
            Err(err) => {
                vec.truncate(rollback_len);
                Err(err)
            }
        }
    }
}

209
vendor/image/src/io/decoder.rs vendored Normal file
View File

@@ -0,0 +1,209 @@
use crate::animation::Frames;
use crate::color::{ColorType, ExtendedColorType};
use crate::error::ImageResult;
use crate::metadata::Orientation;
/// The trait that all decoders implement
pub trait ImageDecoder {
/// Returns a tuple containing the width and height of the image
fn dimensions(&self) -> (u32, u32);
/// Returns the color type of the image data produced by this decoder
fn color_type(&self) -> ColorType;
/// Returns the color type of the image file before decoding
fn original_color_type(&self) -> ExtendedColorType {
// By default the on-disk type is assumed to match the decoded type.
self.color_type().into()
}
/// Returns the ICC color profile embedded in the image, or `Ok(None)` if the image does not have one.
///
/// For formats that don't support embedded profiles this function should always return `Ok(None)`.
fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
Ok(None)
}
/// Returns the raw [Exif](https://en.wikipedia.org/wiki/Exif) chunk, if it is present.
/// A third-party crate such as [`kamadak-exif`](https://docs.rs/kamadak-exif/) is required to actually parse it.
///
/// For formats that don't support embedded profiles this function should always return `Ok(None)`.
fn exif_metadata(&mut self) -> ImageResult<Option<Vec<u8>>> {
Ok(None)
}
/// Returns the orientation of the image.
///
/// This is usually obtained from the Exif metadata, if present. Formats that don't support
/// indicating orientation in their image metadata will return `Ok(Orientation::NoTransforms)`.
fn orientation(&mut self) -> ImageResult<Orientation> {
// Missing or unparseable Exif data falls back to "no transforms".
Ok(self
.exif_metadata()?
.and_then(|chunk| Orientation::from_exif_chunk(&chunk))
.unwrap_or(Orientation::NoTransforms))
}
/// Returns the total number of bytes in the decoded image.
///
/// This is the size of the buffer that must be passed to `read_image` or
/// `read_image_with_progress`. The returned value may exceed `usize::MAX`, in
/// which case it isn't actually possible to construct a buffer to decode all the image data
/// into. If, however, the size does not fit in a u64 then `u64::MAX` is returned.
fn total_bytes(&self) -> u64 {
let dimensions = self.dimensions();
let total_pixels = u64::from(dimensions.0) * u64::from(dimensions.1);
let bytes_per_pixel = u64::from(self.color_type().bytes_per_pixel());
// Saturates at u64::MAX instead of overflowing for absurd dimensions.
total_pixels.saturating_mul(bytes_per_pixel)
}
/// Returns all the bytes in the image.
///
/// This function takes a slice of bytes and writes the pixel data of the image into it.
/// `buf` does not need to be aligned to any byte boundaries. However,
/// alignment to 2 or 4 byte boundaries may result in small performance
/// improvements for certain decoder implementations.
///
/// The returned pixel data will always be in native endian. This allows
/// `[u16]` and `[f32]` slices to be cast to `[u8]` and used for this method.
///
/// # Panics
///
/// This function panics if `buf.len() != self.total_bytes()`.
///
/// # Examples
///
/// ```
/// # use image::ImageDecoder;
/// fn read_16bit_image(decoder: impl ImageDecoder) -> Vec<u16> {
///     let mut buf: Vec<u16> = vec![0; (decoder.total_bytes() / 2) as usize];
///     decoder.read_image(bytemuck::cast_slice_mut(&mut buf));
///     buf
/// }
/// ```
fn read_image(self, buf: &mut [u8]) -> ImageResult<()>
where
    Self: Sized;
/// Set the decoder to have the specified limits. See [`Limits`] for the different kinds of
/// limits that is possible to set.
///
/// Note to implementors: make sure you call [`Limits::check_support`] so that
/// decoding fails if any unsupported strict limits are set. Also make sure
/// you call [`Limits::check_dimensions`] to check the `max_image_width` and
/// `max_image_height` limits.
///
/// **Note**: By default, _no_ limits are defined. This may be changed in future major version
/// increases.
///
/// [`Limits`]: ./io/struct.Limits.html
/// [`Limits::check_support`]: ./io/struct.Limits.html#method.check_support
/// [`Limits::check_dimensions`]: ./io/struct.Limits.html#method.check_dimensions
fn set_limits(&mut self, limits: crate::Limits) -> ImageResult<()> {
limits.check_support(&crate::LimitSupport::default())?;
let (width, height) = self.dimensions();
limits.check_dimensions(width, height)?;
Ok(())
}
/// Use `read_image` instead; this method is an implementation detail needed so the trait can
/// be object safe.
///
/// Note to implementors: This method should be implemented by calling `read_image` on
/// the boxed decoder...
/// ```ignore
/// fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
///     (*self).read_image(buf)
/// }
/// ```
fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()>;
}
// Forwarding impl so `Box<dyn ImageDecoder>` (and boxed concrete decoders)
// can be used wherever an `ImageDecoder` is expected. The deny lint forces
// every trait method to be forwarded explicitly, so a newly added default
// method cannot silently bypass the inner decoder.
#[deny(clippy::missing_trait_methods)]
impl<T: ?Sized + ImageDecoder> ImageDecoder for Box<T> {
fn dimensions(&self) -> (u32, u32) {
(**self).dimensions()
}
fn color_type(&self) -> ColorType {
(**self).color_type()
}
fn original_color_type(&self) -> ExtendedColorType {
(**self).original_color_type()
}
fn icc_profile(&mut self) -> ImageResult<Option<Vec<u8>>> {
(**self).icc_profile()
}
fn exif_metadata(&mut self) -> ImageResult<Option<Vec<u8>>> {
(**self).exif_metadata()
}
fn orientation(&mut self) -> ImageResult<Orientation> {
(**self).orientation()
}
fn total_bytes(&self) -> u64 {
(**self).total_bytes()
}
fn read_image(self, buf: &mut [u8]) -> ImageResult<()>
where
    Self: Sized,
{
// Consuming a Box must route through the object-safe entry point.
T::read_image_boxed(self, buf)
}
fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
T::read_image_boxed(*self, buf)
}
fn set_limits(&mut self, limits: crate::Limits) -> ImageResult<()> {
(**self).set_limits(limits)
}
}
/// Specialized image decoding not be supported by all formats
pub trait ImageDecoderRect: ImageDecoder {
/// Decode a rectangular section of the image.
///
/// This function takes a slice of bytes and writes the pixel data of the image into it.
/// The rectangle is specified by the x and y coordinates of the top left corner, the width
/// and height of the rectangle, and the row pitch of the buffer. The row pitch is the number
/// of bytes between the start of one row and the start of the next row. The row pitch must be
/// at least as large as the width of the rectangle in bytes.
fn read_rect(
&mut self,
x: u32,
y: u32,
width: u32,
height: u32,
buf: &mut [u8],
row_pitch: usize,
) -> ImageResult<()>;
}
/// `AnimationDecoder` trait
///
/// Implemented by decoders of animated formats (e.g. GIF, APNG) to expose
/// their frames lazily.
pub trait AnimationDecoder<'a> {
/// Consume the decoder producing a series of frames.
fn into_frames(self) -> Frames<'a>;
}
#[cfg(test)]
mod tests {
use super::{ColorType, ImageDecoder, ImageResult};
#[test]
fn total_bytes_overflow() {
// A decoder claiming u32::MAX x u32::MAX RGB pixels: `total_bytes`
// must saturate to u64::MAX and downstream allocation must fail
// cleanly rather than overflow.
struct D;
impl ImageDecoder for D {
fn color_type(&self) -> ColorType {
ColorType::Rgb8
}
fn dimensions(&self) -> (u32, u32) {
(0xffff_ffff, 0xffff_ffff)
}
fn read_image(self, _buf: &mut [u8]) -> ImageResult<()> {
unimplemented!()
}
fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
(*self).read_image(buf)
}
}
assert_eq!(D.total_bytes(), u64::MAX);
let v: ImageResult<Vec<u8>> = crate::io::free_functions::decoder_to_vec(D);
assert!(v.is_err());
}
}

132
vendor/image/src/io/encoder.rs vendored Normal file
View File

@@ -0,0 +1,132 @@
use crate::error::{ImageFormatHint, ImageResult, UnsupportedError, UnsupportedErrorKind};
use crate::{ColorType, DynamicImage, ExtendedColorType};
/// Nominally public but DO NOT expose this type.
///
/// To be somewhat sure here's a compile fail test:
///
/// ```compile_fail
/// use image::MethodSealedToImage;
/// ```
///
/// ```compile_fail
/// use image::io::MethodSealedToImage;
/// ```
///
/// The same implementation strategy for a partially public trait is used in the standard library,
/// for the different effect of forbidding `Error::type_id` overrides thus making them reliable for
/// their calls through the `dyn` version of the trait.
///
/// Read more: <https://predr.ag/blog/definitive-guide-to-sealed-traits-in-rust/>
// Because this type is unnameable outside the crate, trait methods taking it
// as an argument are effectively sealed: only this crate can call them.
#[derive(Clone, Copy)]
pub struct MethodSealedToImage;
/// The trait all encoders implement
pub trait ImageEncoder {
/// Writes all the bytes in an image to the encoder.
///
/// This function takes a slice of bytes of the pixel data of the image
/// and encodes them. Just like for [`ImageDecoder::read_image`], no particular
/// alignment is required and data is expected to be in native endian.
/// The implementation will reorder the endianness as necessary for the target encoding format.
///
/// # Panics
///
/// Panics if `width * height * color_type.bytes_per_pixel() != buf.len()`.
fn write_image(
self,
buf: &[u8],
width: u32,
height: u32,
color_type: ExtendedColorType,
) -> ImageResult<()>;
/// Set the ICC profile to use for the image.
///
/// This function is a no-op for formats that don't support ICC profiles.
/// For formats that do support ICC profiles, the profile will be embedded
/// in the image when it is saved.
///
/// # Errors
///
/// This function returns an error if the format does not support ICC profiles.
fn set_icc_profile(&mut self, icc_profile: Vec<u8>) -> Result<(), UnsupportedError> {
// Default: unsupported. Encoders that can embed a profile override this.
let _ = icc_profile;
Err(UnsupportedError::from_format_and_kind(
ImageFormatHint::Unknown,
UnsupportedErrorKind::GenericFeature(
"ICC profiles are not supported for this format".into(),
),
))
}
/// Set the EXIF metadata to use for the image.
///
/// This function is a no-op for formats that don't support EXIF metadata.
/// For formats that do support EXIF metadata, the metadata will be embedded
/// in the image when it is saved.
///
/// # Errors
///
/// This function returns an error if the format does not support EXIF metadata or if the
/// encoder doesn't implement saving EXIF metadata yet.
fn set_exif_metadata(&mut self, exif: Vec<u8>) -> Result<(), UnsupportedError> {
// Default: unsupported. Encoders that can embed Exif override this.
let _ = exif;
Err(UnsupportedError::from_format_and_kind(
ImageFormatHint::Unknown,
UnsupportedErrorKind::GenericFeature(
"EXIF metadata is not supported for this format".into(),
),
))
}
/// Convert the image to a compatible format for the encoder. This is used by the encoding
/// methods on `DynamicImage`.
///
/// Note that this is method is sealed to the crate and effectively pub(crate) due to the
/// argument type not being nameable.
#[doc(hidden)]
fn make_compatible_img(
&self,
_: MethodSealedToImage,
_input: &DynamicImage,
) -> Option<DynamicImage> {
// `None` means the input is already in a supported representation.
None
}
}
/// Object-safe companion to `ImageEncoder`: `write_image` consumes `self`,
/// so boxed encoders need this `self: Box<Self>` variant to be callable
/// through `dyn`.
pub(crate) trait ImageEncoderBoxed: ImageEncoder {
fn write_image(
self: Box<Self>,
buf: &'_ [u8],
width: u32,
height: u32,
color: ExtendedColorType,
) -> ImageResult<()>;
}
// Blanket impl: any encoder gains the boxed entry point by unboxing and
// delegating to the by-value `ImageEncoder::write_image`.
impl<T: ImageEncoder> ImageEncoderBoxed for T {
fn write_image(
self: Box<Self>,
buf: &'_ [u8],
width: u32,
height: u32,
color: ExtendedColorType,
) -> ImageResult<()> {
(*self).write_image(buf, width, height, color)
}
}
/// Implement `dynimage_conversion_sequence` for the common case of supporting only 8-bit colors
/// (with and without alpha).
#[allow(unused)]
pub(crate) fn dynimage_conversion_8bit(img: &DynamicImage) -> Option<DynamicImage> {
use ColorType::*;
match img.color() {
Rgb8 | Rgba8 | L8 | La8 => None,
L16 => Some(img.to_luma8().into()),
La16 => Some(img.to_luma_alpha8().into()),
Rgb16 | Rgb32F => Some(img.to_rgb8().into()),
Rgba16 | Rgba32F => Some(img.to_rgba8().into()),
}
}

470
vendor/image/src/io/format.rs vendored Normal file
View File

@@ -0,0 +1,470 @@
use std::ffi::OsStr;
use std::path::Path;
use crate::error::{ImageError, ImageFormatHint, ImageResult};
/// An enumeration of supported image formats.
/// Not all formats support both encoding and decoding.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[non_exhaustive]
pub enum ImageFormat {
/// An Image in PNG Format
Png,
/// An Image in JPEG Format
Jpeg,
/// An Image in GIF Format
Gif,
/// An Image in WEBP Format
WebP,
/// An Image in general PNM Format
Pnm,
/// An Image in TIFF Format
Tiff,
/// An Image in TGA Format
Tga,
/// An Image in DDS Format
Dds,
/// An Image in BMP Format
Bmp,
/// An Image in ICO Format
Ico,
/// An Image in Radiance HDR Format
Hdr,
/// An Image in OpenEXR Format
OpenExr,
/// An Image in farbfeld Format
Farbfeld,
/// An Image in AVIF Format
Avif,
/// An Image in QOI Format
Qoi,
/// An Image in PCX Format
// Hidden from docs and deprecated unless the `serde` feature is enabled
// (where it must stay constructible for (de)serialization compatibility).
#[cfg_attr(not(feature = "serde"), deprecated)]
#[doc(hidden)]
Pcx,
}
impl ImageFormat {
/// Return the image format specified by a path's file extension.
///
/// Matching is ASCII case-insensitive; returns `None` for unknown or
/// non-UTF-8 extensions.
///
/// # Example
///
/// ```
/// use image::ImageFormat;
///
/// let format = ImageFormat::from_extension("jpg");
/// assert_eq!(format, Some(ImageFormat::Jpeg));
/// ```
#[inline]
pub fn from_extension<S>(ext: S) -> Option<Self>
where
    S: AsRef<OsStr>,
{
// thin wrapper function to strip generics
fn inner(ext: &OsStr) -> Option<ImageFormat> {
let ext = ext.to_str()?.to_ascii_lowercase();
// NOTE: when updating this, also update extensions_str()
// NOTE(review): `pcx` is intentionally not mapped here while the
// `Pcx` variant is deprecated — confirm before adding it.
Some(match ext.as_str() {
"avif" => ImageFormat::Avif,
"jpg" | "jpeg" | "jfif" => ImageFormat::Jpeg,
"png" | "apng" => ImageFormat::Png,
"gif" => ImageFormat::Gif,
"webp" => ImageFormat::WebP,
"tif" | "tiff" => ImageFormat::Tiff,
"tga" => ImageFormat::Tga,
"dds" => ImageFormat::Dds,
"bmp" => ImageFormat::Bmp,
"ico" => ImageFormat::Ico,
"hdr" => ImageFormat::Hdr,
"exr" => ImageFormat::OpenExr,
"pbm" | "pam" | "ppm" | "pgm" | "pnm" => ImageFormat::Pnm,
"ff" => ImageFormat::Farbfeld,
"qoi" => ImageFormat::Qoi,
_ => return None,
})
}
inner(ext.as_ref())
}
/// Return the image format specified by the path's file extension.
///
/// # Errors
///
/// Returns `ImageError::Unsupported` when the path has no extension or the
/// extension is not recognized.
///
/// # Example
///
/// ```
/// use image::ImageFormat;
///
/// let format = ImageFormat::from_path("images/ferris.png")?;
/// assert_eq!(format, ImageFormat::Png);
///
/// # Ok::<(), image::error::ImageError>(())
/// ```
#[inline]
pub fn from_path<P>(path: P) -> ImageResult<Self>
where
    P: AsRef<Path>,
{
// thin wrapper function to strip generics
fn inner(path: &Path) -> ImageResult<ImageFormat> {
let exact_ext = path.extension();
exact_ext
.and_then(ImageFormat::from_extension)
.ok_or_else(|| {
// Report the offending extension in the error when there was one.
let format_hint = match exact_ext {
None => ImageFormatHint::Unknown,
Some(os) => ImageFormatHint::PathExtension(os.into()),
};
ImageError::Unsupported(format_hint.into())
})
}
inner(path.as_ref())
}
/// Return the image format specified by a MIME type.
///
/// Matching is exact (case-sensitive); returns `None` for unknown types.
///
/// # Example
///
/// ```
/// use image::ImageFormat;
///
/// let format = ImageFormat::from_mime_type("image/png").unwrap();
/// assert_eq!(format, ImageFormat::Png);
/// ```
pub fn from_mime_type<M>(mime_type: M) -> Option<Self>
where
    M: AsRef<str>,
{
match mime_type.as_ref() {
"image/avif" => Some(ImageFormat::Avif),
"image/jpeg" => Some(ImageFormat::Jpeg),
"image/png" => Some(ImageFormat::Png),
"image/gif" => Some(ImageFormat::Gif),
"image/webp" => Some(ImageFormat::WebP),
"image/tiff" => Some(ImageFormat::Tiff),
"image/x-targa" | "image/x-tga" => Some(ImageFormat::Tga),
"image/vnd-ms.dds" => Some(ImageFormat::Dds),
"image/bmp" => Some(ImageFormat::Bmp),
"image/x-icon" | "image/vnd.microsoft.icon" => Some(ImageFormat::Ico),
"image/vnd.radiance" => Some(ImageFormat::Hdr),
"image/x-exr" => Some(ImageFormat::OpenExr),
"image/x-portable-bitmap"
| "image/x-portable-graymap"
| "image/x-portable-pixmap"
| "image/x-portable-anymap" => Some(ImageFormat::Pnm),
// Qoi's MIME type is being worked on.
// See: https://github.com/phoboslab/qoi/issues/167
"image/x-qoi" => Some(ImageFormat::Qoi),
_ => None,
}
}
/// Return the MIME type for this image format or "application/octet-stream" if no MIME type
/// exists for the format.
///
/// Some notes on a few of the MIME types:
///
/// - The portable anymap format has a separate MIME type for the pixmap, graymap and bitmap
///   formats, but this method returns the general "image/x-portable-anymap" MIME type.
/// - The Targa format has two common MIME types, "image/x-targa" and "image/x-tga"; this
///   method returns "image/x-targa" for that format.
/// - The QOI MIME type is still a work in progress. This method returns "image/x-qoi" for
///   that format.
///
/// # Example
///
/// ```
/// use image::ImageFormat;
///
/// let mime_type = ImageFormat::Png.to_mime_type();
/// assert_eq!(mime_type, "image/png");
/// ```
#[must_use]
pub fn to_mime_type(&self) -> &'static str {
    // Exhaustive on purpose: adding a new variant forces updating this table.
    // These strings are the inverse of the arms in `from_mime_type`.
    match self {
        ImageFormat::Avif => "image/avif",
        ImageFormat::Jpeg => "image/jpeg",
        ImageFormat::Png => "image/png",
        ImageFormat::Gif => "image/gif",
        ImageFormat::WebP => "image/webp",
        ImageFormat::Tiff => "image/tiff",
        // the targa MIME type has two options, but this one seems to be used more
        ImageFormat::Tga => "image/x-targa",
        ImageFormat::Dds => "image/vnd-ms.dds",
        ImageFormat::Bmp => "image/bmp",
        ImageFormat::Ico => "image/x-icon",
        ImageFormat::Hdr => "image/vnd.radiance",
        ImageFormat::OpenExr => "image/x-exr",
        // return the most general MIME type
        ImageFormat::Pnm => "image/x-portable-anymap",
        // Qoi's MIME type is being worked on.
        // See: https://github.com/phoboslab/qoi/issues/167
        ImageFormat::Qoi => "image/x-qoi",
        // farbfeld's MIME type taken from https://www.wikidata.org/wiki/Q28206109
        ImageFormat::Farbfeld => "application/octet-stream",
        #[allow(deprecated)]
        ImageFormat::Pcx => "image/vnd.zbrush.pcx",
    }
}
/// Return if the `ImageFormat` can be decoded by the lib.
#[inline]
#[must_use]
pub fn can_read(&self) -> bool {
    // Needs to be updated once a new variant's decoder is added to free_functions.rs::load
    // The two unsupported formats are listed first; everything else decodes.
    match self {
        ImageFormat::Dds => false,
        #[allow(deprecated)]
        ImageFormat::Pcx => false,
        ImageFormat::Png
        | ImageFormat::Gif
        | ImageFormat::Jpeg
        | ImageFormat::WebP
        | ImageFormat::Tiff
        | ImageFormat::Tga
        | ImageFormat::Bmp
        | ImageFormat::Ico
        | ImageFormat::Hdr
        | ImageFormat::OpenExr
        | ImageFormat::Pnm
        | ImageFormat::Farbfeld
        | ImageFormat::Avif
        | ImageFormat::Qoi => true,
    }
}
/// Return if the `ImageFormat` can be encoded by the lib.
#[inline]
#[must_use]
pub fn can_write(&self) -> bool {
    // Needs to be updated once a new variant's encoder is added to free_functions.rs::save_buffer_with_format_impl
    // The two unsupported formats are listed first; everything else encodes.
    match self {
        ImageFormat::Dds => false,
        #[allow(deprecated)]
        ImageFormat::Pcx => false,
        ImageFormat::Gif
        | ImageFormat::Ico
        | ImageFormat::Jpeg
        | ImageFormat::Png
        | ImageFormat::Bmp
        | ImageFormat::Tiff
        | ImageFormat::Tga
        | ImageFormat::Pnm
        | ImageFormat::Farbfeld
        | ImageFormat::Avif
        | ImageFormat::WebP
        | ImageFormat::Hdr
        | ImageFormat::OpenExr
        | ImageFormat::Qoi => true,
    }
}
/// Return a list of applicable extensions for this format.
///
/// All currently recognized image formats specify at least on extension but for future
/// compatibility you should not rely on this fact. The list may be empty if the format has no
/// recognized file representation, for example in case it is used as a purely transient memory
/// format.
///
/// The method name `extensions` remains reserved for introducing another method in the future
/// that yields a slice of `OsStr` which is blocked by several features of const evaluation.
#[must_use]
pub fn extensions_str(self) -> &'static [&'static str] {
    // NOTE: when updating this, also update from_extension()
    // Each slice below mirrors the string arms matched by from_extension().
    match self {
        ImageFormat::Png => &["png"],
        ImageFormat::Jpeg => &["jpg", "jpeg"],
        ImageFormat::Gif => &["gif"],
        ImageFormat::WebP => &["webp"],
        ImageFormat::Pnm => &["pbm", "pam", "ppm", "pgm", "pnm"],
        ImageFormat::Tiff => &["tiff", "tif"],
        ImageFormat::Tga => &["tga"],
        ImageFormat::Dds => &["dds"],
        ImageFormat::Bmp => &["bmp"],
        ImageFormat::Ico => &["ico"],
        ImageFormat::Hdr => &["hdr"],
        ImageFormat::OpenExr => &["exr"],
        ImageFormat::Farbfeld => &["ff"],
        // According to: https://aomediacodec.github.io/av1-avif/#mime-registration
        ImageFormat::Avif => &["avif"],
        ImageFormat::Qoi => &["qoi"],
        #[allow(deprecated)]
        ImageFormat::Pcx => &["pcx"],
    }
}
/// Return the `ImageFormat`s which are enabled for reading.
#[inline]
#[must_use]
pub fn reading_enabled(&self) -> bool {
    // Each arm reports whether the corresponding decoder's cargo feature is
    // compiled in; `cfg!` is evaluated at compile time.
    match self {
        ImageFormat::Png => cfg!(feature = "png"),
        ImageFormat::Gif => cfg!(feature = "gif"),
        ImageFormat::Jpeg => cfg!(feature = "jpeg"),
        ImageFormat::WebP => cfg!(feature = "webp"),
        ImageFormat::Tiff => cfg!(feature = "tiff"),
        ImageFormat::Tga => cfg!(feature = "tga"),
        ImageFormat::Bmp => cfg!(feature = "bmp"),
        ImageFormat::Ico => cfg!(feature = "ico"),
        ImageFormat::Hdr => cfg!(feature = "hdr"),
        ImageFormat::OpenExr => cfg!(feature = "exr"),
        ImageFormat::Pnm => cfg!(feature = "pnm"),
        ImageFormat::Farbfeld => cfg!(feature = "ff"),
        ImageFormat::Avif => cfg!(feature = "avif"),
        ImageFormat::Qoi => cfg!(feature = "qoi"),
        // NOTE(review): `make_decoder` in io/image_reader_type.rs gates a PCX
        // decoder behind the "pcx" feature, yet this returns a hard-coded
        // `false` — confirm whether this should be `cfg!(feature = "pcx")`.
        #[allow(deprecated)]
        ImageFormat::Pcx => false,
        // NOTE(review): likewise a DDS decoder exists behind the "dds"
        // feature; confirm whether this should be `cfg!(feature = "dds")`.
        ImageFormat::Dds => false,
    }
}
/// Return the `ImageFormat`s which are enabled for writing.
#[inline]
#[must_use]
pub fn writing_enabled(&self) -> bool {
    // Each arm reports whether the corresponding encoder's cargo feature is
    // compiled in; Pcx and Dds have no encoder at all (see `can_write`).
    match self {
        ImageFormat::Gif => cfg!(feature = "gif"),
        ImageFormat::Ico => cfg!(feature = "ico"),
        ImageFormat::Jpeg => cfg!(feature = "jpeg"),
        ImageFormat::Png => cfg!(feature = "png"),
        ImageFormat::Bmp => cfg!(feature = "bmp"),
        ImageFormat::Tiff => cfg!(feature = "tiff"),
        ImageFormat::Tga => cfg!(feature = "tga"),
        ImageFormat::Pnm => cfg!(feature = "pnm"),
        ImageFormat::Farbfeld => cfg!(feature = "ff"),
        ImageFormat::Avif => cfg!(feature = "avif"),
        ImageFormat::WebP => cfg!(feature = "webp"),
        ImageFormat::OpenExr => cfg!(feature = "exr"),
        ImageFormat::Qoi => cfg!(feature = "qoi"),
        ImageFormat::Hdr => cfg!(feature = "hdr"),
        #[allow(deprecated)]
        ImageFormat::Pcx => false,
        ImageFormat::Dds => false,
    }
}
/// Return all `ImageFormat`s
pub fn all() -> impl Iterator<Item = ImageFormat> {
    // Consume the array by value instead of `.iter().copied()`, which relied
    // on rvalue static promotion to return a borrow of a temporary array.
    // Same items, same order.
    [
        ImageFormat::Gif,
        ImageFormat::Ico,
        ImageFormat::Jpeg,
        ImageFormat::Png,
        ImageFormat::Bmp,
        ImageFormat::Tiff,
        ImageFormat::Tga,
        ImageFormat::Pnm,
        ImageFormat::Farbfeld,
        ImageFormat::Avif,
        ImageFormat::WebP,
        ImageFormat::OpenExr,
        ImageFormat::Qoi,
        ImageFormat::Dds,
        ImageFormat::Hdr,
        #[allow(deprecated)]
        ImageFormat::Pcx,
    ]
    .into_iter()
}
}
#[cfg(test)]
mod tests {
    use std::collections::HashSet;
    use std::path::Path;

    use super::{ImageFormat, ImageResult};

    // Extension-based detection must be case-insensitive and reject unknown
    // or missing extensions.
    #[test]
    fn test_image_format_from_path() {
        fn from_path(s: &str) -> ImageResult<ImageFormat> {
            ImageFormat::from_path(Path::new(s))
        }
        assert_eq!(from_path("./a.jpg").unwrap(), ImageFormat::Jpeg);
        assert_eq!(from_path("./a.jpeg").unwrap(), ImageFormat::Jpeg);
        assert_eq!(from_path("./a.JPEG").unwrap(), ImageFormat::Jpeg);
        assert_eq!(from_path("./a.pNg").unwrap(), ImageFormat::Png);
        assert_eq!(from_path("./a.gif").unwrap(), ImageFormat::Gif);
        assert_eq!(from_path("./a.webp").unwrap(), ImageFormat::WebP);
        assert_eq!(from_path("./a.tiFF").unwrap(), ImageFormat::Tiff);
        assert_eq!(from_path("./a.tif").unwrap(), ImageFormat::Tiff);
        assert_eq!(from_path("./a.tga").unwrap(), ImageFormat::Tga);
        assert_eq!(from_path("./a.dds").unwrap(), ImageFormat::Dds);
        assert_eq!(from_path("./a.bmp").unwrap(), ImageFormat::Bmp);
        assert_eq!(from_path("./a.Ico").unwrap(), ImageFormat::Ico);
        assert_eq!(from_path("./a.hdr").unwrap(), ImageFormat::Hdr);
        assert_eq!(from_path("./a.exr").unwrap(), ImageFormat::OpenExr);
        assert_eq!(from_path("./a.pbm").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.pAM").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.Ppm").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.pgm").unwrap(), ImageFormat::Pnm);
        assert_eq!(from_path("./a.AViF").unwrap(), ImageFormat::Avif);
        assert!(from_path("./a.txt").is_err());
        assert!(from_path("./a").is_err());
    }

    // Round-trip: every extension reported by extensions_str() must map back
    // to the same format via from_path().
    #[test]
    fn image_formats_are_recognized() {
        use ImageFormat::*;
        const ALL_FORMATS: &[ImageFormat] = &[
            Avif, Png, Jpeg, Gif, WebP, Pnm, Tiff, Tga, Dds, Bmp, Ico, Hdr, Farbfeld, OpenExr,
        ];
        for &format in ALL_FORMATS {
            let mut file = Path::new("file.nothing").to_owned();
            for ext in format.extensions_str() {
                assert!(file.set_extension(ext));
                match ImageFormat::from_path(&file) {
                    Err(_) => panic!("Path {} not recognized as {:?}", file.display(), format),
                    Ok(result) => assert_eq!(format, result),
                }
            }
        }
    }

    // Spot-check that ImageFormat::all() yields a few known variants.
    #[test]
    fn all() {
        let all_formats: HashSet<ImageFormat> = ImageFormat::all().collect();
        assert!(all_formats.contains(&ImageFormat::Avif));
        assert!(all_formats.contains(&ImageFormat::Gif));
        assert!(all_formats.contains(&ImageFormat::Bmp));
        assert!(all_formats.contains(&ImageFormat::Farbfeld));
        assert!(all_formats.contains(&ImageFormat::Jpeg));
    }

    // reading_enabled() must track the compiled cargo features.
    #[test]
    fn reading_enabled() {
        assert_eq!(cfg!(feature = "jpeg"), ImageFormat::Jpeg.reading_enabled());
        assert_eq!(
            cfg!(feature = "ff"),
            ImageFormat::Farbfeld.reading_enabled()
        );
        assert!(!ImageFormat::Dds.reading_enabled());
    }

    // writing_enabled() must track the compiled cargo features.
    #[test]
    fn writing_enabled() {
        assert_eq!(cfg!(feature = "jpeg"), ImageFormat::Jpeg.writing_enabled());
        assert_eq!(
            cfg!(feature = "ff"),
            ImageFormat::Farbfeld.writing_enabled()
        );
        assert!(!ImageFormat::Dds.writing_enabled());
    }
}

504
vendor/image/src/io/free_functions.rs vendored Normal file
View File

@@ -0,0 +1,504 @@
use std::fs::File;
use std::io::{self, BufRead, BufWriter, Seek, Write};
use std::path::Path;
use std::{iter, mem::size_of};
use crate::io::encoder::ImageEncoderBoxed;
use crate::{codecs::*, ExtendedColorType, ImageReader};
use crate::error::{
ImageError, ImageFormatHint, ImageResult, LimitError, LimitErrorKind, ParameterError,
ParameterErrorKind, UnsupportedError, UnsupportedErrorKind,
};
use crate::{DynamicImage, ImageDecoder, ImageFormat};
/// Create a new image from a Reader.
///
/// Assumes the reader is already buffered. For optimal performance,
/// consider wrapping the reader with a `BufReader::new()`.
///
/// Try [`ImageReader`] for more advanced uses.
pub fn load<R: BufRead + Seek>(r: R, format: ImageFormat) -> ImageResult<DynamicImage> {
    // `with_format` is equivalent to `ImageReader::new` followed by
    // `set_format`; both use default decoding limits.
    ImageReader::with_format(r, format).decode()
}
/// Saves the supplied buffer to a file at the path specified.
///
/// The image format is derived from the file extension. The buffer is assumed to have the correct
/// format according to the specified color type. This will lead to corrupted files if the buffer
/// contains malformed data.
pub fn save_buffer(
    path: impl AsRef<Path>,
    buf: &[u8],
    width: u32,
    height: u32,
    color: impl Into<ExtendedColorType>,
) -> ImageResult<()> {
    // Deduce the format from the file extension, then delegate.
    let format = ImageFormat::from_path(&path)?;
    save_buffer_with_format(path, buf, width, height, color, format)
}
/// Saves the supplied buffer to a file given the path and desired format.
///
/// The buffer is assumed to have the correct format according to the specified color type. This
/// will lead to corrupted files if the buffer contains malformed data.
pub fn save_buffer_with_format(
    path: impl AsRef<Path>,
    buf: &[u8],
    width: u32,
    height: u32,
    color: impl Into<ExtendedColorType>,
    format: ImageFormat,
) -> ImageResult<()> {
    let file = File::create(path)?;
    // A BufWriter over a File is always seekable, which `encoder_for_format`
    // requires.
    let mut writer = BufWriter::new(file);
    let encoder = encoder_for_format(format, &mut writer)?;
    encoder.write_image(buf, width, height, color.into())
}
/// Construct the boxed encoder for `format`, writing into `buffered_write`.
///
/// Returns `ImageError::Unsupported` when the format has no encoder or its
/// cargo feature is disabled at compile time (each arm below only exists when
/// its feature is enabled).
pub(crate) fn encoder_for_format<'a, W: Write + Seek>(
    format: ImageFormat,
    buffered_write: &'a mut W,
) -> ImageResult<Box<dyn ImageEncoderBoxed + 'a>> {
    Ok(match format {
        #[cfg(feature = "png")]
        ImageFormat::Png => Box::new(png::PngEncoder::new(buffered_write)),
        #[cfg(feature = "jpeg")]
        ImageFormat::Jpeg => Box::new(jpeg::JpegEncoder::new(buffered_write)),
        #[cfg(feature = "pnm")]
        ImageFormat::Pnm => Box::new(pnm::PnmEncoder::new(buffered_write)),
        #[cfg(feature = "gif")]
        ImageFormat::Gif => Box::new(gif::GifEncoder::new(buffered_write)),
        #[cfg(feature = "ico")]
        ImageFormat::Ico => Box::new(ico::IcoEncoder::new(buffered_write)),
        #[cfg(feature = "bmp")]
        ImageFormat::Bmp => Box::new(bmp::BmpEncoder::new(buffered_write)),
        #[cfg(feature = "ff")]
        ImageFormat::Farbfeld => Box::new(farbfeld::FarbfeldEncoder::new(buffered_write)),
        #[cfg(feature = "tga")]
        ImageFormat::Tga => Box::new(tga::TgaEncoder::new(buffered_write)),
        #[cfg(feature = "exr")]
        ImageFormat::OpenExr => Box::new(openexr::OpenExrEncoder::new(buffered_write)),
        #[cfg(feature = "tiff")]
        ImageFormat::Tiff => Box::new(tiff::TiffEncoder::new(buffered_write)),
        #[cfg(feature = "avif")]
        ImageFormat::Avif => Box::new(avif::AvifEncoder::new(buffered_write)),
        #[cfg(feature = "qoi")]
        ImageFormat::Qoi => Box::new(qoi::QoiEncoder::new(buffered_write)),
        #[cfg(feature = "webp")]
        ImageFormat::WebP => Box::new(webp::WebPEncoder::new_lossless(buffered_write)),
        #[cfg(feature = "hdr")]
        ImageFormat::Hdr => Box::new(hdr::HdrEncoder::new(buffered_write)),
        // Anything else (or a disabled feature) is reported as unsupported.
        _ => {
            return Err(ImageError::Unsupported(
                UnsupportedError::from_format_and_kind(
                    ImageFormatHint::Unknown,
                    UnsupportedErrorKind::Format(ImageFormatHint::Name(format!("{format:?}"))),
                ),
            ));
        }
    })
}
/// `(signature, mask, format)` triples used by `guess_format_impl`.
///
/// An empty mask means "exact prefix match". Otherwise each buffer byte is
/// AND-ed with the corresponding mask byte (missing mask bytes default to
/// `0xFF`) before being compared against the signature, which lets entries
/// ignore variable bytes such as the RIFF chunk size in WebP.
static MAGIC_BYTES: [(&[u8], &[u8], ImageFormat); 22] = [
    (b"\x89PNG\r\n\x1a\n", b"", ImageFormat::Png),
    (&[0xff, 0xd8, 0xff], b"", ImageFormat::Jpeg),
    (b"GIF89a", b"", ImageFormat::Gif),
    (b"GIF87a", b"", ImageFormat::Gif),
    (
        b"RIFF\0\0\0\0WEBP",
        b"\xFF\xFF\xFF\xFF\0\0\0\0",
        ImageFormat::WebP,
    ),
    (b"MM\x00*", b"", ImageFormat::Tiff),
    (b"II*\x00", b"", ImageFormat::Tiff),
    (b"DDS ", b"", ImageFormat::Dds),
    (b"BM", b"", ImageFormat::Bmp),
    (&[0, 0, 1, 0], b"", ImageFormat::Ico),
    (b"#?RADIANCE", b"", ImageFormat::Hdr),
    (b"\0\0\0\0ftypavif", b"\xFF\xFF\0\0", ImageFormat::Avif),
    (&[0x76, 0x2f, 0x31, 0x01], b"", ImageFormat::OpenExr), // = &exr::meta::magic_number::BYTES
    (b"qoif", b"", ImageFormat::Qoi),
    (b"P1", b"", ImageFormat::Pnm),
    (b"P2", b"", ImageFormat::Pnm),
    (b"P3", b"", ImageFormat::Pnm),
    (b"P4", b"", ImageFormat::Pnm),
    (b"P5", b"", ImageFormat::Pnm),
    (b"P6", b"", ImageFormat::Pnm),
    (b"P7", b"", ImageFormat::Pnm),
    (b"farbfeld", b"", ImageFormat::Farbfeld),
];
/// Guess image format from memory block
///
/// Makes an educated guess about the image format based on the Magic Bytes at the beginning.
/// TGA is not supported by this function.
/// This is not to be trusted on the validity of the whole memory block
pub fn guess_format(buffer: &[u8]) -> ImageResult<ImageFormat> {
    // Lift the Option returned by the implementation into a Result.
    guess_format_impl(buffer)
        .ok_or_else(|| ImageError::Unsupported(ImageFormatHint::Unknown.into()))
}
/// Return the first entry in `MAGIC_BYTES` whose (possibly masked) signature
/// matches the start of `buffer`, if any.
pub(crate) fn guess_format_impl(buffer: &[u8]) -> Option<ImageFormat> {
    // Does `buffer` begin with `signature`, honoring the mask convention
    // described on MAGIC_BYTES?
    fn matches(buffer: &[u8], signature: &[u8], mask: &[u8]) -> bool {
        if mask.is_empty() {
            buffer.starts_with(signature)
        } else {
            buffer.len() >= signature.len()
                && buffer
                    .iter()
                    .zip(signature.iter())
                    .zip(mask.iter().chain(iter::repeat(&0xFF)))
                    .all(|((&byte, &sig), &mask)| byte & mask == sig)
        }
    }

    MAGIC_BYTES
        .iter()
        .find(|&&(signature, mask, _)| matches(buffer, signature, mask))
        .map(|&(_, _, format)| format)
}
/// Decodes a specific region of the image, represented by the rectangle
/// starting from ```x``` and ```y``` and having ```length``` and ```width```
///
/// `seek_scanline` positions the decoder at a given scanline index and
/// `read_scanline` reads one scanline's worth of bytes; `load_rect` stitches
/// whole or partial scanlines into `buf`, one output row every `row_pitch`
/// bytes.
#[allow(dead_code)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn load_rect<D, F1, F2, E>(
    x: u32,
    y: u32,
    width: u32,
    height: u32,
    buf: &mut [u8],
    row_pitch: usize,
    decoder: &mut D,
    scanline_bytes: usize,
    mut seek_scanline: F1,
    mut read_scanline: F2,
) -> ImageResult<()>
where
    D: ImageDecoder,
    F1: FnMut(&mut D, u64) -> io::Result<()>,
    F2: FnMut(&mut D, &mut [u8]) -> Result<(), E>,
    ImageError: From<E>,
{
    // All offset arithmetic is done in u64 to avoid overflow on 32-bit
    // targets.
    let scanline_bytes = u64::try_from(scanline_bytes).unwrap();
    let row_pitch = u64::try_from(row_pitch).unwrap();
    let (x, y, width, height) = (
        u64::from(x),
        u64::from(y),
        u64::from(width),
        u64::from(height),
    );
    let dimensions = decoder.dimensions();
    let bytes_per_pixel = u64::from(decoder.color_type().bytes_per_pixel());
    let row_bytes = bytes_per_pixel * u64::from(dimensions.0);
    let total_bytes = width * height * bytes_per_pixel;

    assert!(
        buf.len() >= usize::try_from(total_bytes).unwrap_or(usize::MAX),
        "output buffer too short\n expected `{}`, provided `{}`",
        total_bytes,
        buf.len()
    );

    let mut current_scanline = 0;
    // Spill buffer holding one full scanline when only part of it is needed.
    let mut tmp = Vec::new();
    // Index of the scanline currently cached in `tmp`, if any.
    let mut tmp_scanline = None;

    {
        // Read a range of the image starting from byte number `start` and continuing until byte
        // number `end`. Updates `current_scanline` and `bytes_read` appropriately.
        let mut read_image_range =
            |mut start: u64, end: u64, mut output: &mut [u8]| -> ImageResult<()> {
                // If the first scanline we need is already stored in the temporary buffer, then handle
                // it first.
                let target_scanline = start / scanline_bytes;
                if tmp_scanline == Some(target_scanline) {
                    let position = target_scanline * scanline_bytes;
                    let offset = start.saturating_sub(position);
                    let len = (end - start)
                        .min(scanline_bytes - offset)
                        .min(end - position);

                    output
                        .write_all(&tmp[offset as usize..][..len as usize])
                        .unwrap();
                    start += len;

                    if start == end {
                        return Ok(());
                    }
                }

                // Seek only if the decoder is not already positioned on the
                // scanline we need.
                let target_scanline = start / scanline_bytes;
                if target_scanline != current_scanline {
                    seek_scanline(decoder, target_scanline)?;
                    current_scanline = target_scanline;
                }

                let mut position = current_scanline * scanline_bytes;
                while position < end {
                    if position >= start && end - position >= scanline_bytes {
                        // Fast path: a whole scanline lands inside `output`,
                        // so read directly into it.
                        read_scanline(decoder, &mut output[..(scanline_bytes as usize)])?;
                        output = &mut output[scanline_bytes as usize..];
                    } else {
                        // Partial scanline: read it into `tmp`, cache it, and
                        // copy only the slice that intersects [start, end).
                        tmp.resize(scanline_bytes as usize, 0u8);
                        read_scanline(decoder, &mut tmp)?;
                        tmp_scanline = Some(current_scanline);

                        let offset = start.saturating_sub(position);
                        let len = (end - start)
                            .min(scanline_bytes - offset)
                            .min(end - position);

                        output
                            .write_all(&tmp[offset as usize..][..len as usize])
                            .unwrap();
                    }
                    current_scanline += 1;
                    position += scanline_bytes;
                }
                Ok(())
            };

        // Reject regions that fall outside the image or are empty.
        if x + width > u64::from(dimensions.0)
            || y + height > u64::from(dimensions.1)
            || width == 0
            || height == 0
        {
            return Err(ImageError::Parameter(ParameterError::from_kind(
                ParameterErrorKind::DimensionMismatch,
            )));
        }
        if scanline_bytes > usize::MAX as u64 {
            return Err(ImageError::Limits(LimitError::from_kind(
                LimitErrorKind::InsufficientMemory,
            )));
        }

        if x == 0 && width == u64::from(dimensions.0) && row_pitch == row_bytes {
            // Full-width rows with matching pitch form one contiguous byte
            // range; read it in a single pass.
            let start = x * bytes_per_pixel + y * row_bytes;
            let end = (x + width) * bytes_per_pixel + (y + height - 1) * row_bytes;
            read_image_range(start, end, buf)?;
        } else {
            // Otherwise read one image row per output chunk of `row_pitch`
            // bytes.
            for (output_slice, row) in buf.chunks_mut(row_pitch as usize).zip(y..(y + height)) {
                let start = x * bytes_per_pixel + row * row_bytes;
                let end = (x + width) * bytes_per_pixel + row * row_bytes;
                read_image_range(start, end, output_slice)?;
            }
        }
    }

    // Seek back to the start
    Ok(seek_scanline(decoder, 0)?)
}
/// Reads all of the bytes of a decoder into a Vec<T>. No particular alignment
/// of the output buffer is guaranteed.
///
/// Returns `ImageError::Limits` when the decoder's byte count does not fit in
/// `usize` or exceeds `isize::MAX` (the largest single Rust allocation).
///
/// Panics if there isn't enough memory to decode the image.
pub(crate) fn decoder_to_vec<T>(decoder: impl ImageDecoder) -> ImageResult<Vec<T>>
where
    T: crate::traits::Primitive + bytemuck::Pod,
{
    // Single `match` instead of the `is_err()` + double-`unwrap()` pattern;
    // behavior is unchanged.
    let total_bytes = match usize::try_from(decoder.total_bytes()) {
        Ok(n) if n <= isize::MAX as usize => n,
        _ => {
            return Err(ImageError::Limits(LimitError::from_kind(
                LimitErrorKind::InsufficientMemory,
            )))
        }
    };

    // Zero-initialized buffer of `T`s covering `total_bytes` bytes, filled by
    // reinterpreting it as a byte slice for the decoder.
    let mut buf = vec![num_traits::Zero::zero(); total_bytes / size_of::<T>()];
    decoder.read_image(bytemuck::cast_slice_mut(buf.as_mut_slice()))?;
    Ok(buf)
}
#[cfg(test)]
mod tests {
    use crate::ColorType;
    use std::io;

    use super::{load_rect, ImageDecoder, ImageResult};

    // Exercises load_rect over a mock 5x5 L8 image for every scanline size
    // from 1 byte up to larger than the whole image, covering the contiguous
    // fast path, partial-scanline copies, and the tmp-scanline cache.
    #[test]
    fn test_load_rect() {
        struct MockDecoder {
            scanline_number: u64,
            scanline_bytes: u64,
        }
        impl ImageDecoder for MockDecoder {
            fn dimensions(&self) -> (u32, u32) {
                (5, 5)
            }
            fn color_type(&self) -> ColorType {
                ColorType::L8
            }
            fn read_image(self, _buf: &mut [u8]) -> ImageResult<()> {
                unimplemented!()
            }
            fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
                (*self).read_image(buf)
            }
        }

        // 5x5 one-byte pixels with sequential values 0..=24.
        const DATA: [u8; 25] = [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24,
        ];

        fn seek_scanline(m: &mut MockDecoder, n: u64) -> io::Result<()> {
            m.scanline_number = n;
            Ok(())
        }
        fn read_scanline(m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<()> {
            let bytes_read = m.scanline_number * m.scanline_bytes;

            if bytes_read >= 25 {
                return Ok(());
            }

            let len = m.scanline_bytes.min(25 - bytes_read);
            buf[..(len as usize)].copy_from_slice(&DATA[(bytes_read as usize)..][..(len as usize)]);
            m.scanline_number += 1;
            Ok(())
        }

        for scanline_bytes in 1..30 {
            // Full image: output must equal DATA and not overrun byte 25.
            let mut output = [0u8; 26];
            load_rect(
                0,
                0,
                5,
                5,
                &mut output,
                5,
                &mut MockDecoder {
                    scanline_number: 0,
                    scanline_bytes,
                },
                scanline_bytes as usize,
                seek_scanline,
                read_scanline,
            )
            .unwrap();
            assert_eq!(output[0..25], DATA);
            assert_eq!(output[25], 0);

            // Single pixel at (3, 2).
            output = [0u8; 26];
            load_rect(
                3,
                2,
                1,
                1,
                &mut output,
                1,
                &mut MockDecoder {
                    scanline_number: 0,
                    scanline_bytes,
                },
                scanline_bytes as usize,
                seek_scanline,
                read_scanline,
            )
            .unwrap();
            assert_eq!(output[0..2], [13, 0]);

            // 2x2 region at (3, 2).
            output = [0u8; 26];
            load_rect(
                3,
                2,
                2,
                2,
                &mut output,
                2,
                &mut MockDecoder {
                    scanline_number: 0,
                    scanline_bytes,
                },
                scanline_bytes as usize,
                seek_scanline,
                read_scanline,
            )
            .unwrap();
            assert_eq!(output[0..5], [13, 14, 18, 19, 0]);

            // 2x4 region at (1, 1).
            output = [0u8; 26];
            load_rect(
                1,
                1,
                2,
                4,
                &mut output,
                2,
                &mut MockDecoder {
                    scanline_number: 0,
                    scanline_bytes,
                },
                scanline_bytes as usize,
                seek_scanline,
                read_scanline,
            )
            .unwrap();
            assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);
        }
    }

    // When the whole image is a single scanline, load_rect must seek exactly
    // once (the final rewind to scanline 0) thanks to the tmp-scanline cache.
    #[test]
    fn test_load_rect_single_scanline() {
        const DATA: [u8; 25] = [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
            24,
        ];

        struct MockDecoder;
        impl ImageDecoder for MockDecoder {
            fn dimensions(&self) -> (u32, u32) {
                (5, 5)
            }
            fn color_type(&self) -> ColorType {
                ColorType::L8
            }
            fn read_image(self, _buf: &mut [u8]) -> ImageResult<()> {
                unimplemented!()
            }
            fn read_image_boxed(self: Box<Self>, buf: &mut [u8]) -> ImageResult<()> {
                (*self).read_image(buf)
            }
        }

        // Ensure that seek scanline is called only once.
        let mut seeks = 0;
        let seek_scanline = |_d: &mut MockDecoder, n: u64| -> io::Result<()> {
            seeks += 1;
            assert_eq!(n, 0);
            assert_eq!(seeks, 1);
            Ok(())
        };

        fn read_scanline(_m: &mut MockDecoder, buf: &mut [u8]) -> io::Result<()> {
            buf.copy_from_slice(&DATA);
            Ok(())
        }

        let mut output = [0; 26];
        load_rect(
            1,
            1,
            2,
            4,
            &mut output,
            2,
            &mut MockDecoder,
            DATA.len(),
            seek_scanline,
            read_scanline,
        )
        .unwrap();
        assert_eq!(output[0..9], [6, 7, 11, 12, 16, 17, 21, 22, 0]);
    }
}

365
vendor/image/src/io/image_reader_type.rs vendored Normal file
View File

@@ -0,0 +1,365 @@
use std::ffi::OsString;
use std::fs::File;
use std::io::{self, BufRead, BufReader, Cursor, Read, Seek, SeekFrom};
use std::iter;
use std::path::Path;
use crate::error::{ImageFormatHint, ImageResult, UnsupportedError, UnsupportedErrorKind};
use crate::hooks::{GenericReader, DECODING_HOOKS, GUESS_FORMAT_HOOKS};
use crate::io::limits::Limits;
use crate::{DynamicImage, ImageDecoder, ImageError, ImageFormat};
use super::free_functions;
/// How the reader's format was specified.
#[derive(Clone)]
enum Format {
    /// A format natively known to this crate.
    BuiltIn(ImageFormat),
    /// A raw file extension, resolved through the registered decoding hooks
    /// (falling back to `ImageFormat::from_extension`) when the decoder is
    /// constructed.
    Extension(OsString),
}
/// A multi-format image reader.
///
/// Wraps an input reader to facilitate automatic detection of an image's format, appropriate
/// decoding method, and dispatches into the set of supported [`ImageDecoder`] implementations.
///
/// ## Usage
///
/// Opening a file, deducing the format based on the file path automatically, and trying to decode
/// the image contained can be performed by constructing the reader and immediately consuming it.
///
/// ```no_run
/// # use image::ImageError;
/// # use image::ImageReader;
/// # fn main() -> Result<(), ImageError> {
/// let image = ImageReader::open("path/to/image.png")?
/// .decode()?;
/// # Ok(()) }
/// ```
///
/// It is also possible to make a guess based on the content. This is especially handy if the
/// source is some blob in memory and you have constructed the reader in another way. Here is an
/// example with a `pnm` black-and-white subformat that encodes its pixel matrix with ascii values.
///
/// ```
/// # use image::ImageError;
/// # use image::ImageReader;
/// # fn main() -> Result<(), ImageError> {
/// use std::io::Cursor;
/// use image::ImageFormat;
///
/// let raw_data = b"P1 2 2\n\
/// 0 1\n\
/// 1 0\n";
///
/// let mut reader = ImageReader::new(Cursor::new(raw_data))
/// .with_guessed_format()
/// .expect("Cursor io never fails");
/// assert_eq!(reader.format(), Some(ImageFormat::Pnm));
///
/// # #[cfg(feature = "pnm")]
/// let image = reader.decode()?;
/// # Ok(()) }
/// ```
///
/// As a final fallback or if only a specific format must be used, the reader always allows manual
/// specification of the supposed image format with [`set_format`].
///
/// [`set_format`]: #method.set_format
/// [`ImageDecoder`]: ../trait.ImageDecoder.html
pub struct ImageReader<R: Read + Seek> {
    /// The reader. Should be buffered.
    inner: R,
    /// The format, if one has been set or deduced from the path or content.
    format: Option<Format>,
    /// Decoding limits applied when the decoder is constructed.
    limits: Limits,
}
impl<'a, R: 'a + BufRead + Seek> ImageReader<R> {
/// Create a new image reader without a preset format.
///
/// Assumes the reader is already buffered. For optimal performance,
/// consider wrapping the reader with a `BufReader::new()`.
///
/// It is possible to guess the format based on the content of the read object with
/// [`with_guessed_format`], or to set the format directly with [`set_format`].
///
/// [`with_guessed_format`]: #method.with_guessed_format
/// [`set_format`]: method.set_format
pub fn new(buffered_reader: R) -> Self {
ImageReader {
inner: buffered_reader,
format: None,
limits: Limits::default(),
}
}
/// Construct a reader with specified format.
///
/// Assumes the reader is already buffered. For optimal performance,
/// consider wrapping the reader with a `BufReader::new()`.
pub fn with_format(buffered_reader: R, format: ImageFormat) -> Self {
ImageReader {
inner: buffered_reader,
format: Some(Format::BuiltIn(format)),
limits: Limits::default(),
}
}
/// Get the currently determined format.
pub fn format(&self) -> Option<ImageFormat> {
match self.format {
Some(Format::BuiltIn(ref format)) => Some(*format),
Some(Format::Extension(ref ext)) => ImageFormat::from_extension(ext),
None => None,
}
}
/// Supply the format as which to interpret the read image.
pub fn set_format(&mut self, format: ImageFormat) {
self.format = Some(Format::BuiltIn(format));
}
/// Remove the current information on the image format.
///
/// Note that many operations require format information to be present and will return e.g. an
/// `ImageError::Unsupported` when the image format has not been set.
pub fn clear_format(&mut self) {
self.format = None;
}
/// Disable all decoding limits.
pub fn no_limits(&mut self) {
self.limits = Limits::no_limits();
}
/// Set a custom set of decoding limits.
pub fn limits(&mut self, limits: Limits) {
self.limits = limits;
}
/// Unwrap the reader.
pub fn into_inner(self) -> R {
self.inner
}
/// Makes a decoder.
///
/// For all formats except PNG, the limits are ignored and can be set with
/// `ImageDecoder::set_limits` after calling this function. PNG is handled specially because that
/// decoder has a different API which does not allow setting limits after construction.
fn make_decoder(
format: Format,
reader: R,
limits_for_png: Limits,
) -> ImageResult<Box<dyn ImageDecoder + 'a>> {
#[allow(unused)]
use crate::codecs::*;
let format = match format {
Format::BuiltIn(format) => format,
Format::Extension(ext) => {
{
let hooks = DECODING_HOOKS.read().unwrap();
if let Some(hooks) = hooks.as_ref() {
if let Some(hook) = hooks.get(&ext) {
return hook(GenericReader(BufReader::new(Box::new(reader))));
}
}
}
ImageFormat::from_extension(&ext).ok_or(ImageError::Unsupported(
ImageFormatHint::PathExtension(ext.into()).into(),
))?
}
};
#[allow(unreachable_patterns)]
// Default is unreachable if all features are supported.
Ok(match format {
#[cfg(feature = "avif-native")]
ImageFormat::Avif => Box::new(avif::AvifDecoder::new(reader)?),
#[cfg(feature = "png")]
ImageFormat::Png => Box::new(png::PngDecoder::with_limits(reader, limits_for_png)?),
#[cfg(feature = "gif")]
ImageFormat::Gif => Box::new(gif::GifDecoder::new(reader)?),
#[cfg(feature = "jpeg")]
ImageFormat::Jpeg => Box::new(jpeg::JpegDecoder::new(reader)?),
#[cfg(feature = "webp")]
ImageFormat::WebP => Box::new(webp::WebPDecoder::new(reader)?),
#[cfg(feature = "tiff")]
ImageFormat::Tiff => Box::new(tiff::TiffDecoder::new(reader)?),
#[cfg(feature = "tga")]
ImageFormat::Tga => Box::new(tga::TgaDecoder::new(reader)?),
#[cfg(feature = "dds")]
ImageFormat::Dds => Box::new(dds::DdsDecoder::new(reader)?),
#[cfg(feature = "bmp")]
ImageFormat::Bmp => Box::new(bmp::BmpDecoder::new(reader)?),
#[cfg(feature = "ico")]
ImageFormat::Ico => Box::new(ico::IcoDecoder::new(reader)?),
#[cfg(feature = "hdr")]
ImageFormat::Hdr => Box::new(hdr::HdrDecoder::new(reader)?),
#[cfg(feature = "exr")]
ImageFormat::OpenExr => Box::new(openexr::OpenExrDecoder::new(reader)?),
#[cfg(feature = "pnm")]
ImageFormat::Pnm => Box::new(pnm::PnmDecoder::new(reader)?),
#[cfg(feature = "ff")]
ImageFormat::Farbfeld => Box::new(farbfeld::FarbfeldDecoder::new(reader)?),
#[cfg(feature = "qoi")]
ImageFormat::Qoi => Box::new(qoi::QoiDecoder::new(reader)?),
#[cfg(feature = "pcx")]
ImageFormat::Pcx => Box::new(pcx::PCXDecoder::new(reader)?),
format => {
return Err(ImageError::Unsupported(
ImageFormatHint::Exact(format).into(),
));
}
})
}
/// Convert the reader into a decoder.
pub fn into_decoder(mut self) -> ImageResult<impl ImageDecoder + 'a> {
let mut decoder =
Self::make_decoder(self.require_format()?, self.inner, self.limits.clone())?;
decoder.set_limits(self.limits)?;
Ok(decoder)
}
/// Make a format guess based on the content, replacing it on success.
///
/// Returns `Ok` with the guess if no io error occurs. Additionally, replaces the current
/// format if the guess was successful. If the guess was unable to determine a format then
/// the current format of the reader is unchanged.
///
/// Returns an error if the underlying reader fails. The format is unchanged. The error is a
/// `std::io::Error` and not `ImageError` since the only error case is an error when the
/// underlying reader seeks.
///
/// When an error occurs, the reader may not have been properly reset and it is potentially
/// hazardous to continue with more io.
///
/// ## Usage
///
/// This supplements the path based type deduction from [`ImageReader::open()`] with content based deduction.
/// This is more common in Linux and UNIX operating systems and also helpful if the path can
/// not be directly controlled.
///
/// ```no_run
/// # use image::ImageError;
/// # use image::ImageReader;
/// # fn main() -> Result<(), ImageError> {
/// let image = ImageReader::open("image.unknown")?
/// .with_guessed_format()?
/// .decode()?;
/// # Ok(()) }
/// ```
pub fn with_guessed_format(mut self) -> io::Result<Self> {
let format = self.guess_format()?;
// Replace format if found, keep current state if not.
self.format = format.or(self.format);
Ok(self)
}
fn guess_format(&mut self) -> io::Result<Option<Format>> {
let mut start = [0; 16];
// Save current offset, read start, restore offset.
let cur = self.inner.stream_position()?;
let len = io::copy(
// Accept shorter files but read at most 16 bytes.
&mut self.inner.by_ref().take(16),
&mut Cursor::new(&mut start[..]),
)?;
self.inner.seek(SeekFrom::Start(cur))?;
let hooks = GUESS_FORMAT_HOOKS.read().unwrap();
for &(signature, mask, ref extension) in &*hooks {
if mask.is_empty() {
if start.starts_with(signature) {
return Ok(Some(Format::Extension(extension.clone())));
}
} else if start.len() >= signature.len()
&& start
.iter()
.zip(signature.iter())
.zip(mask.iter().chain(iter::repeat(&0xFF)))
.all(|((&byte, &sig), &mask)| byte & mask == sig)
{
return Ok(Some(Format::Extension(extension.clone())));
}
}
if let Some(format) = free_functions::guess_format_impl(&start[..len as usize]) {
return Ok(Some(Format::BuiltIn(format)));
}
Ok(None)
}
/// Read the image dimensions.
///
/// Uses the current format to construct the correct reader for the format.
///
/// If no format was determined, returns an `ImageError::Unsupported`.
pub fn into_dimensions(self) -> ImageResult<(u32, u32)> {
    let decoder = self.into_decoder()?;
    Ok(decoder.dimensions())
}
/// Read the image (replaces `load`).
///
/// Uses the current format to construct the correct reader for the format.
///
/// If no format was determined, returns an `ImageError::Unsupported`.
pub fn decode(mut self) -> ImageResult<DynamicImage> {
    let format = self.require_format()?;

    let mut remaining_limits = self.limits;
    let mut decoder = Self::make_decoder(format, self.inner, remaining_limits.clone())?;

    // Check that we do not allocate a bigger buffer than we are allowed to:
    // account for the full output image before decoding starts.
    // FIXME: should this rather go in `DynamicImage::from_decoder` somehow?
    remaining_limits.reserve(decoder.total_bytes())?;
    decoder.set_limits(remaining_limits)?;

    DynamicImage::from_decoder(decoder)
}
/// Returns the currently selected format, or `ImageError::Unsupported` when
/// none has been determined yet.
fn require_format(&mut self) -> ImageResult<Format> {
    match self.format.clone() {
        Some(format) => Ok(format),
        None => Err(ImageError::Unsupported(
            UnsupportedError::from_format_and_kind(
                ImageFormatHint::Unknown,
                UnsupportedErrorKind::Format(ImageFormatHint::Unknown),
            ),
        )),
    }
}
}
impl ImageReader<BufReader<File>> {
    /// Open a file to read, format will be guessed from path.
    ///
    /// This will not attempt any io operation on the opened file.
    ///
    /// If you want to inspect the content for a better guess on the format, which does not depend
    /// on file extensions, follow this call with a call to [`with_guessed_format`].
    ///
    /// [`with_guessed_format`]: #method.with_guessed_format
    pub fn open<P>(path: P) -> io::Result<Self>
    where
        P: AsRef<Path>,
    {
        Self::open_impl(path.as_ref())
    }

    /// Non-generic implementation of [`Self::open`].
    fn open_impl(path: &Path) -> io::Result<Self> {
        // A non-empty file extension becomes the initial format guess;
        // content-based detection may refine it later.
        let format = match path.extension() {
            Some(ext) if !ext.is_empty() => Some(Format::Extension(ext.to_owned())),
            _ => None,
        };
        let inner = BufReader::new(File::open(path)?);
        Ok(ImageReader {
            inner,
            format,
            limits: Limits::default(),
        })
    }
}

172
vendor/image/src/io/limits.rs vendored Normal file
View File

@@ -0,0 +1,172 @@
use crate::{error, ColorType, ImageError, ImageResult};
/// Set of supported strict limits for a decoder.
///
/// Currently empty: no strict limit requires explicit decoder cooperation
/// yet. Fields may be added in future releases (hence `#[non_exhaustive]`).
#[derive(Clone, Debug, Default, Eq, PartialEq, Hash)]
#[allow(missing_copy_implementations)]
#[non_exhaustive]
pub struct LimitSupport {}
/// Resource limits for decoding.
///
/// Limits can be either *strict* or *non-strict*. Non-strict limits are best-effort
/// limits where the library does not guarantee that the limit will not be exceeded. Do note
/// that it is still considered a bug if a non-strict limit is exceeded.
/// Some of the underlying decoders do not support such limits, so one cannot
/// rely on these limits being supported. For strict limits, the library makes a stronger
/// guarantee that the limit will not be exceeded. Exceeding a strict limit is considered
/// a critical bug. If a decoder cannot guarantee that it will uphold a strict limit, it
/// *must* fail with [`error::LimitErrorKind::Unsupported`].
///
/// The only currently supported strict limits are the `max_image_width` and `max_image_height`
/// limits, but more will be added in the future. [`LimitSupport`] will default to support
/// being false, and decoders should enable support for the limits they support in
/// [`ImageDecoder::set_limits`].
///
/// The limit check should only ever fail if a limit will be exceeded or an unsupported strict
/// limit is used.
///
/// [`LimitSupport`]: ./struct.LimitSupport.html
/// [`ImageDecoder::set_limits`]: ../trait.ImageDecoder.html#method.set_limits
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[allow(missing_copy_implementations)]
#[non_exhaustive]
pub struct Limits {
    /// The maximum allowed image width. This limit is strict. The default is no limit.
    pub max_image_width: Option<u32>,
    /// The maximum allowed image height. This limit is strict. The default is no limit.
    pub max_image_height: Option<u32>,
    /// The maximum allowed sum of allocations allocated by the decoder at any one time excluding
    /// allocator overhead. This limit is non-strict by default and some decoders may ignore it.
    /// The bytes required to store the output image count towards this value. The default is
    /// 512MiB.
    pub max_alloc: Option<u64>,
}
/// Add some reasonable limits.
///
/// **Note**: This is not equivalent to _not_ adding limits. This may be changed in future major
/// version increases.
impl Default for Limits {
fn default() -> Limits {
Limits {
max_image_width: None,
max_image_height: None,
max_alloc: Some(512 * 1024 * 1024),
}
}
}
impl Limits {
/// Disable all limits.
#[must_use]
pub fn no_limits() -> Limits {
Limits {
max_image_width: None,
max_image_height: None,
max_alloc: None,
}
}
/// This function checks that all currently set strict limits are supported.
pub fn check_support(&self, _supported: &LimitSupport) -> ImageResult<()> {
Ok(())
}
/// This function checks the `max_image_width` and `max_image_height` limits given
/// the image width and height.
pub fn check_dimensions(&self, width: u32, height: u32) -> ImageResult<()> {
if let Some(max_width) = self.max_image_width {
if width > max_width {
return Err(ImageError::Limits(error::LimitError::from_kind(
error::LimitErrorKind::DimensionError,
)));
}
}
if let Some(max_height) = self.max_image_height {
if height > max_height {
return Err(ImageError::Limits(error::LimitError::from_kind(
error::LimitErrorKind::DimensionError,
)));
}
}
Ok(())
}
/// This function checks that the current limit allows for reserving the set amount
/// of bytes, it then reduces the limit accordingly.
pub fn reserve(&mut self, amount: u64) -> ImageResult<()> {
if let Some(max_alloc) = self.max_alloc.as_mut() {
if *max_alloc < amount {
return Err(ImageError::Limits(error::LimitError::from_kind(
error::LimitErrorKind::InsufficientMemory,
)));
}
*max_alloc -= amount;
}
Ok(())
}
/// This function acts identically to [`reserve`], but takes a `usize` for convenience.
///
/// [`reserve`]: #method.reserve
pub fn reserve_usize(&mut self, amount: usize) -> ImageResult<()> {
match u64::try_from(amount) {
Ok(n) => self.reserve(n),
Err(_) if self.max_alloc.is_some() => Err(ImageError::Limits(
error::LimitError::from_kind(error::LimitErrorKind::InsufficientMemory),
)),
Err(_) => {
// Out of bounds, but we weren't asked to consider any limit.
Ok(())
}
}
}
/// This function acts identically to [`reserve`], but accepts the width, height and color type
/// used to create an [`ImageBuffer`] and does all the math for you.
///
/// [`ImageBuffer`]: crate::ImageBuffer
/// [`reserve`]: #method.reserve
pub fn reserve_buffer(
&mut self,
width: u32,
height: u32,
color_type: ColorType,
) -> ImageResult<()> {
self.check_dimensions(width, height)?;
let in_memory_size = u64::from(width)
.saturating_mul(u64::from(height))
.saturating_mul(color_type.bytes_per_pixel().into());
self.reserve(in_memory_size)?;
Ok(())
}
/// This function increases the `max_alloc` limit with amount. Should only be used
/// together with [`reserve`].
///
/// [`reserve`]: #method.reserve
pub fn free(&mut self, amount: u64) {
if let Some(max_alloc) = self.max_alloc.as_mut() {
*max_alloc = max_alloc.saturating_add(amount);
}
}
/// This function acts identically to [`free`], but takes a `usize` for convenience.
///
/// [`free`]: #method.free
pub fn free_usize(&mut self, amount: usize) {
match u64::try_from(amount) {
Ok(n) => self.free(n),
Err(_) if self.max_alloc.is_some() => {
panic!("max_alloc is set, we should have exited earlier when the reserve failed");
}
Err(_) => {
// Out of bounds, but we weren't asked to consider any limit.
}
}
}
}

317
vendor/image/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,317 @@
//! # Overview
//!
//! This crate provides native rust implementations of image encoding and decoding as well as some
//! basic image manipulation functions. Additional documentation can currently also be found in the
//! [README.md file which is most easily viewed on
//! github](https://github.com/image-rs/image/blob/main/README.md).
//!
//! There are two core problems for which this library provides solutions: a unified interface for image
//! encodings and simple generic buffers for their content. It's possible to use either feature
//! without the other. The focus is on a small and stable set of common operations that can be
//! supplemented by other specialized crates. The library also prefers safe solutions with few
//! dependencies.
//!
//! # High level API
//!
//! Load images using [`ImageReader`](crate::ImageReader):
//!
//! ```rust,no_run
//! use std::io::Cursor;
//! use image::ImageReader;
//! # fn main() -> Result<(), image::ImageError> {
//! # let bytes = vec![0u8];
//!
//! let img = ImageReader::open("myimage.png")?.decode()?;
//! let img2 = ImageReader::new(Cursor::new(bytes)).with_guessed_format()?.decode()?;
//! # Ok(())
//! # }
//! ```
//!
//! And save them using [`save`] or [`write_to`] methods:
//!
//! ```rust,no_run
//! # use std::io::{Write, Cursor};
//! # use image::{DynamicImage, ImageFormat};
//! # #[cfg(feature = "png")]
//! # fn main() -> Result<(), image::ImageError> {
//! # let img: DynamicImage = unimplemented!();
//! # let img2: DynamicImage = unimplemented!();
//! img.save("empty.jpg")?;
//!
//! let mut bytes: Vec<u8> = Vec::new();
//! img2.write_to(&mut Cursor::new(&mut bytes), image::ImageFormat::Png)?;
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "png"))] fn main() {}
//! ```
//!
//! With default features, the crate includes support for [many common image formats](codecs/index.html#supported-formats).
//!
//! [`save`]: enum.DynamicImage.html#method.save
//! [`write_to`]: enum.DynamicImage.html#method.write_to
//! [`ImageReader`]: struct.ImageReader.html
//!
//! # Image buffers
//!
//! The two main types for storing images:
//! * [`ImageBuffer`] which holds statically typed image contents.
//! * [`DynamicImage`] which is an enum over the supported `ImageBuffer` formats
//! and supports conversions between them.
//!
//! As well as a few more specialized options:
//! * [`GenericImage`] trait for a mutable image buffer.
//! * [`GenericImageView`] trait for read only references to a `GenericImage`.
//! * [`flat`] module containing types for interoperability with generic channel
//! matrices and foreign interfaces.
//!
//! [`GenericImageView`]: trait.GenericImageView.html
//! [`GenericImage`]: trait.GenericImage.html
//! [`ImageBuffer`]: struct.ImageBuffer.html
//! [`DynamicImage`]: enum.DynamicImage.html
//! [`flat`]: flat/index.html
//!
//! # Low level encoding/decoding API
//!
//! Implementations of [`ImageEncoder`] provide low-level control over encoding:
//! ```rust,no_run
//! # use std::io::Write;
//! # use image::DynamicImage;
//! # use image::ImageEncoder;
//! # #[cfg(feature = "jpeg")]
//! # fn main() -> Result<(), image::ImageError> {
//! # use image::codecs::jpeg::JpegEncoder;
//! # let img: DynamicImage = unimplemented!();
//! # let writer: Box<dyn Write> = unimplemented!();
//! let encoder = JpegEncoder::new_with_quality(&mut writer, 95);
//! img.write_with_encoder(encoder)?;
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "jpeg"))] fn main() {}
//! ```
//! While [`ImageDecoder`] and [`ImageDecoderRect`] give access to more advanced decoding options:
//!
//! ```rust,no_run
//! # use std::io::{BufReader, Cursor};
//! # use image::DynamicImage;
//! # use image::ImageDecoder;
//! # #[cfg(feature = "png")]
//! # fn main() -> Result<(), image::ImageError> {
//! # use image::codecs::png::PngDecoder;
//! # let img: DynamicImage = unimplemented!();
//! # let reader: BufReader<Cursor<&[u8]>> = unimplemented!();
//! let decoder = PngDecoder::new(&mut reader)?;
//! let icc = decoder.icc_profile();
//! let img = DynamicImage::from_decoder(decoder)?;
//! # Ok(())
//! # }
//! # #[cfg(not(feature = "png"))] fn main() {}
//! ```
//!
//! [`DynamicImage::from_decoder`]: enum.DynamicImage.html#method.from_decoder
//! [`ImageDecoderRect`]: trait.ImageDecoderRect.html
//! [`ImageDecoder`]: trait.ImageDecoder.html
//! [`ImageEncoder`]: trait.ImageEncoder.html
#![warn(missing_docs)]
#![warn(unused_qualifications)]
#![deny(unreachable_pub)]
#![deny(deprecated)]
#![deny(missing_copy_implementations)]
#![cfg_attr(all(test, feature = "benchmarks"), feature(test))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
// We've temporarily disabled PCX support for 0.25.5 release
// by removing the corresponding feature.
// We want to ship bug fixes without committing to PCX support.
//
// Cargo shows warnings about code depending on a nonexistent feature
// even to people using the crate as a dependency,
// so we have to suppress those warnings.
#![allow(unexpected_cfgs)]
#[cfg(all(test, feature = "benchmarks"))]
extern crate test;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
pub use crate::color::{ColorType, ExtendedColorType};
pub use crate::color::{Luma, LumaA, Rgb, Rgba};
pub use crate::error::{ImageError, ImageResult};
pub use crate::images::generic_image::{GenericImage, GenericImageView, Pixels};
pub use crate::images::sub_image::SubImage;
pub use crate::images::buffer::{
ConvertColorOptions,
GrayAlphaImage,
GrayImage,
// Image types
ImageBuffer,
Rgb32FImage,
RgbImage,
Rgba32FImage,
RgbaImage,
};
pub use crate::flat::FlatSamples;
// Traits
pub use crate::traits::{EncodableLayout, Pixel, PixelWithColorType, Primitive};
// Opening and loading images
pub use crate::images::dynimage::{
image_dimensions, load_from_memory, load_from_memory_with_format, open,
write_buffer_with_format,
};
pub use crate::io::free_functions::{guess_format, load, save_buffer, save_buffer_with_format};
pub use crate::io::{
decoder::{AnimationDecoder, ImageDecoder, ImageDecoderRect},
encoder::ImageEncoder,
format::ImageFormat,
image_reader_type::ImageReader,
limits::{LimitSupport, Limits},
};
pub use crate::images::dynimage::DynamicImage;
pub use crate::animation::{Delay, Frame, Frames};
// More detailed error type
pub mod error;
/// Iterators and other auxiliary structure for the `ImageBuffer` type.
pub mod buffer {
    // Only those not exported at the top-level
    pub use crate::images::buffer::{
        ConvertBuffer, EnumeratePixels, EnumeratePixelsMut, EnumerateRows, EnumerateRowsMut,
        Pixels, PixelsMut, Rows, RowsMut,
    };

    // Parallel iteration helpers are only available with the `rayon` feature.
    #[cfg(feature = "rayon")]
    pub use crate::images::buffer_par::*;
}
// Math utils
pub mod math;
// Image processing functions
pub mod imageops;
// Buffer representations for ffi.
pub use crate::images::flat;
/// Encoding and decoding for various image file formats.
///
/// # Supported formats
///
/// | Feature | Format | Notes
/// | ------- | -------- | -----
/// | `avif` | AVIF | Decoding requires the `avif-native` feature, uses the libdav1d C library.
/// | `bmp` | BMP |
/// | `dds` | DDS | Only decoding is supported.
/// | `exr` | OpenEXR |
/// | `ff` | Farbfeld |
/// | `gif` | GIF |
/// | `hdr` | HDR |
/// | `ico` | ICO |
/// | `jpeg` | JPEG |
/// | `png` | PNG |
/// | `pnm` | PNM |
/// | `qoi` | QOI |
/// | `tga` | TGA |
/// | `tiff` | TIFF |
/// | `webp` | WebP | Only lossless encoding is currently supported.
///
/// ## A note on format specific features
///
/// One of the main goals of `image` is stability, in runtime but also for programmers. This
/// ensures that performance as well as safety fixes reach a majority of its user base with little
/// effort. Re-exporting all details of its dependencies would run counter to this goal as it
/// linked _all_ major version bumps between them and `image`. As such, we are wary of exposing too
/// many details, or configuration options, that are not shared between different image formats.
///
/// Nevertheless, the advantage of precise control is hard to ignore. We will thus consider
/// _wrappers_, not direct re-exports, in either of the following cases:
///
/// 1. A standard specifies that configuration _x_ is required for decoders/encoders and there
///    exists an essentially canonical way to control it.
/// 2. At least two different implementations agree on some (sub-)set of features in practice.
/// 3. A technical argument including measurements of the performance, space benefits, or otherwise
///    objectively quantified benefits can be made, and the added interface is unlikely to require
///    breaking changes.
///
/// Features that fulfill two or more criteria are preferred.
///
/// Re-exports of dependencies that reach version `1` will be discussed when it happens.
pub mod codecs {
    #[cfg(any(feature = "avif", feature = "avif-native"))]
    pub mod avif;
    #[cfg(feature = "bmp")]
    pub mod bmp;
    #[cfg(feature = "dds")]
    pub mod dds;
    #[cfg(feature = "ff")]
    pub mod farbfeld;
    #[cfg(feature = "gif")]
    pub mod gif;
    #[cfg(feature = "hdr")]
    pub mod hdr;
    #[cfg(feature = "ico")]
    pub mod ico;
    #[cfg(feature = "jpeg")]
    pub mod jpeg;
    #[cfg(feature = "exr")]
    pub mod openexr;
    #[cfg(feature = "pcx")]
    pub mod pcx;
    #[cfg(feature = "png")]
    pub mod png;
    #[cfg(feature = "pnm")]
    pub mod pnm;
    #[cfg(feature = "qoi")]
    pub mod qoi;
    #[cfg(feature = "tga")]
    pub mod tga;
    #[cfg(feature = "tiff")]
    pub mod tiff;
    #[cfg(feature = "webp")]
    pub mod webp;

    // Private helper module, gated on `dds` like the DDS codec above.
    #[cfg(feature = "dds")]
    mod dxt;
}
mod animation;
mod color;
pub mod hooks;
mod images;
/// Deprecated io module: the original io module has been renamed to `image_reader`.
/// This is going to be internal.
pub mod io;
pub mod metadata;
//TODO delete this module after a few releases
mod traits;
mod utils;
// Can't use the macro-call itself within the `doc` attribute. So force it to eval it as part of
// the macro invocation.
//
// The inspiration for the macro and implementation is from
// <https://github.com/GuillaumeGomez/doc-comment>
//
// MIT License
//
// Copyright (c) 2018 Guillaume Gomez
macro_rules! insert_as_doc {
    { $content:expr } => {
        // Attach `$content` as documentation to a dummy extern block so that
        // rustdoc renders it and the code examples inside it are compiled
        // and run as doctests.
        #[allow(unused_doc_comments)]
        #[doc = $content] extern "Rust" { }
    }
}
// Provides the README.md as doc, to ensure the example works!
insert_as_doc!(include_str!("../README.md"));

7
vendor/image/src/math/mod.rs vendored Normal file
View File

@@ -0,0 +1,7 @@
//! Mathematical helper functions and types.
mod rect;
mod utils;
pub use self::rect::Rect;
pub(crate) use utils::multiply_accumulate;
pub(super) use utils::resize_dimensions;

13
vendor/image/src/math/rect.rs vendored Normal file
View File

@@ -0,0 +1,13 @@
/// A Rectangle defined by its top left corner, width and height.
///
/// Serializable when the `serde` feature is enabled.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Rect {
    /// The x coordinate of the top left corner.
    pub x: u32,
    /// The y coordinate of the top left corner.
    pub y: u32,
    /// The rectangle's width.
    pub width: u32,
    /// The rectangle's height.
    pub height: u32,
}

167
vendor/image/src/math/utils.rs vendored Normal file
View File

@@ -0,0 +1,167 @@
//! Shared mathematical utility functions.
use std::cmp::max;
use std::ops::{Add, Mul};
use num_traits::MulAdd;
#[cfg(any(
    all(
        any(target_arch = "x86", target_arch = "x86_64"),
        target_feature = "fma"
    ),
    all(target_arch = "aarch64", target_feature = "neon")
))]
#[inline(always)]
/// Uses a fused multiply-add when it is available as a hardware instruction.
///
/// It is important not to call this variant when the FMA target feature is
/// not enabled: Rust would then insert a libc `fmaf`-based software
/// implementation instead. The fusion is used for speed only — the single
/// rounding step does not matter for the results here — so this path is
/// compiled only when FMA availability is certain at compile time.
pub(crate) fn multiply_accumulate<
    T: Copy + Mul<T, Output = T> + Add<T, Output = T> + MulAdd<T, Output = T>,
>(
    acc: T,
    a: T,
    b: T,
) -> T {
    MulAdd::mul_add(a, b, acc)
}
#[inline(always)]
#[cfg(not(any(
    all(
        any(target_arch = "x86", target_arch = "x86_64"),
        target_feature = "fma"
    ),
    all(target_arch = "aarch64", target_feature = "neon")
)))]
/// Portable fallback for `multiply_accumulate`: a plain multiply followed by
/// an add. Used when FMA support cannot be proven at compile time, where a
/// fused operation could fall back to a slower software implementation
/// (see the FMA variant above).
pub(crate) fn multiply_accumulate<
    T: Copy + Mul<T, Output = T> + Add<T, Output = T> + MulAdd<T, Output = T>,
>(
    acc: T,
    a: T,
    b: T,
) -> T {
    acc + a * b
}
/// Calculates the width and height an image should be resized to.
/// This preserves aspect ratio, and based on the `fill` parameter
/// will either fill the dimensions to fit inside the smaller constraint
/// (will overflow the specified bounds on one axis to preserve
/// aspect ratio), or will shrink so that both dimensions are
/// completely contained within the given `width` and `height`,
/// with empty space on one axis.
pub(crate) fn resize_dimensions(
    width: u32,
    height: u32,
    nwidth: u32,
    nheight: u32,
    fill: bool,
) -> (u32, u32) {
    // Per-axis scale factor; `fill` keeps the larger one (covers the target
    // box), otherwise the smaller one (fits inside the target box).
    let wratio = f64::from(nwidth) / f64::from(width);
    let hratio = f64::from(nheight) / f64::from(height);
    let ratio = if fill {
        wratio.max(hratio)
    } else {
        wratio.min(hratio)
    };

    // Round in u64 first so that an axis overflowing u32 can be detected and
    // clamped instead of wrapping. Each axis is at least 1 pixel.
    let scaled_w = max((f64::from(width) * ratio).round() as u64, 1);
    let scaled_h = max((f64::from(height) * ratio).round() as u64, 1);

    if scaled_w > u64::from(u32::MAX) {
        // Width overflows: pin it to u32::MAX and rescale the height.
        let ratio = f64::from(u32::MAX) / f64::from(width);
        (u32::MAX, max((f64::from(height) * ratio).round() as u32, 1))
    } else if scaled_h > u64::from(u32::MAX) {
        // Height overflows: pin it to u32::MAX and rescale the width.
        let ratio = f64::from(u32::MAX) / f64::from(height);
        (max((f64::from(width) * ratio).round() as u32, 1), u32::MAX)
    } else {
        (scaled_w as u32, scaled_h as u32)
    }
}
#[cfg(test)]
mod test {
    // Property: when the height constraint is effectively unbounded, the
    // result width must equal `new_w` and the height must scale exactly.
    quickcheck! {
        fn resize_bounds_correctly_width(old_w: u32, new_w: u32) -> bool {
            if old_w == 0 || new_w == 0 { return true; }
            // In this case, the scaling is limited by scaling of height.
            // We could check that case separately but it does not conform to the same expectation.
            if u64::from(new_w) * 400u64 >= u64::from(old_w) * u64::from(u32::MAX) { return true; }

            let result = super::resize_dimensions(old_w, 400, new_w, u32::MAX, false);
            let exact = (400_f64 * f64::from(new_w) / f64::from(old_w)).round() as u32;
            result.0 == new_w && result.1 == exact.max(1)
        }
    }

    // Mirror of the property above with the roles of width and height swapped.
    quickcheck! {
        fn resize_bounds_correctly_height(old_h: u32, new_h: u32) -> bool {
            if old_h == 0 || new_h == 0 { return true; }
            // In this case, the scaling is limited by scaling of width.
            // We could check that case separately but it does not conform to the same expectation.
            if 400u64 * u64::from(new_h) >= u64::from(old_h) * u64::from(u32::MAX) { return true; }

            let result = super::resize_dimensions(400, old_h, u32::MAX, new_h, false);
            let exact = (400_f64 * f64::from(new_h) / f64::from(old_h)).round() as u32;
            result.1 == new_h && result.0 == exact.max(1)
        }
    }

    // `fill = true` must cover the requested box, overflowing exactly one axis.
    #[test]
    fn resize_handles_fill() {
        let result = super::resize_dimensions(100, 200, 200, 500, true);
        assert!(result.0 == 250);
        assert!(result.1 == 500);

        let result = super::resize_dimensions(200, 100, 500, 200, true);
        assert!(result.0 == 500);
        assert!(result.1 == 250);
    }

    // Extreme aspect ratios must never collapse a dimension to zero.
    #[test]
    fn resize_never_rounds_to_zero() {
        let result = super::resize_dimensions(1, 150, 128, 128, false);
        assert!(result.0 > 0);
        assert!(result.1 > 0);
    }

    // Results exceeding u32::MAX must be clamped, not wrapped.
    #[test]
    fn resize_handles_overflow() {
        let result = super::resize_dimensions(100, u32::MAX, 200, u32::MAX, true);
        assert!(result.0 == 100);
        assert!(result.1 == u32::MAX);

        let result = super::resize_dimensions(u32::MAX, 100, u32::MAX, 200, true);
        assert!(result.0 == u32::MAX);
        assert!(result.1 == 100);
    }

    #[test]
    fn resize_rounds() {
        // Only truncation will result in (3840, 2229) and (2160, 3719)
        let result = super::resize_dimensions(4264, 2476, 3840, 2160, true);
        assert_eq!(result, (3840, 2230));

        let result = super::resize_dimensions(2476, 4264, 2160, 3840, false);
        assert_eq!(result, (2160, 3720));
    }

    // Zero-sized inputs or targets degrade gracefully to a 1-pixel axis.
    #[test]
    fn resize_handles_zero() {
        let result = super::resize_dimensions(0, 100, 100, 100, false);
        assert_eq!(result, (1, 100));
        let result = super::resize_dimensions(100, 0, 100, 100, false);
        assert_eq!(result, (100, 1));
        let result = super::resize_dimensions(100, 100, 0, 100, false);
        assert_eq!(result, (1, 1));
        let result = super::resize_dimensions(100, 100, 100, 0, false);
        assert_eq!(result, (1, 1));
    }
}

192
vendor/image/src/metadata.rs vendored Normal file
View File

@@ -0,0 +1,192 @@
//! Types describing image metadata
pub(crate) mod cicp;
use std::io::{Cursor, Read};
use byteorder_lite::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
pub use self::cicp::{
Cicp, CicpColorPrimaries, CicpMatrixCoefficients, CicpTransferCharacteristics, CicpTransform,
CicpVideoFullRangeFlag,
};
/// Describes the transformations to be applied to the image.
/// Compatible with [Exif orientation](https://web.archive.org/web/20200412005226/https://www.impulseadventure.com/photo/exif-orientation.html).
///
/// Orientation is specified in the file's metadata, and is often written by cameras.
///
/// You can apply it to an image via [`DynamicImage::apply_orientation`](crate::DynamicImage::apply_orientation).
///
/// The variants correspond one-to-one to the Exif orientation values 1 through 8;
/// see [`Orientation::from_exif`] for the mapping.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Orientation {
    /// Do not perform any transformations.
    NoTransforms,
    /// Rotate by 90 degrees clockwise.
    Rotate90,
    /// Rotate by 180 degrees. Can be performed in-place.
    Rotate180,
    /// Rotate by 270 degrees clockwise. Equivalent to rotating by 90 degrees counter-clockwise.
    Rotate270,
    /// Flip horizontally. Can be performed in-place.
    FlipHorizontal,
    /// Flip vertically. Can be performed in-place.
    FlipVertical,
    /// Rotate by 90 degrees clockwise and flip horizontally.
    Rotate90FlipH,
    /// Rotate by 270 degrees clockwise and flip horizontally.
    Rotate270FlipH,
}
impl Orientation {
    /// Converts from [Exif orientation](https://web.archive.org/web/20200412005226/https://www.impulseadventure.com/photo/exif-orientation.html)
    #[must_use]
    pub fn from_exif(exif_orientation: u8) -> Option<Self> {
        match exif_orientation {
            1 => Some(Self::NoTransforms),
            2 => Some(Self::FlipHorizontal),
            3 => Some(Self::Rotate180),
            4 => Some(Self::FlipVertical),
            5 => Some(Self::Rotate90FlipH),
            6 => Some(Self::Rotate90),
            7 => Some(Self::Rotate270FlipH),
            8 => Some(Self::Rotate270),
            // Exif only defines the values 1..=8; 0 and 9+ are invalid.
            0 | 9.. => None,
        }
    }

    /// Converts into [Exif orientation](https://web.archive.org/web/20200412005226/https://www.impulseadventure.com/photo/exif-orientation.html)
    #[must_use]
    pub fn to_exif(self) -> u8 {
        match self {
            Self::NoTransforms => 1,
            Self::FlipHorizontal => 2,
            Self::Rotate180 => 3,
            Self::FlipVertical => 4,
            Self::Rotate90FlipH => 5,
            Self::Rotate90 => 6,
            Self::Rotate270FlipH => 7,
            Self::Rotate270 => 8,
        }
    }

    /// Extracts the image orientation from a raw Exif chunk.
    ///
    /// You can obtain the Exif chunk using
    /// [ImageDecoder::exif_metadata](crate::ImageDecoder::exif_metadata).
    ///
    /// It is more convenient to use [ImageDecoder::orientation](crate::ImageDecoder::orientation)
    /// than to invoke this function.
    /// Only use this function if you extract and process the Exif chunk separately.
    #[must_use]
    pub fn from_exif_chunk(chunk: &[u8]) -> Option<Self> {
        Self::from_exif_chunk_inner(chunk).map(|res| res.0)
    }

    /// Extracts the image orientation from a raw Exif chunk and sets the orientation in the Exif chunk to `Orientation::NoTransforms`.
    /// This is useful if you want to apply the orientation yourself, and then encode the image with the rest of the Exif chunk intact.
    ///
    /// If the orientation data is not cleared from the Exif chunk after you apply the orientation data yourself,
    /// the image will end up being rotated once again by any software that correctly handles Exif, leading to an incorrect result.
    ///
    /// If the Exif value is present but invalid, `None` is returned and the Exif chunk is not modified.
    #[must_use]
    pub fn remove_from_exif_chunk(chunk: &mut [u8]) -> Option<Self> {
        if let Some((orientation, offset, endian)) = Self::from_exif_chunk_inner(chunk) {
            // Overwrite the recorded value in place, honoring the chunk's
            // declared byte order. `offset` points at the two value bytes.
            let mut writer = Cursor::new(chunk);
            writer.set_position(offset);
            let no_orientation: u16 = Self::NoTransforms.to_exif().into();
            match endian {
                // Writes into an in-memory slice at a valid offset cannot fail.
                ExifEndian::Big => writer.write_u16::<BigEndian>(no_orientation).unwrap(),
                ExifEndian::Little => writer.write_u16::<LittleEndian>(no_orientation).unwrap(),
            }
            Some(orientation)
        } else {
            None
        }
    }

    /// Returns the orientation, the offset in the Exif chunk where it was found, and Exif chunk endianness
    #[must_use]
    fn from_exif_chunk_inner(chunk: &[u8]) -> Option<(Self, u64, ExifEndian)> {
        let mut reader = Cursor::new(chunk);
        let mut magic = [0; 4];
        reader.read_exact(&mut magic).ok()?;
        // An Exif chunk begins with a TIFF header: "II" plus 42 for
        // little-endian files, "MM" plus 42 for big-endian files.
        match magic {
            [0x49, 0x49, 42, 0] => {
                return Self::locate_orientation_entry::<LittleEndian>(&mut reader)
                    .map(|(orient, offset)| (orient, offset, ExifEndian::Little));
            }
            [0x4d, 0x4d, 0, 42] => {
                return Self::locate_orientation_entry::<BigEndian>(&mut reader)
                    .map(|(orient, offset)| (orient, offset, ExifEndian::Big));
            }
            _ => {}
        }
        None
    }

    /// Extracted into a helper function to be generic over endianness
    fn locate_orientation_entry<B>(reader: &mut Cursor<&[u8]>) -> Option<(Self, u64)>
    where
        B: byteorder_lite::ByteOrder,
    {
        // The TIFF header ends with the offset of the first image file
        // directory (IFD), which holds a u16 entry count followed by
        // fixed-size entries: tag, format, count, and a 4-byte value field.
        let ifd_offset = reader.read_u32::<B>().ok()?;
        reader.set_position(u64::from(ifd_offset));
        let entries = reader.read_u16::<B>().ok()?;
        for _ in 0..entries {
            let tag = reader.read_u16::<B>().ok()?;
            let format = reader.read_u16::<B>().ok()?;
            let count = reader.read_u32::<B>().ok()?;
            let value = reader.read_u16::<B>().ok()?;
            let _padding = reader.read_u16::<B>().ok()?;
            // Tag 0x112 is Orientation; format 3 is the TIFF SHORT (u16)
            // type, and a valid orientation entry holds exactly one value.
            if tag == 0x112 && format == 3 && count == 1 {
                let offset = reader.position() - 4; // we've read 4 bytes (2 * u16) past the start of the value
                let orientation = Self::from_exif(value.min(255) as u8);
                return orientation.map(|orient| (orient, offset));
            }
        }
        // If we reached this point without returning early, there was no orientation
        None
    }
}
/// Byte order of an Exif/TIFF chunk, as declared by its header magic.
#[derive(Debug, Copy, Clone)]
enum ExifEndian {
    Big,
    Little,
}
#[cfg(all(test, feature = "jpeg"))]
mod tests {
    use crate::{codecs::jpeg::JpegDecoder, ImageDecoder as _};
    // Bring `Orientation` (and the parent's `Cursor` import) into scope.
    use super::*;

    // Test image whose Exif data records a FlipHorizontal orientation.
    const TEST_IMAGE: &[u8] = include_bytes!("../tests/images/jpg/portrait_2.jpg");

    #[test]
    fn test_extraction_and_clearing() {
        let reader = Cursor::new(TEST_IMAGE);
        let mut decoder = JpegDecoder::new(reader).expect("Failed to decode test image");
        let mut exif_chunk = decoder
            .exif_metadata()
            .expect("Failed to extract Exif chunk")
            .expect("No Exif chunk found in test image");
        let orientation = Orientation::from_exif_chunk(&exif_chunk)
            .expect("Failed to extract orientation from Exif chunk");
        assert_eq!(orientation, Orientation::FlipHorizontal);
        let orientation = Orientation::remove_from_exif_chunk(&mut exif_chunk)
            .expect("Failed to remove orientation from Exif chunk");
        assert_eq!(orientation, Orientation::FlipHorizontal);
        // Now that the orientation has been cleared, any subsequent extractions should return NoTransforms
        let orientation = Orientation::from_exif_chunk(&exif_chunk)
            .expect("Failed to extract orientation from Exif chunk after clearing it");
        assert_eq!(orientation, Orientation::NoTransforms);
    }
}

1632
vendor/image/src/metadata/cicp.rs vendored Normal file

File diff suppressed because it is too large Load Diff

556
vendor/image/src/traits.rs vendored Normal file
View File

@@ -0,0 +1,556 @@
//! This module provides useful traits that were deprecated in Rust
// Note copied from the stdlib under MIT license
use num_traits::{Bounded, Num, NumCast};
use std::ops::AddAssign;
use crate::color::{Luma, LumaA, Rgb, Rgba};
use crate::ExtendedColorType;
/// Types which are safe to treat as an immutable byte slice in a pixel layout
/// for image encoding.
///
/// Sealed via the `seals::EncodableLayout` supertrait; only the slice types
/// with impls in this module can implement it.
pub trait EncodableLayout: seals::EncodableLayout {
    /// Get the bytes of this value.
    fn as_bytes(&self) -> &[u8];
}
impl EncodableLayout for [u8] {
    fn as_bytes(&self) -> &[u8] {
        // Identity cast for byte slices; bytemuck checks layout compatibility.
        bytemuck::cast_slice(self)
    }
}

impl EncodableLayout for [u16] {
    fn as_bytes(&self) -> &[u8] {
        // Reinterprets the u16 channel data as its in-memory (native-endian) bytes.
        bytemuck::cast_slice(self)
    }
}

impl EncodableLayout for [f32] {
    fn as_bytes(&self) -> &[u8] {
        // Reinterprets the f32 channel data as its in-memory (native-endian) bytes.
        bytemuck::cast_slice(self)
    }
}
/// The type of each channel in a pixel. For example, this can be `u8`, `u16`, `f32`.
// TODO rename to `PixelComponent`? Split up into separate traits? Seal?
pub trait Primitive: Copy + NumCast + Num + PartialOrd<Self> + Clone + Bounded {
    /// The maximum value for this type of primitive within the context of color.
    /// For floats, the maximum is `1.0`, whereas the integer types inherit their usual maximum values.
    const DEFAULT_MAX_VALUE: Self;

    /// The minimum value for this type of primitive within the context of color.
    /// For floats, the minimum is `0.0`, whereas the integer types inherit their usual minimum values.
    const DEFAULT_MIN_VALUE: Self;
}
// Implements `Primitive` for a scalar type, declaring the value range that
// represents black..white in the color context (`$from`..`$to`).
macro_rules! declare_primitive {
    ($base:ty: ($from:expr)..$to:expr) => {
        impl Primitive for $base {
            const DEFAULT_MAX_VALUE: Self = $to;
            const DEFAULT_MIN_VALUE: Self = $from;
        }
    };
}
// Integer types use their full natural range as the color range.
declare_primitive!(usize: (0)..Self::MAX);
declare_primitive!(u8: (0)..Self::MAX);
declare_primitive!(u16: (0)..Self::MAX);
declare_primitive!(u32: (0)..Self::MAX);
declare_primitive!(u64: (0)..Self::MAX);
declare_primitive!(isize: (Self::MIN)..Self::MAX);
declare_primitive!(i8: (Self::MIN)..Self::MAX);
declare_primitive!(i16: (Self::MIN)..Self::MAX);
declare_primitive!(i32: (Self::MIN)..Self::MAX);
declare_primitive!(i64: (Self::MIN)..Self::MAX);
// Floats are normalized: color values live in [0.0, 1.0].
declare_primitive!(f32: (0.0)..1.0);
declare_primitive!(f64: (0.0)..1.0);
/// An `Enlargable::Larger` value should be enough to calculate
/// the sum (average) of a few hundred or thousand Enlargeable values.
pub trait Enlargeable: Sized + Bounded + NumCast {
    /// A wider companion type that can accumulate many `Self` values.
    type Larger: Copy + NumCast + Num + PartialOrd<Self::Larger> + Clone + Bounded + AddAssign;

    /// Narrow `n` back to `Self`, saturating at `Self`'s value range.
    fn clamp_from(n: Self::Larger) -> Self {
        let upper = Self::max_value().to_larger();
        let lower = Self::min_value().to_larger();

        if n > upper {
            Self::max_value()
        } else if n < lower {
            Self::min_value()
        } else {
            // `n` is within range here, so the narrowing cast cannot fail.
            NumCast::from(n).unwrap()
        }
    }

    /// Losslessly widen `self` into the `Larger` type.
    fn to_larger(self) -> Self::Larger {
        NumCast::from(self).unwrap()
    }
}
// Each scalar is paired with a type wide enough to accumulate sums of it.
impl Enlargeable for u8 {
    type Larger = u32;
}
impl Enlargeable for u16 {
    type Larger = u32;
}
impl Enlargeable for u32 {
    type Larger = u64;
}
impl Enlargeable for u64 {
    type Larger = u128;
}
impl Enlargeable for usize {
    // Note: On 32-bit architectures, u64 should be enough here.
    type Larger = u128;
}
impl Enlargeable for i8 {
    type Larger = i32;
}
impl Enlargeable for i16 {
    type Larger = i32;
}
impl Enlargeable for i32 {
    type Larger = i64;
}
impl Enlargeable for i64 {
    type Larger = i128;
}
impl Enlargeable for isize {
    // Note: On 32-bit architectures, i64 should be enough here.
    type Larger = i128;
}
impl Enlargeable for f32 {
    type Larger = f64;
}
impl Enlargeable for f64 {
    // No wider standard float exists; accumulation may round.
    type Larger = f64;
}
/// Linear interpolation without involving floating numbers.
pub trait Lerp: Bounded + NumCast {
    /// The numeric type the interpolation is computed in (e.g. `f32` for `u8`).
    type Ratio: Primitive;

    /// Interpolate between `a` and `b` by `ratio`, clamping the result to
    /// `Self`'s value range.
    fn lerp(a: Self, b: Self, ratio: Self::Ratio) -> Self {
        let a = <Self::Ratio as NumCast>::from(a).unwrap();
        let b = <Self::Ratio as NumCast>::from(b).unwrap();

        let res = a + (b - a) * ratio;

        if res > NumCast::from(Self::max_value()).unwrap() {
            Self::max_value()
        } else if res < NumCast::from(Self::min_value()).unwrap() {
            // Clamp against the type's actual minimum rather than a literal 0:
            // identical for the existing unsigned implementors (whose minimum
            // is 0), but also correct for any future signed implementor.
            Self::min_value()
        } else {
            NumCast::from(res).unwrap()
        }
    }
}
impl Lerp for u8 {
    // f32's 24-bit mantissa represents every u8 value exactly.
    type Ratio = f32;
}
impl Lerp for u16 {
    // f32's 24-bit mantissa represents every u16 value exactly.
    type Ratio = f32;
}
impl Lerp for u32 {
    // u32 needs f64: f32 cannot represent all 32-bit integers exactly.
    type Ratio = f64;
}
impl Lerp for f32 {
    type Ratio = f32;

    // Floats need no conversion or clamping, so override the default.
    fn lerp(a: Self, b: Self, ratio: Self::Ratio) -> Self {
        a + (b - a) * ratio
    }
}
/// The pixel with an associated `ColorType`.
/// Not all possible pixels represent one of the predefined `ColorType`s.
///
/// Sealed: the `private::SealedPixelWithColorType` supertrait keeps downstream
/// crates from adding implementations.
pub trait PixelWithColorType:
    Pixel + private::SealedPixelWithColorType<TransformableSubpixel = <Self as Pixel>::Subpixel>
{
    /// This pixel has the format of one of the predefined `ColorType`s,
    /// such as `Rgb8`, `La16` or `Rgba32F`.
    /// This is needed for automatically detecting
    /// a color format when saving an image as a file.
    const COLOR_TYPE: ExtendedColorType;
}
// Map each concrete pixel type to the `ExtendedColorType` tag used when
// encoding it to a file.
impl PixelWithColorType for Rgb<u8> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::Rgb8;
}
impl PixelWithColorType for Rgb<u16> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::Rgb16;
}
impl PixelWithColorType for Rgb<f32> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::Rgb32F;
}
impl PixelWithColorType for Rgba<u8> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::Rgba8;
}
impl PixelWithColorType for Rgba<u16> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::Rgba16;
}
impl PixelWithColorType for Rgba<f32> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::Rgba32F;
}
impl PixelWithColorType for Luma<u8> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::L8;
}
impl PixelWithColorType for Luma<u16> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::L16;
}
impl PixelWithColorType for LumaA<u8> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::La8;
}
impl PixelWithColorType for LumaA<u16> {
    const COLOR_TYPE: ExtendedColorType = ExtendedColorType::La16;
}
/// Prevents down-stream users from implementing the `Primitive` trait
pub(crate) mod private {
    use crate::color::*;
    use crate::metadata::cicp::{self, CicpApplicable};

    /// A pixel's channel layout (which channels, in which count) with the
    /// bit-depth erased.
    #[derive(Clone, Copy, Debug)]
    pub enum LayoutWithColor {
        Rgb,
        Rgba,
        Luma,
        LumaAlpha,
    }

    impl From<ColorType> for LayoutWithColor {
        fn from(color: ColorType) -> LayoutWithColor {
            match color {
                ColorType::L8 | ColorType::L16 => LayoutWithColor::Luma,
                ColorType::La8 | ColorType::La16 => LayoutWithColor::LumaAlpha,
                ColorType::Rgb8 | ColorType::Rgb16 | ColorType::Rgb32F => LayoutWithColor::Rgb,
                ColorType::Rgba8 | ColorType::Rgba16 | ColorType::Rgba32F => LayoutWithColor::Rgba,
            }
        }
    }

    impl LayoutWithColor {
        /// Number of channels in this layout.
        pub(crate) fn channels(self) -> usize {
            match self {
                Self::Rgb => 3,
                Self::Rgba => 4,
                Self::Luma => 1,
                Self::LumaAlpha => 2,
            }
        }
    }

    /// Token argument for `layout`; nameable only inside the crate, which is
    /// what keeps `SealedPixelWithColorType` effectively sealed.
    #[derive(Clone, Copy)]
    pub struct PrivateToken;

    pub trait SealedPixelWithColorType {
        /// The subpixel scalar this pixel's color transforms operate on.
        #[expect(private_bounds)] // This is a sealed trait.
        type TransformableSubpixel: HelpDispatchTransform;

        /// The channel layout of this pixel type.
        fn layout(_: PrivateToken) -> LayoutWithColor;
    }

    impl SealedPixelWithColorType for Rgb<u8> {
        type TransformableSubpixel = u8;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Rgb
        }
    }
    impl SealedPixelWithColorType for Rgb<u16> {
        type TransformableSubpixel = u16;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Rgb
        }
    }
    impl SealedPixelWithColorType for Rgb<f32> {
        type TransformableSubpixel = f32;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Rgb
        }
    }
    impl SealedPixelWithColorType for Rgba<u8> {
        type TransformableSubpixel = u8;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Rgba
        }
    }
    impl SealedPixelWithColorType for Rgba<u16> {
        type TransformableSubpixel = u16;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Rgba
        }
    }
    impl SealedPixelWithColorType for Rgba<f32> {
        type TransformableSubpixel = f32;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Rgba
        }
    }
    impl SealedPixelWithColorType for Luma<u8> {
        type TransformableSubpixel = u8;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Luma
        }
    }
    impl SealedPixelWithColorType for LumaA<u8> {
        type TransformableSubpixel = u8;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::LumaAlpha
        }
    }
    impl SealedPixelWithColorType for Luma<u16> {
        type TransformableSubpixel = u16;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Luma
        }
    }
    impl SealedPixelWithColorType for Luma<f32> {
        type TransformableSubpixel = f32;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::Luma
        }
    }
    impl SealedPixelWithColorType for LumaA<u16> {
        type TransformableSubpixel = u16;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::LumaAlpha
        }
    }
    impl SealedPixelWithColorType for LumaA<f32> {
        type TransformableSubpixel = f32;
        fn layout(_: PrivateToken) -> LayoutWithColor {
            LayoutWithColor::LumaAlpha
        }
    }

    // Consider a situation in a function bounded `Self: Pixel + PixelWithColorType`. Then, if we
    // tried this directly:
    //
    // <
    //   <Self as SealedPixelWithColorType>::TransformableSubpixel as HelpDispatchTransform
    // >::transform_on::<Self>(tr, LayoutWithColor::Rgb);
    //
    // the type checker is mightily confused. I think what's going on is as follows: It finds the
    // fact that `Self::Subpixel` is used for `TransformableSubpixel` from the bound on
    // `PixelWithColorType`, but then there is no existing bound on `Subpixel` that would guarantee
    // that `HelpDispatchTransform` is fulfilled. That would only be available by substituting
    // _back_ so that the bound on `TransformableSubpixel` gets applied to the `Subpixel` generic,
    // too. But now there are no variables here, so unification of bounds takes place and we never
    // get to see the bound (until next gen, I guess?). It finally finds that there is still
    // an unfulfilled bound and complains.
    //
    // Hence we must avoid mentioning the `Pixel` and `PixelWithColorType` bound so that _only_ the
    // `TransformableSubpixel` is available. Then all substitutions work forwards, and since we
    // return a `TransformableSubpixel` we get the function back without new variables to solve
    // for, and that can then be unified just fine. This extra function essentially introduces that
    // missing unknown which can unify the available impl set. Yay.
    pub(crate) fn dispatch_transform_from_sealed<P: SealedPixelWithColorType>(
        transform: &cicp::CicpTransform,
        into: LayoutWithColor,
    ) -> &'_ CicpApplicable<'_, P::TransformableSubpixel> {
        <P::TransformableSubpixel as HelpDispatchTransform>::transform_on::<P>(transform, into)
    }

    // Same as above, but the target layout is taken from a second sealed pixel
    // type instead of being passed explicitly.
    pub(crate) fn double_dispatch_transform_from_sealed<
        P: SealedPixelWithColorType,
        Into: SealedPixelWithColorType,
    >(
        transform: &cicp::CicpTransform,
    ) -> &'_ CicpApplicable<'_, P::TransformableSubpixel> {
        dispatch_transform_from_sealed::<P>(transform, Into::layout(PrivateToken))
    }

    /// Selects the subpixel-specific transform (u8/u16/f32) for a sealed pixel type.
    pub(crate) trait HelpDispatchTransform: Sized + 'static {
        fn transform_on<O: SealedPixelWithColorType<TransformableSubpixel = Self>>(
            transform: &cicp::CicpTransform,
            into: LayoutWithColor,
        ) -> &'_ (dyn Fn(&[Self], &mut [Self]) + Send + Sync);
    }

    impl HelpDispatchTransform for u8 {
        fn transform_on<O: SealedPixelWithColorType<TransformableSubpixel = Self>>(
            transform: &cicp::CicpTransform,
            into: LayoutWithColor,
        ) -> &'_ (dyn Fn(&[Self], &mut [Self]) + Send + Sync) {
            &**transform.select_transform_u8::<O>(into)
        }
    }
    impl HelpDispatchTransform for u16 {
        fn transform_on<O: SealedPixelWithColorType<TransformableSubpixel = Self>>(
            transform: &cicp::CicpTransform,
            into: LayoutWithColor,
        ) -> &'_ (dyn Fn(&[Self], &mut [Self]) + Send + Sync) {
            &**transform.select_transform_u16::<O>(into)
        }
    }
    impl HelpDispatchTransform for f32 {
        fn transform_on<O: SealedPixelWithColorType<TransformableSubpixel = Self>>(
            transform: &cicp::CicpTransform,
            into: LayoutWithColor,
        ) -> &'_ (dyn Fn(&[Self], &mut [Self]) + Send + Sync) {
            &**transform.select_transform_f32::<O>(into)
        }
    }
}
/// A generalized pixel.
///
/// A pixel object is usually not used standalone but as a view into an image buffer.
pub trait Pixel: Copy + Clone {
    /// The scalar type that is used to store each channel in this pixel.
    type Subpixel: Primitive;

    /// The number of channels of this pixel type.
    const CHANNEL_COUNT: u8;

    /// Returns the components as a slice.
    fn channels(&self) -> &[Self::Subpixel];

    /// Returns the components as a mutable slice.
    fn channels_mut(&mut self) -> &mut [Self::Subpixel];

    /// A string that can help to interpret the meaning of each channel.
    /// See [gimp babl](http://gegl.org/babl/).
    const COLOR_MODEL: &'static str;

    /// `true` if this pixel type contains an alpha channel.
    const HAS_ALPHA: bool = false;

    /// Returns the channels of this pixel as a 4 tuple. If the pixel
    /// has less than 4 channels the remainder is filled with the maximum value.
    #[deprecated(since = "0.24.0", note = "Use `channels()` or `channels_mut()`")]
    fn channels4(
        &self,
    ) -> (
        Self::Subpixel,
        Self::Subpixel,
        Self::Subpixel,
        Self::Subpixel,
    );

    /// Construct a pixel from the 4 channels a, b, c and d.
    /// If the pixel does not contain 4 channels the extra are ignored.
    #[deprecated(
        since = "0.24.0",
        note = "Use the constructor of the pixel, for example `Rgba([r,g,b,a])` or `Pixel::from_slice`"
    )]
    fn from_channels(
        a: Self::Subpixel,
        b: Self::Subpixel,
        c: Self::Subpixel,
        d: Self::Subpixel,
    ) -> Self;

    /// Returns a view into a slice.
    ///
    /// Note: The slice length is not checked on creation. Thus the caller has to ensure
    /// that the slice is long enough to prevent panics if the pixel is used later on.
    fn from_slice(slice: &[Self::Subpixel]) -> &Self;

    /// Returns a mutable view into a mutable slice.
    ///
    /// Note: The slice length is not checked on creation. Thus the caller has to ensure
    /// that the slice is long enough to prevent panics if the pixel is used later on.
    fn from_slice_mut(slice: &mut [Self::Subpixel]) -> &mut Self;

    /// Convert this pixel to RGB
    fn to_rgb(&self) -> Rgb<Self::Subpixel>;

    /// Convert this pixel to RGB with an alpha channel
    fn to_rgba(&self) -> Rgba<Self::Subpixel>;

    /// Convert this pixel to luma
    fn to_luma(&self) -> Luma<Self::Subpixel>;

    /// Convert this pixel to luma with an alpha channel
    fn to_luma_alpha(&self) -> LumaA<Self::Subpixel>;

    /// Apply the function ```f``` to each channel of this pixel.
    fn map<F>(&self, f: F) -> Self
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel of this pixel. Works in-place.
    fn apply<F>(&mut self, f: F)
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel except the alpha channel.
    /// Apply the function ```g``` to the alpha channel.
    fn map_with_alpha<F, G>(&self, f: F, g: G) -> Self
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
        G: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel except the alpha channel.
    /// Apply the function ```g``` to the alpha channel. Works in-place.
    fn apply_with_alpha<F, G>(&mut self, f: F, g: G)
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
        G: FnMut(Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel except the alpha channel.
    fn map_without_alpha<F>(&self, f: F) -> Self
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
    {
        // Delegates to the in-place variant on a copy; alpha passes through unchanged.
        let mut this = *self;
        this.apply_with_alpha(f, |x| x);
        this
    }

    /// Apply the function ```f``` to each channel except the alpha channel.
    /// Works in place.
    fn apply_without_alpha<F>(&mut self, f: F)
    where
        F: FnMut(Self::Subpixel) -> Self::Subpixel,
    {
        // Identity on alpha, `f` on everything else.
        self.apply_with_alpha(f, |x| x);
    }

    /// Apply the function ```f``` to each channel of this pixel and
    /// ```other``` pairwise.
    fn map2<F>(&self, other: &Self, f: F) -> Self
    where
        F: FnMut(Self::Subpixel, Self::Subpixel) -> Self::Subpixel;

    /// Apply the function ```f``` to each channel of this pixel and
    /// ```other``` pairwise. Works in-place.
    fn apply2<F>(&mut self, other: &Self, f: F)
    where
        F: FnMut(Self::Subpixel, Self::Subpixel) -> Self::Subpixel;

    /// Invert this pixel
    fn invert(&mut self);

    /// Blend the color of a given pixel into ourself, taking into account alpha channels
    fn blend(&mut self, other: &Self);
}
/// Private module for supertraits of sealed traits.
mod seals {
    /// Marker supertrait: only the slice types below may implement
    /// `super::EncodableLayout`.
    pub trait EncodableLayout {}

    impl EncodableLayout for [u8] {}
    impl EncodableLayout for [u16] {}
    impl EncodableLayout for [f32] {}
}

136
vendor/image/src/utils/mod.rs vendored Normal file
View File

@@ -0,0 +1,136 @@
//! Utilities
use std::collections::TryReserveError;
use std::iter::repeat;
#[inline(always)]
// Expands bit-packed samples stored at the front of `buf` in place: each
// `bit_depth`-bit sample is handed to `func` together with its destination
// chunk of `channels` bytes. Processing runs from the end of the buffer
// backwards so expanded output never overwrites packed input that is still
// unread.
pub(crate) fn expand_packed<F>(buf: &mut [u8], channels: usize, bit_depth: u8, mut func: F)
where
    F: FnMut(u8, &mut [u8]),
{
    // Total number of packed *bits*: one `bit_depth`-bit sample per
    // `channels`-byte output chunk.
    let pixels = buf.len() / channels * bit_depth as usize;
    // Bits occupied in the final, partially used byte (0 when byte-aligned).
    let extra = pixels % 8;
    // Number of bytes holding packed data, rounded up.
    let entries = pixels / 8
        + match extra {
            0 => 0,
            _ => 1,
        };
    // Selects the low `bit_depth` bits; computed in u16 so a bit depth of 8
    // does not overflow the shift.
    let mask = ((1u16 << bit_depth) - 1) as u8;
    // Stream of (shift, byte index) pairs: bytes back to front, sample slots
    // within each byte from the lowest shift upwards.
    let i = (0..entries)
        .rev() // Reverse iterator
        .flat_map(|idx|
            // Pair every sample shift of this byte with the byte's index.
            (0..8/bit_depth).map(|i| i*bit_depth).zip(repeat(idx)))
        // Drop the slots of the trailing partial byte that carry no data.
        // NOTE(review): `extra` counts bits, not slots — for bit_depth > 1
        // this relies on the callers' alignment; confirm against call sites.
        .skip(extra);
    let buf_len = buf.len();
    // Destination chunk offsets, addressed from the end of the buffer.
    let j_inv = (channels..buf_len).step_by(channels);
    for ((shift, i), j_inv) in i.zip(j_inv) {
        let j = buf_len - j_inv;
        // Extract one sample and let `func` expand it into its output chunk.
        let pixel = (buf[i] & (mask << shift)) >> shift;
        func(pixel, &mut buf[j..(j + channels)]);
    }
}
/// Expand a buffer of packed 1, 2, or 4 bits integers into u8's. Assumes that
/// every `row_size` entries there are padding bits up to the next byte boundary.
#[allow(dead_code)]
// When no image formats that use it are enabled
pub(crate) fn expand_bits(bit_depth: u8, row_size: u32, buf: &[u8]) -> Vec<u8> {
    // Note: this conversion assumes that the scanlines begin on byte boundaries.
    let mask = (1u8 << bit_depth as usize) - 1;
    // Maps the largest packed value onto 255 (e.g. 3 -> 85 at bit depth 2).
    let scaling_factor = 255 / ((1 << bit_depth as usize) - 1);
    // Number of padding-only entries appended to each row so that the next
    // scanline starts on a byte boundary.
    let bit_width = row_size * u32::from(bit_depth);
    let skip = if bit_width % 8 == 0 {
        0
    } else {
        (8 - bit_width % 8) / u32::from(bit_depth)
    };
    // Row length in packed entries, padding included.
    let row_len = (row_size + skip) as usize;
    let entries_per_byte = 8 / bit_depth;

    let mut expanded = Vec::with_capacity(buf.len() * entries_per_byte as usize);
    // Running index over packed entries, counting the padding entries too.
    let mut slot = 0usize;
    for &byte in buf {
        // Entries are packed most-significant-first within each byte.
        for entry in 1..=entries_per_byte {
            let shift = (8 - bit_depth * entry) as usize;
            // Only emit entries that lie inside the row proper; the rest are
            // byte-boundary padding.
            if slot % row_len < row_size as usize {
                expanded.push(((byte >> shift) & mask) * scaling_factor);
            }
            slot += 1;
        }
    }
    expanded
}
/// Checks if the provided dimensions would cause an overflow.
#[allow(dead_code)]
// When no image formats that use it are enabled
pub(crate) fn check_dimension_overflow(width: u32, height: u32, bytes_per_pixel: u8) -> bool {
    // Widen to u64 first: even u32::MAX * u32::MAX still fits in a u64.
    let total_pixels = u64::from(width) * u64::from(height);
    // The byte count overflows u64 exactly when the pixel count exceeds this bound.
    let max_pixels = u64::MAX / u64::from(bytes_per_pixel);
    total_pixels > max_pixels
}
#[allow(dead_code)]
// When no image formats that use it are enabled
//
// Copies the storage of a `Pod` slice into an owned byte vector.
pub(crate) fn vec_copy_to_u8<T>(vec: &[T]) -> Vec<u8>
where
    T: bytemuck::Pod,
{
    // Reinterpret the slice as raw bytes, then copy those bytes out.
    let bytes: &[u8] = bytemuck::cast_slice(vec);
    bytes.to_vec()
}
#[inline]
// Restricts `a` to the range [`min`, `max`].
//
// Hand-rolled rather than `Ord::clamp` so that `PartialOrd`-only types are
// accepted and a reversed range does not panic.
pub(crate) fn clamp<N>(a: N, min: N, max: N) -> N
where
    N: PartialOrd,
{
    if a < min {
        return min;
    }
    if a > max {
        return max;
    }
    a
}
#[inline]
// Fallible counterpart of `Vec::with_capacity`: reports allocation failure
// via `TryReserveError` instead of aborting the process.
pub(crate) fn vec_try_with_capacity<T>(capacity: usize) -> Result<Vec<T>, TryReserveError> {
    let mut out = Vec::new();
    match out.try_reserve_exact(capacity) {
        Ok(()) => Ok(out),
        Err(err) => Err(err),
    }
}
#[cfg(test)]
mod test {
    #[test]
    fn gray_to_luma8_skip() {
        // Each case: (bit depth, row width in pixels, packed input, expected expansion).
        let check = |bit_depth, w, from, to| {
            assert_eq!(super::expand_bits(bit_depth, w, from), to);
        };
        // Bit depth 1, skip is more than half a byte
        check(
            1,
            10,
            &[0b11110000, 0b11000000, 0b00001111, 0b11000000],
            vec![
                255, 255, 255, 255, 0, 0, 0, 0, 255, 255, 0, 0, 0, 0, 255, 255, 255, 255, 255, 255,
            ],
        );
        // Bit depth 2, skip is more than half a byte
        check(
            2,
            5,
            &[0b11110000, 0b11000000, 0b00001111, 0b11000000],
            vec![255, 255, 0, 0, 255, 0, 0, 255, 255, 255],
        );
        // Bit depth 2, skip is 0
        check(
            2,
            4,
            &[0b11110000, 0b00001111],
            vec![255, 255, 0, 0, 0, 0, 255, 255],
        );
        // Bit depth 4, skip is half a byte
        check(4, 1, &[0b11110011, 0b00001100], vec![255, 0]);
    }
}