Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

128
vendor/png/src/decoder/interlace_info.rs vendored Normal file
View File

@@ -0,0 +1,128 @@
use std::ops::Range;
use crate::adam7::{Adam7Info, Adam7Iterator};
/// Describes which interlacing algorithm applies to a decoded row.
///
/// PNG (2003) specifies two interlace modes, but reserves future extensions.
///
/// See also [Reader.next_interlaced_row](crate::Reader::next_interlaced_row).
#[derive(Clone, Copy, Debug)]
pub enum InterlaceInfo {
    /// The `null` method means no interlacing.
    Null(NullInfo),
    /// [The `Adam7` algorithm](https://en.wikipedia.org/wiki/Adam7_algorithm) derives its name
    /// from doing 7 passes over the image, only decoding a subset of all pixels in each pass.
    /// The following table shows pictorially what parts of each 8x8 area of the image is found in
    /// each pass:
    ///
    /// ```txt
    /// 1 6 4 6 2 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    /// 3 6 4 6 3 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    /// ```
    Adam7(Adam7Info),
}
/// Row information for a non-interlaced (`null` method) image.
#[derive(Clone, Copy, Debug)]
pub struct NullInfo {
    /// 0-based index of the row within the image (see `InterlaceInfoIter`,
    /// which produces these from a `0..height` range).
    line: u32,
}
impl InterlaceInfo {
    /// 0-based number of this row — within the current pass for `Adam7`,
    /// within the whole image for `Null`.
    pub(crate) fn line_number(&self) -> u32 {
        match self {
            InterlaceInfo::Null(NullInfo { line })
            | InterlaceInfo::Adam7(Adam7Info { line, .. }) => *line,
        }
    }

    /// Borrows the Adam7 pass details, or returns `None` for non-interlaced rows.
    pub(crate) fn get_adam7_info(&self) -> Option<&Adam7Info> {
        if let InterlaceInfo::Adam7(adam7info) = self {
            Some(adam7info)
        } else {
            None
        }
    }
}
/// Iterator producing one [`InterlaceInfo`] per row of a (sub)frame.
pub(crate) struct InterlaceInfoIter(IterImpl);

impl InterlaceInfoIter {
    /// An iterator that yields no rows at all.
    pub fn empty() -> Self {
        InterlaceInfoIter(IterImpl::None(0..0))
    }

    /// Row iterator for a `width` x `height` frame, Adam7-interlaced or not.
    pub fn new(width: u32, height: u32, interlaced: bool) -> Self {
        let inner = if interlaced {
            IterImpl::Adam7(Adam7Iterator::new(width, height))
        } else {
            IterImpl::None(0..height)
        };
        InterlaceInfoIter(inner)
    }
}
impl Iterator for InterlaceInfoIter {
    type Item = InterlaceInfo;

    fn next(&mut self) -> Option<InterlaceInfo> {
        match &mut self.0 {
            IterImpl::Adam7(adam7) => adam7.next().map(InterlaceInfo::Adam7),
            IterImpl::None(lines) => {
                lines.next().map(|line| InterlaceInfo::Null(NullInfo { line }))
            }
        }
    }
}
/// Backing state for [`InterlaceInfoIter`]: either a plain range of row
/// numbers (no interlacing) or the Adam7 pass iterator.
enum IterImpl {
    None(Range<u32>),
    Adam7(Adam7Iterator),
}
#[cfg(test)]
mod test {
    use super::*;

    /// Collects only the per-row line numbers produced by `iter`.
    fn line_numbers(iter: InterlaceInfoIter) -> Vec<u32> {
        iter.map(|info| info.line_number()).collect()
    }

    #[test]
    fn null() {
        // A non-interlaced 8x8 image is simply 8 consecutive rows.
        assert_eq!(
            line_numbers(InterlaceInfoIter::new(8, 8, false)),
            (0..8).collect::<Vec<_>>(),
        );
    }

    #[test]
    fn adam7() {
        // Line numbers restart from 0 at the start of each of the 7 passes.
        assert_eq!(
            line_numbers(InterlaceInfoIter::new(8, 8, true)),
            vec![
                0, // pass 1
                0, // pass 2
                0, // pass 3
                0, 1, // pass 4
                0, 1, // pass 5
                0, 1, 2, 3, // pass 6
                0, 1, 2, 3, // pass 7
            ],
        );
    }

    #[test]
    fn empty() {
        assert_eq!(line_numbers(InterlaceInfoIter::empty()), vec![]);
    }
}

732
vendor/png/src/decoder/mod.rs vendored Normal file
View File

@@ -0,0 +1,732 @@
mod interlace_info;
mod read_decoder;
pub(crate) mod stream;
pub(crate) mod transform;
mod unfiltering_buffer;
mod zlib;
use self::read_decoder::{ImageDataCompletionStatus, ReadDecoder};
use self::stream::{DecodeOptions, DecodingError, FormatErrorInner};
use self::transform::{create_transform_fn, TransformFn};
use self::unfiltering_buffer::UnfilteringBuffer;
use std::io::{BufRead, Seek};
use std::mem;
use crate::adam7::Adam7Info;
use crate::common::{
BitDepth, BytesPerPixel, ColorType, Info, ParameterErrorKind, Transformations,
};
use crate::FrameControl;
pub use zlib::{UnfilterBuf, UnfilterRegion};
pub use interlace_info::InterlaceInfo;
use interlace_info::InterlaceInfoIter;
/*
pub enum InterlaceHandling {
/// Outputs the raw rows
RawRows,
/// Fill missing the pixels from the existing ones
Rectangle,
/// Only fill the needed pixels
Sparkle
}
*/
/// Output info.
///
/// This describes one particular frame of the image that was written into the output buffer.
#[derive(Debug, PartialEq, Eq)]
pub struct OutputInfo {
    /// The pixel width of this frame.
    pub width: u32,
    /// The pixel height of this frame.
    pub height: u32,
    /// The chosen output color type.
    pub color_type: ColorType,
    /// The chosen output bit depth.
    pub bit_depth: BitDepth,
    /// The byte count of each scan line in the image.
    pub line_size: usize,
}

impl OutputInfo {
    /// Returns the size needed to hold a decoded frame.
    ///
    /// If the output buffer was larger, then bytes after this count should be ignored. They may
    /// still have been changed.
    pub fn buffer_size(&self) -> usize {
        self.line_size * self.height as usize
    }
}
/// Limits on the resources the `Decoder` is allowed to use.
#[derive(Clone, Copy, Debug)]
pub struct Limits {
    /// Maximum number of bytes the decoder is allowed to allocate; the default is 64 MiB.
    pub bytes: usize,
}
impl Limits {
    /// Deducts `bytes` from the remaining budget, failing with
    /// `DecodingError::LimitsExceeded` when the budget is insufficient.
    pub(crate) fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
        match self.bytes.checked_sub(bytes) {
            Some(remaining) => {
                self.bytes = remaining;
                Ok(())
            }
            None => Err(DecodingError::LimitsExceeded),
        }
    }
}
impl Default for Limits {
    fn default() -> Limits {
        // 64 MiB allocation budget by default.
        Limits { bytes: 64 << 20 }
    }
}
/// PNG Decoder
pub struct Decoder<R: BufRead + Seek> {
    /// Low-level reader that feeds input into the streaming chunk decoder.
    read_decoder: ReadDecoder<R>,
    /// Output transformations
    transform: Transformations,
}
/// A row of data with interlace information attached.
#[derive(Clone, Copy, Debug)]
pub struct InterlacedRow<'data> {
    data: &'data [u8],
    interlace: InterlaceInfo,
}

impl<'data> InterlacedRow<'data> {
    /// The decoded (and transformed) bytes of this row.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }

    /// Interlace information describing which pass/line this row belongs to.
    pub fn interlace(&self) -> &InterlaceInfo {
        &self.interlace
    }
}
/// A row of data without interlace information.
#[derive(Clone, Copy, Debug)]
pub struct Row<'data> {
    data: &'data [u8],
}

impl<'data> Row<'data> {
    /// The decoded (and transformed) bytes of this row.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }
}
impl<R: BufRead + Seek> Decoder<R> {
    /// Create a new decoder configuration with default limits.
    pub fn new(r: R) -> Decoder<R> {
        Decoder::new_with_limits(r, Limits::default())
    }

    /// Create a new decoder configuration with custom limits.
    pub fn new_with_limits(r: R, limits: Limits) -> Decoder<R> {
        let mut read_decoder = ReadDecoder::new(r);
        read_decoder.set_limits(limits);
        Decoder {
            read_decoder,
            transform: Transformations::IDENTITY,
        }
    }

    /// Create a new decoder configuration with custom [`DecodeOptions`].
    pub fn new_with_options(r: R, decode_options: DecodeOptions) -> Decoder<R> {
        let mut read_decoder = ReadDecoder::with_options(r, decode_options);
        read_decoder.set_limits(Limits::default());
        Decoder {
            read_decoder,
            transform: Transformations::IDENTITY,
        }
    }

    /// Limit resource usage.
    ///
    /// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__
    /// considered part of the limits. Nevertheless, required intermediate buffers such as for
    /// singular lines is checked against the limit.
    ///
    /// Note that this is a best-effort basis.
    ///
    /// ```
    /// use std::fs::File;
    /// use std::io::BufReader;
    /// use png::{Decoder, Limits};
    /// // This image is 32×32, 1bit per pixel. The reader buffers one row which requires 4 bytes.
    /// let mut limits = Limits::default();
    /// limits.bytes = 3;
    /// let mut decoder = Decoder::new_with_limits(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()), limits);
    /// assert!(decoder.read_info().is_err());
    ///
    /// // This image is 32x32 pixels, so the decoder will allocate less than 10Kib
    /// let mut limits = Limits::default();
    /// limits.bytes = 10*1024;
    /// let mut decoder = Decoder::new_with_limits(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()), limits);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_limits(&mut self, limits: Limits) {
        self.read_decoder.set_limits(limits);
    }

    /// Read the PNG header and return the information contained within.
    ///
    /// Most image metadata will not be read until `read_info` is called, so those fields will be
    /// None or empty.
    pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
        self.read_decoder.read_header_info()
    }

    /// Reads all meta data until the first IDAT chunk
    pub fn read_info(mut self) -> Result<Reader<R>, DecodingError> {
        let info = self.read_header_info()?;
        let unfiltering_buffer = UnfilteringBuffer::new(info);
        let mut reader = Reader {
            decoder: self.read_decoder,
            bpp: BytesPerPixel::One,
            subframe: SubframeInfo::not_yet_init(),
            remaining_frames: 0, // Temporary value - fixed below after reading `acTL` and `fcTL`.
            unfiltering_buffer,
            transform: self.transform,
            transform_fn: None,
            scratch_buffer: Vec::new(),
            finished: false,
        };

        // Check if the decoding buffer of a single raw line has a valid size.
        //
        // FIXME: this check and the next can be delayed until processing image data. This would
        // allow usage where only the metadata is processed, or where the image is processed
        // line-by-line even on targets that can not fit the whole image into their address space.
        // We should strive for a balance between implementation complexity (still ensure that the
        // no-overflow preconditions are met for internal calculation) and use possibilities.
        if reader.info().checked_raw_row_length().is_none() {
            return Err(DecodingError::LimitsExceeded);
        }

        // Check if the output buffer has a valid size.
        //
        // FIXME: see above and
        // <https://github.com/image-rs/image-png/pull/608#issuecomment-3003576956>
        if reader.output_buffer_size().is_none() {
            return Err(DecodingError::LimitsExceeded);
        }

        reader.read_until_image_data()?;

        // Determine how many frames to expect, taking APNG animation control into account.
        reader.remaining_frames = match reader.info().animation_control.as_ref() {
            None => 1, // No `acTL` => only expecting `IDAT` frame.
            Some(animation) => {
                let mut num_frames = animation.num_frames as usize;
                if reader.info().frame_control.is_none() {
                    // No `fcTL` before `IDAT` => `IDAT` is not part of the animation, but
                    // represents an *extra*, default frame for non-APNG-aware decoders.
                    num_frames += 1;
                }
                num_frames
            }
        };
        Ok(reader)
    }

    /// Set the allowed and performed transformations.
    ///
    /// A transformation is a pre-processing on the raw image data modifying content or encoding.
    /// Many options have an impact on memory or CPU usage during decoding.
    pub fn set_transformations(&mut self, transform: Transformations) {
        self.transform = transform;
    }

    /// Set the decoder to ignore all text chunks while parsing.
    ///
    /// eg.
    /// ```
    /// use std::fs::File;
    /// use std::io::BufReader;
    /// use png::Decoder;
    /// let mut decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
    /// decoder.set_ignore_text_chunk(true);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.read_decoder.set_ignore_text_chunk(ignore_text_chunk);
    }

    /// Set the decoder to ignore iccp chunks while parsing.
    ///
    /// eg.
    /// ```
    /// use std::fs::File;
    /// use std::io::BufReader;
    /// use png::Decoder;
    /// let mut decoder = Decoder::new(BufReader::new(File::open("tests/iccp/broken_iccp.png").unwrap()));
    /// decoder.set_ignore_iccp_chunk(true);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_ignore_iccp_chunk(&mut self, ignore_iccp_chunk: bool) {
        self.read_decoder.set_ignore_iccp_chunk(ignore_iccp_chunk);
    }

    /// Set the decoder to ignore and not verify the Adler-32 checksum
    /// and CRC code.
    pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
        self.read_decoder.ignore_checksums(ignore_checksums);
    }
}
/// PNG reader (mostly high-level interface)
///
/// Provides a high level that iterates over lines or whole images.
pub struct Reader<R: BufRead + Seek> {
    /// Low-level reader wrapping the input and the streaming chunk decoder.
    decoder: ReadDecoder<R>,
    /// Bytes per pixel as used by filter prediction (set from `bpp_in_prediction`).
    bpp: BytesPerPixel,
    /// State describing where we are within the current IDAT-frame or APNG subframe.
    subframe: SubframeInfo,
    /// How many frames remain to be decoded. Decremented after each `IDAT` or `fdAT` sequence.
    remaining_frames: usize,
    /// Buffer with not-yet-`unfilter`-ed image rows
    unfiltering_buffer: UnfilteringBuffer,
    /// Output transformations
    transform: Transformations,
    /// Function that can transform decompressed, unfiltered rows into final output.
    /// See the `transform.rs` module for more details.
    transform_fn: Option<TransformFn>,
    /// This buffer is only used so that `next_row` and `next_interlaced_row` can return reference
    /// to a byte slice. In a future version of this library, this buffer will be removed and
    /// `next_row` and `next_interlaced_row` will write directly into a user provided output buffer.
    scratch_buffer: Vec<u8>,
    /// Whether `ImageEnd` was already reached by `fn finish`.
    finished: bool,
}
/// The subframe specific information.
///
/// In APNG the frames are constructed by combining previous frame and a new subframe (through a
/// combination of `dispose_op` and `overlay_op`). These sub frames specify individual dimension
/// information and reuse the global interlace options. This struct encapsulates the state of where
/// in a particular IDAT-frame or subframe we are.
struct SubframeInfo {
    /// Width of this (sub)frame in pixels (from `fcTL` if present, else the global info).
    width: u32,
    /// Height of this (sub)frame in pixels.
    height: u32,
    /// Byte length of a raw (still filtered) row, including the filter-type byte.
    rowlen: usize,
    /// Interlace info of the row to be decoded next; `None` once all rows are done.
    current_interlace_info: Option<InterlaceInfo>,
    /// Iterator over the rows that follow `current_interlace_info`.
    interlace_info_iter: InterlaceInfoIter,
    /// Whether this (sub)frame's `IDAT`/`fdAT` sequence was fully consumed and discarded.
    consumed_and_flushed: bool,
}
impl<R: BufRead + Seek> Reader<R> {
    /// Advances to the start of the next animation frame and
    /// returns a reference to the `FrameControl` info that describes it.
    /// Skips and discards the image data of the previous frame if necessary.
    ///
    /// Returns a [`ParameterError`] when there are no more animation frames.
    /// To avoid this the caller can check if [`Info::animation_control`] exists
    /// and consult [`AnimationControl::num_frames`].
    pub fn next_frame_info(&mut self) -> Result<&FrameControl, DecodingError> {
        let remaining_frames = if self.subframe.consumed_and_flushed {
            self.remaining_frames
        } else {
            // One remaining frame will be consumed by the `finish_decoding` call below.
            self.remaining_frames - 1
        };
        if remaining_frames == 0 {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::PolledAfterEndOfImage.into(),
            ));
        }

        if !self.subframe.consumed_and_flushed {
            self.subframe.current_interlace_info = None;
            self.finish_decoding()?;
        }
        self.read_until_image_data()?;

        // The PNG standard (and `StreamingDecoder`) guarantees that there is an `fcTL` chunk
        // before the start of image data in a sequence of `fdAT` chunks. Therefore `unwrap`
        // below is guaranteed to not panic.
        Ok(self.info().frame_control.as_ref().unwrap())
    }

    /// Reads all meta data until the next frame data starts.
    /// Requires IHDR before the IDAT and fcTL before fdAT.
    fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
        self.decoder.read_until_image_data()?;

        self.subframe = SubframeInfo::new(self.info());
        self.bpp = self.info().bpp_in_prediction();
        self.unfiltering_buffer.reset_all();

        // Allocate output buffer.
        let buflen = self.unguarded_output_line_size(self.subframe.width);
        self.decoder.reserve_bytes(buflen)?;

        Ok(())
    }

    /// Get information on the image.
    ///
    /// The structure will change as new frames of an animated image are decoded.
    pub fn info(&self) -> &Info<'static> {
        self.decoder.info().unwrap()
    }

    /// Decodes the next frame into `buf`.
    ///
    /// Note that this decodes raw subframes that need to be mixed according to blend-op and
    /// dispose-op by the caller.
    ///
    /// The caller must always provide a buffer large enough to hold a complete frame (the APNG
    /// specification restricts subframes to the dimensions given in the image header). The region
    /// that has been written can be checked afterwards by calling `info` after a successful call
    /// and inspecting the `frame_control` data. This requirement may be lifted in a later version
    /// of `png`.
    ///
    /// Output lines will be written in row-major, packed matrix with width and height of the read
    /// frame (or subframe), all samples are in big endian byte order where this matters.
    pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<OutputInfo, DecodingError> {
        if self.remaining_frames == 0 {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::PolledAfterEndOfImage.into(),
            ));
        } else if self.subframe.consumed_and_flushed {
            // Advance until the next `fdAT`
            // (along the way we should encounter the fcTL for this frame).
            self.read_until_image_data()?;
        }

        // Note that we only check if the buffer size calculation holds in a call to decoding the
        // frame. Consequently, we can represent the `Info` and frameless decoding even when the
        // target architecture's address space is too small for a frame. However reading the actual
        // frame data requires the size check below to succeed.
        let required_len = self
            .output_buffer_size()
            .ok_or(DecodingError::LimitsExceeded)?;

        if buf.len() < required_len {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::ImageBufferSize {
                    expected: required_len,
                    actual: buf.len(),
                }
                .into(),
            ));
        }

        let (color_type, bit_depth) = self.output_color_type();
        let output_info = OutputInfo {
            width: self.subframe.width,
            height: self.subframe.height,
            color_type,
            bit_depth,
            line_size: self.unguarded_output_line_size(self.subframe.width),
        };

        if self.info().interlaced {
            // Interlaced: decode each Adam7 pass row and scatter it into `buf`.
            let stride = self.unguarded_output_line_size(self.info().width);
            let samples = color_type.samples() as u8;
            let bits_pp = samples * (bit_depth as u8);
            let expand = crate::adam7::expand_pass;
            while let Some(InterlacedRow {
                data: row,
                interlace,
                ..
            }) = self.next_interlaced_row()?
            {
                // `unwrap` won't panic, because we checked `self.info().interlaced` above.
                let adam7info = interlace.get_adam7_info().unwrap();
                expand(buf, stride, row, adam7info, bits_pp);
            }
        } else {
            // Non-interlaced: decode the remaining rows directly into consecutive
            // `line_size`-sized chunks of `buf`.
            let current_interlace_info = self.subframe.current_interlace_info.as_ref();
            let already_done_rows = current_interlace_info
                .map(|info| info.line_number())
                .unwrap_or(self.subframe.height);

            for row in buf
                .chunks_exact_mut(output_info.line_size)
                .take(self.subframe.height as usize)
                .skip(already_done_rows as usize)
            {
                self.next_interlaced_row_impl(self.subframe.rowlen, row)?;
            }
        }

        // Advance over the rest of data for this (sub-)frame.
        self.finish_decoding()?;

        Ok(output_info)
    }

    /// Bookkeeping after an `IDAT`/`fdAT` sequence was fully consumed.
    fn mark_subframe_as_consumed_and_flushed(&mut self) {
        assert!(self.remaining_frames > 0);
        self.remaining_frames -= 1;

        self.subframe.consumed_and_flushed = true;
    }

    /// Advance over the rest of data for this (sub-)frame.
    /// Called after decoding the last row of a frame.
    fn finish_decoding(&mut self) -> Result<(), DecodingError> {
        // Double-check that all rows of this frame have been decoded (i.e. that the potential
        // `finish_decoding` call below won't be discarding any data).
        assert!(self.subframe.current_interlace_info.is_none());

        // Discard the remaining data in the current sequence of `IDAT` or `fdAT` chunks.
        if !self.subframe.consumed_and_flushed {
            self.decoder.finish_decoding_image_data()?;
            self.mark_subframe_as_consumed_and_flushed();
        }

        Ok(())
    }

    /// Returns the next processed row of the image (discarding `InterlaceInfo`).
    ///
    /// See also [`Reader.read_row`], which reads into a caller-provided buffer.
    pub fn next_row(&mut self) -> Result<Option<Row<'_>>, DecodingError> {
        self.next_interlaced_row()
            .map(|v| v.map(|v| Row { data: v.data }))
    }

    /// Returns the next processed row of the image.
    ///
    /// See also [`Reader.read_row`], which reads into a caller-provided buffer.
    pub fn next_interlaced_row(&mut self) -> Result<Option<InterlacedRow<'_>>, DecodingError> {
        // Temporarily take ownership of the scratch buffer so it can be passed to `read_row`.
        let mut output_buffer = mem::take(&mut self.scratch_buffer);
        let max_line_size = self
            .output_line_size(self.info().width)
            .ok_or(DecodingError::LimitsExceeded)?;
        output_buffer.resize(max_line_size, 0u8);
        let result = self.read_row(&mut output_buffer);
        self.scratch_buffer = output_buffer;
        result.map(move |option| {
            option.map(move |interlace| {
                let output_line_size = self.output_line_size_for_interlace_info(&interlace);
                InterlacedRow {
                    data: &self.scratch_buffer[..output_line_size],
                    interlace,
                }
            })
        })
    }

    /// Reads the next row of the image into the provided `output_buffer`.
    /// `Ok(None)` will be returned if the current image frame has no more rows.
    ///
    /// `output_buffer` needs to be long enough to accommodate [`Reader.output_line_size`] for
    /// [`Info.width`] (initial interlaced rows may need less than that).
    ///
    /// See also [`Reader.next_row`] and [`Reader.next_interlaced_row`], which read into a
    /// `Reader`-owned buffer.
    pub fn read_row(
        &mut self,
        output_buffer: &mut [u8],
    ) -> Result<Option<InterlaceInfo>, DecodingError> {
        let interlace = match self.subframe.current_interlace_info.as_ref() {
            None => {
                self.finish_decoding()?;
                return Ok(None);
            }
            Some(interlace) => *interlace,
        };
        if interlace.line_number() == 0 {
            // First row of a pass (or of the image) has no previous row to unfilter against.
            self.unfiltering_buffer.reset_prev_row();
        }
        let rowlen = match interlace {
            InterlaceInfo::Null(_) => self.subframe.rowlen,
            InterlaceInfo::Adam7(Adam7Info { samples: width, .. }) => {
                self.info().raw_row_length_from_width(width)
            }
        };
        let output_line_size = self.output_line_size_for_interlace_info(&interlace);
        let output_buffer = &mut output_buffer[..output_line_size];

        self.next_interlaced_row_impl(rowlen, output_buffer)?;

        Ok(Some(interlace))
    }

    /// Output byte length of a row described by `interlace` (pass-dependent for Adam7).
    fn output_line_size_for_interlace_info(&self, interlace: &InterlaceInfo) -> usize {
        let width = match interlace {
            InterlaceInfo::Adam7(Adam7Info { samples: width, .. }) => *width,
            InterlaceInfo::Null(_) => self.subframe.width,
        };
        self.unguarded_output_line_size(width)
    }

    /// Read the rest of the image and chunks and finish up, including text chunks or others.
    /// This will discard the rest of the image if the image is not read already with
    /// [`Reader::next_frame`], [`Reader::next_row`] or [`Reader::next_interlaced_row`].
    pub fn finish(&mut self) -> Result<(), DecodingError> {
        if self.finished {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::PolledAfterEndOfImage.into(),
            ));
        }
        self.remaining_frames = 0;
        self.unfiltering_buffer.reset_all();

        self.decoder.read_until_end_of_input()?;

        self.finished = true;
        Ok(())
    }

    /// Fetch the next interlaced row and filter it according to our own transformations.
    fn next_interlaced_row_impl(
        &mut self,
        rowlen: usize,
        output_buffer: &mut [u8],
    ) -> Result<(), DecodingError> {
        self.next_raw_interlaced_row(rowlen)?;
        let row = self.unfiltering_buffer.prev_row();
        assert_eq!(row.len(), rowlen - 1);

        // Apply transformations and write resulting data to buffer.
        let transform_fn = {
            if self.transform_fn.is_none() {
                // Lazily build the transform function the first time a row is produced.
                self.transform_fn = Some(create_transform_fn(self.info(), self.transform)?);
            }
            self.transform_fn.as_deref().unwrap()
        };
        transform_fn(row, output_buffer, self.info());

        self.subframe.current_interlace_info = self.subframe.interlace_info_iter.next();
        Ok(())
    }

    /// Returns the color type and the number of bits per sample
    /// of the data returned by `Reader::next_row` and `Reader::frames`.
    pub fn output_color_type(&self) -> (ColorType, BitDepth) {
        use crate::common::ColorType::*;
        let t = self.transform;
        let info = self.info();
        if t == Transformations::IDENTITY {
            (info.color_type, info.bit_depth)
        } else {
            let bits = match info.bit_depth as u8 {
                16 if t.intersects(Transformations::STRIP_16) => 8,
                n if n < 8
                    && (t.contains(Transformations::EXPAND)
                        || t.contains(Transformations::ALPHA)) =>
                {
                    8
                }
                n => n,
            };
            let color_type =
                if t.contains(Transformations::EXPAND) || t.contains(Transformations::ALPHA) {
                    let has_trns = info.trns.is_some() || t.contains(Transformations::ALPHA);
                    match info.color_type {
                        Grayscale if has_trns => GrayscaleAlpha,
                        Rgb if has_trns => Rgba,
                        Indexed if has_trns => Rgba,
                        Indexed => Rgb,
                        ct => ct,
                    }
                } else {
                    info.color_type
                };
            (color_type, BitDepth::from_u8(bits).unwrap())
        }
    }

    /// Return the number of bytes required to hold a deinterlaced image frame that is decoded
    /// using the given input transformations.
    ///
    /// Returns `None` if the output buffer does not fit into the memory space of the machine,
    /// otherwise returns the byte length in `Some`. The length is smaller than [`isize::MAX`].
    pub fn output_buffer_size(&self) -> Option<usize> {
        let (width, height) = self.info().size();
        let (color, depth) = self.output_color_type();
        // The subtraction should always work, but we do this for consistency. Also note that by
        // calling `checked_raw_row_length` the row buffer is guaranteed to work whereas if we
        // ran other function that didn't include the filter byte that could later fail on an image
        // that is `1xN`...
        let linelen = color.checked_raw_row_length(depth, width)?.checked_sub(1)?;
        let height = usize::try_from(height).ok()?;
        let imglen = linelen.checked_mul(height)?;
        // Ensure that it fits into address space not only `usize` to allocate.
        (imglen <= isize::MAX as usize).then_some(imglen)
    }

    /// Returns the number of bytes required to hold a deinterlaced row.
    pub(crate) fn unguarded_output_line_size(&self, width: u32) -> usize {
        let (color, depth) = self.output_color_type();
        // `- 1` drops the filter-type byte which is not part of the output.
        color.raw_row_length_from_width(depth, width) - 1
    }

    /// Returns the number of bytes required to hold a deinterlaced row.
    ///
    /// Returns `None` if the output buffer does not fit into the memory space of the machine,
    /// otherwise returns the byte length in `Some`. The length is smaller than [`isize::MAX`].
    pub fn output_line_size(&self, width: u32) -> Option<usize> {
        let (color, depth) = self.output_color_type();
        let length = color.checked_raw_row_length(depth, width)?.checked_sub(1)?;
        // Ensure that it fits into address space not only `usize` to allocate.
        (length <= isize::MAX as usize).then_some(length)
    }

    /// Unfilter the next raw interlaced row into `self.unfiltering_buffer`.
    fn next_raw_interlaced_row(&mut self, rowlen: usize) -> Result<(), DecodingError> {
        // Read image data until we have at least one full row (but possibly more than one).
        while self.unfiltering_buffer.curr_row_len() < rowlen {
            if self.subframe.consumed_and_flushed {
                return Err(DecodingError::Format(
                    FormatErrorInner::NoMoreImageData.into(),
                ));
            }
            let mut buffer = self.unfiltering_buffer.as_unfilled_buffer();
            match self.decoder.decode_image_data(Some(&mut buffer))? {
                ImageDataCompletionStatus::ExpectingMoreData => (),
                ImageDataCompletionStatus::Done => self.mark_subframe_as_consumed_and_flushed(),
            }
        }
        self.unfiltering_buffer.unfilter_curr_row(rowlen, self.bpp)
    }
}
impl SubframeInfo {
    /// Placeholder state used before the first frame's metadata has been parsed.
    fn not_yet_init() -> Self {
        SubframeInfo {
            width: 0,
            height: 0,
            rowlen: 0,
            current_interlace_info: None,
            interlace_info_iter: InterlaceInfoIter::empty(),
            consumed_and_flushed: false,
        }
    }

    /// Builds the per-(sub)frame state from the decoder's current `Info`.
    fn new(info: &Info) -> Self {
        // An APNG `fcTL` chunk overrides width and height; all other data is
        // set by the main info struct.
        let (width, height) = match info.frame_control {
            Some(fc) => (fc.width, fc.height),
            None => (info.width, info.height),
        };

        let mut iter = InterlaceInfoIter::new(width, height, info.interlaced);
        let first_row = iter.next();

        SubframeInfo {
            width,
            height,
            rowlen: info.raw_row_length_from_width(width),
            current_interlace_info: first_row,
            interlace_info_iter: iter,
            consumed_and_flushed: false,
        }
    }
}

153
vendor/png/src/decoder/read_decoder.rs vendored Normal file
View File

@@ -0,0 +1,153 @@
use super::stream::{DecodeOptions, Decoded, DecodingError, FormatErrorInner, StreamingDecoder};
use super::zlib::UnfilterBuf;
use super::Limits;
use std::io::{BufRead, ErrorKind, Read, Seek};
use crate::chunk;
use crate::common::Info;
/// Helper for encapsulating reading input from `Read` and feeding it into a `StreamingDecoder`
/// while hiding low-level `Decoded` events and only exposing a few high-level reading operations
/// like:
///
/// * `read_header_info` - reading until `IHDR` chunk
/// * `read_until_image_data` - reading until `IDAT` / `fdAT` sequence
/// * `decode_image_data` - reading from `IDAT` / `fdAT` sequence into an `UnfilterBuf`
/// * `finish_decoding_image_data()` - discarding remaining data from `IDAT` / `fdAT` sequence
/// * `read_until_end_of_input()` - reading until `IEND` chunk
pub(crate) struct ReadDecoder<R: Read> {
    /// The (buffered) input stream.
    reader: R,
    /// Push parser that consumes bytes from `reader` and emits `Decoded` events.
    decoder: StreamingDecoder,
}
impl<R: BufRead + Seek> ReadDecoder<R> {
    /// Wraps `r` with a default-configured `StreamingDecoder`.
    pub fn new(r: R) -> Self {
        Self {
            reader: r,
            decoder: StreamingDecoder::new(),
        }
    }

    /// Wraps `r` with a `StreamingDecoder` built from `options` and default limits.
    pub fn with_options(r: R, options: DecodeOptions) -> Self {
        let mut decoder = StreamingDecoder::new_with_options(options);
        decoder.limits = Limits::default();
        Self { reader: r, decoder }
    }

    pub fn set_limits(&mut self, limits: Limits) {
        self.decoder.limits = limits;
    }

    /// Deducts `bytes` from the decoder's remaining allocation budget.
    pub fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
        self.decoder.limits.reserve_bytes(bytes)
    }

    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.decoder.set_ignore_text_chunk(ignore_text_chunk);
    }

    pub fn set_ignore_iccp_chunk(&mut self, ignore_iccp_chunk: bool) {
        self.decoder.set_ignore_iccp_chunk(ignore_iccp_chunk);
    }

    /// Disables (or re-enables) both Adler-32 and CRC verification.
    pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
        self.decoder.set_ignore_adler32(ignore_checksums);
        self.decoder.set_ignore_crc(ignore_checksums);
    }

    /// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written
    /// into image_data.
    fn decode_next(
        &mut self,
        image_data: Option<&mut UnfilterBuf<'_>>,
    ) -> Result<Decoded, DecodingError> {
        let (consumed, result) = {
            let buf = self.reader.fill_buf()?;
            if buf.is_empty() {
                // Input ran dry before the PNG stream was complete.
                return Err(DecodingError::IoError(ErrorKind::UnexpectedEof.into()));
            }
            self.decoder.update(buf, image_data)?
        };
        self.reader.consume(consumed);
        Ok(result)
    }

    /// Reads until the end of `IHDR` chunk.
    ///
    /// Prerequisite: None (idempotent).
    pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
        while self.info().is_none() {
            // `IEND` cannot legally occur before `IHDR` has populated `info`.
            if let Decoded::ChunkComplete(chunk::IEND) = self.decode_next(None)? {
                unreachable!()
            }
        }
        Ok(self.info().unwrap())
    }

    /// Reads until the start of the next `IDAT` or `fdAT` chunk.
    ///
    /// Prerequisite: **Not** within `IDAT` / `fdAT` chunk sequence.
    pub fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
        loop {
            match self.decode_next(None)? {
                Decoded::ChunkBegin(_, chunk::IDAT) | Decoded::ChunkBegin(_, chunk::fdAT) => break,
                Decoded::ChunkComplete(chunk::IEND) => {
                    return Err(DecodingError::Format(
                        FormatErrorInner::MissingImageData.into(),
                    ))
                }
                // Ignore all other chunk events. Any other chunk may be between IDAT chunks, fdAT
                // chunks and their control chunks.
                _ => {}
            }
        }
        Ok(())
    }

    /// Reads `image_data` and reports whether there may be additional data afterwards (i.e. if it
    /// is okay to call `decode_image_data` and/or `finish_decoding_image_data` again).
    ///
    /// Prerequisite: Input is currently positioned within `IDAT` / `fdAT` chunk sequence.
    pub fn decode_image_data(
        &mut self,
        image_data: Option<&mut UnfilterBuf<'_>>,
    ) -> Result<ImageDataCompletionStatus, DecodingError> {
        match self.decode_next(image_data)? {
            Decoded::ImageData => Ok(ImageDataCompletionStatus::ExpectingMoreData),
            Decoded::ImageDataFlushed => Ok(ImageDataCompletionStatus::Done),
            // Ignore other events that may happen within an `IDAT` / `fdAT` chunks sequence.
            _ => Ok(ImageDataCompletionStatus::ExpectingMoreData),
        }
    }

    /// Consumes and discards the rest of an `IDAT` / `fdAT` chunk sequence.
    ///
    /// Prerequisite: Input is currently positioned within `IDAT` / `fdAT` chunk sequence.
    pub fn finish_decoding_image_data(&mut self) -> Result<(), DecodingError> {
        loop {
            if let ImageDataCompletionStatus::Done = self.decode_image_data(None)? {
                return Ok(());
            }
        }
    }

    /// Reads until the `IEND` chunk.
    ///
    /// Prerequisite: `IEND` chunk hasn't been reached yet.
    pub fn read_until_end_of_input(&mut self) -> Result<(), DecodingError> {
        while !matches!(self.decode_next(None)?, Decoded::ChunkComplete(chunk::IEND)) {}
        Ok(())
    }

    /// Image metadata parsed so far; `None` before `IHDR` has been read.
    pub fn info(&self) -> Option<&Info<'static>> {
        self.decoder.info.as_ref()
    }
}
/// Reports whether an `IDAT` / `fdAT` sequence may still contain image data.
#[derive(Debug, Eq, PartialEq)]
pub(crate) enum ImageDataCompletionStatus {
    /// More image data may follow; it is okay to keep decoding.
    ExpectingMoreData,
    /// The image-data sequence has been fully consumed.
    Done,
}

3213
vendor/png/src/decoder/stream.rs vendored Normal file

File diff suppressed because it is too large Load Diff

203
vendor/png/src/decoder/transform.rs vendored Normal file
View File

@@ -0,0 +1,203 @@
//! Transforming a decompressed, unfiltered row into the final output.
mod palette;
use crate::{BitDepth, ColorType, DecodingError, Info, Transformations};
use super::stream::FormatErrorInner;
/// Type of a function that can transform a decompressed, unfiltered row (the
/// 1st argument) into the final pixels (the 2nd argument), optionally using
/// image metadata (e.g. PLTE data can be accessed using the 3rd argument).
///
/// TODO: If some precomputed state is needed (e.g. to make `expand_paletted...`
/// faster) then consider changing this into `Box<dyn Fn(...)>`.
pub type TransformFn = Box<dyn Fn(&[u8], &mut [u8], &Info) + Send + Sync>;
/// Returns a transformation function that should be applied to image rows based
/// on 1) decoded image metadata (`info`) and 2) the transformations requested
/// by the crate client (`transform`).
pub fn create_transform_fn(
    info: &Info,
    transform: Transformations,
) -> Result<TransformFn, DecodingError> {
    let color_type = info.color_type;
    let bit_depth = info.bit_depth as u8;
    // Whether an alpha channel needs to be synthesized - either because a
    // `tRNS` chunk is present or because the client asked for `ALPHA`.
    let trns = info.trns.is_some() || transform.contains(Transformations::ALPHA);
    // `ALPHA` implies `EXPAND` - expansion is required to make room for alpha.
    let expand =
        transform.contains(Transformations::EXPAND) || transform.contains(Transformations::ALPHA);
    let strip16 = bit_depth == 16 && transform.contains(Transformations::STRIP_16);
    // NOTE: the arms below are order-sensitive - e.g. the `Indexed` arm must be
    // tried before the generic `strip16` arm.
    match color_type {
        ColorType::Indexed if expand => {
            if info.palette.is_none() {
                Err(DecodingError::Format(
                    FormatErrorInner::PaletteRequired.into(),
                ))
            } else if let BitDepth::Sixteen = info.bit_depth {
                // This should have been caught earlier but let's check again. Can't hurt.
                Err(DecodingError::Format(
                    FormatErrorInner::InvalidColorBitDepth {
                        color_type: ColorType::Indexed,
                        bit_depth: BitDepth::Sixteen,
                    }
                    .into(),
                ))
            } else {
                Ok(if trns {
                    palette::create_expansion_into_rgba8(info)
                } else {
                    palette::create_expansion_into_rgb8(info)
                })
            }
        }
        ColorType::Grayscale | ColorType::GrayscaleAlpha if bit_depth < 8 && expand => {
            Ok(Box::new(if trns {
                expand_gray_u8_with_trns
            } else {
                expand_gray_u8
            }))
        }
        ColorType::Grayscale | ColorType::Rgb if expand && trns => {
            Ok(Box::new(if bit_depth == 8 {
                expand_trns_line
            } else if strip16 {
                expand_trns_and_strip_line16
            } else {
                assert_eq!(bit_depth, 16);
                expand_trns_line16
            }))
        }
        ColorType::Grayscale | ColorType::GrayscaleAlpha | ColorType::Rgb | ColorType::Rgba
            if strip16 =>
        {
            Ok(Box::new(transform_row_strip16))
        }
        // No requested transformation applies - emit the row unchanged.
        _ => Ok(Box::new(copy_row)),
    }
}
/// The identity transformation - copies `row` into `output_buffer` verbatim.
///
/// Panics (via `copy_from_slice`) if the two slices differ in length.
fn copy_row(row: &[u8], output_buffer: &mut [u8], _: &Info) {
    output_buffer.copy_from_slice(row);
}
/// Strips 16-bit samples down to 8 bits by keeping only the most significant
/// (first, big-endian) byte of every 2-byte sample.
fn transform_row_strip16(row: &[u8], output_buffer: &mut [u8], _: &Info) {
    for (index, sample) in row.chunks_exact(2).enumerate() {
        output_buffer[index] = sample[0];
    }
}
/// Unpacks `bit_depth`-wide samples from `input` and hands each sample (still
/// low-bit-aligned, not rescaled) together with its `channels`-wide output
/// chunk to `func`.
///
/// Samples are extracted from the most significant bits of each input byte
/// first (`shift` starts at `8 - bit_depth` and decreases), matching PNG's
/// packing order. Iteration stops when `output` runs out of whole chunks.
#[inline(always)]
fn unpack_bits<F>(input: &[u8], output: &mut [u8], channels: usize, bit_depth: u8, func: F)
where
    F: Fn(u8, &mut [u8]),
{
    // Only [1, 2, 4, 8] are valid bit depths
    assert!(matches!(bit_depth, 1 | 2 | 4 | 8));
    // Check that `input` is capable of producing a buffer as long as `output`:
    // number of shift lookups per bit depth * channels * input length
    assert!((8 / bit_depth as usize * channels).saturating_mul(input.len()) >= output.len());
    let mut buf_chunks = output.chunks_exact_mut(channels);
    let mut iter = input.iter();
    // `shift` iterates through the corresponding bit depth sequence:
    // 1 => &[7, 6, 5, 4, 3, 2, 1, 0],
    // 2 => &[6, 4, 2, 0],
    // 4 => &[4, 0],
    // 8 => &[0],
    //
    // `(0..8).step_by(bit_depth.into()).rev()` doesn't always optimize well so
    // shifts are calculated instead. (2023-08, Rust 1.71)
    if bit_depth == 8 {
        // Fast path: one byte per sample, no shifting required.
        for (&curr, chunk) in iter.zip(&mut buf_chunks) {
            func(curr, chunk);
        }
    } else {
        let mask = ((1u16 << bit_depth) - 1) as u8;
        // These variables are initialized in the loop
        let mut shift = -1;
        let mut curr = 0;
        for chunk in buf_chunks {
            // A negative `shift` signals that `curr` is exhausted and the next
            // input byte has to be fetched.
            if shift < 0 {
                shift = 8 - bit_depth as i32;
                curr = *iter.next().expect("input for unpack bits is not empty");
            }
            let pixel = (curr >> shift) & mask;
            func(pixel, chunk);
            shift -= bit_depth as i32;
        }
    }
}
/// Appends an alpha channel to each 8-bit pixel: pixels equal to the `tRNS`
/// sample become fully transparent (0), all others fully opaque (0xFF).
fn expand_trns_line(input: &[u8], output: &mut [u8], info: &Info) {
    let channels = info.color_type.samples();
    let trns = info.trns.as_deref();
    let src_pixels = input.chunks_exact(channels);
    let dst_pixels = output.chunks_exact_mut(channels + 1);
    for (src, dst) in src_pixels.zip(dst_pixels) {
        let alpha = if Some(src) == trns { 0 } else { 0xFF };
        dst[..channels].copy_from_slice(src);
        dst[channels] = alpha;
    }
}
/// Appends a 16-bit alpha channel to each 16-bit pixel: pixels matching the
/// `tRNS` sample become transparent (0x0000), all others opaque (0xFFFF).
fn expand_trns_line16(input: &[u8], output: &mut [u8], info: &Info) {
    // Two bytes per sample at bit depth 16.
    let in_bytes = info.color_type.samples() * 2;
    let trns = info.trns.as_deref();
    for (src, dst) in input
        .chunks_exact(in_bytes)
        .zip(output.chunks_exact_mut(in_bytes + 2))
    {
        dst[..in_bytes].copy_from_slice(src);
        // Both alpha bytes carry the same value (0x0000 or 0xFFFF).
        let alpha = if Some(src) == trns { 0x00 } else { 0xFF };
        dst[in_bytes] = alpha;
        dst[in_bytes + 1] = alpha;
    }
}
/// Combines `tRNS` alpha expansion with 16-to-8-bit stripping: each 16-bit
/// input pixel becomes an 8-bit pixel plus an 8-bit alpha byte.
fn expand_trns_and_strip_line16(input: &[u8], output: &mut [u8], info: &Info) {
    let channels = info.color_type.samples();
    let trns = info.trns.as_deref();
    for (src, dst) in input
        .chunks_exact(channels * 2)
        .zip(output.chunks_exact_mut(channels + 1))
    {
        // Keep only the most significant (big-endian first) byte of each sample.
        for (out, sample) in dst[..channels].iter_mut().zip(src.chunks_exact(2)) {
            *out = sample[0];
        }
        // Transparency is decided on the full 16-bit pixel, before stripping.
        dst[channels] = if Some(src) == trns { 0 } else { 0xFF };
    }
}
/// Expands sub-byte grayscale samples to full 8-bit values.
fn expand_gray_u8(row: &[u8], buffer: &mut [u8], info: &Info) {
    let bit_depth = info.bit_depth as u8;
    // Rescale to the full 0..=255 range, e.g. bit depth 2 maps 3 -> 255
    // (factor 85).
    let scale = 255 / ((1u16 << bit_depth) - 1) as u8;
    unpack_bits(row, buffer, 1, bit_depth, |gray, out| out[0] = gray * scale);
}
/// Expands sub-byte grayscale samples to 8 bits and appends an alpha byte:
/// samples matching the `tRNS` value become transparent, all others opaque.
fn expand_gray_u8_with_trns(row: &[u8], buffer: &mut [u8], info: &Info) {
    let bit_depth = info.bit_depth as u8;
    let scale = 255 / ((1u16 << bit_depth) - 1) as u8;
    let trns = info.trns.as_deref();
    unpack_bits(row, buffer, 2, bit_depth, |gray, out| {
        // Transparency is compared against the *unscaled* sample value.
        out[1] = match trns {
            Some(trns) if gray == trns[0] => 0,
            Some(_) | None => 0xFF,
        };
        out[0] = gray * scale;
    });
}

View File

@@ -0,0 +1,361 @@
//! Helpers for taking a slice of indices (indices into `PLTE` and/or `trNS`
//! entries) and transforming this into RGB or RGBA output.
//!
//! # Memoization
//!
//! To achieve higher throughput, `create_rgba_palette` combines entries from
//! `PLTE` and `trNS` chunks into a single lookup table. This is based on the
//! ideas explored in <https://crbug.com/706134>.
//!
//! Memoization is a trade-off:
//! * On one hand, memoization requires spending X ns before starting to call
//! `expand_paletted_...` functions.
//! * On the other hand, memoization improves the throughput of the
//! `expand_paletted_...` functions - they take Y ns less to process each byte
//!
//! Based on X and Y, we can try to calculate the breakeven point. It seems
//! that memoization is a net benefit for images bigger than around 13x13 pixels.
use super::{unpack_bits, TransformFn};
use crate::{BitDepth, Info};
/// Builds a transform that expands indexed pixels into RGB8, memoizing the
/// `PLTE` lookup table up front.
pub fn create_expansion_into_rgb8(info: &Info) -> TransformFn {
    let rgba_palette = create_rgba_palette(info);
    match info.bit_depth {
        // 8-bit indices take the specialized fast path.
        BitDepth::Eight => Box::new(move |input, output, _info| {
            expand_8bit_into_rgb8(input, output, &rgba_palette)
        }),
        // Sub-byte indices go through the generic bit unpacker.
        _ => Box::new(move |input, output, info| {
            expand_into_rgb8(input, output, info, &rgba_palette)
        }),
    }
}
/// Builds a transform that expands indexed pixels into RGBA8, memoizing the
/// combined `PLTE` + `tRNS` lookup table up front.
pub fn create_expansion_into_rgba8(info: &Info) -> TransformFn {
    let lut = create_rgba_palette(info);
    Box::new(move |input, output, info| expand_paletted_into_rgba8(input, output, info, &lut))
}
/// Combines the `PLTE` palette and the optional `tRNS` alpha entries into a
/// single 256-entry RGBA lookup table. Entries past the palette length stay
/// opaque black.
///
/// Panics if `info.palette` is `None` - callers must have verified its
/// presence beforehand.
fn create_rgba_palette(info: &Info) -> [[u8; 4]; 256] {
    let palette = info.palette.as_deref().expect("Caller should verify");
    let trns = info.trns.as_deref().unwrap_or(&[]);
    // > The tRNS chunk shall not contain more alpha values than there are palette
    // entries, but a tRNS chunk may contain fewer values than there are palette
    // entries. In this case, the alpha value for all remaining palette entries is
    // assumed to be 255.
    //
    // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
    // completely empty / all pixels are non-transparent.
    let trns = if trns.len() <= palette.len() / 3 {
        trns
    } else {
        &[]
    };
    // Default to black, opaque entries.
    let mut rgba_palette = [[0, 0, 0, 0xFF]; 256];
    // Copy `palette` (RGB) entries into `rgba_palette`. This may clobber alpha
    // values in `rgba_palette` - we need to fix this later.
    {
        let mut palette_iter = palette;
        let mut rgba_iter = &mut rgba_palette[..];
        while palette_iter.len() >= 4 {
            // Copying 4 bytes at a time is more efficient than copying 3.
            // OTOH, this clobbers the alpha value in `rgba_iter[0][3]` - we
            // need to fix this later.
            rgba_iter[0].copy_from_slice(&palette_iter[0..4]);
            palette_iter = &palette_iter[3..];
            rgba_iter = &mut rgba_iter[1..];
        }
        // Final entry (if any): fewer than 4 bytes remain, so copy exactly 3.
        if !palette_iter.is_empty() {
            rgba_iter[0][0..3].copy_from_slice(&palette_iter[0..3]);
        }
    }
    // Copy `trns` (alpha) entries into `rgba_palette`. `trns.len()` may be
    // smaller than `palette.len()` and therefore this is not sufficient to fix
    // all the clobbered alpha values.
    for (alpha, rgba) in trns.iter().copied().zip(rgba_palette.iter_mut()) {
        rgba[3] = alpha;
    }
    // Unclobber the remaining alpha values.
    for rgba in rgba_palette[trns.len()..(palette.len() / 3)].iter_mut() {
        rgba[3] = 0xFF;
    }
    rgba_palette
}
/// Fast path for expanding 8-bit palette indices into RGB8 output.
///
/// `output` is expected to hold exactly 3 bytes per index that is consumed.
fn expand_8bit_into_rgb8(input: &[u8], output: &mut [u8], rgba_palette: &[[u8; 4]; 256]) {
    let mut src = 0;
    let mut dst = 0;
    // Blit whole 4-byte palette entries while there is room for them; the
    // stray 4th (alpha) byte is overwritten by the next pixel's red channel.
    // Copying 4 bytes at a time is more efficient than 3.
    while output.len() - dst >= 4 {
        output[dst..dst + 4].copy_from_slice(&rgba_palette[input[src] as usize]);
        src += 1;
        dst += 3;
    }
    // The final pixel (if any) gets a plain 3-byte copy.
    if dst < output.len() {
        output[dst..dst + 3].copy_from_slice(&rgba_palette[input[src] as usize][..3]);
    }
}
/// Expands sub-byte palette indices into RGB8 via the generic bit unpacker.
fn expand_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info, rgba_palette: &[[u8; 4]; 256]) {
    unpack_bits(row, buffer, 3, info.bit_depth as u8, |index, out| {
        // The memoized table stores RGBA; the alpha byte is simply unused here.
        let [r, g, b, _] = rgba_palette[index as usize];
        out[0] = r;
        out[1] = g;
        out[2] = b;
    })
}
/// Expands palette indices (any supported bit depth) into RGBA8 by copying
/// whole entries from the memoized `PLTE` + `tRNS` lookup table.
fn expand_paletted_into_rgba8(
    row: &[u8],
    buffer: &mut [u8],
    info: &Info,
    rgba_palette: &[[u8; 4]; 256],
) {
    let depth = info.bit_depth as u8;
    unpack_bits(row, buffer, 4, depth, |index, out| {
        out.copy_from_slice(&rgba_palette[usize::from(index)])
    });
}
#[cfg(test)]
mod test {
    use crate::{BitDepth, ColorType, Info, Transformations};

    /// Old, non-memoized version of the code is used as a test oracle.
    fn oracle_expand_paletted_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info) {
        let palette = info.palette.as_deref().expect("Caller should verify");
        let black = [0, 0, 0];
        super::unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
            let rgb = palette
                .get(3 * i as usize..3 * i as usize + 3)
                .unwrap_or(&black);
            chunk[0] = rgb[0];
            chunk[1] = rgb[1];
            chunk[2] = rgb[2];
        })
    }

    /// Old, non-memoized version of the code is used as a test oracle.
    fn oracle_expand_paletted_into_rgba8(row: &[u8], buffer: &mut [u8], info: &Info) {
        let palette = info.palette.as_deref().expect("Caller should verify");
        let trns = info.trns.as_deref().unwrap_or(&[]);
        let black = [0, 0, 0];
        // > The tRNS chunk shall not contain more alpha values than there are palette
        // entries, but a tRNS chunk may contain fewer values than there are palette
        // entries. In this case, the alpha value for all remaining palette entries is
        // assumed to be 255.
        //
        // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
        // completely empty / all pixels are non-transparent.
        let trns = if trns.len() <= palette.len() / 3 {
            trns
        } else {
            &[]
        };
        super::unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
            let (rgb, a) = (
                palette
                    .get(3 * i as usize..3 * i as usize + 3)
                    .unwrap_or(&black),
                *trns.get(i as usize).unwrap_or(&0xFF),
            );
            chunk[0] = rgb[0];
            chunk[1] = rgb[1];
            chunk[2] = rgb[2];
            chunk[3] = a;
        });
    }

    /// Builds an indexed-color `Info` with the given bit depth, `PLTE` bytes,
    /// and optional `tRNS` bytes.
    fn create_info<'a>(src_bit_depth: u8, palette: &'a [u8], trns: Option<&'a [u8]>) -> Info<'a> {
        Info {
            color_type: ColorType::Indexed,
            bit_depth: BitDepth::from_u8(src_bit_depth).unwrap(),
            palette: Some(palette.into()),
            trns: trns.map(Into::into),
            ..Info::default()
        }
    }

    /// Runs the production transform over `src` and cross-checks the result
    /// against the non-memoized oracle before returning it.
    fn expand_paletted(
        src: &[u8],
        src_bit_depth: u8,
        palette: &[u8],
        trns: Option<&[u8]>,
    ) -> Vec<u8> {
        let info = create_info(src_bit_depth, palette, trns);
        // 3 output bytes (RGB) without `tRNS`, 4 (RGBA) with it.
        let output_bytes_per_input_sample = match trns {
            None => 3,
            Some(_) => 4,
        };
        let samples_count_per_byte = (8 / src_bit_depth) as usize;
        let samples_count = src.len() * samples_count_per_byte;
        let mut dst = vec![0; samples_count * output_bytes_per_input_sample];
        let transform_fn =
            super::super::create_transform_fn(&info, Transformations::EXPAND).unwrap();
        transform_fn(src, dst.as_mut_slice(), &info);
        {
            // Compare the memoization-based calculations with the old, non-memoized code.
            let mut simple_dst = vec![0; samples_count * output_bytes_per_input_sample];
            if trns.is_none() {
                oracle_expand_paletted_into_rgb8(src, &mut simple_dst, &info)
            } else {
                oracle_expand_paletted_into_rgba8(src, &mut simple_dst, &info)
            }
            assert_eq!(&dst, &simple_dst);
        }
        dst
    }

    #[test]
    fn test_expand_paletted_rgba_8bit() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[3, 7, 11, 15]), // trns
        );
        assert_eq!(actual, (0..16).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgb_8bit() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                3, 4, 5, // entry #1
                6, 7, 8, // entry #2
                9, 10, 11, // entry #3
            ],
            None, // trns
        );
        assert_eq!(actual, (0..12).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgba_4bit() {
        let actual = expand_paletted(
            &[0x01, 0x23], // src
            4,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[3, 7, 11, 15]), // trns
        );
        assert_eq!(actual, (0..16).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgb_4bit() {
        let actual = expand_paletted(
            &[0x01, 0x23], // src
            4,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                3, 4, 5, // entry #1
                6, 7, 8, // entry #2
                9, 10, 11, // entry #3
            ],
            None, // trns
        );
        assert_eq!(actual, (0..12).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgba_8bit_more_trns_entries_than_palette_entries() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[123; 5]), // trns
        );
        // Invalid (too-long) `trns` means that we'll use 0xFF / opaque alpha everywhere.
        assert_eq!(
            actual,
            vec![0, 1, 2, 0xFF, 4, 5, 6, 0xFF, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
        );
    }

    #[test]
    fn test_expand_paletted_rgba_8bit_less_trns_entries_than_palette_entries() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[3, 7]), // trns
        );
        // Too-short `trns` is treated differently from too-long - only missing entries are
        // replaced with 0XFF / opaque.
        assert_eq!(
            actual,
            vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
        );
    }

    #[test]
    fn test_create_rgba_palette() {
        // Independent reference implementation of the memoized table builder.
        fn create_expected_rgba_palette(plte: &[u8], trns: &[u8]) -> [[u8; 4]; 256] {
            let mut rgba = [[1, 2, 3, 4]; 256];
            for (i, rgba) in rgba.iter_mut().enumerate() {
                rgba[0] = plte.get(i * 3 + 0).map(|&r| r).unwrap_or(0);
                rgba[1] = plte.get(i * 3 + 1).map(|&g| g).unwrap_or(0);
                rgba[2] = plte.get(i * 3 + 2).map(|&b| b).unwrap_or(0);
                rgba[3] = trns.get(i * 1 + 0).map(|&a| a).unwrap_or(0xFF);
            }
            rgba
        }
        // Exhaustively cover every valid (plte_len, trns_len) combination
        // up to 32 palette entries.
        for plte_len in 1..=32 {
            for trns_len in 0..=plte_len {
                let plte: Vec<u8> = (0..plte_len * 3).collect();
                let trns: Vec<u8> = (0..trns_len).map(|alpha| alpha + 200).collect();
                let info = create_info(8, &plte, Some(&trns));
                let expected = create_expected_rgba_palette(&plte, &trns);
                let actual = super::create_rgba_palette(&info);
                assert_eq!(actual, expected);
            }
        }
    }
}

View File

@@ -0,0 +1,230 @@
use super::stream::{DecodingError, FormatErrorInner};
use super::zlib::UnfilterBuf;
use crate::common::BytesPerPixel;
use crate::filter::{unfilter, RowFilter};
use crate::Info;
/// Buffer for temporarily holding decompressed, not-yet-`unfilter`-ed rows.
pub(crate) struct UnfilteringBuffer {
    /// Vec containing the uncompressed image data currently being processed.
    data_stream: Vec<u8>,
    /// Index in `data_stream` where the previous row starts.
    /// This excludes the filter type byte - it points at the first byte of actual pixel data.
    /// The pixel data is already-`unfilter`-ed.
    ///
    /// If `prev_start == current_start` then it means that there is no previous row.
    prev_start: usize,
    /// Index in `data_stream` where the current row starts.
    /// This points at the filter type byte of the current row (i.e. the actual pixel data starts at `current_start + 1`)
    /// The pixel data is not-yet-`unfilter`-ed.
    ///
    /// `current_start` can wrap around the length.
    current_start: usize,
    /// Logical length of data that must be preserved. The bytes in
    /// `available..filled` form the decompressor's look-back window and must
    /// not be modified. Invariant: `available <= filled`.
    filled: usize,
    /// Length of data that can be modified, i.e. the past-the-end index of
    /// rows committed by the decompressor and ready to be unfiltered.
    available: usize,
    /// The number of bytes before we shift the buffer back.
    shift_back_limit: usize,
}
impl UnfilteringBuffer {
    /// Granularity (in bytes) by which the buffer is grown whenever the
    /// decompressor needs more writable output space.
    pub const GROWTH_BYTES: usize = 8 * 1024;

    /// Asserts in debug builds that all the invariants hold. No-op in release
    /// builds. Intended to be called after creating or mutating `self` to
    /// ensure that the final state preserves the invariants.
    fn debug_assert_invariants(&self) {
        debug_assert!(self.prev_start <= self.current_start);
        debug_assert!(self.current_start <= self.available);
        debug_assert!(self.available <= self.filled);
        debug_assert!(self.filled <= self.data_stream.len());
    }

    /// Create a buffer tuned for filtering rows of the image type.
    pub fn new(info: &Info<'_>) -> Self {
        // We don't need all of `info` here so if that becomes a structural problem then these
        // derived constants can be extracted into a parameter struct. For instance they may be
        // adjusted according to platform hardware such as cache sizes.
        let data_stream_capacity = {
            let max_data = info
                .checked_raw_row_length()
                // In the current state this is really dependent on IDAT sizes and the compression
                // settings. We aim to avoid overallocation here, but that occurs in part due to
                // the algorithm for draining the buffer, which at the time of writing is at each
                // individual IDAT chunk boundary. So this is set for a quadratic image roughly
                // fitting into a single 4k chunk at compression.. A very arbitrary choice made
                // from (probably overfitting) a benchmark of that image size. With a different
                // algorithm we may come to different buffer uses and have to re-evaluate.
                .and_then(|v| v.checked_mul(info.height.min(128) as usize))
                // In the worst case this is additional room for use of unmasked SIMD moves. But
                // the other idea here is that the allocator generally aligns the buffer.
                .and_then(|v| checked_next_multiple_of(v, 256))
                .unwrap_or(usize::MAX);
            // We do not want to pre-allocate too much in case of a faulty image (no DOS by
            // pretending to be very very large) and also we want to avoid allocating more data
            // than we need for the image itself.
            max_data.min(128 * 1024)
        };
        let shift_back_limit = {
            // Prefer shifting by powers of two and only after having done some number of
            // lines that then become free at the end of the buffer.
            let rowlen_pot = info
                .checked_raw_row_length()
                // Ensure some number of rows are actually present before shifting back, i.e. next
                // time around we want to be able to decode them without reallocating the buffer.
                .and_then(|v| v.checked_mul(4))
                // And also, we should be able to use aligned memcopy on the whole thing. Well at
                // least that is the idea but the parameter is just benchmarking. Higher numbers
                // did not result in performance gains but lowers also, so this is fickle. Maybe
                // our shift back behavior can not be tuned very well.
                .and_then(|v| checked_next_multiple_of(v, 64))
                .unwrap_or(isize::MAX as usize);
            // But never shift back before we have a number of pages freed.
            rowlen_pot.max(128 * 1024)
        };
        let result = Self {
            data_stream: Vec::with_capacity(data_stream_capacity),
            prev_start: 0,
            current_start: 0,
            filled: 0,
            available: 0,
            shift_back_limit,
        };
        result.debug_assert_invariants();
        result
    }

    /// Called to indicate that there is no previous row (e.g. when the current
    /// row is the first scanline of a given Adam7 pass).
    pub fn reset_prev_row(&mut self) {
        self.prev_start = self.current_start;
        self.debug_assert_invariants();
    }

    /// Discards all buffered data and resets every marker back to zero.
    pub fn reset_all(&mut self) {
        self.data_stream.clear();
        self.prev_start = 0;
        self.current_start = 0;
        self.filled = 0;
        self.available = 0;
    }

    /// Returns the previous (already `unfilter`-ed) row.
    pub fn prev_row(&self) -> &[u8] {
        &self.data_stream[self.prev_start..self.current_start]
    }

    /// Returns how many bytes of the current row are present in the buffer.
    pub fn curr_row_len(&self) -> usize {
        self.available - self.current_start
    }

    /// Returns a `&mut Vec<u8>` suitable for passing to
    /// `ReadDecoder.decode_image_data` or `StreamingDecoder.update`.
    ///
    /// Invariants of `self` depend on the assumption that the caller will only
    /// append new bytes to the returned vector (which is indeed the behavior of
    /// `ReadDecoder` and `StreamingDecoder`). TODO: Consider protecting the
    /// invariants by returning an append-only view of the vector
    /// (`FnMut(&[u8])`??? or maybe `std::io::Write`???).
    pub fn as_unfilled_buffer(&mut self) -> UnfilterBuf<'_> {
        if self.prev_start >= self.shift_back_limit
            // Avoid the shift back if the buffer is still very empty. Consider how we got here: a
            // previous decompression filled the buffer, then we unfiltered, we're now refilling
            // the buffer again. The condition implies, the previous decompression filled at most
            // half the buffer. Likely the same will happen again so the following decompression
            // attempt will not yet be limited by the buffer length.
            && self.filled >= self.data_stream.len() / 2
        {
            // We have to relocate the data to the start of the buffer. Benchmarking suggests that
            // the codegen for an unbounded range is better / different than the one for a bounded
            // range. We prefer the former if the data overhead is not too high. `16` was
            // determined experimentally and might be system (memory) dependent. There's also the
            // question if we could be a little smarter and avoid crossing page boundaries when
            // that is not required. Alas, microbenchmarking TBD.
            if let Some(16..) = self.data_stream.len().checked_sub(self.filled) {
                self.data_stream
                    .copy_within(self.prev_start..self.filled, 0);
            } else {
                self.data_stream.copy_within(self.prev_start.., 0);
            }
            // The data kept its relative position to `filled` which now lands exactly at
            // the distance between prev_start and filled.
            self.current_start -= self.prev_start;
            self.available -= self.prev_start;
            self.filled -= self.prev_start;
            self.prev_start = 0;
        }
        // Always leave at least `GROWTH_BYTES` of zeroed, writable space past
        // `filled` so the decompressor can make progress.
        if self.filled + Self::GROWTH_BYTES > self.data_stream.len() {
            self.data_stream.resize(self.filled + Self::GROWTH_BYTES, 0);
        }
        UnfilterBuf {
            buffer: &mut self.data_stream,
            filled: &mut self.filled,
            available: &mut self.available,
        }
    }

    /// Runs `unfilter` on the current row, and then shifts rows so that the current row becomes the previous row.
    ///
    /// Will panic if `self.curr_row_len() < rowlen`.
    pub fn unfilter_curr_row(
        &mut self,
        rowlen: usize,
        bpp: BytesPerPixel,
    ) -> Result<(), DecodingError> {
        debug_assert!(rowlen >= 2); // 1 byte for `FilterType` and at least 1 byte of pixel data.
        let (prev, row) = self.data_stream.split_at_mut(self.current_start);
        let prev: &[u8] = &prev[self.prev_start..];
        debug_assert!(prev.is_empty() || prev.len() == (rowlen - 1));
        // Get the filter type.
        let filter = RowFilter::from_u8(row[0]).ok_or(DecodingError::Format(
            FormatErrorInner::UnknownFilterMethod(row[0]).into(),
        ))?;
        let row = &mut row[1..rowlen];
        unfilter(filter, bpp, prev, row);
        // `+ 1` skips the filter-type byte so `prev_start` points at the first
        // pixel byte of the row that was just unfiltered.
        self.prev_start = self.current_start + 1;
        self.current_start += rowlen;
        self.debug_assert_invariants();
        Ok(())
    }
}
/// Backport of `usize::checked_next_multiple_of`.
///
/// Returns the smallest multiple of `factor` that is `>= val`, or `None` when
/// `factor` is zero or the rounded-up value would overflow `usize`.
fn checked_next_multiple_of(val: usize, factor: usize) -> Option<usize> {
    if factor == 0 {
        return None;
    }
    match val % factor {
        0 => Some(val),
        rem => val.checked_add(factor - rem),
    }
}
#[test]
fn next_multiple_of_backport_testsuite() {
    // (val, factor, expected) triples mirroring `usize::checked_next_multiple_of`.
    let cases: &[(usize, usize, Option<usize>)] = &[
        (1, 0, None),
        (2, 0, None),
        (1, 2, Some(2)),
        (2, 2, Some(2)),
        (2, 5, Some(5)),
        (1, usize::MAX, Some(usize::MAX)),
        (usize::MAX, 2, None),
    ];
    for &(val, factor, expected) in cases {
        assert_eq!(checked_next_multiple_of(val, factor), expected);
    }
}

213
vendor/png/src/decoder/zlib.rs vendored Normal file
View File

@@ -0,0 +1,213 @@
use super::{stream::FormatErrorInner, unfiltering_buffer::UnfilteringBuffer, DecodingError};
use fdeflate::Decompressor;
/// An inplace buffer for decompression and filtering of PNG rowlines.
///
/// The underlying data structure is a vector, with additional markers denoting a region of bytes
/// that are utilized by the decompression but not yet available to arbitrary modifications. The
/// caller can still shift around data between calls to the stream decompressor as long as the data
/// in the marked region is not modified and the indices adjusted accordingly. See
/// [`UnfilterRegion`] that contains these markers.
///
/// Violating the invariants, i.e. modifying bytes in the marked region, results in absurdly wacky
/// decompression output or panics but not undefined behavior.
pub struct UnfilterBuf<'data> {
    /// The data container. Starts with arbitrary data unrelated to the decoder, a slice of decoder
    /// private data followed by free space for further decoder output. The regions are delimited
    /// by `filled` and `available` which must be updated accordingly.
    pub(crate) buffer: &'data mut Vec<u8>,
    /// Where we record changes to the out position, i.e. the past-the-end
    /// index of bytes the decompressor has written so far.
    pub(crate) filled: &'data mut usize,
    /// Where we record changes to the available byte count, i.e. the
    /// past-the-end index of bytes released for modification by the caller.
    pub(crate) available: &'data mut usize,
}
/// A region into a buffer utilized as a [`UnfilterBuf`].
///
/// The span of data denoted by `available..filled` is the region of bytes that must be preserved
/// for use by the decompression algorithm (its look-back window). It may be moved, e.g. by
/// subtracting the same amount from both of these fields. Always ensure that
/// `available <= filled`; the library does not violate this invariant when modifying this struct
/// as an [`UnfilterBuf`].
#[derive(Default, Clone, Copy)]
pub struct UnfilterRegion {
    /// The past-the-end index of bytes that are allowed to be modified.
    ///
    /// Bytes from here up to `filled` belong to the decompressor's look-back
    /// window and must not be touched.
    pub available: usize,
    /// The past-the-end index of bytes that have been written to.
    pub filled: usize,
}
/// Ergonomics wrapper around `fdeflate::Decompressor` for zlib compressed data.
pub(super) struct ZlibStream {
    /// Current decoding state.
    state: Box<fdeflate::Decompressor>,
    /// If there has been a call to decompress already.
    started: bool,
    /// Ignore and do not calculate the Adler-32 checksum. Defaults to `true`.
    ///
    /// When set, `fdeflate` is told to skip its Adler-32 validation at the
    /// start of decompression (see [`Self::decompress`]).
    ///
    /// This flag should not be modified after decompression has started.
    ignore_adler32: bool,
}
impl ZlibStream {
    // [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#10Compression) says that
    // "deflate/inflate compression with a sliding window (which is an upper bound on the
    // distances appearing in the deflate stream) of at most 32768 bytes".
    //
    // `fdeflate` requires that we keep this many most recently decompressed bytes in the
    // `out_buffer` - this allows referring back to them when handling "length and distance
    // codes" in the deflate stream).
    const LOOKBACK_SIZE: usize = 32768;

    /// Creates a fresh decompressor; Adler-32 checking is disabled by default.
    pub(crate) fn new() -> Self {
        ZlibStream {
            state: Box::new(Decompressor::new()),
            started: false,
            ignore_adler32: true,
        }
    }

    /// Resets to a pristine state so a new zlib stream can be decoded.
    pub(crate) fn reset(&mut self) {
        self.started = false;
        *self.state = Decompressor::new();
    }

    /// Set the `ignore_adler32` flag and return `true` if the flag was
    /// successfully set.
    ///
    /// The default is `true`.
    ///
    /// This flag cannot be modified after decompression has started until the
    /// [ZlibStream] is reset.
    pub(crate) fn set_ignore_adler32(&mut self, flag: bool) -> bool {
        if !self.started {
            self.ignore_adler32 = flag;
            true
        } else {
            false
        }
    }

    /// Return the `ignore_adler32` flag.
    pub(crate) fn ignore_adler32(&self) -> bool {
        self.ignore_adler32
    }

    /// Fill the decoded buffer as far as possible from `data`.
    /// On success returns the number of consumed input bytes.
    pub(crate) fn decompress(
        &mut self,
        data: &[u8],
        image_data: &mut UnfilterBuf<'_>,
    ) -> Result<usize, DecodingError> {
        // There may be more data past the adler32 checksum at the end of the deflate stream. We
        // match libpng's default behavior and ignore any trailing data. In the future we may want
        // to add a flag to control this behavior.
        if self.state.is_done() {
            return Ok(data.len());
        }
        if !self.started && self.ignore_adler32 {
            self.state.ignore_adler32();
        }
        // Cap the output window so one call produces at most `GROWTH_BYTES`
        // of new data (the buffer was sized for that in `as_unfilled_buffer`).
        let (buffer, filled) = image_data.borrow_mut();
        let output_limit = (filled + UnfilteringBuffer::GROWTH_BYTES).min(buffer.len());
        let (in_consumed, out_consumed) = self
            .state
            .read(data, &mut buffer[..output_limit], filled, false)
            .map_err(|err| {
                DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
            })?;
        self.started = true;
        let filled = filled + out_consumed;
        image_data.filled(filled);
        if self.state.is_done() {
            // The whole stream is decoded - no look-back window is needed
            // anymore, everything can be committed.
            image_data.commit(filled);
        } else {
            // See [`Self::LOOKBACK_SIZE`].
            image_data.commit(filled.saturating_sub(Self::LOOKBACK_SIZE));
        }
        Ok(in_consumed)
    }

    /// Called after all consecutive IDAT chunks were handled.
    ///
    /// The compressed stream can be split on arbitrary byte boundaries. This enables some cleanup
    /// within the decompressor and flushing additional data which may have been kept back in case
    /// more data were passed to it.
    pub(crate) fn finish_compressed_chunks(
        &mut self,
        image_data: &mut UnfilterBuf<'_>,
    ) -> Result<(), DecodingError> {
        if !self.started {
            return Ok(());
        }
        if self.state.is_done() {
            // We can end up here only after the [`decompress`] call above has detected the state
            // to be done, too. In this case the filled and committed amount of data are already
            // equal to each other. So neither of them needs to be touched in any way.
            return Ok(());
        }
        let (_, mut filled) = image_data.borrow_mut();
        while !self.state.is_done() {
            let (buffer, _) = image_data.borrow_mut();
            // `end_of_input = true` flushes any data held back by the decoder.
            let (_in_consumed, out_consumed) =
                self.state.read(&[], buffer, filled, true).map_err(|err| {
                    DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
                })?;
            filled += out_consumed;
            if !self.state.is_done() {
                // Out of output space - grow the buffer and try again.
                image_data.flush_allocate();
            }
        }
        image_data.filled(filled);
        image_data.commit(filled);
        Ok(())
    }
}
impl UnfilterRegion {
    /// Use this region to decompress new filtered rowline data.
    ///
    /// Pass the wrapped buffer to
    /// [`StreamingDecoder::update`][`super::stream::StreamingDecoder::update`] to fill it with
    /// data and update the region indices.
    pub fn as_buf<'data>(&'data mut self, buffer: &'data mut Vec<u8>) -> UnfilterBuf<'data> {
        // Borrow both markers mutably at once via destructuring.
        let Self { available, filled } = self;
        UnfilterBuf {
            buffer,
            filled,
            available,
        }
    }
}
impl UnfilterBuf<'_> {
    /// Hands out the raw byte buffer together with the current fill index.
    pub(crate) fn borrow_mut(&mut self) -> (&mut [u8], usize) {
        let filled = *self.filled;
        (self.buffer.as_mut_slice(), filled)
    }

    /// Records the new past-the-end index of bytes written by the decompressor.
    pub(crate) fn filled(&mut self, filled: usize) {
        *self.filled = filled;
    }

    /// Records how many bytes are committed, i.e. released for modification.
    pub(crate) fn commit(&mut self, howmany: usize) {
        *self.available = howmany;
    }

    /// Grows the buffer by one zeroed 32 KiB chunk so decompression can continue.
    pub(crate) fn flush_allocate(&mut self) {
        let grown = self.buffer.len() + 32 * 1024;
        self.buffer.resize(grown, 0);
    }
}