Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

1023
vendor/png/src/adam7.rs vendored Normal file

File diff suppressed because it is too large Load Diff

48
vendor/png/src/benchable_apis.rs vendored Normal file
View File

@@ -0,0 +1,48 @@
//! Development-time-only helper module for exporting private APIs so that they can be benchmarked.
//! This module is gated behind the "benchmarks" feature.
use crate::adam7::{expand_pass, Adam7Iterator};
use crate::common::BytesPerPixel;
use crate::filter::{Filter, RowFilter};
use crate::{BitDepth, ColorType, Info};
/// Re-exporting `unfilter` to make it easier to benchmark, despite some items being only
/// `pub(crate)`: `fn unfilter`, `enum BytesPerPixel`.
///
/// `tbpp` is the whole number of bytes per pixel; it must be a value representable by
/// `BytesPerPixel` (1, 2, 3, 4, 6 or 8) or the conversion panics.
pub fn unfilter(filter: Filter, tbpp: u8, previous: &[u8], current: &mut [u8]) {
    let filter = RowFilter::from_method(filter).unwrap(); // RowFilter type is private
    let tbpp = BytesPerPixel::from_usize(tbpp as usize);
    crate::filter::unfilter(filter, tbpp, previous, current)
}
/// Benchmark helper that expands every Adam7 interlace pass from `buffer` into
/// the deinterlaced image `img`.
///
/// `width`/`height` describe the full image; `bpp` is the number of *bits* per
/// pixel (multiplied by a width and rounded up to whole bytes below).
pub fn adam7(img: &mut [u8], buffer: &[u8], width: u32, height: u32, bpp: u8) {
    // Number of bytes covering `width` pixels at `bpp` bits per pixel, rounded up.
    fn bytes_of_width(width: u32, bpp: u8) -> usize {
        let total = (u64::from(width) * u64::from(bpp)).div_ceil(8);
        usize::try_from(total).unwrap()
    }
    let img_row_stride = bytes_of_width(width, bpp);
    // `width`/`height` are already `u32` (the original `as u32` casts were
    // redundant), and `for` invokes `IntoIterator` implicitly.
    for adam7 in Adam7Iterator::new(width, height) {
        // We use the same buffer for all interlace passes, to avoid counting the creation time in
        // the benchmark here. But the expansion expects us to pass a slice so make sure we use the
        // correct one. As of writing the implementation is not sensitive to this but it may become
        // so.
        let used_bytes = bytes_of_width(adam7.width, bpp);
        expand_pass(img, img_row_stride, &buffer[..used_bytes], &adam7, bpp);
    }
}
pub use crate::decoder::transform::{create_transform_fn, TransformFn};
/// Builds an [`Info`] describing an indexed-color image from raw `PLTE` bytes,
/// optional `tRNS` bytes, and a raw bit depth.
///
/// Panics if `bit_depth` is not a value accepted by `BitDepth::from_u8`.
pub fn create_info_from_plte_trns_bitdepth<'a>(
    plte: &'a [u8],
    trns: Option<&'a [u8]>,
    bit_depth: u8,
) -> Info<'a> {
    let bit_depth = BitDepth::from_u8(bit_depth).unwrap();
    Info {
        bit_depth,
        color_type: ColorType::Indexed,
        palette: Some(plte.into()),
        trns: trns.map(Into::into),
        ..Info::default()
    }
}

108
vendor/png/src/chunk.rs vendored Normal file
View File

@@ -0,0 +1,108 @@
//! Chunk types and functions
#![allow(dead_code)]
#![allow(non_upper_case_globals)]
use core::fmt;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChunkType(pub [u8; 4]);

// -- Critical chunks --

/// Image header
pub const IHDR: ChunkType = ChunkType(*b"IHDR");
/// Palette
pub const PLTE: ChunkType = ChunkType(*b"PLTE");
/// Image data
pub const IDAT: ChunkType = ChunkType(*b"IDAT");
/// Image trailer
pub const IEND: ChunkType = ChunkType(*b"IEND");

// -- Ancillary chunks --

/// Transparency
pub const tRNS: ChunkType = ChunkType(*b"tRNS");
/// Background colour
pub const bKGD: ChunkType = ChunkType(*b"bKGD");
/// Image last-modification time
pub const tIME: ChunkType = ChunkType(*b"tIME");
/// Physical pixel dimensions
pub const pHYs: ChunkType = ChunkType(*b"pHYs");
/// Source system's pixel chromaticities
pub const cHRM: ChunkType = ChunkType(*b"cHRM");
/// Source system's gamma value
pub const gAMA: ChunkType = ChunkType(*b"gAMA");
/// sRGB color space chunk
pub const sRGB: ChunkType = ChunkType(*b"sRGB");
/// ICC profile chunk
pub const iCCP: ChunkType = ChunkType(*b"iCCP");
/// Coding-independent code points for video signal type identification chunk
pub const cICP: ChunkType = ChunkType(*b"cICP");
/// Mastering Display Color Volume chunk
pub const mDCV: ChunkType = ChunkType(*b"mDCV");
/// Content Light Level Information chunk
pub const cLLI: ChunkType = ChunkType(*b"cLLI");
/// EXIF metadata chunk
pub const eXIf: ChunkType = ChunkType(*b"eXIf");
/// Latin-1 uncompressed textual data
pub const tEXt: ChunkType = ChunkType(*b"tEXt");
/// Latin-1 compressed textual data
pub const zTXt: ChunkType = ChunkType(*b"zTXt");
/// UTF-8 textual data
pub const iTXt: ChunkType = ChunkType(*b"iTXt");
/// Significant bits
pub const sBIT: ChunkType = ChunkType(*b"sBIT");

// -- Extension chunks --

/// Animation control
pub const acTL: ChunkType = ChunkType(*b"acTL");
/// Frame control
pub const fcTL: ChunkType = ChunkType(*b"fcTL");
/// Frame data
pub const fdAT: ChunkType = ChunkType(*b"fdAT");

// -- Chunk type determination --

/// Bit 5 (value `0x20`) of each name byte is a property bit: clear for an
/// uppercase ASCII letter, set for a lowercase one.
const PROPERTY_BIT: u8 = 0x20;

/// Returns true if the chunk is critical (first byte uppercase).
pub fn is_critical(ChunkType(type_): ChunkType) -> bool {
    type_[0] & PROPERTY_BIT == 0
}

/// Returns true if the chunk is private (second byte lowercase).
pub fn is_private(ChunkType(type_): ChunkType) -> bool {
    type_[1] & PROPERTY_BIT != 0
}

/// Checks whether the reserved bit of the chunk name is set.
/// If it is set the chunk name is invalid.
pub fn reserved_set(ChunkType(type_): ChunkType) -> bool {
    type_[2] & PROPERTY_BIT != 0
}

/// Returns true if the chunk is safe to copy if unknown (fourth byte lowercase).
pub fn safe_to_copy(ChunkType(type_): ChunkType) -> bool {
    type_[3] & PROPERTY_BIT != 0
}

impl fmt::Debug for ChunkType {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Adapter that renders the four raw bytes as escaped characters.
        struct DebugType([u8; 4]);

        impl fmt::Debug for DebugType {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                self.0
                    .iter()
                    .try_for_each(|&c| write!(f, "{}", char::from(c).escape_debug()))
            }
        }

        f.debug_struct("ChunkType")
            .field("type", &DebugType(self.0))
            .field("critical", &is_critical(*self))
            .field("private", &is_private(*self))
            .field("reserved", &reserved_set(*self))
            .field("safecopy", &safe_to_copy(*self))
            .finish()
    }
}

971
vendor/png/src/common.rs vendored Normal file
View File

@@ -0,0 +1,971 @@
//! Common types shared between the encoder and decoder
use crate::text_metadata::{ITXtChunk, TEXtChunk, ZTXtChunk};
#[allow(unused_imports)] // used by doc comments only
use crate::Filter;
use crate::{chunk, encoder};
use io::Write;
use std::{borrow::Cow, convert::TryFrom, fmt, io};
/// Describes how a pixel is encoded.
///
/// The discriminant is the numeric color-type code handled by
/// [`ColorType::from_u8`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum ColorType {
    /// 1 grayscale sample.
    Grayscale = 0,
    /// 1 red sample, 1 green sample, 1 blue sample.
    Rgb = 2,
    /// 1 sample for the palette index.
    Indexed = 3,
    /// 1 grayscale sample, then 1 alpha sample.
    GrayscaleAlpha = 4,
    /// 1 red sample, 1 green sample, 1 blue sample, and finally, 1 alpha sample.
    Rgba = 6,
}
impl ColorType {
    /// Returns the number of samples used per pixel encoded in this way.
    pub fn samples(self) -> usize {
        self.samples_u8().into()
    }

    /// Number of samples per pixel, as a `u8`.
    pub(crate) fn samples_u8(self) -> u8 {
        use self::ColorType::*;
        match self {
            Grayscale | Indexed => 1,
            Rgb => 3,
            GrayscaleAlpha => 2,
            Rgba => 4,
        }
    }

    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<ColorType> {
        match n {
            0 => Some(ColorType::Grayscale),
            2 => Some(ColorType::Rgb),
            3 => Some(ColorType::Indexed),
            4 => Some(ColorType::GrayscaleAlpha),
            6 => Some(ColorType::Rgba),
            _ => None,
        }
    }

    /// Length in bytes of one raw row of `width` pixels, including the leading
    /// filter-type byte, or `None` if it does not fit in `usize`.
    pub(crate) fn checked_raw_row_length(self, depth: BitDepth, width: u32) -> Option<usize> {
        // No overflow can occur in 64 bits, we multiply 32-bit with 5 more bits.
        let bits = u64::from(width) * u64::from(self.samples_u8()) * u64::from(depth.into_u8());
        // div_ceil rounds partial trailing bytes up; +1 is the filter-type byte.
        TryFrom::try_from(1 + bits.div_ceil(8)).ok()
    }

    /// Length in bytes of one raw row of `width` pixels, including the leading
    /// filter-type byte. Unchecked counterpart of `checked_raw_row_length`.
    pub(crate) fn raw_row_length_from_width(self, depth: BitDepth, width: u32) -> usize {
        let samples = width as usize * self.samples();
        1 + match depth {
            BitDepth::Sixteen => samples * 2,
            BitDepth::Eight => samples,
            subbyte => {
                // Multiple samples are packed into one byte; round up.
                let samples_per_byte = 8 / subbyte as usize;
                let whole = samples / samples_per_byte;
                let fract = usize::from(samples % samples_per_byte > 0);
                whole + fract
            }
        }
    }

    /// Whether the PNG standard forbids this color type / bit depth pairing.
    pub(crate) fn is_combination_invalid(self, bit_depth: BitDepth) -> bool {
        // Section 11.2.2 of the PNG standard disallows several combinations
        // of bit depth and color type
        ((bit_depth == BitDepth::One || bit_depth == BitDepth::Two || bit_depth == BitDepth::Four)
            && (self == ColorType::Rgb
                || self == ColorType::GrayscaleAlpha
                || self == ColorType::Rgba))
            || (bit_depth == BitDepth::Sixteen && self == ColorType::Indexed)
    }

    /// Bits of one pixel: samples times bits per sample.
    pub(crate) fn bits_per_pixel(&self, bit_depth: BitDepth) -> usize {
        self.samples() * bit_depth as usize
    }

    /// Whole bytes of one pixel (sub-byte depths round up to one byte per sample).
    pub(crate) fn bytes_per_pixel(&self, bit_depth: BitDepth) -> usize {
        // If adjusting this for expansion or other transformation passes, remember to keep the old
        // implementation for bpp_in_prediction, which is internal to the png specification.
        self.samples() * ((bit_depth as usize + 7) >> 3)
    }
}
/// Bit depth of the PNG file.
/// Specifies the number of bits per sample.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BitDepth {
    One = 1,
    Two = 2,
    Four = 4,
    Eight = 8,
    Sixteen = 16,
}

/// Internal count of bytes per pixel.
/// This is used for filtering which never uses sub-byte units. This essentially reduces the number
/// of possible byte chunk lengths to a very small set of values appropriate to be defined as an
/// enum.
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub(crate) enum BytesPerPixel {
    One = 1,
    Two = 2,
    Three = 3,
    Four = 4,
    Six = 6,
    Eight = 8,
}

impl BitDepth {
    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<BitDepth> {
        // Accept exactly the five depths defined by the format; reject the rest.
        Some(match n {
            1 => BitDepth::One,
            2 => BitDepth::Two,
            4 => BitDepth::Four,
            8 => BitDepth::Eight,
            16 => BitDepth::Sixteen,
            _ => return None,
        })
    }

    /// Returns the raw numeric bit depth.
    pub(crate) fn into_u8(self) -> u8 {
        self as u8
    }
}
/// Pixel dimensions information
#[derive(Clone, Copy, Debug)]
pub struct PixelDimensions {
    /// Pixels per unit, X axis
    pub xppu: u32,
    /// Pixels per unit, Y axis
    pub yppu: u32,
    /// Either *Meter* or *Unspecified*
    pub unit: Unit,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
/// Physical unit of the pixel dimensions
pub enum Unit {
    Unspecified = 0,
    Meter = 1,
}

impl Unit {
    /// u8 -> Self. Temporary solution until Rust provides a canonical one.
    pub fn from_u8(n: u8) -> Option<Unit> {
        // Only the two values defined for the pHYs chunk are accepted.
        Some(match n {
            0 => Unit::Unspecified,
            1 => Unit::Meter,
            _ => return None,
        })
    }
}
/// How to reset buffer of an animated png (APNG) at the end of a frame.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum DisposeOp {
    /// Leave the buffer unchanged.
    None = 0,
    /// Clear buffer with the background color.
    Background = 1,
    /// Reset the buffer to the state before the current frame.
    Previous = 2,
}

impl DisposeOp {
    /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
    pub fn from_u8(n: u8) -> Option<DisposeOp> {
        Some(match n {
            0 => DisposeOp::None,
            1 => DisposeOp::Background,
            2 => DisposeOp::Previous,
            _ => return None,
        })
    }
}

impl fmt::Display for DisposeOp {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Names spelled exactly as in the APNG chunk specification.
        f.write_str(match self {
            DisposeOp::None => "DISPOSE_OP_NONE",
            DisposeOp::Background => "DISPOSE_OP_BACKGROUND",
            DisposeOp::Previous => "DISPOSE_OP_PREVIOUS",
        })
    }
}
/// How pixels are written into the buffer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BlendOp {
    /// Pixels overwrite the value at their position.
    Source = 0,
    /// The new pixels are blended into the current state based on alpha.
    Over = 1,
}

impl BlendOp {
    /// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
    pub fn from_u8(n: u8) -> Option<BlendOp> {
        Some(match n {
            0 => BlendOp::Source,
            1 => BlendOp::Over,
            _ => return None,
        })
    }
}

impl fmt::Display for BlendOp {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Names spelled exactly as in the APNG chunk specification.
        f.write_str(match self {
            BlendOp::Source => "BLEND_OP_SOURCE",
            BlendOp::Over => "BLEND_OP_OVER",
        })
    }
}
/// Frame control information
///
/// Serialized as the payload of an `fcTL` chunk (see [`FrameControl::encode`]).
#[derive(Clone, Copy, Debug)]
pub struct FrameControl {
    /// Sequence number of the animation chunk, starting from 0
    pub sequence_number: u32,
    /// Width of the following frame
    pub width: u32,
    /// Height of the following frame
    pub height: u32,
    /// X position at which to render the following frame
    pub x_offset: u32,
    /// Y position at which to render the following frame
    pub y_offset: u32,
    /// Frame delay fraction numerator
    pub delay_num: u16,
    /// Frame delay fraction denominator
    pub delay_den: u16,
    /// Type of frame area disposal to be done after rendering this frame
    pub dispose_op: DisposeOp,
    /// Type of frame area rendering for this frame
    pub blend_op: BlendOp,
}
impl Default for FrameControl {
    fn default() -> FrameControl {
        FrameControl {
            sequence_number: 0,
            // Zero-sized frame at the top-left corner until real values are set.
            width: 0,
            height: 0,
            x_offset: 0,
            y_offset: 0,
            // Default frame delay of 1/30 of a second.
            delay_num: 1,
            delay_den: 30,
            dispose_op: DisposeOp::None,
            blend_op: BlendOp::Source,
        }
    }
}
impl FrameControl {
    /// Overwrites the sequence number.
    pub fn set_seq_num(&mut self, s: u32) {
        self.sequence_number = s;
    }

    /// Advances the sequence number by `i`.
    pub fn inc_seq_num(&mut self, i: u32) {
        self.sequence_number += i;
    }

    /// Serializes this frame control as a 26-byte `fcTL` chunk and writes it out.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        // Layout: five big-endian u32 values, two big-endian u16 values, then
        // the two one-byte operation codes.
        let mut data = Vec::with_capacity(26);
        data.extend_from_slice(&self.sequence_number.to_be_bytes());
        data.extend_from_slice(&self.width.to_be_bytes());
        data.extend_from_slice(&self.height.to_be_bytes());
        data.extend_from_slice(&self.x_offset.to_be_bytes());
        data.extend_from_slice(&self.y_offset.to_be_bytes());
        data.extend_from_slice(&self.delay_num.to_be_bytes());
        data.extend_from_slice(&self.delay_den.to_be_bytes());
        data.push(self.dispose_op as u8);
        data.push(self.blend_op as u8);
        encoder::write_chunk(w, chunk::fcTL, &data)
    }
}
/// Animation control information
#[derive(Clone, Copy, Debug)]
pub struct AnimationControl {
    /// Number of frames
    pub num_frames: u32,
    /// Number of times to loop this APNG. 0 indicates infinite looping.
    pub num_plays: u32,
}

impl AnimationControl {
    /// Serializes this animation control as an `acTL` chunk and writes it out.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        // Two big-endian u32 values: frame count, then play count.
        let mut data = [0u8; 8];
        let (frames, plays) = data.split_at_mut(4);
        frames.copy_from_slice(&self.num_frames.to_be_bytes());
        plays.copy_from_slice(&self.num_plays.to_be_bytes());
        encoder::write_chunk(w, chunk::acTL, &data)
    }
}
/// The type and strength of applied compression.
///
/// This is a simple, high-level interface that will automatically choose
/// the appropriate DEFLATE compression mode and PNG filter.
///
/// If you need more control over the encoding parameters,
/// you can set the [DeflateCompression] and [Filter] manually.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum Compression {
    /// No compression whatsoever. Fastest, but results in large files.
    NoCompression,
    /// Extremely fast but light compression.
    ///
    /// Note: When used in streaming mode, this compression level can actually result in files
    /// *larger* than would be produced by `NoCompression` on incompressible data because
    /// it doesn't do any buffering of the output stream to detect whether the data is being compressed or not.
    Fastest,
    /// Extremely fast compression with a decent compression ratio.
    ///
    /// Significantly outperforms libpng and other popular encoders by using a [specialized DEFLATE
    /// implementation tuned for PNG](https://crates.io/crates/fdeflate), while still providing
    /// better compression ratio than the fastest modes of other encoders.
    ///
    /// Like `Compression::Fastest` this can currently produce files larger than `NoCompression` in
    /// streaming mode when given incompressible data. This may change in the future.
    Fast,
    /// Balances encoding speed and compression ratio
    Balanced,
    /// Spend much more time to produce a slightly smaller file than with `Balanced`.
    High,
}

impl Default for Compression {
    /// The default compression is [`Compression::Balanced`].
    fn default() -> Self {
        Self::Balanced
    }
}
/// Advanced compression settings with more customization options than [Compression].
///
/// Note that this setting only affects DEFLATE compression.
/// Another setting that influences the compression ratio and lets you choose
/// between encoding speed and compression ratio is the [Filter].
///
/// ### Stability guarantees
///
/// The implementation details of DEFLATE compression may evolve over time,
/// even without a semver-breaking change to the version of `png` crate.
///
/// If a certain compression setting is superseded by other options,
/// it may be marked deprecated and remapped to a different option.
/// You will see a deprecation notice when compiling code relying on such options.
#[non_exhaustive]
#[derive(Debug, Clone, Copy)]
pub enum DeflateCompression {
    /// Do not compress the data at all.
    ///
    /// Useful for incompressible images, or when speed is paramount and you don't care about size
    /// at all.
    ///
    /// This mode also disables filters, forcing [Filter::NoFilter].
    NoCompression,
    /// Excellent for creating lightly compressed PNG images very quickly.
    ///
    /// Uses the [fdeflate](https://crates.io/crates/fdeflate) crate under the hood to achieve
    /// speeds far exceeding what libpng is capable of while still providing a decent compression
    /// ratio.
    ///
    /// Note: When used in streaming mode, this compression level can actually result in files
    /// *larger* than would be produced by `NoCompression` because it doesn't do any buffering of
    /// the output stream to detect whether the data is being compressed or not.
    FdeflateUltraFast,
    /// Compression level between 1 and 9, where higher values mean better compression at the cost of
    /// speed.
    ///
    /// This is currently implemented via [flate2](https://crates.io/crates/flate2) crate
    /// by passing through the [compression level](flate2::Compression::new).
    ///
    /// The implementation details and the exact meaning of each level may change in the future,
    /// including in semver-compatible releases.
    Level(u8),
    // Other variants can be added in the future
}

impl Default for DeflateCompression {
    /// Matches the DEFLATE settings implied by [`Compression::Balanced`].
    fn default() -> Self {
        DeflateCompression::from_simple(Compression::Balanced)
    }
}

impl DeflateCompression {
    /// Maps the simple, high-level [Compression] setting onto concrete DEFLATE settings.
    pub(crate) fn from_simple(value: Compression) -> Self {
        match value {
            Compression::NoCompression => Self::NoCompression,
            // Both "fast" modes are served by the specialized fdeflate encoder.
            Compression::Fastest | Compression::Fast => Self::FdeflateUltraFast,
            Compression::Balanced => Self::Level(flate2::Compression::default().level() as u8),
            Compression::High => Self::Level(flate2::Compression::best().level() as u8),
        }
    }
}
/// An unsigned integer scaled version of a floating point value,
/// equivalent to an integer quotient with fixed denominator (100_000)).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ScaledFloat(u32);

impl ScaledFloat {
    // Fixed denominator; see the struct-level docs.
    const SCALING: f32 = 100_000.0;

    /// Gets whether the value is within the clamped range of this type.
    pub fn in_range(value: f32) -> bool {
        value >= 0.0 && (value * Self::SCALING).floor() <= u32::MAX as f32
    }

    /// Gets whether the value can be exactly converted in round-trip.
    #[allow(clippy::float_cmp)] // The exact float compare is the entire point here.
    pub fn exact(value: f32) -> bool {
        value == Self::reverse(Self::forward(value))
    }

    // Quantize: clamp negatives to zero, scale, truncate toward zero.
    fn forward(value: f32) -> u32 {
        (value.max(0.0) * Self::SCALING).floor() as u32
    }

    // Inverse of `forward`, up to quantization error.
    fn reverse(encoded: u32) -> f32 {
        encoded as f32 / Self::SCALING
    }

    /// Slightly inaccurate scaling and quantization.
    /// Clamps the value into the representable range if it is negative or too large.
    pub fn new(value: f32) -> Self {
        Self(Self::forward(value))
    }

    /// Fully accurate construction from a value scaled as per specification.
    pub fn from_scaled(val: u32) -> Self {
        Self(val)
    }

    /// Get the accurate encoded value.
    pub fn into_scaled(self) -> u32 {
        self.0
    }

    /// Get the unscaled value as a floating point.
    pub fn into_value(self) -> f32 {
        Self::reverse(self.0)
    }

    /// Writes the scaled value out as a `gAMA` chunk.
    pub(crate) fn encode_gama<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::gAMA, &self.into_scaled().to_be_bytes())
    }
}
/// Chromaticities of the color space primaries
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SourceChromaticities {
    pub white: (ScaledFloat, ScaledFloat),
    pub red: (ScaledFloat, ScaledFloat),
    pub green: (ScaledFloat, ScaledFloat),
    pub blue: (ScaledFloat, ScaledFloat),
}

impl SourceChromaticities {
    /// Builds chromaticities from raw (x, y) pairs, quantizing each coordinate.
    pub fn new(white: (f32, f32), red: (f32, f32), green: (f32, f32), blue: (f32, f32)) -> Self {
        // Quantize one (x, y) coordinate pair.
        fn scale(p: (f32, f32)) -> (ScaledFloat, ScaledFloat) {
            (ScaledFloat::new(p.0), ScaledFloat::new(p.1))
        }
        SourceChromaticities {
            white: scale(white),
            red: scale(red),
            green: scale(green),
            blue: scale(blue),
        }
    }

    /// Serializes the eight coordinates as big-endian u32 values in cHRM order:
    /// white, red, green, blue — x before y within each pair.
    pub fn to_be_bytes(self) -> [u8; 32] {
        let mut bytes = [0u8; 32];
        let pairs = [self.white, self.red, self.green, self.blue];
        for (i, (x, y)) in pairs.iter().enumerate() {
            bytes[i * 8..i * 8 + 4].copy_from_slice(&x.into_scaled().to_be_bytes());
            bytes[i * 8 + 4..i * 8 + 8].copy_from_slice(&y.into_scaled().to_be_bytes());
        }
        bytes
    }

    /// Writes the chromaticities out as a `cHRM` chunk.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::cHRM, &self.to_be_bytes())
    }
}
/// The rendering intent for an sRGB image.
///
/// Presence of this data also indicates that the image conforms to the sRGB color space.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SrgbRenderingIntent {
    /// For images preferring good adaptation to the output device gamut at the expense of colorimetric accuracy, such as photographs.
    Perceptual = 0,
    /// For images requiring colour appearance matching (relative to the output device white point), such as logos.
    RelativeColorimetric = 1,
    /// For images preferring preservation of saturation at the expense of hue and lightness, such as charts and graphs.
    Saturation = 2,
    /// For images requiring preservation of absolute colorimetry, such as previews of images destined for a different output device (proofs).
    AbsoluteColorimetric = 3,
}

impl SrgbRenderingIntent {
    /// Returns the raw byte stored in the sRGB chunk.
    pub(crate) fn into_raw(self) -> u8 {
        self as u8
    }

    /// Parses the raw sRGB chunk byte, rejecting out-of-range values.
    pub(crate) fn from_raw(raw: u8) -> Option<Self> {
        Some(match raw {
            0 => SrgbRenderingIntent::Perceptual,
            1 => SrgbRenderingIntent::RelativeColorimetric,
            2 => SrgbRenderingIntent::Saturation,
            3 => SrgbRenderingIntent::AbsoluteColorimetric,
            _ => return None,
        })
    }

    /// Writes the rendering intent out as an `sRGB` chunk.
    pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
        encoder::write_chunk(w, chunk::sRGB, &[self.into_raw()])
    }
}
/// Coding-independent code points (cICP) specify the color space (primaries),
/// transfer function, matrix coefficients and scaling factor of the image using
/// the code points specified in [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273).
///
/// See <https://www.w3.org/TR/png-3/#cICP-chunk> for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CodingIndependentCodePoints {
    /// Id number of the color primaries defined in
    /// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) in "Table 2 -
    /// Interpretation of colour primaries (ColourPrimaries) value".
    pub color_primaries: u8,
    /// Id number of the transfer characteristics defined in
    /// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) in "Table 3 -
    /// Interpretation of transfer characteristics (TransferCharacteristics)
    /// value".
    pub transfer_function: u8,
    /// Id number of the matrix coefficients defined in
    /// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) in "Table 4 -
    /// Interpretation of matrix coefficients (MatrixCoefficients) value".
    ///
    /// This field is included to faithfully replicate the base
    /// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) specification, but matrix coefficients
    /// will always be set to 0, because RGB is currently the only supported color mode in PNG.
    pub matrix_coefficients: u8,
    /// Whether the image is
    /// [a full range image](https://www.w3.org/TR/png-3/#dfn-full-range-image)
    /// or
    /// [a narrow range image](https://www.w3.org/TR/png-3/#dfn-narrow-range-image).
    ///
    /// This field is included to faithfully replicate the base
    /// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) specification, but it has limited
    /// practical application to PNG images, because narrow-range images are [quite
    /// rare](https://github.com/w3c/png/issues/312#issuecomment-2327349614) in practice.
    pub is_video_full_range_image: bool,
}
/// Mastering Display Color Volume (mDCV) used at the point of content creation,
/// as specified in [SMPTE-ST-2086](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8353899).
///
/// See <https://www.w3.org/TR/png-3/#mDCV-chunk> for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MasteringDisplayColorVolume {
    /// Mastering display chromaticities.
    pub chromaticities: SourceChromaticities,
    /// Mastering display maximum luminance.
    ///
    /// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
    /// is set to `10000000` then it indicates 1000 cd/m^2.
    pub max_luminance: u32,
    /// Mastering display minimum luminance.
    ///
    /// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
    /// is set to `10000000` then it indicates 1000 cd/m^2.
    pub min_luminance: u32,
}
/// Content light level information of HDR content.
///
/// See <https://www.w3.org/TR/png-3/#cLLI-chunk> for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ContentLightLevelInfo {
    /// Maximum Content Light Level indicates the maximum light level of any
    /// single pixel (in cd/m^2, also known as nits) of the entire playback
    /// sequence.
    ///
    /// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
    /// is set to `10000000` then it indicates 1000 cd/m^2.
    ///
    /// A value of zero means that the value is unknown or not currently calculable.
    pub max_content_light_level: u32,
    /// Maximum Frame Average Light Level indicates the maximum value of the
    /// frame average light level (in cd/m^2, also known as nits) of the entire
    /// playback sequence. It is calculated by first averaging the decoded
    /// luminance values of all the pixels in each frame, and then using the
    /// value for the frame with the highest value.
    ///
    /// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
    /// is set to `10000000` then it indicates 1000 cd/m^2.
    ///
    /// A value of zero means that the value is unknown or not currently calculable.
    pub max_frame_average_light_level: u32,
}
/// PNG info struct
///
/// Aggregates the `IHDR` header fields together with the contents of all
/// supported ancillary chunks.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct Info<'a> {
    /// Width of the image in pixels.
    pub width: u32,
    /// Height of the image in pixels.
    pub height: u32,
    /// Bits per sample.
    pub bit_depth: BitDepth,
    /// How colors are stored in the image.
    pub color_type: ColorType,
    /// Whether the image data is interlaced.
    pub interlaced: bool,
    /// The image's `sBIT` chunk, if present; contains significant bits of the sample.
    pub sbit: Option<Cow<'a, [u8]>>,
    /// The image's `tRNS` chunk, if present; contains the alpha channel of the image's palette, 1 byte per entry.
    pub trns: Option<Cow<'a, [u8]>>,
    /// The image's `pHYs` chunk, if present; physical pixel dimensions.
    pub pixel_dims: Option<PixelDimensions>,
    /// The image's `PLTE` chunk, if present; contains the RGB channels (in that order) of the image's palettes, 3 bytes per entry (1 per channel).
    pub palette: Option<Cow<'a, [u8]>>,
    /// The contents of the image's gAMA chunk, if present.
    /// Prefer `source_gamma` to also get the derived replacement gamma from sRGB chunks.
    pub gama_chunk: Option<ScaledFloat>,
    /// The contents of the image's `cHRM` chunk, if present.
    /// Prefer `source_chromaticities` to also get the derived replacements from sRGB chunks.
    pub chrm_chunk: Option<SourceChromaticities>,
    /// The contents of the image's `bKGD` chunk, if present.
    pub bkgd: Option<Cow<'a, [u8]>>,
    /// The `fcTL` chunk of the current frame, if this is an APNG.
    pub frame_control: Option<FrameControl>,
    /// The `acTL` chunk, if this is an APNG.
    pub animation_control: Option<AnimationControl>,
    /// Gamma of the source system.
    /// Set by both `gAMA` as well as to a replacement by `sRGB` chunk.
    pub source_gamma: Option<ScaledFloat>,
    /// Chromaticities of the source system.
    /// Set by both `cHRM` as well as to a replacement by `sRGB` chunk.
    pub source_chromaticities: Option<SourceChromaticities>,
    /// The rendering intent of an SRGB image.
    ///
    /// Presence of this value also indicates that the image conforms to the SRGB color space.
    pub srgb: Option<SrgbRenderingIntent>,
    /// The ICC profile for the image.
    pub icc_profile: Option<Cow<'a, [u8]>>,
    /// The coding-independent code points for video signal type identification of the image.
    pub coding_independent_code_points: Option<CodingIndependentCodePoints>,
    /// The mastering display color volume for the image.
    pub mastering_display_color_volume: Option<MasteringDisplayColorVolume>,
    /// The content light information for the image.
    pub content_light_level: Option<ContentLightLevelInfo>,
    /// The EXIF metadata for the image.
    pub exif_metadata: Option<Cow<'a, [u8]>>,
    /// tEXt field
    pub uncompressed_latin1_text: Vec<TEXtChunk>,
    /// zTXt field
    pub compressed_latin1_text: Vec<ZTXtChunk>,
    /// iTXt field
    pub utf8_text: Vec<ITXtChunk>,
}
impl Default for Info<'_> {
    fn default() -> Info<'static> {
        // An empty 0x0 grayscale, 8-bit, non-interlaced image with no
        // ancillary data; fields listed in struct declaration order.
        Info {
            width: 0,
            height: 0,
            bit_depth: BitDepth::Eight,
            color_type: ColorType::Grayscale,
            interlaced: false,
            sbit: None,
            trns: None,
            pixel_dims: None,
            palette: None,
            gama_chunk: None,
            chrm_chunk: None,
            bkgd: None,
            frame_control: None,
            animation_control: None,
            source_gamma: None,
            source_chromaticities: None,
            srgb: None,
            icc_profile: None,
            coding_independent_code_points: None,
            mastering_display_color_volume: None,
            content_light_level: None,
            exif_metadata: None,
            uncompressed_latin1_text: Vec::new(),
            compressed_latin1_text: Vec::new(),
            utf8_text: Vec::new(),
        }
    }
}
impl Info<'_> {
/// A utility constructor for a default info with width and height.
pub fn with_size(width: u32, height: u32) -> Self {
Info {
width,
height,
..Default::default()
}
}
/// Size of the image, width then height.
pub fn size(&self) -> (u32, u32) {
(self.width, self.height)
}
/// Returns true if the image is an APNG image.
pub fn is_animated(&self) -> bool {
self.frame_control.is_some() && self.animation_control.is_some()
}
/// Returns the frame control information of the image.
pub fn animation_control(&self) -> Option<&AnimationControl> {
self.animation_control.as_ref()
}
/// Returns the frame control information of the current frame
pub fn frame_control(&self) -> Option<&FrameControl> {
self.frame_control.as_ref()
}
/// Returns the number of bits per pixel.
pub fn bits_per_pixel(&self) -> usize {
self.color_type.bits_per_pixel(self.bit_depth)
}
/// Returns the number of bytes per pixel.
pub fn bytes_per_pixel(&self) -> usize {
// If adjusting this for expansion or other transformation passes, remember to keep the old
// implementation for bpp_in_prediction, which is internal to the png specification.
self.color_type.bytes_per_pixel(self.bit_depth)
}
/// Return the number of bytes for this pixel used in prediction.
///
/// Some filters use prediction, over the raw bytes of a scanline. Where a previous pixel is
/// require for such forms the specification instead references previous bytes. That is, for
/// a gray pixel of bit depth 2, the pixel used in prediction is actually 4 pixels prior. This
/// has the consequence that the number of possible values is rather small. To make this fact
/// more obvious in the type system and the optimizer we use an explicit enum here.
pub(crate) fn bpp_in_prediction(&self) -> BytesPerPixel {
BytesPerPixel::from_usize(self.bytes_per_pixel())
}
/// Returns the number of bytes needed for one deinterlaced image.
pub fn raw_bytes(&self) -> usize {
self.height as usize * self.raw_row_length()
}
/// Returns the number of bytes needed for one deinterlaced row.
pub fn raw_row_length(&self) -> usize {
self.raw_row_length_from_width(self.width)
}
pub(crate) fn checked_raw_row_length(&self) -> Option<usize> {
self.color_type
.checked_raw_row_length(self.bit_depth, self.width)
}
/// Returns the number of bytes needed for one deinterlaced row of width `width`.
pub fn raw_row_length_from_width(&self, width: u32) -> usize {
self.color_type
.raw_row_length_from_width(self.bit_depth, width)
}
/// Gamma dependent on sRGB chunk
pub fn gamma(&self) -> Option<ScaledFloat> {
if self.srgb.is_some() {
Some(crate::srgb::substitute_gamma())
} else {
self.gama_chunk
}
}
/// Chromaticities dependent on sRGB chunk
pub fn chromaticities(&self) -> Option<SourceChromaticities> {
if self.srgb.is_some() {
Some(crate::srgb::substitute_chromaticities())
} else {
self.chrm_chunk
}
}
    /// Mark the image data as conforming to the SRGB color space with the specified rendering intent.
    ///
    /// Any ICC profiles will be ignored.
    ///
    /// Source gamma and chromaticities will be written only if they're set to fallback
    /// values specified in [11.3.2.5](https://www.w3.org/TR/png-3/#sRGB-gAMA-cHRM).
    pub(crate) fn set_source_srgb(&mut self, rendering_intent: SrgbRenderingIntent) {
        self.srgb = Some(rendering_intent);
        // sRGB replaces any previously configured ICC profile (see doc above).
        self.icc_profile = None;
    }
}
impl BytesPerPixel {
    /// Converts a whole byte count per pixel into the enum.
    ///
    /// Panics (via `unreachable!`) on any value that cannot occur as a byte-rounded
    /// pixel width in a PNG.
    pub(crate) fn from_usize(bpp: usize) -> Self {
        match bpp {
            1 => BytesPerPixel::One,
            2 => BytesPerPixel::Two,
            3 => BytesPerPixel::Three,
            4 => BytesPerPixel::Four,
            6 => BytesPerPixel::Six,   // Only rgb×16bit
            8 => BytesPerPixel::Eight, // Only rgba×16bit
            _ => unreachable!("Not a possible byte rounded pixel width"),
        }
    }
    /// Returns the plain byte count backing this enum variant.
    pub(crate) fn into_usize(self) -> usize {
        self as usize
    }
}
bitflags::bitflags! {
    /// Output transformations
    ///
    /// Many flags from libpng are not yet supported. A PR discussing/adding them would be nice.
    ///
    #[doc = "
    ```c
    /// Discard the alpha channel
    const STRIP_ALPHA = 0x0002; // read only
    /// Expand 1, 2 and 4-bit samples to bytes
    const PACKING = 0x0004; // read and write
    /// Change order of packed pixels to LSB first
    const PACKSWAP = 0x0008; // read and write
    /// Invert monochrome images
    const INVERT_MONO = 0x0020; // read and write
    /// Normalize pixels to the sBIT depth
    const SHIFT = 0x0040; // read and write
    /// Flip RGB to BGR; RGBA to BGRA
    const BGR = 0x0080; // read and write
    /// Flip RGBA to ARGB or GA to AG
    const SWAP_ALPHA = 0x0100; // read and write
    /// Byte-swap 16-bit samples
    const SWAP_ENDIAN = 0x0200; // read and write
    /// Change alpha from opacity to transparency
    const INVERT_ALPHA = 0x0400; // read and write
    const STRIP_FILLER = 0x0800; // write only
    const STRIP_FILLER_BEFORE = 0x0800; // write only
    const STRIP_FILLER_AFTER = 0x1000; // write only
    const GRAY_TO_RGB = 0x2000; // read only
    const EXPAND_16 = 0x4000; // read only
    /// Similar to STRIP_16 but in libpng considering gamma?
    /// Not entirely sure the documentation says it is more
    /// accurate but doesn't say precisely how.
    const SCALE_16 = 0x8000; // read only
    ```
    "]
    #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    pub struct Transformations: u32 {
        /// No transformation
        const IDENTITY = 0x00000; // read and write
        /// Strip 16-bit samples to 8 bits
        const STRIP_16 = 0x00001; // read only
        /// Expand paletted images to RGB; expand grayscale images of
        /// less than 8-bit depth to 8-bit depth; and expand tRNS chunks
        /// to alpha channels.
        const EXPAND = 0x00010; // read only
        /// Expand paletted images to include an alpha channel. Implies `EXPAND`.
        const ALPHA = 0x10000; // read only
    }
}
impl Transformations {
    /// Transform every input to 8bit grayscale or color.
    ///
    /// This sets `EXPAND` and `STRIP_16` which is similar to the default transformation used by
    /// this library prior to `0.17`.
    pub fn normalize_to_color8() -> Transformations {
        Transformations::STRIP_16 | Transformations::EXPAND
    }
}
/// Instantiate the default transformations, the identity transform.
impl Default for Transformations {
fn default() -> Transformations {
Transformations::IDENTITY
}
}
/// An error caused by the caller providing an invalid parameter; wraps a
/// [`ParameterErrorKind`] describing the specific misuse.
#[derive(Debug)]
pub struct ParameterError {
    inner: ParameterErrorKind,
}
/// The specific kind of parameter misuse, see [`ParameterError`].
#[derive(Debug)]
pub(crate) enum ParameterErrorKind {
    /// A provided buffer must have the exact size to hold the image data. Where the buffer can
    /// be allocated by the caller, they must ensure that it has a minimum size as hinted previously.
    /// Even though the size is calculated from image data, this counts as a parameter error
    /// because they must react to a value produced by this library, which can have been subjected
    /// to limits.
    ImageBufferSize { expected: usize, actual: usize },
    /// A bit like returning `None` from an iterator.
    /// We use it to differentiate between failing to seek to the next image in a sequence and the
    /// absence of a next image. This is an error of the caller because they should have checked
    /// the number of images by inspecting the header data returned when opening the image. This
    /// library will perform the checks necessary to ensure that data was accurate or error with a
    /// format error otherwise.
    PolledAfterEndOfImage,
    /// Attempt to continue decoding after a fatal, non-resumable error was reported (e.g. after
    /// [`DecodingError::Format`]). The only case when it is possible to resume after an error
    /// is an `UnexpectedEof` scenario - see [`DecodingError::IoError`].
    PolledAfterFatalError,
}
impl From<ParameterErrorKind> for ParameterError {
    /// Wraps the detailed kind into the public error type.
    fn from(kind: ParameterErrorKind) -> Self {
        ParameterError { inner: kind }
    }
}
impl fmt::Display for ParameterError {
    /// Renders a human-readable description of the parameter misuse.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        use ParameterErrorKind::*;
        match self.inner {
            ImageBufferSize { expected, actual } => {
                write!(fmt, "wrong data size, expected {} got {}", expected, actual)
            }
            PolledAfterEndOfImage => write!(fmt, "End of image has been reached"),
            PolledAfterFatalError => {
                // Fixed message typo: "encounted" -> "encountered".
                write!(fmt, "A fatal decoding error has been encountered earlier")
            }
        }
    }
}

128
vendor/png/src/decoder/interlace_info.rs vendored Normal file
View File

@@ -0,0 +1,128 @@
use std::ops::Range;
use crate::adam7::{Adam7Info, Adam7Iterator};
/// Describes which interlacing algorithm applies to a decoded row.
///
/// PNG (2003) specifies two interlace modes, but reserves future extensions.
///
/// See also [Reader.next_interlaced_row](crate::Reader::next_interlaced_row).
#[derive(Clone, Copy, Debug)]
pub enum InterlaceInfo {
    /// The `null` method means no interlacing: rows are decoded one after another,
    /// top to bottom.
    Null(NullInfo),
    /// [The `Adam7` algorithm](https://en.wikipedia.org/wiki/Adam7_algorithm) derives its name
    /// from doing 7 passes over the image, only decoding a subset of all pixels in each pass.
    /// The following table shows pictorially what parts of each 8x8 area of the image is found in
    /// each pass:
    ///
    /// ```txt
    /// 1 6 4 6 2 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    /// 3 6 4 6 3 6 4 6
    /// 7 7 7 7 7 7 7 7
    /// 5 6 5 6 5 6 5 6
    /// 7 7 7 7 7 7 7 7
    /// ```
    Adam7(Adam7Info),
}
/// Row bookkeeping for a non-interlaced (`null` method) image.
#[derive(Clone, Copy, Debug)]
pub struct NullInfo {
    // 0-based row index within the image (produced by `InterlaceInfoIter`).
    line: u32,
}
impl InterlaceInfo {
    /// The 0-based line number of this row (within its pass for Adam7).
    pub(crate) fn line_number(&self) -> u32 {
        match *self {
            InterlaceInfo::Null(NullInfo { line }) => line,
            InterlaceInfo::Adam7(Adam7Info { line, .. }) => line,
        }
    }
    /// The Adam7 pass details, or `None` for a non-interlaced row.
    pub(crate) fn get_adam7_info(&self) -> Option<&Adam7Info> {
        if let InterlaceInfo::Adam7(adam7info) = self {
            Some(adam7info)
        } else {
            None
        }
    }
}
/// Iterator yielding one [`InterlaceInfo`] per decoded row, in decode order.
pub(crate) struct InterlaceInfoIter(IterImpl);
impl InterlaceInfoIter {
    /// An iterator that yields no rows at all.
    pub fn empty() -> Self {
        Self(IterImpl::None(0..0))
    }
    /// Iterates the rows of a `width`×`height` frame; yields Adam7 pass rows
    /// when `interlaced` is set, plain top-to-bottom rows otherwise.
    pub fn new(width: u32, height: u32, interlaced: bool) -> Self {
        let inner = if interlaced {
            IterImpl::Adam7(Adam7Iterator::new(width, height))
        } else {
            IterImpl::None(0..height)
        };
        Self(inner)
    }
}
impl Iterator for InterlaceInfoIter {
    type Item = InterlaceInfo;
    fn next(&mut self) -> Option<InterlaceInfo> {
        match &mut self.0 {
            IterImpl::None(lines) => {
                let line = lines.next()?;
                Some(InterlaceInfo::Null(NullInfo { line }))
            }
            IterImpl::Adam7(adam7) => adam7.next().map(InterlaceInfo::Adam7),
        }
    }
}
// Backing state: either a plain row counter or an Adam7 pass iterator.
enum IterImpl {
    None(Range<u32>),
    Adam7(Adam7Iterator),
}
#[cfg(test)]
mod test {
    use super::*;

    /// Collects all line numbers yielded by `iter`.
    fn lines_of(iter: InterlaceInfoIter) -> Vec<u32> {
        iter.map(|info| info.line_number()).collect()
    }

    #[test]
    fn null() {
        let got = lines_of(InterlaceInfoIter::new(8, 8, false));
        assert_eq!(got, vec![0, 1, 2, 3, 4, 5, 6, 7]);
    }

    #[test]
    fn adam7() {
        let expected = vec![
            0, // pass 1
            0, // pass 2
            0, // pass 3
            0, 1, // pass 4
            0, 1, // pass 5
            0, 1, 2, 3, // pass 6
            0, 1, 2, 3, // pass 7
        ];
        assert_eq!(lines_of(InterlaceInfoIter::new(8, 8, true)), expected);
    }

    #[test]
    fn empty() {
        assert_eq!(lines_of(InterlaceInfoIter::empty()), vec![]);
    }
}

732
vendor/png/src/decoder/mod.rs vendored Normal file
View File

@@ -0,0 +1,732 @@
mod interlace_info;
mod read_decoder;
pub(crate) mod stream;
pub(crate) mod transform;
mod unfiltering_buffer;
mod zlib;
use self::read_decoder::{ImageDataCompletionStatus, ReadDecoder};
use self::stream::{DecodeOptions, DecodingError, FormatErrorInner};
use self::transform::{create_transform_fn, TransformFn};
use self::unfiltering_buffer::UnfilteringBuffer;
use std::io::{BufRead, Seek};
use std::mem;
use crate::adam7::Adam7Info;
use crate::common::{
BitDepth, BytesPerPixel, ColorType, Info, ParameterErrorKind, Transformations,
};
use crate::FrameControl;
pub use zlib::{UnfilterBuf, UnfilterRegion};
pub use interlace_info::InterlaceInfo;
use interlace_info::InterlaceInfoIter;
/*
pub enum InterlaceHandling {
/// Outputs the raw rows
RawRows,
/// Fill missing the pixels from the existing ones
Rectangle,
/// Only fill the needed pixels
Sparkle
}
*/
/// Output info.
///
/// This describes one particular frame of the image that was written into the output buffer.
#[derive(Debug, PartialEq, Eq)]
pub struct OutputInfo {
    /// The pixel width of this frame.
    pub width: u32,
    /// The pixel height of this frame.
    pub height: u32,
    /// The chosen output color type.
    pub color_type: ColorType,
    /// The chosen output bit depth.
    pub bit_depth: BitDepth,
    /// The byte count of each scan line in the image, as written to the output buffer.
    pub line_size: usize,
}
impl OutputInfo {
    /// Returns the size needed to hold a decoded frame.
    ///
    /// If the caller's output buffer was larger, bytes past this count should be
    /// ignored; they may still have been changed.
    pub fn buffer_size(&self) -> usize {
        let rows = self.height as usize;
        rows * self.line_size
    }
}
#[derive(Clone, Copy, Debug)]
/// Limits on the resources the `Decoder` is allowed to use
pub struct Limits {
    /// maximum number of bytes the decoder is allowed to allocate, default is 64 MiB
    pub bytes: usize,
}
impl Limits {
    /// Deducts `bytes` from the remaining allocation budget, failing with
    /// `LimitsExceeded` once the budget is exhausted.
    pub(crate) fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
        match self.bytes.checked_sub(bytes) {
            Some(remaining) => {
                self.bytes = remaining;
                Ok(())
            }
            None => Err(DecodingError::LimitsExceeded),
        }
    }
}
impl Default for Limits {
    fn default() -> Limits {
        // Default allocation budget: 64 MiB (1024 * 1024 * 64 bytes).
        Limits { bytes: 64 << 20 }
    }
}
/// PNG Decoder
///
/// Configuration stage of decoding; consumed by [`Decoder::read_info`] which
/// produces the row/frame-level [`Reader`].
pub struct Decoder<R: BufRead + Seek> {
    read_decoder: ReadDecoder<R>,
    /// Output transformations
    transform: Transformations,
}
/// A row of data with interlace information attached.
#[derive(Clone, Copy, Debug)]
pub struct InterlacedRow<'data> {
    data: &'data [u8],
    interlace: InterlaceInfo,
}
impl<'data> InterlacedRow<'data> {
    /// The (transformed) pixel data of this row.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }
    /// Which interlace pass/line this row belongs to.
    pub fn interlace(&self) -> &InterlaceInfo {
        &self.interlace
    }
}
/// A row of data without interlace information.
#[derive(Clone, Copy, Debug)]
pub struct Row<'data> {
    data: &'data [u8],
}
impl<'data> Row<'data> {
    /// The (transformed) pixel data of this row.
    pub fn data(&self) -> &'data [u8] {
        self.data
    }
}
impl<R: BufRead + Seek> Decoder<R> {
/// Create a new decoder configuration with default limits.
pub fn new(r: R) -> Decoder<R> {
Decoder::new_with_limits(r, Limits::default())
}
/// Create a new decoder configuration with custom limits.
pub fn new_with_limits(r: R, limits: Limits) -> Decoder<R> {
let mut read_decoder = ReadDecoder::new(r);
read_decoder.set_limits(limits);
Decoder {
read_decoder,
transform: Transformations::IDENTITY,
}
}
/// Create a new decoder configuration with custom [`DecodeOptions`].
pub fn new_with_options(r: R, decode_options: DecodeOptions) -> Decoder<R> {
let mut read_decoder = ReadDecoder::with_options(r, decode_options);
read_decoder.set_limits(Limits::default());
Decoder {
read_decoder,
transform: Transformations::IDENTITY,
}
}
    /// Limit resource usage.
    ///
    /// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__
    /// considered part of the limits. Nevertheless, required intermediate buffers such as for
    /// singular lines are checked against the limit.
    ///
    /// Note that this is a best-effort basis.
    ///
    /// ```
    /// use std::fs::File;
    /// use std::io::BufReader;
    /// use png::{Decoder, Limits};
    /// // This image is 32×32, 1bit per pixel. The reader buffers one row which requires 4 bytes.
    /// let mut limits = Limits::default();
    /// limits.bytes = 3;
    /// let mut decoder = Decoder::new_with_limits(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()), limits);
    /// assert!(decoder.read_info().is_err());
    ///
    /// // This image is 32x32 pixels, so the decoder will allocate less than 10 KiB
    /// let mut limits = Limits::default();
    /// limits.bytes = 10*1024;
    /// let mut decoder = Decoder::new_with_limits(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()), limits);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_limits(&mut self, limits: Limits) {
        self.read_decoder.set_limits(limits);
    }
    /// Read the PNG header and return the information contained within.
    ///
    /// Most image metadata will not be read until `read_info` is called, so those fields will be
    /// None or empty.
    pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
        self.read_decoder.read_header_info()
    }
    /// Reads all meta data until the first IDAT chunk
    pub fn read_info(mut self) -> Result<Reader<R>, DecodingError> {
        let info = self.read_header_info()?;
        let unfiltering_buffer = UnfilteringBuffer::new(info);
        let mut reader = Reader {
            decoder: self.read_decoder,
            bpp: BytesPerPixel::One,
            subframe: SubframeInfo::not_yet_init(),
            remaining_frames: 0, // Temporary value - fixed below after reading `acTL` and `fcTL`.
            unfiltering_buffer,
            transform: self.transform,
            transform_fn: None,
            scratch_buffer: Vec::new(),
            finished: false,
        };
        // Check if the decoding buffer of a single raw line has a valid size.
        //
        // FIXME: this check and the next can be delayed until processing image data. This would
        // allow usage where only the metadata is processed, or where the image is processed
        // line-by-line even on targets that can not fit the whole image into their address space.
        // We should strive for a balance between implementation complexity (still ensure that the
        // no-overflow preconditions are met for internal calculation) and use possibilities.
        if reader.info().checked_raw_row_length().is_none() {
            return Err(DecodingError::LimitsExceeded);
        }
        // Check if the output buffer has a valid size.
        //
        // FIXME: see above and
        // <https://github.com/image-rs/image-png/pull/608#issuecomment-3003576956>
        if reader.output_buffer_size().is_none() {
            return Err(DecodingError::LimitsExceeded);
        }
        reader.read_until_image_data()?;
        // Derive the expected frame count from the (optional) `acTL` animation metadata.
        reader.remaining_frames = match reader.info().animation_control.as_ref() {
            None => 1, // No `acTL` => only expecting `IDAT` frame.
            Some(animation) => {
                let mut num_frames = animation.num_frames as usize;
                if reader.info().frame_control.is_none() {
                    // No `fcTL` before `IDAT` => `IDAT` is not part of the animation, but
                    // represents an *extra*, default frame for non-APNG-aware decoders.
                    num_frames += 1;
                }
                num_frames
            }
        };
        Ok(reader)
    }
    /// Set the allowed and performed transformations.
    ///
    /// A transformation is a pre-processing on the raw image data modifying content or encoding.
    /// Many options have an impact on memory or CPU usage during decoding.
    pub fn set_transformations(&mut self, transform: Transformations) {
        self.transform = transform;
    }
    /// Set the decoder to ignore all text chunks while parsing.
    ///
    /// E.g.
    /// ```
    /// use std::fs::File;
    /// use std::io::BufReader;
    /// use png::Decoder;
    /// let mut decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
    /// decoder.set_ignore_text_chunk(true);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.read_decoder.set_ignore_text_chunk(ignore_text_chunk);
    }
    /// Set the decoder to ignore iccp chunks while parsing.
    ///
    /// E.g.
    /// ```
    /// use std::fs::File;
    /// use std::io::BufReader;
    /// use png::Decoder;
    /// let mut decoder = Decoder::new(BufReader::new(File::open("tests/iccp/broken_iccp.png").unwrap()));
    /// decoder.set_ignore_iccp_chunk(true);
    /// assert!(decoder.read_info().is_ok());
    /// ```
    pub fn set_ignore_iccp_chunk(&mut self, ignore_iccp_chunk: bool) {
        self.read_decoder.set_ignore_iccp_chunk(ignore_iccp_chunk);
    }
    /// Set the decoder to ignore and not verify the Adler-32 checksum
    /// and CRC code.
    pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
        self.read_decoder.ignore_checksums(ignore_checksums);
    }
}
/// PNG reader (mostly high-level interface)
///
/// Provides a high level that iterates over lines or whole images.
pub struct Reader<R: BufRead + Seek> {
    decoder: ReadDecoder<R>,
    /// Bytes-per-pixel class of the current frame, as used by row unfiltering.
    bpp: BytesPerPixel,
    /// Dimensions and interlace progress of the current (sub-)frame.
    subframe: SubframeInfo,
    /// How many frames remain to be decoded. Decremented after each `IDAT` or `fdAT` sequence.
    remaining_frames: usize,
    /// Buffer with not-yet-`unfilter`-ed image rows
    unfiltering_buffer: UnfilteringBuffer,
    /// Output transformations
    transform: Transformations,
    /// Function that can transform decompressed, unfiltered rows into final output.
    /// See the `transform.rs` module for more details.
    transform_fn: Option<TransformFn>,
    /// This buffer is only used so that `next_row` and `next_interlaced_row` can return reference
    /// to a byte slice. In a future version of this library, this buffer will be removed and
    /// `next_row` and `next_interlaced_row` will write directly into a user provided output buffer.
    scratch_buffer: Vec<u8>,
    /// Whether `ImageEnd` was already reached by `fn finish`.
    finished: bool,
}
/// The subframe specific information.
///
/// In APNG the frames are constructed by combining previous frame and a new subframe (through a
/// combination of `dispose_op` and `overlay_op`). These sub frames specify individual dimension
/// information and reuse the global interlace options. This struct encapsulates the state of where
/// in a particular IDAT-frame or subframe we are.
struct SubframeInfo {
    width: u32,
    height: u32,
    /// Raw byte length of one row of this subframe.
    rowlen: usize,
    /// Interlace description of the row to be decoded next; `None` once all rows are done.
    current_interlace_info: Option<InterlaceInfo>,
    interlace_info_iter: InterlaceInfoIter,
    /// Whether this frame's `IDAT`/`fdAT` data has been fully read and discarded.
    consumed_and_flushed: bool,
}
impl<R: BufRead + Seek> Reader<R> {
    /// Advances to the start of the next animation frame and
    /// returns a reference to the `FrameControl` info that describes it.
    /// Skips and discards the image data of the previous frame if necessary.
    ///
    /// Returns a [`ParameterError`] when there are no more animation frames.
    /// To avoid this the caller can check if [`Info::animation_control`] exists
    /// and consult [`AnimationControl::num_frames`].
    pub fn next_frame_info(&mut self) -> Result<&FrameControl, DecodingError> {
        let remaining_frames = if self.subframe.consumed_and_flushed {
            self.remaining_frames
        } else {
            // One remaining frame will be consumed by the `finish_decoding` call below.
            self.remaining_frames - 1
        };
        if remaining_frames == 0 {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::PolledAfterEndOfImage.into(),
            ));
        }
        if !self.subframe.consumed_and_flushed {
            self.subframe.current_interlace_info = None;
            self.finish_decoding()?;
        }
        self.read_until_image_data()?;
        // The PNG standard (and `StreamingDecoder`) guarantees that there is an `fcTL` chunk
        // before the start of image data in a sequence of `fdAT` chunks. Therefore `unwrap`
        // below is guaranteed to not panic.
        Ok(self.info().frame_control.as_ref().unwrap())
    }
    /// Reads all meta data until the next frame data starts.
    /// Requires IHDR before the IDAT and fcTL before fdAT.
    fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
        self.decoder.read_until_image_data()?;
        // Refresh per-frame state: dimensions, bytes-per-pixel and the row buffers.
        self.subframe = SubframeInfo::new(self.info());
        self.bpp = self.info().bpp_in_prediction();
        self.unfiltering_buffer.reset_all();
        // Allocate output buffer.
        let buflen = self.unguarded_output_line_size(self.subframe.width);
        self.decoder.reserve_bytes(buflen)?;
        Ok(())
    }
    /// Get information on the image.
    ///
    /// The structure will change as new frames of an animated image are decoded.
    pub fn info(&self) -> &Info<'static> {
        self.decoder.info().unwrap()
    }
    /// Decodes the next frame into `buf`.
    ///
    /// Note that this decodes raw subframes that need to be mixed according to blend-op and
    /// dispose-op by the caller.
    ///
    /// The caller must always provide a buffer large enough to hold a complete frame (the APNG
    /// specification restricts subframes to the dimensions given in the image header). The region
    /// that has been written can be checked afterwards by calling `info` after a successful call
    /// and inspecting the `frame_control` data. This requirement may be lifted in a later version
    /// of `png`.
    ///
    /// Output lines will be written in row-major, packed matrix with width and height of the read
    /// frame (or subframe), all samples are in big endian byte order where this matters.
    pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<OutputInfo, DecodingError> {
        if self.remaining_frames == 0 {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::PolledAfterEndOfImage.into(),
            ));
        } else if self.subframe.consumed_and_flushed {
            // Advance until the next `fdAT`
            // (along the way we should encounter the fcTL for this frame).
            self.read_until_image_data()?;
        }
        // Note that we only check if the buffer size calculation holds in a call to decoding the
        // frame. Consequently, we can represent the `Info` and frameless decoding even when the
        // target architecture's address space is too small for a frame. However reading the actual
        // frame data is rejected below with `LimitsExceeded` in that case.
        let required_len = self
            .output_buffer_size()
            .ok_or(DecodingError::LimitsExceeded)?;
        if buf.len() < required_len {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::ImageBufferSize {
                    expected: required_len,
                    actual: buf.len(),
                }
                .into(),
            ));
        }
        let (color_type, bit_depth) = self.output_color_type();
        let output_info = OutputInfo {
            width: self.subframe.width,
            height: self.subframe.height,
            color_type,
            bit_depth,
            line_size: self.unguarded_output_line_size(self.subframe.width),
        };
        if self.info().interlaced {
            // Interlaced: scatter each pass row into its final position in `buf`.
            let stride = self.unguarded_output_line_size(self.info().width);
            let samples = color_type.samples() as u8;
            let bits_pp = samples * (bit_depth as u8);
            let expand = crate::adam7::expand_pass;
            while let Some(InterlacedRow {
                data: row,
                interlace,
                ..
            }) = self.next_interlaced_row()?
            {
                // `unwrap` won't panic, because we checked `self.info().interlaced` above.
                let adam7info = interlace.get_adam7_info().unwrap();
                expand(buf, stride, row, adam7info, bits_pp);
            }
        } else {
            // Non-interlaced: decode the remaining rows directly into consecutive lines.
            let current_interlace_info = self.subframe.current_interlace_info.as_ref();
            let already_done_rows = current_interlace_info
                .map(|info| info.line_number())
                .unwrap_or(self.subframe.height);
            for row in buf
                .chunks_exact_mut(output_info.line_size)
                .take(self.subframe.height as usize)
                .skip(already_done_rows as usize)
            {
                self.next_interlaced_row_impl(self.subframe.rowlen, row)?;
            }
        }
        // Advance over the rest of data for this (sub-)frame.
        self.finish_decoding()?;
        Ok(output_info)
    }
    /// Bookkeeping for the end of an `IDAT`/`fdAT` sequence: count down the remaining
    /// frames and remember that this subframe's image data is fully consumed.
    fn mark_subframe_as_consumed_and_flushed(&mut self) {
        assert!(self.remaining_frames > 0);
        self.remaining_frames -= 1;
        self.subframe.consumed_and_flushed = true;
    }
    /// Advance over the rest of data for this (sub-)frame.
    /// Called after decoding the last row of a frame.
    fn finish_decoding(&mut self) -> Result<(), DecodingError> {
        // Double-check that all rows of this frame have been decoded (i.e. that the potential
        // `finish_decoding` call below won't be discarding any data).
        assert!(self.subframe.current_interlace_info.is_none());
        // Discard the remaining data in the current sequence of `IDAT` or `fdAT` chunks.
        if !self.subframe.consumed_and_flushed {
            self.decoder.finish_decoding_image_data()?;
            self.mark_subframe_as_consumed_and_flushed();
        }
        Ok(())
    }
    /// Returns the next processed row of the image (discarding `InterlaceInfo`).
    ///
    /// See also [`Reader.read_row`], which reads into a caller-provided buffer.
    pub fn next_row(&mut self) -> Result<Option<Row<'_>>, DecodingError> {
        self.next_interlaced_row()
            .map(|v| v.map(|v| Row { data: v.data }))
    }
    /// Returns the next processed row of the image.
    ///
    /// See also [`Reader.read_row`], which reads into a caller-provided buffer.
    pub fn next_interlaced_row(&mut self) -> Result<Option<InterlacedRow<'_>>, DecodingError> {
        // Temporarily move the scratch buffer out of `self` so `read_row` can borrow
        // `self` mutably while writing into it.
        let mut output_buffer = mem::take(&mut self.scratch_buffer);
        let max_line_size = self
            .output_line_size(self.info().width)
            .ok_or(DecodingError::LimitsExceeded)?;
        output_buffer.resize(max_line_size, 0u8);
        let result = self.read_row(&mut output_buffer);
        self.scratch_buffer = output_buffer;
        result.map(move |option| {
            option.map(move |interlace| {
                let output_line_size = self.output_line_size_for_interlace_info(&interlace);
                InterlacedRow {
                    data: &self.scratch_buffer[..output_line_size],
                    interlace,
                }
            })
        })
    }
    /// Reads the next row of the image into the provided `output_buffer`.
    /// `Ok(None)` will be returned if the current image frame has no more rows.
    ///
    /// `output_buffer` needs to be long enough to accommodate [`Reader.output_line_size`] for
    /// [`Info.width`] (initial interlaced rows may need less than that).
    ///
    /// See also [`Reader.next_row`] and [`Reader.next_interlaced_row`], which read into a
    /// `Reader`-owned buffer.
    pub fn read_row(
        &mut self,
        output_buffer: &mut [u8],
    ) -> Result<Option<InterlaceInfo>, DecodingError> {
        let interlace = match self.subframe.current_interlace_info.as_ref() {
            // No rows left in this (sub-)frame: flush its remaining image data.
            None => {
                self.finish_decoding()?;
                return Ok(None);
            }
            Some(interlace) => *interlace,
        };
        if interlace.line_number() == 0 {
            self.unfiltering_buffer.reset_prev_row();
        }
        let rowlen = match interlace {
            InterlaceInfo::Null(_) => self.subframe.rowlen,
            InterlaceInfo::Adam7(Adam7Info { samples: width, .. }) => {
                self.info().raw_row_length_from_width(width)
            }
        };
        let output_line_size = self.output_line_size_for_interlace_info(&interlace);
        let output_buffer = &mut output_buffer[..output_line_size];
        self.next_interlaced_row_impl(rowlen, output_buffer)?;
        Ok(Some(interlace))
    }
    /// Output (post-transformation) byte length of the row described by `interlace`.
    fn output_line_size_for_interlace_info(&self, interlace: &InterlaceInfo) -> usize {
        let width = match interlace {
            InterlaceInfo::Adam7(Adam7Info { samples: width, .. }) => *width,
            InterlaceInfo::Null(_) => self.subframe.width,
        };
        self.unguarded_output_line_size(width)
    }
    /// Read the rest of the image and chunks and finish up, including text chunks or others.
    /// This will discard the rest of the image if the image is not read already with
    /// [`Reader::next_frame`], [`Reader::next_row`] or [`Reader::next_interlaced_row`].
    pub fn finish(&mut self) -> Result<(), DecodingError> {
        if self.finished {
            return Err(DecodingError::Parameter(
                ParameterErrorKind::PolledAfterEndOfImage.into(),
            ));
        }
        self.remaining_frames = 0;
        self.unfiltering_buffer.reset_all();
        self.decoder.read_until_end_of_input()?;
        self.finished = true;
        Ok(())
    }
    /// Fetch the next interlaced row and filter it according to our own transformations.
    fn next_interlaced_row_impl(
        &mut self,
        rowlen: usize,
        output_buffer: &mut [u8],
    ) -> Result<(), DecodingError> {
        self.next_raw_interlaced_row(rowlen)?;
        // NOTE(review): after `unfilter_curr_row` the freshly decoded row appears to be
        // exposed via `prev_row` — confirm against `UnfilteringBuffer`.
        let row = self.unfiltering_buffer.prev_row();
        // `rowlen` includes the leading filter byte, the unfiltered row does not.
        assert_eq!(row.len(), rowlen - 1);
        // Apply transformations and write resulting data to buffer.
        let transform_fn = {
            // The transform function is created lazily on first use.
            if self.transform_fn.is_none() {
                self.transform_fn = Some(create_transform_fn(self.info(), self.transform)?);
            }
            self.transform_fn.as_deref().unwrap()
        };
        transform_fn(row, output_buffer, self.info());
        self.subframe.current_interlace_info = self.subframe.interlace_info_iter.next();
        Ok(())
    }
    /// Returns the color type and the number of bits per sample
    /// of the data returned by `Reader::next_row` and `Reader::frames`.
    pub fn output_color_type(&self) -> (ColorType, BitDepth) {
        use crate::common::ColorType::*;
        let t = self.transform;
        let info = self.info();
        if t == Transformations::IDENTITY {
            (info.color_type, info.bit_depth)
        } else {
            let bits = match info.bit_depth as u8 {
                16 if t.intersects(Transformations::STRIP_16) => 8,
                n if n < 8
                    && (t.contains(Transformations::EXPAND)
                        || t.contains(Transformations::ALPHA)) =>
                {
                    8
                }
                n => n,
            };
            let color_type =
                if t.contains(Transformations::EXPAND) || t.contains(Transformations::ALPHA) {
                    let has_trns = info.trns.is_some() || t.contains(Transformations::ALPHA);
                    match info.color_type {
                        Grayscale if has_trns => GrayscaleAlpha,
                        Rgb if has_trns => Rgba,
                        Indexed if has_trns => Rgba,
                        Indexed => Rgb,
                        ct => ct,
                    }
                } else {
                    info.color_type
                };
            (color_type, BitDepth::from_u8(bits).unwrap())
        }
    }
    /// Return the number of bytes required to hold a deinterlaced image frame that is decoded
    /// using the given input transformations.
    ///
    /// Returns `None` if the output buffer does not fit into the memory space of the machine,
    /// otherwise returns the byte length in `Some`. The length is smaller than [`isize::MAX`].
    pub fn output_buffer_size(&self) -> Option<usize> {
        let (width, height) = self.info().size();
        let (color, depth) = self.output_color_type();
        // The subtraction should always work, but we do this for consistency. Also note that by
        // calling `checked_raw_row_length` the row buffer is guaranteed to work whereas if we
        // ran other function that didn't include the filter byte that could later fail on an image
        // that is `1xN`...
        let linelen = color.checked_raw_row_length(depth, width)?.checked_sub(1)?;
        let height = usize::try_from(height).ok()?;
        let imglen = linelen.checked_mul(height)?;
        // Ensure that it fits into address space not only `usize` to allocate.
        (imglen <= isize::MAX as usize).then_some(imglen)
    }
    /// Returns the number of bytes required to hold a deinterlaced row.
    ///
    /// Unlike [`Reader::output_line_size`] this performs no overflow checking (hence
    /// "unguarded"); callers are expected to have validated sizes earlier (see
    /// `Decoder::read_info`).
    pub(crate) fn unguarded_output_line_size(&self, width: u32) -> usize {
        let (color, depth) = self.output_color_type();
        color.raw_row_length_from_width(depth, width) - 1
    }
    /// Returns the number of bytes required to hold a deinterlaced row.
    ///
    /// Returns `None` if the output buffer does not fit into the memory space of the machine,
    /// otherwise returns the byte length in `Some`. The length is smaller than [`isize::MAX`].
    pub fn output_line_size(&self, width: u32) -> Option<usize> {
        let (color, depth) = self.output_color_type();
        let length = color.checked_raw_row_length(depth, width)?.checked_sub(1)?;
        // Ensure that it fits into address space not only `usize` to allocate.
        (length <= isize::MAX as usize).then_some(length)
    }
    /// Unfilter the next raw interlaced row into `self.unfiltering_buffer`.
    fn next_raw_interlaced_row(&mut self, rowlen: usize) -> Result<(), DecodingError> {
        // Read image data until we have at least one full row (but possibly more than one).
        while self.unfiltering_buffer.curr_row_len() < rowlen {
            if self.subframe.consumed_and_flushed {
                return Err(DecodingError::Format(
                    FormatErrorInner::NoMoreImageData.into(),
                ));
            }
            let mut buffer = self.unfiltering_buffer.as_unfilled_buffer();
            match self.decoder.decode_image_data(Some(&mut buffer))? {
                ImageDataCompletionStatus::ExpectingMoreData => (),
                ImageDataCompletionStatus::Done => self.mark_subframe_as_consumed_and_flushed(),
            }
        }
        self.unfiltering_buffer.unfilter_curr_row(rowlen, self.bpp)
    }
}
impl SubframeInfo {
    /// Placeholder state used before the first `read_until_image_data` call.
    fn not_yet_init() -> Self {
        SubframeInfo {
            width: 0,
            height: 0,
            rowlen: 0,
            current_interlace_info: None,
            interlace_info_iter: InterlaceInfoIter::empty(),
            consumed_and_flushed: false,
        }
    }
    /// Builds the state for the (sub-)frame currently described by `info`.
    fn new(info: &Info) -> Self {
        // The apng fcTL overrides width and height.
        // All other data is set by the main info struct.
        let (width, height) = if let Some(fc) = info.frame_control {
            (fc.width, fc.height)
        } else {
            (info.width, info.height)
        };
        let mut interlace_info_iter = InterlaceInfoIter::new(width, height, info.interlaced);
        let current_interlace_info = interlace_info_iter.next();
        SubframeInfo {
            width,
            height,
            rowlen: info.raw_row_length_from_width(width),
            current_interlace_info,
            interlace_info_iter,
            consumed_and_flushed: false,
        }
    }
}

153
vendor/png/src/decoder/read_decoder.rs vendored Normal file
View File

@@ -0,0 +1,153 @@
use super::stream::{DecodeOptions, Decoded, DecodingError, FormatErrorInner, StreamingDecoder};
use super::zlib::UnfilterBuf;
use super::Limits;
use std::io::{BufRead, ErrorKind, Read, Seek};
use crate::chunk;
use crate::common::Info;
/// Helper for encapsulating reading input from `Read` and feeding it into a `StreamingDecoder`
/// while hiding low-level `Decoded` events and only exposing a few high-level reading operations
/// like:
///
/// * `read_header_info` - reading until `IHDR` chunk
/// * `read_until_image_data` - reading until `IDAT` / `fdAT` sequence
/// * `decode_image_data` - reading from `IDAT` / `fdAT` sequence into `Vec<u8>`
/// * `finish_decoding_image_data()` - discarding remaining data from `IDAT` / `fdAT` sequence
/// * `read_until_end_of_input()` - reading until `IEND` chunk
pub(crate) struct ReadDecoder<R: Read> {
    /// The underlying (buffered) input.
    reader: R,
    /// Low-level streaming PNG parser the input is fed into.
    decoder: StreamingDecoder,
}
impl<R: BufRead + Seek> ReadDecoder<R> {
    /// Wraps `r` in a decoder configured with default options.
    pub fn new(r: R) -> Self {
        Self {
            reader: r,
            decoder: StreamingDecoder::new(),
        }
    }

    /// Wraps `r` in a decoder configured with the given `options` and default limits.
    pub fn with_options(r: R, options: DecodeOptions) -> Self {
        let mut decoder = StreamingDecoder::new_with_options(options);
        decoder.limits = Limits::default();
        Self { reader: r, decoder }
    }

    /// Replaces the resource limits enforced while decoding.
    pub fn set_limits(&mut self, limits: Limits) {
        self.decoder.limits = limits;
    }

    /// Records `bytes` against the decoder's remaining byte budget.
    pub fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
        self.decoder.limits.reserve_bytes(bytes)
    }

    /// Controls whether textual chunks are skipped while decoding.
    pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
        self.decoder.set_ignore_text_chunk(ignore_text_chunk);
    }

    /// Controls whether the `iCCP` chunk is skipped while decoding.
    pub fn set_ignore_iccp_chunk(&mut self, ignore_iccp_chunk: bool) {
        self.decoder.set_ignore_iccp_chunk(ignore_iccp_chunk);
    }

    /// Enables or disables both Adler-32 and CRC verification at once.
    pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
        self.decoder.set_ignore_adler32(ignore_checksums);
        self.decoder.set_ignore_crc(ignore_checksums);
    }

    /// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written
    /// into `image_data`.
    fn decode_next(
        &mut self,
        image_data: Option<&mut UnfilterBuf<'_>>,
    ) -> Result<Decoded, DecodingError> {
        let (consumed, result) = {
            let input = self.reader.fill_buf()?;
            if input.is_empty() {
                // Running out of input mid-stream is an I/O problem, not a format error.
                return Err(DecodingError::IoError(ErrorKind::UnexpectedEof.into()));
            }
            self.decoder.update(input, image_data)?
        };
        self.reader.consume(consumed);
        Ok(result)
    }

    /// Reads until the end of `IHDR` chunk.
    ///
    /// Prerequisite: None (idempotent).
    pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
        while self.info().is_none() {
            let decoded = self.decode_next(None)?;
            if matches!(decoded, Decoded::ChunkComplete(chunk::IEND)) {
                unreachable!()
            }
        }
        Ok(self.info().unwrap())
    }

    /// Reads until the start of the next `IDAT` or `fdAT` chunk.
    ///
    /// Prerequisite: **Not** within `IDAT` / `fdAT` chunk sequence.
    pub fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
        loop {
            match self.decode_next(None)? {
                Decoded::ChunkBegin(_, chunk::IDAT) | Decoded::ChunkBegin(_, chunk::fdAT) => {
                    return Ok(())
                }
                Decoded::ChunkComplete(chunk::IEND) => {
                    return Err(DecodingError::Format(
                        FormatErrorInner::MissingImageData.into(),
                    ))
                }
                // Any other chunk may legally appear between IDAT chunks, fdAT
                // chunks and their control chunks, so everything else is skipped.
                _ => continue,
            }
        }
    }

    /// Reads `image_data` and reports whether there may be additional data afterwards (i.e. if it
    /// is okay to call `decode_image_data` and/or `finish_decoding_image_data` again).
    ///
    /// Prerequisite: Input is currently positioned within `IDAT` / `fdAT` chunk sequence.
    pub fn decode_image_data(
        &mut self,
        image_data: Option<&mut UnfilterBuf<'_>>,
    ) -> Result<ImageDataCompletionStatus, DecodingError> {
        let status = match self.decode_next(image_data)? {
            Decoded::ImageDataFlushed => ImageDataCompletionStatus::Done,
            // `Decoded::ImageData` - and any other event that can occur within an
            // `IDAT` / `fdAT` sequence - means decoding has not finished yet.
            _ => ImageDataCompletionStatus::ExpectingMoreData,
        };
        Ok(status)
    }

    /// Consumes and discards the rest of an `IDAT` / `fdAT` chunk sequence.
    ///
    /// Prerequisite: Input is currently positioned within `IDAT` / `fdAT` chunk sequence.
    pub fn finish_decoding_image_data(&mut self) -> Result<(), DecodingError> {
        while self.decode_image_data(None)? != ImageDataCompletionStatus::Done {}
        Ok(())
    }

    /// Reads until the `IEND` chunk.
    ///
    /// Prerequisite: `IEND` chunk hasn't been reached yet.
    pub fn read_until_end_of_input(&mut self) -> Result<(), DecodingError> {
        loop {
            if let Decoded::ChunkComplete(chunk::IEND) = self.decode_next(None)? {
                return Ok(());
            }
        }
    }

    /// Returns the decoded image metadata, once the `IHDR` chunk has been parsed.
    pub fn info(&self) -> Option<&Info<'static>> {
        self.decoder.info.as_ref()
    }
}
/// Outcome of a single `ReadDecoder::decode_image_data` call.
#[derive(Debug, Eq, PartialEq)]
pub(crate) enum ImageDataCompletionStatus {
    /// More `IDAT` / `fdAT` data may follow - decoding may be called again.
    ExpectingMoreData,
    /// The image data sequence has been fully consumed and flushed.
    Done,
}

3213
vendor/png/src/decoder/stream.rs vendored Normal file

File diff suppressed because it is too large Load Diff

203
vendor/png/src/decoder/transform.rs vendored Normal file
View File

@@ -0,0 +1,203 @@
//! Transforming a decompressed, unfiltered row into the final output.
mod palette;
use crate::{BitDepth, ColorType, DecodingError, Info, Transformations};
use super::stream::FormatErrorInner;
/// Type of a function that can transform a decompressed, unfiltered row (the
/// 1st argument) into the final pixels (the 2nd argument), optionally using
/// image metadata (e.g. PLTE data can be accessed using the 3rd argument).
///
/// TODO: If some precomputed state is needed (e.g. to make `expand_paletted...`
/// faster) then consider changing this into `Box<dyn Fn(...)>`.
/// NOTE(review): the alias below already is a `Box<dyn Fn(...)>` and the
/// palette expansions already capture precomputed state - this TODO looks
/// stale; confirm and remove it.
pub type TransformFn = Box<dyn Fn(&[u8], &mut [u8], &Info) + Send + Sync>;
/// Returns a transformation function that should be applied to image rows based
/// on 1) decoded image metadata (`info`) and 2) the transformations requested
/// by the crate client (`transform`).
pub fn create_transform_fn(
    info: &Info,
    transform: Transformations,
) -> Result<TransformFn, DecodingError> {
    let color_type = info.color_type;
    let bit_depth = info.bit_depth as u8;
    // `ALPHA` implies both expansion and tRNS-based alpha generation.
    let trns = info.trns.is_some() || transform.contains(Transformations::ALPHA);
    let expand =
        transform.contains(Transformations::EXPAND) || transform.contains(Transformations::ALPHA);
    let strip16 = bit_depth == 16 && transform.contains(Transformations::STRIP_16);

    match color_type {
        ColorType::Indexed if expand => {
            if info.palette.is_none() {
                return Err(DecodingError::Format(
                    FormatErrorInner::PaletteRequired.into(),
                ));
            }
            if matches!(info.bit_depth, BitDepth::Sixteen) {
                // This should have been caught earlier but let's check again. Can't hurt.
                return Err(DecodingError::Format(
                    FormatErrorInner::InvalidColorBitDepth {
                        color_type: ColorType::Indexed,
                        bit_depth: BitDepth::Sixteen,
                    }
                    .into(),
                ));
            }
            Ok(if trns {
                palette::create_expansion_into_rgba8(info)
            } else {
                palette::create_expansion_into_rgb8(info)
            })
        }
        ColorType::Grayscale | ColorType::GrayscaleAlpha if bit_depth < 8 && expand => {
            let f: fn(&[u8], &mut [u8], &Info) = if trns {
                expand_gray_u8_with_trns
            } else {
                expand_gray_u8
            };
            Ok(Box::new(f))
        }
        ColorType::Grayscale | ColorType::Rgb if expand && trns => {
            let f: fn(&[u8], &mut [u8], &Info) = if bit_depth == 8 {
                expand_trns_line
            } else if strip16 {
                expand_trns_and_strip_line16
            } else {
                assert_eq!(bit_depth, 16);
                expand_trns_line16
            };
            Ok(Box::new(f))
        }
        ColorType::Grayscale | ColorType::GrayscaleAlpha | ColorType::Rgb | ColorType::Rgba
            if strip16 =>
        {
            Ok(Box::new(transform_row_strip16))
        }
        _ => Ok(Box::new(copy_row)),
    }
}
/// Identity transform - copies `row` into `output_buffer` unchanged.
///
/// Panics (via `copy_from_slice`) if the two slices differ in length.
fn copy_row(row: &[u8], output_buffer: &mut [u8], _: &Info) {
    output_buffer.copy_from_slice(row);
}
/// Strips 16-bit samples down to 8 bits by keeping the first byte of each
/// sample pair (PNG stores 16-bit samples big-endian, so this is the high byte).
fn transform_row_strip16(row: &[u8], output_buffer: &mut [u8], _: &Info) {
    for (i, sample) in row.chunks_exact(2).enumerate() {
        output_buffer[i] = sample[0];
    }
}
/// Unpacks `bit_depth`-wide samples from `input` and hands each one, together
/// with a `channels`-wide output chunk, to `func` for writing.
///
/// Samples are extracted most-significant-bits-first within each input byte,
/// matching PNG's packing order. Valid `bit_depth` values are 1, 2, 4 and 8.
#[inline(always)]
fn unpack_bits<F>(input: &[u8], output: &mut [u8], channels: usize, bit_depth: u8, func: F)
where
    F: Fn(u8, &mut [u8]),
{
    // Only [1, 2, 4, 8] are valid bit depths
    assert!(matches!(bit_depth, 1 | 2 | 4 | 8));
    // Check that `input` is capable of producing a buffer as long as `output`:
    // number of shift lookups per bit depth * channels * input length
    assert!((8 / bit_depth as usize * channels).saturating_mul(input.len()) >= output.len());
    let mut buf_chunks = output.chunks_exact_mut(channels);
    let mut iter = input.iter();
    // `shift` iterates through the corresponding bit depth sequence:
    // 1 => &[7, 6, 5, 4, 3, 2, 1, 0],
    // 2 => &[6, 4, 2, 0],
    // 4 => &[4, 0],
    // 8 => &[0],
    //
    // `(0..8).step_by(bit_depth.into()).rev()` doesn't always optimize well so
    // shifts are calculated instead. (2023-08, Rust 1.71)
    if bit_depth == 8 {
        // Fast path: one input byte per output chunk, no shifting needed.
        for (&curr, chunk) in iter.zip(&mut buf_chunks) {
            func(curr, chunk);
        }
    } else {
        let mask = ((1u16 << bit_depth) - 1) as u8;
        // These variables are initialized in the loop
        let mut shift = -1;
        let mut curr = 0;
        for chunk in buf_chunks {
            // A negative shift means the current input byte is exhausted.
            if shift < 0 {
                shift = 8 - bit_depth as i32;
                curr = *iter.next().expect("input for unpack bits is not empty");
            }
            let pixel = (curr >> shift) & mask;
            func(pixel, chunk);
            shift -= bit_depth as i32;
        }
    }
}
/// Appends an alpha channel to 8-bit Gray/RGB pixels: pixels equal to the
/// `tRNS` color become fully transparent, all others fully opaque.
fn expand_trns_line(input: &[u8], output: &mut [u8], info: &Info) {
    let channels = info.color_type.samples();
    let trns = info.trns.as_deref();
    let pixels = input.chunks_exact(channels);
    let out_pixels = output.chunks_exact_mut(channels + 1);
    for (src, dst) in pixels.zip(out_pixels) {
        dst[..channels].copy_from_slice(src);
        let alpha = if trns == Some(src) { 0 } else { 0xFF };
        dst[channels] = alpha;
    }
}
/// Appends a 16-bit alpha channel to 16-bit Gray/RGB pixels: pixels equal to
/// the `tRNS` color become fully transparent, all others fully opaque.
fn expand_trns_line16(input: &[u8], output: &mut [u8], info: &Info) {
    let channels = info.color_type.samples();
    let trns = info.trns.as_deref();
    // Each sample is two bytes wide at 16-bit depth.
    let in_stride = channels * 2;
    for (src, dst) in input
        .chunks_exact(in_stride)
        .zip(output.chunks_exact_mut(in_stride + 2))
    {
        dst[..in_stride].copy_from_slice(src);
        let alpha = if trns == Some(src) { 0x00 } else { 0xFF };
        dst[in_stride] = alpha;
        dst[in_stride + 1] = alpha;
    }
}
/// Combines 16-to-8-bit stripping with alpha expansion: keeps the high byte of
/// each 16-bit sample and appends an 8-bit alpha derived from the `tRNS` color.
fn expand_trns_and_strip_line16(input: &[u8], output: &mut [u8], info: &Info) {
    let channels = info.color_type.samples();
    let trns = info.trns.as_deref();
    for (src, dst) in input
        .chunks_exact(channels * 2)
        .zip(output.chunks_exact_mut(channels + 1))
    {
        // Keep only the first (big-endian high) byte of each 16-bit sample.
        for (out_byte, pair) in dst[..channels].iter_mut().zip(src.chunks_exact(2)) {
            *out_byte = pair[0];
        }
        // The tRNS comparison happens on the full 16-bit pixel, before stripping.
        dst[channels] = if trns == Some(src) { 0 } else { 0xFF };
    }
}
/// Expands sub-byte grayscale samples to full 8-bit values.
fn expand_gray_u8(row: &[u8], buffer: &mut [u8], info: &Info) {
    // Rescale e.g. 2-bit values (0..=3) onto the full 0..=255 range.
    let max_sample = (1u16 << info.bit_depth as u8) - 1;
    let scaling_factor = (255 / max_sample) as u8;
    unpack_bits(row, buffer, 1, info.bit_depth as u8, |val, chunk| {
        chunk[0] = val * scaling_factor
    });
}
/// Expands sub-byte grayscale samples to 8-bit gray + alpha pairs, using the
/// `tRNS` entry to decide which sample value becomes fully transparent.
fn expand_gray_u8_with_trns(row: &[u8], buffer: &mut [u8], info: &Info) {
    let scaling_factor = (255) / ((1u16 << info.bit_depth as u8) - 1) as u8;
    let trns = info.trns.as_deref();
    unpack_bits(row, buffer, 2, info.bit_depth as u8, |pixel, chunk| {
        let transparent = matches!(trns, Some(t) if pixel == t[0]);
        chunk[1] = if transparent { 0 } else { 0xFF };
        chunk[0] = pixel * scaling_factor
    });
}

View File

@@ -0,0 +1,361 @@
//! Helpers for taking a slice of indices (indices into `PLTE` and/or `trNS`
//! entries) and transforming this into RGB or RGBA output.
//!
//! # Memoization
//!
//! To achieve higher throughput, `create_rgba_palette` combines entries from
//! `PLTE` and `trNS` chunks into a single lookup table. This is based on the
//! ideas explored in <https://crbug.com/706134>.
//!
//! Memoization is a trade-off:
//! * On one hand, memoization requires spending X ns before starting to call
//! `expand_paletted_...` functions.
//! * On the other hand, memoization improves the throughput of the
//! `expand_paletted_...` functions - they take Y ns less to process each byte
//!
//! Based on X and Y, we can try to calculate the breakeven point. It seems
//! that memoization is a net benefit for images bigger than around 13x13 pixels.
use super::{unpack_bits, TransformFn};
use crate::{BitDepth, Info};
/// Builds a transform that expands palette indices into RGB8, memoizing the
/// combined PLTE lookup table up front.
pub fn create_expansion_into_rgb8(info: &Info) -> TransformFn {
    let lut = create_rgba_palette(info);
    match info.bit_depth {
        // 8-bit indices get a dedicated fast path that skips bit unpacking.
        BitDepth::Eight => {
            Box::new(move |input, output, _info| expand_8bit_into_rgb8(input, output, &lut))
        }
        _ => Box::new(move |input, output, info| expand_into_rgb8(input, output, info, &lut)),
    }
}
/// Builds a transform that expands palette indices into RGBA8, memoizing the
/// combined PLTE + tRNS lookup table up front.
pub fn create_expansion_into_rgba8(info: &Info) -> TransformFn {
    let lut = create_rgba_palette(info);
    Box::new(move |input, output, info| expand_paletted_into_rgba8(input, output, info, &lut))
}
/// Combines the `PLTE` (RGB) and `tRNS` (alpha) chunks into a single 256-entry
/// RGBA lookup table; indices without a palette entry map to opaque black.
fn create_rgba_palette(info: &Info) -> [[u8; 4]; 256] {
    let palette = info.palette.as_deref().expect("Caller should verify");
    let trns = info.trns.as_deref().unwrap_or(&[]);
    // > The tRNS chunk shall not contain more alpha values than there are palette
    // entries, but a tRNS chunk may contain fewer values than there are palette
    // entries. In this case, the alpha value for all remaining palette entries is
    // assumed to be 255.
    //
    // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
    // completely empty / all pixels are non-transparent.
    let trns = if trns.len() <= palette.len() / 3 {
        trns
    } else {
        &[]
    };
    // Default to black, opaque entries.
    let mut rgba_palette = [[0, 0, 0, 0xFF]; 256];
    // Copy `palette` (RGB) entries into `rgba_palette`. This may clobber alpha
    // values in `rgba_palette` - we need to fix this later.
    {
        let mut palette_iter = palette;
        let mut rgba_iter = &mut rgba_palette[..];
        while palette_iter.len() >= 4 {
            // Copying 4 bytes at a time is more efficient than copying 3.
            // OTOH, this clobbers the alpha value in `rgba_iter[0][3]` - we
            // need to fix this later.
            rgba_iter[0].copy_from_slice(&palette_iter[0..4]);
            palette_iter = &palette_iter[3..];
            rgba_iter = &mut rgba_iter[1..];
        }
        // Final palette entry: fewer than 4 bytes remain, so copy exactly 3.
        if !palette_iter.is_empty() {
            rgba_iter[0][0..3].copy_from_slice(&palette_iter[0..3]);
        }
    }
    // Copy `trns` (alpha) entries into `rgba_palette`. `trns.len()` may be
    // smaller than `palette.len()` and therefore this is not sufficient to fix
    // all the clobbered alpha values.
    for (alpha, rgba) in trns.iter().copied().zip(rgba_palette.iter_mut()) {
        rgba[3] = alpha;
    }
    // Unclobber the remaining alpha values.
    for rgba in rgba_palette[trns.len()..(palette.len() / 3)].iter_mut() {
        rgba[3] = 0xFF;
    }
    rgba_palette
}
/// Expands 8-bit palette indices (one per input byte) into RGB8 output using
/// the memoized `rgba_palette` lookup table.
///
/// Writes 4 bytes per pixel but advances the output by 3, so the spurious
/// alpha byte is overwritten by the next pixel; only the final pixel needs a
/// plain 3-byte copy.
fn expand_8bit_into_rgb8(mut input: &[u8], mut output: &mut [u8], rgba_palette: &[[u8; 4]; 256]) {
    while output.len() >= 4 {
        // Copying 4 bytes at a time is more efficient than 3.
        let rgba = &rgba_palette[input[0] as usize];
        output[0..4].copy_from_slice(rgba);
        input = &input[1..];
        output = &mut output[3..];
    }
    if !output.is_empty() {
        let rgba = &rgba_palette[input[0] as usize];
        output[0..3].copy_from_slice(&rgba[0..3]);
    }
}
/// Expands sub-byte palette indices into RGB8 using the memoized lookup table.
fn expand_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info, rgba_palette: &[[u8; 4]; 256]) {
    unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
        // Only the RGB portion of the memoized RGBA entry is needed here.
        chunk.copy_from_slice(&rgba_palette[usize::from(i)][..3]);
    })
}
/// Expands palette indices into RGBA8 using the memoized lookup table, which
/// already folds the `tRNS` alpha values into each entry.
fn expand_paletted_into_rgba8(
    row: &[u8],
    buffer: &mut [u8],
    info: &Info,
    rgba_palette: &[[u8; 4]; 256],
) {
    unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
        chunk.copy_from_slice(&rgba_palette[usize::from(i)]);
    });
}
/// Tests comparing the memoized palette expansion against a straightforward,
/// non-memoized reference implementation ("oracle").
#[cfg(test)]
mod test {
    use crate::{BitDepth, ColorType, Info, Transformations};

    /// Old, non-memoized version of the code is used as a test oracle.
    fn oracle_expand_paletted_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info) {
        let palette = info.palette.as_deref().expect("Caller should verify");
        let black = [0, 0, 0];
        super::unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
            let rgb = palette
                .get(3 * i as usize..3 * i as usize + 3)
                .unwrap_or(&black);
            chunk[0] = rgb[0];
            chunk[1] = rgb[1];
            chunk[2] = rgb[2];
        })
    }

    /// Old, non-memoized version of the code is used as a test oracle.
    fn oracle_expand_paletted_into_rgba8(row: &[u8], buffer: &mut [u8], info: &Info) {
        let palette = info.palette.as_deref().expect("Caller should verify");
        let trns = info.trns.as_deref().unwrap_or(&[]);
        let black = [0, 0, 0];
        // > The tRNS chunk shall not contain more alpha values than there are palette
        // entries, but a tRNS chunk may contain fewer values than there are palette
        // entries. In this case, the alpha value for all remaining palette entries is
        // assumed to be 255.
        //
        // It seems, accepted reading is to fully *ignore* an invalid tRNS as if it were
        // completely empty / all pixels are non-transparent.
        let trns = if trns.len() <= palette.len() / 3 {
            trns
        } else {
            &[]
        };
        super::unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
            let (rgb, a) = (
                palette
                    .get(3 * i as usize..3 * i as usize + 3)
                    .unwrap_or(&black),
                *trns.get(i as usize).unwrap_or(&0xFF),
            );
            chunk[0] = rgb[0];
            chunk[1] = rgb[1];
            chunk[2] = rgb[2];
            chunk[3] = a;
        });
    }

    /// Builds a minimal indexed-color `Info` for the tests below.
    fn create_info<'a>(src_bit_depth: u8, palette: &'a [u8], trns: Option<&'a [u8]>) -> Info<'a> {
        Info {
            color_type: ColorType::Indexed,
            bit_depth: BitDepth::from_u8(src_bit_depth).unwrap(),
            palette: Some(palette.into()),
            trns: trns.map(Into::into),
            ..Info::default()
        }
    }

    /// Runs the memoized expansion and cross-checks its output against the oracle.
    fn expand_paletted(
        src: &[u8],
        src_bit_depth: u8,
        palette: &[u8],
        trns: Option<&[u8]>,
    ) -> Vec<u8> {
        let info = create_info(src_bit_depth, palette, trns);
        let output_bytes_per_input_sample = match trns {
            None => 3,
            Some(_) => 4,
        };
        let samples_count_per_byte = (8 / src_bit_depth) as usize;
        let samples_count = src.len() * samples_count_per_byte;
        let mut dst = vec![0; samples_count * output_bytes_per_input_sample];
        let transform_fn =
            super::super::create_transform_fn(&info, Transformations::EXPAND).unwrap();
        transform_fn(src, dst.as_mut_slice(), &info);
        {
            // Compare the memoization-based calculations with the old, non-memoized code.
            let mut simple_dst = vec![0; samples_count * output_bytes_per_input_sample];
            if trns.is_none() {
                oracle_expand_paletted_into_rgb8(src, &mut simple_dst, &info)
            } else {
                oracle_expand_paletted_into_rgba8(src, &mut simple_dst, &info)
            }
            assert_eq!(&dst, &simple_dst);
        }
        dst
    }

    #[test]
    fn test_expand_paletted_rgba_8bit() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[3, 7, 11, 15]), // trns
        );
        assert_eq!(actual, (0..16).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgb_8bit() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                3, 4, 5, // entry #1
                6, 7, 8, // entry #2
                9, 10, 11, // entry #3
            ],
            None, // trns
        );
        assert_eq!(actual, (0..12).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgba_4bit() {
        let actual = expand_paletted(
            &[0x01, 0x23], // src
            4,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[3, 7, 11, 15]), // trns
        );
        assert_eq!(actual, (0..16).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgb_4bit() {
        let actual = expand_paletted(
            &[0x01, 0x23], // src
            4,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                3, 4, 5, // entry #1
                6, 7, 8, // entry #2
                9, 10, 11, // entry #3
            ],
            None, // trns
        );
        assert_eq!(actual, (0..12).collect::<Vec<u8>>());
    }

    #[test]
    fn test_expand_paletted_rgba_8bit_more_trns_entries_than_palette_entries() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[123; 5]), // trns
        );
        // Invalid (too-long) `trns` means that we'll use 0xFF / opaque alpha everywhere.
        assert_eq!(
            actual,
            vec![0, 1, 2, 0xFF, 4, 5, 6, 0xFF, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
        );
    }

    #[test]
    fn test_expand_paletted_rgba_8bit_less_trns_entries_than_palette_entries() {
        let actual = expand_paletted(
            &[0, 1, 2, 3], // src
            8,             // src_bit_depth
            &[
                // palette
                0, 1, 2, // entry #0
                4, 5, 6, // entry #1
                8, 9, 10, // entry #2
                12, 13, 14, // entry #3
            ],
            Some(&[3, 7]), // trns
        );
        // Too-short `trns` is treated differently from too-long - only missing entries are
        // replaced with 0xFF / opaque.
        assert_eq!(
            actual,
            vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
        );
    }

    #[test]
    fn test_create_rgba_palette() {
        // Reference construction: straightforward per-index lookup into PLTE/tRNS.
        fn create_expected_rgba_palette(plte: &[u8], trns: &[u8]) -> [[u8; 4]; 256] {
            let mut rgba = [[1, 2, 3, 4]; 256];
            for (i, rgba) in rgba.iter_mut().enumerate() {
                rgba[0] = plte.get(i * 3 + 0).map(|&r| r).unwrap_or(0);
                rgba[1] = plte.get(i * 3 + 1).map(|&g| g).unwrap_or(0);
                rgba[2] = plte.get(i * 3 + 2).map(|&b| b).unwrap_or(0);
                rgba[3] = trns.get(i * 1 + 0).map(|&a| a).unwrap_or(0xFF);
            }
            rgba
        }
        // Exercise every palette/trns length combination up to 32 entries.
        for plte_len in 1..=32 {
            for trns_len in 0..=plte_len {
                let plte: Vec<u8> = (0..plte_len * 3).collect();
                let trns: Vec<u8> = (0..trns_len).map(|alpha| alpha + 200).collect();
                let info = create_info(8, &plte, Some(&trns));
                let expected = create_expected_rgba_palette(&plte, &trns);
                let actual = super::create_rgba_palette(&info);
                assert_eq!(actual, expected);
            }
        }
    }
}

View File

@@ -0,0 +1,230 @@
use super::stream::{DecodingError, FormatErrorInner};
use super::zlib::UnfilterBuf;
use crate::common::BytesPerPixel;
use crate::filter::{unfilter, RowFilter};
use crate::Info;
/// Buffer for temporarily holding decompressed, not-yet-`unfilter`-ed rows.
pub(crate) struct UnfilteringBuffer {
    /// Vec containing the uncompressed image data currently being processed.
    data_stream: Vec<u8>,
    /// Index in `data_stream` where the previous row starts.
    /// This excludes the filter type byte - it points at the first byte of actual pixel data.
    /// The pixel data is already-`unfilter`-ed.
    ///
    /// If `prev_start == current_start` then it means that there is no previous row.
    prev_start: usize,
    /// Index in `data_stream` where the current row starts.
    /// This points at the filter type byte of the current row (i.e. the actual pixel data starts at `current_start + 1`)
    /// The pixel data is not-yet-`unfilter`-ed.
    ///
    /// `current_start` can wrap around the length.
    current_start: usize,
    /// Logical length of data that must be preserved.
    filled: usize,
    /// Length of data that can be modified.
    available: usize,
    /// The number of bytes before we shift the buffer back.
    shift_back_limit: usize,
}
impl UnfilteringBuffer {
    /// Granularity by which the backing buffer is grown when decompression
    /// needs more output room.
    pub const GROWTH_BYTES: usize = 8 * 1024;

    /// Asserts in debug builds that all the invariants hold. No-op in release
    /// builds. Intended to be called after creating or mutating `self` to
    /// ensure that the final state preserves the invariants.
    fn debug_assert_invariants(&self) {
        debug_assert!(self.prev_start <= self.current_start);
        debug_assert!(self.current_start <= self.available);
        debug_assert!(self.available <= self.filled);
        debug_assert!(self.filled <= self.data_stream.len());
    }

    /// Create a buffer tuned for filtering rows of the image type.
    pub fn new(info: &Info<'_>) -> Self {
        // We don't need all of `info` here so if that becomes a structural problem then these
        // derived constants can be extracted into a parameter struct. For instance they may be
        // adjusted according to platform hardware such as cache sizes.
        let data_stream_capacity = {
            let max_data = info
                .checked_raw_row_length()
                // In the current state this is really dependent on IDAT sizes and the compression
                // settings. We aim to avoid overallocation here, but that occurs in part due to
                // the algorithm for draining the buffer, which at the time of writing is at each
                // individual IDAT chunk boundary. So this is set for a quadratic image roughly
                // fitting into a single 4k chunk at compression.. A very arbitrary choice made
                // from (probably overfitting) a benchmark of that image size. With a different
                // algorithm we may come to different buffer uses and have to re-evaluate.
                .and_then(|v| v.checked_mul(info.height.min(128) as usize))
                // In the worst case this is additional room for use of unmasked SIMD moves. But
                // the other idea here is that the allocator generally aligns the buffer.
                .and_then(|v| checked_next_multiple_of(v, 256))
                .unwrap_or(usize::MAX);
            // We do not want to pre-allocate too much in case of a faulty image (no DOS by
            // pretending to be very very large) and also we want to avoid allocating more data
            // than we need for the image itself.
            max_data.min(128 * 1024)
        };
        let shift_back_limit = {
            // Prefer shifting by powers of two and only after having done some number of
            // lines that then become free at the end of the buffer.
            let rowlen_pot = info
                .checked_raw_row_length()
                // Ensure some number of rows are actually present before shifting back, i.e. next
                // time around we want to be able to decode them without reallocating the buffer.
                .and_then(|v| v.checked_mul(4))
                // And also, we should be able to use aligned memcopy on the whole thing. Well at
                // least that is the idea but the parameter is just benchmarking. Higher numbers
                // did not result in performance gains but lowers also, so this is fickle. Maybe
                // our shift back behavior can not be tuned very well.
                .and_then(|v| checked_next_multiple_of(v, 64))
                .unwrap_or(isize::MAX as usize);
            // But never shift back before we have a number of pages freed.
            rowlen_pot.max(128 * 1024)
        };
        let result = Self {
            data_stream: Vec::with_capacity(data_stream_capacity),
            prev_start: 0,
            current_start: 0,
            filled: 0,
            available: 0,
            shift_back_limit,
        };
        result.debug_assert_invariants();
        result
    }

    /// Called to indicate that there is no previous row (e.g. when the current
    /// row is the first scanline of a given Adam7 pass).
    pub fn reset_prev_row(&mut self) {
        self.prev_start = self.current_start;
        self.debug_assert_invariants();
    }

    /// Discards all buffered data and returns every index to its initial state.
    pub fn reset_all(&mut self) {
        self.data_stream.clear();
        self.prev_start = 0;
        self.current_start = 0;
        self.filled = 0;
        self.available = 0;
    }

    /// Returns the previous (already `unfilter`-ed) row.
    pub fn prev_row(&self) -> &[u8] {
        &self.data_stream[self.prev_start..self.current_start]
    }

    /// Returns how many bytes of the current row are present in the buffer.
    pub fn curr_row_len(&self) -> usize {
        self.available - self.current_start
    }

    /// Returns a `&mut Vec<u8>` suitable for passing to
    /// `ReadDecoder.decode_image_data` or `StreamingDecoder.update`.
    ///
    /// Invariants of `self` depend on the assumption that the caller will only
    /// append new bytes to the returned vector (which is indeed the behavior of
    /// `ReadDecoder` and `StreamingDecoder`). TODO: Consider protecting the
    /// invariants by returning an append-only view of the vector
    /// (`FnMut(&[u8])`??? or maybe `std::io::Write`???).
    pub fn as_unfilled_buffer(&mut self) -> UnfilterBuf<'_> {
        if self.prev_start >= self.shift_back_limit
            // Avoid the shift back if the buffer is still very empty. Consider how we got here: a
            // previous decompression filled the buffer, then we unfiltered, we're now refilling
            // the buffer again. The condition implies, the previous decompression filled at most
            // half the buffer. Likely the same will happen again so the following decompression
            // attempt will not yet be limited by the buffer length.
            && self.filled >= self.data_stream.len() / 2
        {
            // We have to relocate the data to the start of the buffer. Benchmarking suggests that
            // the codegen for an unbounded range is better / different than the one for a bounded
            // range. We prefer the former if the data overhead is not too high. `16` was
            // determined experimentally and might be system (memory) dependent. There's also the
            // question if we could be a little smarter and avoid crossing page boundaries when
            // that is not required. Alas, microbenchmarking TBD.
            if let Some(16..) = self.data_stream.len().checked_sub(self.filled) {
                self.data_stream
                    .copy_within(self.prev_start..self.filled, 0);
            } else {
                self.data_stream.copy_within(self.prev_start.., 0);
            }
            // The data kept its relative position to `filled` which now lands exactly at
            // the distance between prev_start and filled.
            self.current_start -= self.prev_start;
            self.available -= self.prev_start;
            self.filled -= self.prev_start;
            self.prev_start = 0;
        }
        // Make sure the decompressor always has at least GROWTH_BYTES of room to write into.
        if self.filled + Self::GROWTH_BYTES > self.data_stream.len() {
            self.data_stream.resize(self.filled + Self::GROWTH_BYTES, 0);
        }
        UnfilterBuf {
            buffer: &mut self.data_stream,
            filled: &mut self.filled,
            available: &mut self.available,
        }
    }

    /// Runs `unfilter` on the current row, and then shifts rows so that the current row becomes the previous row.
    ///
    /// Will panic if `self.curr_row_len() < rowlen`.
    pub fn unfilter_curr_row(
        &mut self,
        rowlen: usize,
        bpp: BytesPerPixel,
    ) -> Result<(), DecodingError> {
        debug_assert!(rowlen >= 2); // 1 byte for `FilterType` and at least 1 byte of pixel data.
        let (prev, row) = self.data_stream.split_at_mut(self.current_start);
        let prev: &[u8] = &prev[self.prev_start..];
        debug_assert!(prev.is_empty() || prev.len() == (rowlen - 1));
        // Get the filter type.
        let filter = RowFilter::from_u8(row[0]).ok_or(DecodingError::Format(
            FormatErrorInner::UnknownFilterMethod(row[0]).into(),
        ))?;
        let row = &mut row[1..rowlen];
        unfilter(filter, bpp, prev, row);
        // Advance: the just-unfiltered pixel data (skipping its filter byte)
        // becomes the previous row; the next filter byte starts the current row.
        self.prev_start = self.current_start + 1;
        self.current_start += rowlen;
        self.debug_assert_invariants();
        Ok(())
    }
}
/// Backport of `usize::checked_next_multiple_of` (stabilized in Rust 1.73) so
/// the crate keeps working on older toolchains.
///
/// Returns the smallest multiple of `factor` that is `>= val`, or `None` when
/// `factor` is zero or the rounding would overflow.
fn checked_next_multiple_of(val: usize, factor: usize) -> Option<usize> {
    if factor == 0 {
        return None;
    }
    match val % factor {
        0 => Some(val),
        rem => val.checked_add(factor - rem),
    }
}
#[test]
fn next_multiple_of_backport_testsuite() {
    // ((value, factor), expected) cases matching `usize::checked_next_multiple_of`.
    let cases: [((usize, usize), Option<usize>); 7] = [
        ((1, 0), None),
        ((2, 0), None),
        ((1, 2), Some(2)),
        ((2, 2), Some(2)),
        ((2, 5), Some(5)),
        ((1, usize::MAX), Some(usize::MAX)),
        ((usize::MAX, 2), None),
    ];
    for ((val, factor), expected) in cases {
        assert_eq!(checked_next_multiple_of(val, factor), expected);
    }
}

213
vendor/png/src/decoder/zlib.rs vendored Normal file
View File

@@ -0,0 +1,213 @@
use super::{stream::FormatErrorInner, unfiltering_buffer::UnfilteringBuffer, DecodingError};
use fdeflate::Decompressor;
/// An inplace buffer for decompression and filtering of PNG rowlines.
///
/// The underlying data structure is a vector, with additional markers denoting a region of bytes
/// that are utilized by the decompression but not yet available to arbitrary modifications. The
/// caller can still shift around data between calls to the stream decompressor as long as the data
/// in the marked region is not modified and the indices adjusted accordingly. See
/// [`UnfilterRegion`] that contains these markers.
///
/// Violating the invariants, i.e. modifying bytes in the marked region, results in absurdly wacky
/// decompression output or panics but not undefined behavior.
pub struct UnfilterBuf<'data> {
    /// The data container. Starts with arbitrary data unrelated to the decoder, a slice of decoder
    /// private data followed by free space for further decoder output. The regions are delimited
    /// by `filled` and `available` which must be updated accordingly.
    pub(crate) buffer: &'data mut Vec<u8>,
    /// Where we record changes to the out position.
    pub(crate) filled: &'data mut usize,
    /// Where we record changes to the number of available bytes.
    pub(crate) available: &'data mut usize,
}
/// A region into a buffer utilized as a [`UnfilterBuf`].
///
/// The span of data denoted by `filled..available` is the region of bytes that must be preserved
/// for use by the decompression algorithm. It may be moved, e.g. by subtracting the same amount
/// from both of these fields. Always ensure that `filled <= available`, the library does not
/// violate this invariant when modifying this struct as an [`UnfilterBuf`].
///
/// NOTE(review): `UnfilteringBuffer::debug_assert_invariants` enforces `available <= filled`
/// (the opposite ordering), and `ZlibStream::decompress` commits `available` as `filled` minus
/// the lookback window - so the preserved span appears to be `available..filled`. Confirm which
/// ordering is intended and fix the stale half of this comment.
#[derive(Default, Clone, Copy)]
pub struct UnfilterRegion {
    /// The past-the-end index of the bytes that are allowed to be modified.
    pub available: usize,
    /// The past-the-end index of the bytes that have been written to.
    pub filled: usize,
}
/// Ergonomics wrapper around `fdeflate::Decompressor` for zlib compressed data.
pub(super) struct ZlibStream {
    /// Current decoding state.
    state: Box<fdeflate::Decompressor>,
    /// If there has been a call to decompress already.
    started: bool,
    /// Ignore and do not calculate the Adler-32 checksum. Defaults to `true`.
    ///
    /// When set, the decompressor is told to skip Adler-32 validation before
    /// the first `read` call (see `Decompressor::ignore_adler32`).
    ///
    /// This flag should not be modified after decompression has started.
    ignore_adler32: bool,
}
impl ZlibStream {
// [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#10Compression) says that
// "deflate/inflate compression with a sliding window (which is an upper bound on the
// distances appearing in the deflate stream) of at most 32768 bytes".
//
// `fdeflate` requires that we keep this many most recently decompressed bytes in the
// `out_buffer` - this allows referring back to them when handling "length and distance
// codes" in the deflate stream).
const LOOKBACK_SIZE: usize = 32768;
pub(crate) fn new() -> Self {
ZlibStream {
state: Box::new(Decompressor::new()),
started: false,
ignore_adler32: true,
}
}
/// Returns the stream to its initial state so a new deflate stream can be decoded.
pub(crate) fn reset(&mut self) {
    *self.state = Decompressor::new();
    self.started = false;
}
/// Set the `ignore_adler32` flag and return `true` if the flag was
/// successfully set.
///
/// The default is `true`.
///
/// This flag cannot be modified after decompression has started until the
/// [ZlibStream] is reset.
pub(crate) fn set_ignore_adler32(&mut self, flag: bool) -> bool {
    if self.started {
        // Too late - the flag was already forwarded to the decompressor.
        return false;
    }
    self.ignore_adler32 = flag;
    true
}
/// Return the `ignore_adler32` flag.
pub(crate) fn ignore_adler32(&self) -> bool {
self.ignore_adler32
}
/// Fill the decoded buffer as far as possible from `data`.
/// On success returns the number of consumed input bytes.
pub(crate) fn decompress(
&mut self,
data: &[u8],
image_data: &mut UnfilterBuf<'_>,
) -> Result<usize, DecodingError> {
// There may be more data past the adler32 checksum at the end of the deflate stream. We
// match libpng's default behavior and ignore any trailing data. In the future we may want
// to add a flag to control this behavior.
if self.state.is_done() {
return Ok(data.len());
}
if !self.started && self.ignore_adler32 {
self.state.ignore_adler32();
}
let (buffer, filled) = image_data.borrow_mut();
let output_limit = (filled + UnfilteringBuffer::GROWTH_BYTES).min(buffer.len());
let (in_consumed, out_consumed) = self
.state
.read(data, &mut buffer[..output_limit], filled, false)
.map_err(|err| {
DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
})?;
self.started = true;
let filled = filled + out_consumed;
image_data.filled(filled);
if self.state.is_done() {
image_data.commit(filled);
} else {
// See [`Self::LOOKBACK_SIZE`].
image_data.commit(filled.saturating_sub(Self::LOOKBACK_SIZE));
}
Ok(in_consumed)
}
/// Called after all consecutive IDAT chunks were handled.
///
/// The compressed stream can be split on arbitrary byte boundaries. This enables some cleanup
/// within the decompressor and flushing additional data which may have been kept back in case
/// more data were passed to it.
pub(crate) fn finish_compressed_chunks(
&mut self,
image_data: &mut UnfilterBuf<'_>,
) -> Result<(), DecodingError> {
if !self.started {
return Ok(());
}
if self.state.is_done() {
// We can end up here only after the [`decompress`] call above has detected the state
// to be done, too. In this case the filled and committed amount of data are already
// equal to each other. So neither of them needs to be touched in any way.
return Ok(());
}
let (_, mut filled) = image_data.borrow_mut();
while !self.state.is_done() {
let (buffer, _) = image_data.borrow_mut();
let (_in_consumed, out_consumed) =
self.state.read(&[], buffer, filled, true).map_err(|err| {
DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
})?;
filled += out_consumed;
if !self.state.is_done() {
image_data.flush_allocate();
}
}
image_data.filled(filled);
image_data.commit(filled);
Ok(())
}
}
impl UnfilterRegion {
/// Use this region to decompress new filtered rowline data.
///
/// Pass the wrapped buffer to
/// [`StreamingDecoder::update`][`super::stream::StreamingDecoder::update`] to fill it with
/// data and update the region indices.
pub fn as_buf<'data>(&'data mut self, buffer: &'data mut Vec<u8>) -> UnfilterBuf<'data> {
UnfilterBuf {
buffer,
filled: &mut self.filled,
available: &mut self.available,
}
}
}
impl UnfilterBuf<'_> {
    /// Hands out the backing buffer together with the current fill index.
    pub(crate) fn borrow_mut(&mut self) -> (&mut [u8], usize) {
        let fill_index = *self.filled;
        (self.buffer, fill_index)
    }

    /// Records the new past-the-end index of bytes written so far.
    pub(crate) fn filled(&mut self, filled: usize) {
        *self.filled = filled;
    }

    /// Updates the `available` index to `howmany`.
    pub(crate) fn commit(&mut self, howmany: usize) {
        *self.available = howmany;
    }

    /// Grows the backing buffer by another 32 KiB of zeroed bytes.
    pub(crate) fn flush_allocate(&mut self) {
        let grown = self.buffer.len() + 32 * 1024;
        self.buffer.resize(grown, 0);
    }
}

2559
vendor/png/src/encoder.rs vendored Normal file

File diff suppressed because it is too large Load Diff

1000
vendor/png/src/filter.rs vendored Normal file

File diff suppressed because it is too large Load Diff

96
vendor/png/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,96 @@
//! # PNG encoder and decoder
//!
//! This crate contains a PNG encoder and decoder. It supports reading of single lines or whole frames.
//!
//! ## The decoder
//!
//! The most important types for decoding purposes are [`Decoder`] and
//! [`Reader`]. They both wrap a [`std::io::Read`].
//! `Decoder` serves as a builder for `Reader`. Calling [`Decoder::read_info`] reads from the `Read` until the
//! image data is reached.
//!
//! ### Using the decoder
//! ```
//! use std::fs::File;
//! use std::io::BufReader;
//! // The decoder is a builder for `Reader` and can be used to set various decoding options
//! // via `Transformations`. The default output transformation is `Transformations::IDENTITY`.
//! let decoder = png::Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
//! let mut reader = decoder.read_info().unwrap();
//! // Allocate the output buffer.
//! let mut buf = vec![0; reader.output_buffer_size().unwrap()];
//! // Read the next frame. An APNG might contain multiple frames.
//! let info = reader.next_frame(&mut buf).unwrap();
//! // Grab the bytes of the image.
//! let bytes = &buf[..info.buffer_size()];
//! // Inspect more details of the last read frame.
//! let in_animation = reader.info().frame_control.is_some();
//! ```
//!
//! ## Encoder
//! ### Using the encoder
//!
//! ```no_run
//! // For reading and opening files
//! use std::path::Path;
//! use std::fs::File;
//! use std::io::BufWriter;
//!
//! let path = Path::new(r"/path/to/image.png");
//! let file = File::create(path).unwrap();
//! let ref mut w = BufWriter::new(file);
//!
//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
//! encoder.set_color(png::ColorType::Rgba);
//! encoder.set_depth(png::BitDepth::Eight);
//! encoder.set_source_gamma(png::ScaledFloat::from_scaled(45455)); // 1.0 / 2.2, scaled by 100000
//! encoder.set_source_gamma(png::ScaledFloat::new(1.0 / 2.2)); // 1.0 / 2.2, unscaled, but rounded
//! let source_chromaticities = png::SourceChromaticities::new( // Using unscaled instantiation here
//! (0.31270, 0.32900),
//! (0.64000, 0.33000),
//! (0.30000, 0.60000),
//! (0.15000, 0.06000)
//! );
//! encoder.set_source_chromaticities(source_chromaticities);
//! let mut writer = encoder.write_header().unwrap();
//!
//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
//! writer.write_image_data(&data).unwrap(); // Save
//! ```
//!
#![forbid(unsafe_code)]
// Silence certain clippy warnings until our MSRV is higher.
//
// The #[default] attribute was stabilized in Rust 1.62.0.
#![allow(clippy::derivable_impls)]
// IIUC format args capture was stabilized in Rust 1.58.1.
#![allow(clippy::uninlined_format_args)]
mod adam7;
pub mod chunk;
mod common;
mod decoder;
mod encoder;
mod filter;
mod srgb;
pub mod text_metadata;
mod traits;
pub use crate::adam7::{
expand_pass as expand_interlaced_row, expand_pass_splat as splat_interlaced_row,
};
pub use crate::adam7::{Adam7Info, Adam7Variant};
pub use crate::common::*;
pub use crate::decoder::stream::{DecodeOptions, Decoded, DecodingError, StreamingDecoder};
pub use crate::decoder::{Decoder, InterlaceInfo, InterlacedRow, Limits, OutputInfo, Reader};
pub use crate::decoder::{UnfilterBuf, UnfilterRegion};
pub use crate::encoder::{Encoder, EncodingError, StreamWriter, Writer};
pub use crate::filter::Filter;
#[cfg(test)]
pub(crate) mod test_utils;
#[cfg(feature = "benchmarks")]
pub mod benchable_apis;

30
vendor/png/src/srgb.rs vendored Normal file
View File

@@ -0,0 +1,30 @@
use crate::{ScaledFloat, SourceChromaticities};
/// Get the gamma that should be substituted for images conforming to the sRGB color space.
/// Get the gamma that should be substituted for images conforming to the sRGB color space.
pub fn substitute_gamma() -> ScaledFloat {
    // Scaled gamma value taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
    let scaled_gamma = 45455;
    ScaledFloat::from_scaled(scaled_gamma)
}
/// Get the chromaticities that should be substituted for images conforming to the sRGB color space.
/// Get the chromaticities that should be substituted for images conforming to the sRGB color space.
pub fn substitute_chromaticities() -> SourceChromaticities {
    // Scaled (x, y) pairs taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
    let white = (
        ScaledFloat::from_scaled(31270),
        ScaledFloat::from_scaled(32900),
    );
    let red = (
        ScaledFloat::from_scaled(64000),
        ScaledFloat::from_scaled(33000),
    );
    let green = (
        ScaledFloat::from_scaled(30000),
        ScaledFloat::from_scaled(60000),
    );
    let blue = (
        ScaledFloat::from_scaled(15000),
        ScaledFloat::from_scaled(6000),
    );
    SourceChromaticities {
        white,
        red,
        green,
        blue,
    }
}

117
vendor/png/src/test_utils.rs vendored Normal file
View File

@@ -0,0 +1,117 @@
//! A set of test utilities.
//!
//! There is some overlap between this module and `src/encoder.rs` module, but:
//!
//! * This module (unlike `src/encoder.rs`) performs no validation of the data being written - this
//! allows building testcases that use arbitrary, potentially invalid PNGs as input.
//! * This module can be reused from `benches/decoder.rs` (a separate crate).
use byteorder::WriteBytesExt;
use std::io::Write;
/// Generates a store-only, non-compressed image:
///
/// * `00` compression mode (i.e.`BTYPE` = `00` = no compression) is used
/// * No filter is applied to the image rows
///
/// Currently the image always has the following properties:
///
/// * Single `IDAT` chunk
/// * Zlib chunks of maximum possible size
/// * 8-bit RGBA
///
/// These images are somewhat artificial, but may be useful for benchmarking performance of parts
/// outside of `fdeflate` crate and/or the `unfilter` function (e.g. these images were originally
/// used to evaluate changes to minimize copying of image pixels between various buffers - see
/// [this
/// discussion](https://github.com/image-rs/image-png/discussions/416#discussioncomment-7436871)
/// for more details).
/// Generates a store-only, non-compressed image:
///
/// * `00` compression mode (i.e.`BTYPE` = `00` = no compression) is used
/// * No filter is applied to the image rows
///
/// Currently the image always has the following properties:
///
/// * Single `IDAT` chunk
/// * Zlib chunks of maximum possible size
/// * 8-bit RGBA
///
/// NOTE(review): `write_rgba8_idats` below emits one `IDAT` chunk per `idat_bytes`-sized slice,
/// so the "Single `IDAT` chunk" bullet only holds when `idat_bytes` covers the whole zlib
/// stream — confirm against callers.
///
/// These images are somewhat artificial, but may be useful for benchmarking performance of parts
/// outside of `fdeflate` crate and/or the `unfilter` function (e.g. these images were originally
/// used to evaluate changes to minimize copying of image pixels between various buffers - see
/// [this
/// discussion](https://github.com/image-rs/image-png/discussions/416#discussioncomment-7436871)
/// for more details).
pub fn write_noncompressed_png(w: &mut impl Write, size: u32, idat_bytes: usize) {
    write_png_sig(w); // 8-byte PNG signature
    write_rgba8_ihdr_with_width(w, size); // square RGBA8 header
    write_rgba8_idats(w, size, idat_bytes); // filtered scanlines in a store-only zlib stream
    write_iend(w); // trailer
}
/// Writes PNG signature.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Structure.html#PNG-file-signature
/// Writes PNG signature.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Structure.html#PNG-file-signature
pub fn write_png_sig(w: &mut impl Write) {
    // Magic bytes: \x89 "PNG" \r \n \x1a \n
    w.write_all(&[0x89, b'P', b'N', b'G', b'\r', b'\n', 0x1a, b'\n'])
        .unwrap();
}
/// Writes an arbitrary PNG chunk.
/// Writes an arbitrary PNG chunk: 4-byte big-endian length, 4-byte chunk type, the payload,
/// and the CRC-32 over type + payload.
///
/// Panics if `chunk_type` is not exactly 4 bytes long, or on any I/O error.
pub fn write_chunk(w: &mut impl Write, chunk_type: &[u8], data: &[u8]) {
    assert_eq!(chunk_type.len(), 4);
    // Hash the chunk type and payload incrementally instead of concatenating them into a
    // temporary `Vec` first - this avoids an allocation and a full copy of the payload.
    let crc = {
        let mut hasher = crc32fast::Hasher::new();
        hasher.update(chunk_type);
        hasher.update(data);
        hasher.finalize()
    };
    w.write_u32::<byteorder::BigEndian>(data.len() as u32)
        .unwrap();
    w.write_all(chunk_type).unwrap();
    w.write_all(data).unwrap();
    w.write_u32::<byteorder::BigEndian>(crc).unwrap();
}
/// Writes an IHDR chunk that indicates a non-interlaced RGBA8 that uses the same height and
/// `width`. See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
pub fn write_rgba8_ihdr_with_width(w: &mut impl Write, width: u32) {
let mut data = Vec::new();
data.write_u32::<byteorder::BigEndian>(width).unwrap();
data.write_u32::<byteorder::BigEndian>(width).unwrap(); // height
data.write_u8(8).unwrap(); // bit depth = always 8-bits per channel
data.write_u8(6).unwrap(); // color type = color + alpha
data.write_u8(0).unwrap(); // compression method (0 is the only allowed value)
data.write_u8(0).unwrap(); // filter method (0 is the only allowed value)
data.write_u8(0).unwrap(); // interlace method = no interlacing
write_chunk(w, b"IHDR", &data);
}
/// Generates RGBA8 `width` x `height` image and wraps it in a store-only zlib container.
pub fn generate_rgba8_with_width_and_height(width: u32, height: u32) -> Vec<u8> {
// Generate arbitrary test pixels.
let image_pixels = {
let mut row = Vec::new();
row.write_u8(0).unwrap(); // filter = no filter
let row_pixels = (0..width).flat_map(|i| {
let color: u8 = (i * 255 / width) as u8;
let alpha: u8 = 0xff;
[color, 255 - color, color / 2, alpha]
});
row.extend(row_pixels);
std::iter::repeat(row)
.take(height as usize)
.flatten()
.collect::<Vec<_>>()
};
let mut zlib_data = Vec::new();
let mut store_only_compressor =
fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(&mut zlib_data)).unwrap();
store_only_compressor.write_data(&image_pixels).unwrap();
store_only_compressor.finish().unwrap();
zlib_data
}
/// Writes an IDAT chunk.
/// Writes the zlib stream for a square RGBA8 image, split into IDAT chunks of `idat_bytes`.
pub fn write_rgba8_idats(w: &mut impl Write, size: u32, idat_bytes: usize) {
    generate_rgba8_with_width_and_height(size, size)
        .chunks(idat_bytes)
        .for_each(|piece| write_chunk(w, b"IDAT", piece));
}
/// Writes an IEND chunk.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IEND
/// Writes an IEND chunk.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IEND
pub fn write_iend(w: &mut impl Write) {
    // IEND carries no payload - only the length/type/CRC framing.
    write_chunk(w, b"IEND", &[]);
}

582
vendor/png/src/text_metadata.rs vendored Normal file
View File

@@ -0,0 +1,582 @@
//! # Text chunks (tEXt/zTXt/iTXt) structs and functions
//!
//! The [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#11textinfo) optionally allows for
//! embedded text chunks in the file. They may appear either before or after the image data
//! chunks. There are three kinds of text chunks.
//! - `tEXt`: This has a `keyword` and `text` field, and is ISO 8859-1 encoded.
//! - `zTXt`: This is semantically the same as `tEXt`, i.e. it has the same fields and
//! encoding, but the `text` field is compressed before being written into the PNG file.
//! - `iTXt`: This chunk allows for its `text` field to be any valid UTF-8, and supports
//! compression of the text field as well.
//!
//! The `ISO 8859-1` encoding technically doesn't allow any control characters
//! to be used, but in practice these values are encountered anyway. This can
//! either be the extended `ISO-8859-1` encoding with control characters or the
//! `Windows-1252` encoding. This crate assumes the `ISO-8859-1` encoding is
//! used.
//!
//! ## Reading text chunks
//!
//! As a PNG is decoded, any text chunk encountered is appended to the
//! [`Info`](`crate::common::Info`) struct, in the `uncompressed_latin1_text`,
//! `compressed_latin1_text`, and the `utf8_text` fields depending on whether the encountered
//! chunk is `tEXt`, `zTXt`, or `iTXt`.
//!
//! ```
//! use std::fs::File;
//! use std::io::BufReader;
//! use std::iter::FromIterator;
//! use std::path::PathBuf;
//!
//! // Opening a png file that has a zTXt chunk
//! let decoder = png::Decoder::new(
//! BufReader::new(File::open("tests/text_chunk_examples/ztxt_example.png").unwrap())
//! );
//! let mut reader = decoder.read_info().unwrap();
//! // If the text chunk is before the image data frames, `reader.info()` already contains the text.
//! for text_chunk in &reader.info().compressed_latin1_text {
//! println!("{:?}", text_chunk.keyword); // Prints the keyword
//! println!("{:#?}", text_chunk); // Prints out the text chunk.
//! // To get the uncompressed text, use the `get_text` method.
//! println!("{}", text_chunk.get_text().unwrap());
//! }
//! ```
//!
//! ## Writing text chunks
//!
//! There are two ways to write text chunks: the first is to add the appropriate text structs directly to the encoder header before the header is written to file.
//! To add a text chunk at any point in the stream, use the `write_text_chunk` method.
//!
//! ```
//! # use png::text_metadata::{ITXtChunk, ZTXtChunk};
//! # use std::env;
//! # use std::fs::File;
//! # use std::io::BufWriter;
//! # use std::iter::FromIterator;
//! # use std::path::PathBuf;
//! # let file = File::create(PathBuf::from_iter(["target", "text_chunk.png"])).unwrap();
//! # let ref mut w = BufWriter::new(file);
//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
//! encoder.set_color(png::ColorType::Rgba);
//! encoder.set_depth(png::BitDepth::Eight);
//! // Adding text chunks to the header
//! encoder
//! .add_text_chunk(
//! "Testing tEXt".to_string(),
//! "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
//! )
//! .unwrap();
//! encoder
//! .add_ztxt_chunk(
//! "Testing zTXt".to_string(),
//! "This is a zTXt chunk that is compressed in the png file.".to_string(),
//! )
//! .unwrap();
//! encoder
//! .add_itxt_chunk(
//! "Testing iTXt".to_string(),
//! "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
//! )
//! .unwrap();
//!
//! let mut writer = encoder.write_header().unwrap();
//!
//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
//! writer.write_image_data(&data).unwrap(); // Save
//!
//! // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
//! let tail_ztxt_chunk = ZTXtChunk::new("Comment".to_string(), "A zTXt chunk after the image data.".to_string());
//! writer.write_text_chunk(&tail_ztxt_chunk).unwrap();
//!
//! // The fields of the text chunk are public, so they can be mutated before being written to the file.
//! let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
//! tail_itxt_chunk.compressed = true;
//! tail_itxt_chunk.language_tag = "hi".to_string();
//! tail_itxt_chunk.translated_keyword = "लेखक".to_string();
//! writer.write_text_chunk(&tail_itxt_chunk).unwrap();
//! ```
#![warn(missing_docs)]
use crate::{chunk, encoder, DecodingError, EncodingError};
use fdeflate::BoundedDecompressionError;
use flate2::write::ZlibEncoder;
use flate2::Compression;
use std::{convert::TryFrom, io::Write};
/// Default decompression limit for compressed text chunks.
///
/// Used by [`ZTXtChunk::decompress_text`] and [`ITXtChunk::decompress_text`].
pub const DECOMPRESSION_LIMIT: usize = 2097152; // 2 MiB
/// Text encoding errors that are wrapped by the standard [`EncodingError`] type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextEncodingError {
    /// Unrepresentable characters in string
    Unrepresentable,
    /// Keyword longer than 79 bytes or empty
    InvalidKeywordSize,
    /// Error encountered while compressing text
    CompressionError,
}
/// Text decoding errors that are wrapped by the standard [`DecodingError`] type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextDecodingError {
    /// Unrepresentable characters in string
    Unrepresentable,
    /// Keyword longer than 79 bytes or empty
    InvalidKeywordSize,
    /// Missing null separator
    MissingNullSeparator,
    /// Compressed text cannot be uncompressed
    InflationError,
    /// Needs more space to decompress
    OutOfDecompressionSpace,
    /// Using an unspecified value for the compression method
    InvalidCompressionMethod,
    /// Using a byte that is not 0 or 255 as compression flag in iTXt chunk
    InvalidCompressionFlag,
    /// Missing the compression flag
    MissingCompressionFlag,
}
/// A generalized text chunk trait, implemented by tEXt, zTXt, and iTXt chunk structs.
pub trait EncodableTextChunk {
    /// Serializes the text chunk (including its length/type/CRC framing) to a `Write`
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError>;
}
/// Struct representing a tEXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TEXtChunk {
    /// Keyword field of the tEXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Text field of tEXt chunk. Can be at most 2GB.
    pub text: String,
}
/// Decodes Latin-1 (ISO 8859-1) bytes into a `String`; every byte maps 1:1 to U+0000..=U+00FF.
fn decode_iso_8859_1(text: &[u8]) -> String {
    let mut decoded = String::with_capacity(text.len());
    for &byte in text {
        decoded.push(byte as char);
    }
    decoded
}
/// Encodes `text` as Latin-1 (ISO 8859-1) bytes, failing with
/// `TextEncodingError::Unrepresentable` on any character above U+00FF.
pub(crate) fn encode_iso_8859_1(text: &str) -> Result<Vec<u8>, TextEncodingError> {
    encode_iso_8859_1_iter(text).collect()
}
/// Appends the Latin-1 encoding of `text` to `buf`, stopping at the first unrepresentable char.
fn encode_iso_8859_1_into(buf: &mut Vec<u8>, text: &str) -> Result<(), TextEncodingError> {
    encode_iso_8859_1_iter(text).try_for_each(|byte| {
        buf.push(byte?);
        Ok(())
    })
}
/// Lazily maps each char of `text` to its Latin-1 byte, or `Unrepresentable` above U+00FF.
fn encode_iso_8859_1_iter(text: &str) -> impl Iterator<Item = Result<u8, TextEncodingError>> + '_ {
    text.chars().map(|c| {
        let code_point = c as u32;
        if code_point <= u8::MAX as u32 {
            Ok(code_point as u8)
        } else {
            Err(TextEncodingError::Unrepresentable)
        }
    })
}
/// Borrows `text` as a `&str` if it is pure ASCII-7, otherwise fails with `Unrepresentable`.
fn decode_ascii(text: &[u8]) -> Result<&str, TextDecodingError> {
    if !text.is_ascii() {
        return Err(TextDecodingError::Unrepresentable);
    }
    // ASCII-7 is a strict subset of UTF-8, so this conversion cannot fail.
    Ok(std::str::from_utf8(text).expect("unreachable"))
}
impl TEXtChunk {
    /// Constructs a new tEXt chunk from a keyword and a text body.
    pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
        let keyword = keyword.into();
        let text = text.into();
        Self { keyword, text }
    }
    /// Decodes the keyword and text slices as Latin-1 strings.
    ///
    /// The decoder runs in strict mode: a keyword that is empty or longer than 79 bytes is
    /// rejected with `InvalidKeywordSize`.
    pub(crate) fn decode(
        keyword_slice: &[u8],
        text_slice: &[u8],
    ) -> Result<Self, TextDecodingError> {
        match keyword_slice.len() {
            1..=79 => Ok(Self {
                keyword: decode_iso_8859_1(keyword_slice),
                text: decode_iso_8859_1(text_slice),
            }),
            _ => Err(TextDecodingError::InvalidKeywordSize),
        }
    }
}
impl EncodableTextChunk for TEXtChunk {
    /// Encodes TEXtChunk to a Writer. The keyword and text are separated by a byte of zeroes.
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
        // The keyword must encode to 1-79 Latin-1 bytes.
        let mut data = encode_iso_8859_1(&self.keyword)?;
        if data.is_empty() || data.len() > 79 {
            return Err(TextEncodingError::InvalidKeywordSize.into());
        }
        // Null separator between keyword and text.
        data.push(0);
        encode_iso_8859_1_into(&mut data, &self.text)?;
        encoder::write_chunk(w, chunk::tEXt, &data)
    }
}
/// Struct representing a zTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ZTXtChunk {
    /// Keyword field of the zTXt chunk. Needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Text field of zTXt chunk. It is compressed by default, but can be uncompressed if necessary.
    text: OptCompressed,
}
/// Private enum encoding the compressed and uncompressed states of zTXt/iTXt text field.
#[derive(Clone, Debug, PartialEq, Eq)]
enum OptCompressed {
    /// Compressed version of text field. Can be at most 2GB.
    Compressed(Vec<u8>),
    /// Uncompressed text field.
    Uncompressed(String),
}
impl ZTXtChunk {
    /// Creates a new ZTXt chunk. The text starts out uncompressed.
    pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
        Self {
            keyword: keyword.into(),
            text: OptCompressed::Uncompressed(text.into()),
        }
    }
    /// Decodes a raw zTXt payload. The text is stored still compressed; call
    /// [`Self::decompress_text`] or [`Self::get_text`] to inflate it.
    pub(crate) fn decode(
        keyword_slice: &[u8],
        compression_method: u8,
        text_slice: &[u8],
    ) -> Result<Self, TextDecodingError> {
        if keyword_slice.is_empty() || keyword_slice.len() > 79 {
            return Err(TextDecodingError::InvalidKeywordSize);
        }
        // 0 is the only compression method defined by the PNG spec.
        if compression_method != 0 {
            return Err(TextDecodingError::InvalidCompressionMethod);
        }
        Ok(Self {
            keyword: decode_iso_8859_1(keyword_slice),
            text: OptCompressed::Compressed(text_slice.to_vec()),
        })
    }
    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
    pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
        self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
    }
    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
    pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = match fdeflate::decompress_to_vec_bounded(&v[..], limit) {
                    Ok(s) => s,
                    Err(BoundedDecompressionError::OutputTooLarge { .. }) => {
                        return Err(DecodingError::from(
                            TextDecodingError::OutOfDecompressionSpace,
                        ));
                    }
                    Err(_) => {
                        return Err(DecodingError::from(TextDecodingError::InflationError));
                    }
                };
                // zTXt text is Latin-1, so the inflated bytes are decoded as ISO 8859-1.
                self.text = OptCompressed::Uncompressed(decode_iso_8859_1(&uncompressed_raw));
            }
            // Already uncompressed - nothing to do.
            OptCompressed::Uncompressed(_) => {}
        };
        Ok(())
    }
    /// Decompresses the inner text, and returns it as a `String`.
    /// If decompression uses more than 2 MiB, first call decompress with limit, and then this method.
    pub fn get_text(&self) -> Result<String, DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = fdeflate::decompress_to_vec(v)
                    .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
                Ok(decode_iso_8859_1(&uncompressed_raw))
            }
            OptCompressed::Uncompressed(s) => Ok(s.clone()),
        }
    }
    /// Compresses the inner text, mutating its own state.
    pub fn compress_text(&mut self) -> Result<(), EncodingError> {
        match &self.text {
            OptCompressed::Uncompressed(s) => {
                // Latin-1 encode first; zlib-compress the encoded bytes with flate2.
                let uncompressed_raw = encode_iso_8859_1(s)?;
                let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
                encoder
                    .write_all(&uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                self.text = OptCompressed::Compressed(
                    encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
                );
            }
            // Already compressed - nothing to do.
            OptCompressed::Compressed(_) => {}
        }
        Ok(())
    }
}
impl EncodableTextChunk for ZTXtChunk {
    /// Serializes the chunk as: keyword, null separator, compression method byte, then the
    /// zlib-compressed text.
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
        let mut data = encode_iso_8859_1(&self.keyword)?;
        if data.is_empty() || data.len() > 79 {
            return Err(TextEncodingError::InvalidKeywordSize.into());
        }
        // Null separator
        data.push(0);
        // Compression method: the only valid value is 0, as of 2021.
        data.push(0);
        match &self.text {
            OptCompressed::Compressed(v) => {
                data.extend_from_slice(&v[..]);
            }
            OptCompressed::Uncompressed(s) => {
                // This code may have a bug. Check for correctness.
                // NOTE(review): this compresses the Latin-1 encoded text directly into the
                // chunk payload via a ZlibEncoder wrapping `data` - verify the compressed
                // output matches `compress_text` + the Compressed arm above.
                let uncompressed_raw = encode_iso_8859_1(s)?;
                let mut encoder = ZlibEncoder::new(data, Compression::fast());
                encoder
                    .write_all(&uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                data = encoder
                    .finish()
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
            }
        };
        encoder::write_chunk(w, chunk::zTXt, &data)
    }
}
/// Struct encoding an iTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ITXtChunk {
    /// The keyword field. This needs to be between 1-79 bytes when encoded as Latin-1.
    pub keyword: String,
    /// Indicates whether the text will be (or was) compressed in the PNG.
    pub compressed: bool,
    /// A hyphen separated list of languages that the keyword is translated to. This is ASCII-7 encoded.
    pub language_tag: String,
    /// Translated keyword. This is UTF-8 encoded.
    pub translated_keyword: String,
    /// Text field of iTXt chunk. It is compressed by default, but can be uncompressed if necessary.
    text: OptCompressed,
}
impl ITXtChunk {
    /// Constructs a new iTXt chunk. Leaves all but keyword and text to default values.
    pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
        Self {
            keyword: keyword.into(),
            compressed: false,
            language_tag: "".to_string(),
            translated_keyword: "".to_string(),
            text: OptCompressed::Uncompressed(text.into()),
        }
    }
    /// Decodes a raw iTXt payload from its already-split fields.
    ///
    /// Compressed text is kept compressed; uncompressed text must be valid UTF-8.
    pub(crate) fn decode(
        keyword_slice: &[u8],
        compression_flag: u8,
        compression_method: u8,
        language_tag_slice: &[u8],
        translated_keyword_slice: &[u8],
        text_slice: &[u8],
    ) -> Result<Self, TextDecodingError> {
        if keyword_slice.is_empty() || keyword_slice.len() > 79 {
            return Err(TextDecodingError::InvalidKeywordSize);
        }
        let keyword = decode_iso_8859_1(keyword_slice);
        // The compression flag must be exactly 0 (uncompressed) or 1 (compressed).
        let compressed = match compression_flag {
            0 => false,
            1 => true,
            _ => return Err(TextDecodingError::InvalidCompressionFlag),
        };
        // The compression method is only meaningful when the text is compressed.
        if compressed && compression_method != 0 {
            return Err(TextDecodingError::InvalidCompressionMethod);
        }
        let language_tag = decode_ascii(language_tag_slice)?.to_owned();
        let translated_keyword = std::str::from_utf8(translated_keyword_slice)
            .map_err(|_| TextDecodingError::Unrepresentable)?
            .to_string();
        let text = if compressed {
            OptCompressed::Compressed(text_slice.to_vec())
        } else {
            OptCompressed::Uncompressed(
                String::from_utf8(text_slice.to_vec())
                    .map_err(|_| TextDecodingError::Unrepresentable)?,
            )
        };
        Ok(Self {
            keyword,
            compressed,
            language_tag,
            translated_keyword,
            text,
        })
    }
    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
    pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
        self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
    }
    /// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
    pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = match fdeflate::decompress_to_vec_bounded(v, limit) {
                    Ok(s) => s,
                    Err(BoundedDecompressionError::OutputTooLarge { .. }) => {
                        return Err(DecodingError::from(
                            TextDecodingError::OutOfDecompressionSpace,
                        ));
                    }
                    Err(_) => {
                        return Err(DecodingError::from(TextDecodingError::InflationError));
                    }
                };
                // Unlike zTXt, iTXt text is UTF-8; invalid sequences are a decoding error.
                self.text = OptCompressed::Uncompressed(
                    String::from_utf8(uncompressed_raw)
                        .map_err(|_| TextDecodingError::Unrepresentable)?,
                );
            }
            // Already uncompressed - nothing to do.
            OptCompressed::Uncompressed(_) => {}
        };
        Ok(())
    }
    /// Decompresses the inner text, and returns it as a `String`.
    /// If decompression takes more than 2 MiB, try `decompress_text_with_limit` followed by this method.
    pub fn get_text(&self) -> Result<String, DecodingError> {
        match &self.text {
            OptCompressed::Compressed(v) => {
                let uncompressed_raw = fdeflate::decompress_to_vec(v)
                    .map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
                String::from_utf8(uncompressed_raw)
                    .map_err(|_| TextDecodingError::Unrepresentable.into())
            }
            OptCompressed::Uncompressed(s) => Ok(s.clone()),
        }
    }
    /// Compresses the inner text, mutating its own state.
    pub fn compress_text(&mut self) -> Result<(), EncodingError> {
        match &self.text {
            OptCompressed::Uncompressed(s) => {
                // iTXt text is already UTF-8, so the raw bytes are compressed as-is.
                let uncompressed_raw = s.as_bytes();
                let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
                encoder
                    .write_all(uncompressed_raw)
                    .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                self.text = OptCompressed::Compressed(
                    encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
                );
            }
            // Already compressed - nothing to do.
            OptCompressed::Compressed(_) => {}
        }
        Ok(())
    }
}
impl EncodableTextChunk for ITXtChunk {
    /// Serializes the chunk as: keyword, null, compression flag, compression method,
    /// language tag, null, translated keyword, null, then the (possibly compressed) text.
    fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
        // Keyword
        let mut data = encode_iso_8859_1(&self.keyword)?;
        if data.is_empty() || data.len() > 79 {
            return Err(TextEncodingError::InvalidKeywordSize.into());
        }
        // Null separator
        data.push(0);
        // Compression flag
        if self.compressed {
            data.push(1);
        } else {
            data.push(0);
        }
        // Compression method
        data.push(0);
        // Language tag
        if !self.language_tag.is_ascii() {
            return Err(EncodingError::from(TextEncodingError::Unrepresentable));
        }
        data.extend(self.language_tag.as_bytes());
        // Null separator
        data.push(0);
        // Translated keyword
        data.extend_from_slice(self.translated_keyword.as_bytes());
        // Null separator
        data.push(0);
        // Text
        if self.compressed {
            match &self.text {
                // Already compressed - copy the stored bytes verbatim.
                OptCompressed::Compressed(v) => {
                    data.extend_from_slice(&v[..]);
                }
                // Compress the UTF-8 text on the fly, appending directly into `data`.
                OptCompressed::Uncompressed(s) => {
                    let uncompressed_raw = s.as_bytes();
                    let mut encoder = ZlibEncoder::new(data, Compression::fast());
                    encoder
                        .write_all(uncompressed_raw)
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                    data = encoder
                        .finish()
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                }
            }
        } else {
            match &self.text {
                // Stored compressed but the flag requests plain text: inflate before writing.
                OptCompressed::Compressed(v) => {
                    let uncompressed_raw = fdeflate::decompress_to_vec(v)
                        .map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
                    data.extend_from_slice(&uncompressed_raw[..]);
                }
                OptCompressed::Uncompressed(s) => {
                    data.extend_from_slice(s.as_bytes());
                }
            }
        }
        encoder::write_chunk(w, chunk::iTXt, &data)
    }
}

43
vendor/png/src/traits.rs vendored Normal file
View File

@@ -0,0 +1,43 @@
use std::io;
// Implements `ReadBytesExt<$output_type>` for every `io::Read`, reading the value as
// big-endian bytes.
macro_rules! read_bytes_ext {
    ($output_type:ty) => {
        impl<W: io::Read + ?Sized> ReadBytesExt<$output_type> for W {
            #[inline]
            fn read_be(&mut self) -> io::Result<$output_type> {
                // Read exactly size_of::<$output_type>() bytes, then reinterpret big-endian.
                let mut bytes = [0u8; std::mem::size_of::<$output_type>()];
                self.read_exact(&mut bytes)?;
                Ok(<$output_type>::from_be_bytes(bytes))
            }
        }
    };
}
// Implements `WriteBytesExt<$input_type>` for every `io::Write`, writing the value as
// big-endian bytes.
macro_rules! write_bytes_ext {
    ($input_type:ty) => {
        impl<W: io::Write + ?Sized> WriteBytesExt<$input_type> for W {
            #[inline]
            fn write_be(&mut self, n: $input_type) -> io::Result<()> {
                self.write_all(&n.to_be_bytes())
            }
        }
    };
}
/// Read extension to read big endian data
pub trait ReadBytesExt<T>: io::Read {
    /// Read `T` from a bytes stream. Most significant byte first.
    fn read_be(&mut self) -> io::Result<T>;
}
/// Write extension to write big endian data
pub trait WriteBytesExt<T>: io::Write {
    /// Writes `T` to a bytes stream. Most significant byte first.
    fn write_be(&mut self, _: T) -> io::Result<()>;
}
// Instantiate the extension traits for the integer widths the PNG format actually uses.
read_bytes_ext!(u8);
read_bytes_ext!(u16);
read_bytes_ext!(u32);
write_bytes_ext!(u32);