Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

vendor/png/.cargo-checksum.json vendored Normal file

@@ -0,0 +1 @@
{"files":{"CHANGES.md":"10f69a3816c2e3ad5000190eacbfbdff49443a1ea4e857c0d76f47b90bfa6f4a","Cargo.lock":"ec5134bceca1b6cb6106843edad4abd9b17f4e11c095a622a2c4924f172187bf","Cargo.toml":"49f6371a3cb2902abffd8f1695d76ffa4c73c5c50c9302f799b9f31f9b45bb36","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"eaf40297c75da471f7cda1f3458e8d91b4b2ec866e609527a13acfa93b638652","README.md":"2a69e61783cf6433b712acfe5b32ba1c539f947ccd00b1c07312d5eec3445b46","benches/README.md":"0c60c3d497abdf6c032863aa47da41bc6bb4f5ff696d45dec0e6eb33459b14b0","benches/adam7.rs":"ac81f74f2547790f85b09344d79f881718353b31bcb39402b115582df408701c","benches/decoder.rs":"390e75054ee3a4237a57aa255ac35429f3d81c1d2fc18bd0464fbc13e85b1f8c","benches/expand_paletted.rs":"945123a752835f13a4cc1c82470602183f9613c71b1e81f6a8096e2bb227423b","benches/unfilter.rs":"9186de79e6eef9df127f22051180c8286b75a7409c9d13b15be63dd1ad62e3f9","examples/change-png-info.rs":"bc4f3d7af7cee03714e80c3d58f5633f24b3406721af8eedd0a72ac78da6356f","examples/corpus-bench.rs":"b994744b00335536903408bc95fa203b637ee5401e568b6043d1e86fd234007e","examples/png-generate.rs":"e4cca06b9cc3291b52261d88a2e016940620b613fc1402bb54ccc68f73026a96","examples/pngcheck.rs":"575084dc9fe8754677f9875c0b1790c3f52adb58c19de63b59d28f6fea47dd71","src/adam7.rs":"60cb6d675d95a0f193b0aadf38c43404d38a726f4dec63d13e0744a8db4c384c","src/benchable_apis.rs":"be9c486ebaaa698517b7c0ac5c4d9ecf812eee4e6454bd32dfcb790048b3a0f9","src/chunk.rs":"3bfd87e49b951e0ec4372e34d6374a4813cb499f89104f2a20e109c95b63a2ac","src/common.rs":"f7289605ac9034a31033a844d8abc6c362f23fcd19cbf636b5bd5e191a0c4d53","src/decoder/interlace_info.rs":"ab953aabc548af9bdafdefcaeacf19bced6010ea15831039d47715661010c1d2","src/decoder/mod.rs":"db5e8258b9517cea28188f6d43a387cc161454f758c6e29449d8b7476f5866cd","src/decoder/read_decoder.rs":"ddde714e6ee17298850f934fd2328f7095cd3c9d096930867edef0bfab91cc5d","src/decoder/stream.rs":"80f529c8924e72cf8c72f0671aa2327939e3fe688e8b4b22c79983593e69b65b","src/decoder/transform.rs":"3ec21b666d543fc6e2203313663a0214837cb5dc5bdb32d4a3c23d4a8567cecf","src/decoder/transform/palette.rs":"8eed7854aa122c1ecf8f3f41e2cccdc08d60a0c323f3ef16c3dd5f1710bde38a","src/decoder/unfiltering_buffer.rs":"d8753a284cf2f962bc56bf8f22ea8aba763b5f04734860e9515768e056091bd6","src/decoder/zlib.rs":"c6b6df0d1ff9b7aa6fea6636eda49ddfa2952c88f71208950706b6368e7da999","src/encoder.rs":"31cfcb7f39b1ec4495a906046819d52405aab3f21fc9bef2b2c2228aa5fb40a1","src/filter.rs":"1d3e23c1914723fc6e86a3c34a0c9af97fe776e0050ed6d733976af6f9fd1ac8","src/lib.rs":"750d56a7859502bf55e8f4050621f097e5c5ef36bd2be88f44aa44b7a1adc67c","src/srgb.rs":"da1609902064016853410633926d316b5289d4bbe1fa469b21f116c1c1b2c18e","src/test_utils.rs":"9002657a02768d68fef55041629f4b0067570fa93e2ed91e40367dca60eed91c","src/text_metadata.rs":"1df449a40f1798daf5b77195cccc644d3d5ae0e5cc629d71b5399b457b1438a8","src/traits.rs":"79d357244e493f5174ca11873b0d5c443fd4a5e6e1f7c6df400a1767c5ad05b2"},"package":"97baced388464909d42d89643fe4361939af9b7ce7a31ee32a168f832a70f2a0"}

vendor/png/CHANGES.md vendored Normal file

@@ -0,0 +1,296 @@
## 0.18.0
### API Breaking Changes
* Removed deprecated `Info::encode` and `Encoder::set_srgb` methods.
* Improved the compression settings API for encoding.
* `Decoder` now requires a reader that implements `Seek` and `BufRead` traits.
* Bump bitflags dependency to 2.0.
* `StreamingDecoder::update` now takes a structured `UnfilterBuf` argument
instead of a direct reference to a vector. This allows in-place
decompression. There is a public constructor for `UnfilterBuf`.
* The methods `Decoder::output_buffer_size` and `output_line_size` now return
`Option<usize>`, so that these calculations no longer overflow on targets where
the required buffers cannot be represented in the address space. They return
the mathematically correct size where possible (see the sketch after this list).
* The `Decoded` enum returned from `StreamingDecoder::update` was simplified and
no longer contains any chunk payload data. Instead, it now contains only chunk
events, where every chunk that was started will eventually be ended by
`ChunkComplete`, `BadAncillaryChunk`, or `SkippedAncillaryChunk`.
* Ancillary chunks, i.e. those not critical to the decoder's interpretation of
the file, that fail to parse are now terminated with a `BadAncillaryChunk` event
and no longer produce a `DecodingError`. This includes text chunks as well as
many metadata chunks (except for `fcTL`, which we deem crucial to the parser's
understanding of the image sequence in an APNG despite being technically
ancillary).
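
A sketch of what a decode call site looks like under these changes (the path,
error strings, and error type here are illustrative, not part of the crate's API):

```rust
use std::fs::File;
use std::io::BufReader;

fn decode(path: &str) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    // `Decoder` now requires `BufRead + Seek`; `BufReader<File>` provides both.
    let decoder = png::Decoder::new(BufReader::new(File::open(path)?));
    let mut reader = decoder.read_info()?;
    // `output_buffer_size` now returns `Option<usize>`; `None` signals that
    // the buffer cannot be represented in this target's address space.
    let size = reader
        .output_buffer_size()
        .ok_or("image too large for this target")?;
    let mut buf = vec![0u8; size];
    let info = reader.next_frame(&mut buf)?;
    buf.truncate(info.buffer_size());
    Ok(buf)
}
```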
### Additions
* Added the `Reader::read_row` method (see the sketch after this list).
* Add support for parsing eXIf chunk.
* Treat most auxiliary chunk errors as benign.
* Added `splat_interlaced_row`, which implements an alternative method for
merging Adam7 interlaced lines into the output buffer that is more suitable
for the presentation of progressive states of the buffer.
* Added `Adam7Variant`, documenting the various methods for applying interlaced
rows and preparing an API to progressively read frames through `Decoder`.
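
A row-by-row decoding sketch built on `read_row`, modeled on the
`bench_read_row` helper in `benches/decoder.rs` below (error strings are
illustrative):

```rust
use std::io::{BufRead, Seek};

fn decode_row_by_row<R: BufRead + Seek>(
    decoder: png::Decoder<R>,
) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
    let mut reader = decoder.read_info()?;
    let row_len = reader
        .output_line_size(reader.info().width)
        .ok_or("row size overflows usize")?;
    let mut image = vec![0u8; reader.output_buffer_size().ok_or("image too large")?];
    for row in image.chunks_exact_mut(row_len) {
        // Decodes one output row per call; yields `Ok(Some(_))` while rows remain.
        reader.read_row(row)?;
    }
    Ok(image)
}
```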
### Changes
* The decoding of Adam7 interlaced data is now much faster.
* The `acTL` chunk is now ignored when it is invalid, instead of producing
errors while reading or decoding the following APNG chunks.
* The requirement that the `fcTL` chunk for the default image match the image
size indicated in the IHDR is now enforced.
* More minor format errors in auxiliary chunks are now tolerated by the
decoder, which disregards the malformed chunk instead of failing.
* Adam7 interlacing on 32-bit targets now handles some cases correctly that
previously wrote bytes to the wrong pixel indices due to overflows.
## 0.17.16
* Make gAMA and cHRM fallback optional for sRGB ([#547])
* Pass through nightly feature to crc32fast crate to get SIMD crc32 on Aarch64 ([#545])
* Fix bug in iCCP chunk encoding ([#458])
* Deprecate Info::encode ([#550])
[#545]: https://github.com/image-rs/image-png/pull/545
[#458]: https://github.com/image-rs/image-png/pull/548
[#550]: https://github.com/image-rs/image-png/pull/550
[#547]: https://github.com/image-rs/image-png/pull/547
## 0.17.15
### Added
* Add a public API to advance to the next frame in APNG decoder ([#518])
* Add APIs to write ICC and EXIF chunks ([#526])
* Add support for parsing the sBIT chunk ([#524])
* Add support for parsing the bKGD chunk ([#538])
* Add support for parsing the cICP chunk ([#529])
* Add support for parsing mDCV and cLLI chunks ([#528], [#543])
### Changed
* Improve performance of Paeth filter during decoding ([#512], [#539])
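
For context, unfiltering undoes the Paeth filter by adding back the Paeth
predictor from the PNG specification. A reference (scalar, unoptimized) form of
the predictor follows; this is illustrative, not the crate's vectorized
implementation:

```rust
/// PNG Paeth predictor: picks whichever of left (a), above (b), or
/// upper-left (c) is closest to the linear estimate p = a + b - c.
fn paeth_predictor(a: u8, b: u8, c: u8) -> u8 {
    let p = i16::from(a) + i16::from(b) - i16::from(c);
    let pa = (p - i16::from(a)).abs();
    let pb = (p - i16::from(b)).abs();
    let pc = (p - i16::from(c)).abs();
    if pa <= pb && pa <= pc {
        a
    } else if pb <= pc {
        b
    } else {
        c
    }
}
```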
### Fixed
* Avoid an infinite loop when retrying after a fatal error using `StreamingDecoder` ([#520])
* Fixed chunk order in encoded files ([#526])
[#512]: https://github.com/image-rs/image-png/pull/512
[#518]: https://github.com/image-rs/image-png/pull/518
[#520]: https://github.com/image-rs/image-png/pull/520
[#524]: https://github.com/image-rs/image-png/pull/524
[#526]: https://github.com/image-rs/image-png/pull/526
[#528]: https://github.com/image-rs/image-png/pull/528
[#529]: https://github.com/image-rs/image-png/pull/529
[#538]: https://github.com/image-rs/image-png/pull/538
[#539]: https://github.com/image-rs/image-png/pull/539
[#543]: https://github.com/image-rs/image-png/pull/543
## 0.17.14
* Updated to miniz_oxide 0.8.0.
* Added public API to consume interlaced rows one by one ([#495])
* Improved support for resuming decoding after an `UnexpectedEof`, which lets you start parsing a file before it's fully received over the network ([#496])
* Fixed some broken links in documentation, improved some documentation comments
[#495]: https://github.com/image-rs/image-png/pull/495
[#496]: https://github.com/image-rs/image-png/pull/496
## 0.17.13
* Fix `Send` bound on `Reader`.
## 0.17.12
* Reject zero-sized frames.
* Optimized decoding of paletted images.
* Removed remaining uses of miniz_oxide for decoding.
* Correct lifetime used for `Info` struct.
* Fix build issue with `-Z minimal-versions`.
## 0.17.11
* Ignore subsequent iCCP chunks to match libpng behavior.
* Added an option to ignore ancillary chunks with invalid CRC.
* Added `new_with_info` constructor for encoder.
* Removed hard-coded memory limits.
* No longer allow zero sized images.
* Added `Reader::finish` to read all the auxiliary chunks that come after the
image (see the sketch below).
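
A sketch of draining trailing chunks with `Reader::finish` (assuming it returns
a `Result`, matching how the decoder reports other errors):

```rust
use std::io::{BufRead, Seek};

fn read_to_end<R: BufRead + Seek>(
    mut reader: png::Reader<R>,
    frame: &mut [u8],
) -> Result<(), png::DecodingError> {
    reader.next_frame(frame)?;
    // Consume chunks that appear after the image data so that trailing
    // metadata is reflected in `reader.info()`.
    reader.finish()
}
```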
## 0.17.10
* Added Transformations::ALPHA
* Enable encoding pixel dimensions
## 0.17.9
* Fixed a bug in ICC profile decompression.
* Improved unfilter performance.
## 0.17.8
* Increased MSRV to 1.57.0.
* Substantially optimized encoding and decoding:
- Autovectorize filtering and unfiltering.
- Make the "fast" compression preset use fdeflate.
- Switch decompression to always use fdeflate.
- Updated to miniz_oxide 0.7.
- Added an option to ignore checksums.
* Added corpus-bench example which measures the compression ratio and time to
re-encode and subsequently decode a corpus of images.
* More fuzz testing.
## 0.17.7
* Fixed handling broken tRNS chunk.
* Updated to miniz_oxide 0.6.
## 0.17.6
* Added `Decoder::read_header_info` to query the information contained in the
PNG header.
* Switched to using the flate2 crate for encoding.
## 0.17.5
* Fixed a regression, introduced by chunk validation, that made the decoder
sensitive to the order of `gAMA`, `cHRM`, and `sRGB` chunks.
## 0.17.4
* Added `{Decoder,StreamDecoder}::set_ignore_text_chunk` to disable decoding of
ancillary text chunks during the decoding process (these chunks are decoded by
default).
* Added duplicate chunk checks. The decoder now enforces that standard chunks
such as palette, gamma, … occur at most once as specified.
* Added `#[forbid(unsafe_code)]` again. This may come at a minor performance
cost when decoding ASCII text for now.
* Fixed a bug where decoding of large chunks (>32kB) failed to produce the
correct result or failed the image decoding outright. As new chunk types gained
decoding support, this introduced regressions relative to previous versions.
## 0.17.3
* Fixed a bug where `Writer::finish` would not drop the underlying writer. This
would fail to flush and leak memory when using a buffered file writer.
* Calling `Writer::finish` will now eagerly flush the underlying writer,
returning any error that this operation may result in.
* Errors in inflate are now diagnosed with more detail.
* The color and depth combination is now checked in the stream decoder.
## 0.17.2
* Added support for encoding and decoding tEXt/zTXt/iTXt chunks.
* Added `Encoder::validate_sequence` to enable validation of the written frame
sequence, that is, whether the number of written images is consistent with the
animation state.
* Validation is now off by default. The basis of the new validation had been
introduced in 0.17 but this fixes some cases where this validation was too
aggressive compared to previous versions.
* Added `Writer::finish` to fully check the write of the end of an image
instead of silently ignoring potential errors in `Drop` (see the sketch after
this list).
* The `Writer::write_chunk` method now validates that the computed chunk length
does not overflow the limit set by PNG.
* Fix an issue where the library would panic or even abort the process when
`flush` or `write` of an underlying writer panicked, or in some other uses of
`StreamWriter`.
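
A sketch of an encode path that uses `Writer::finish` to surface end-of-stream
errors instead of relying on `Drop` (the function and its parameters are
illustrative):

```rust
fn encode_rgba(
    out: impl std::io::Write,
    width: u32,
    height: u32,
    pixels: &[u8],
) -> Result<(), png::EncodingError> {
    let mut encoder = png::Encoder::new(out, width, height);
    encoder.set_color(png::ColorType::Rgba);
    encoder.set_depth(png::BitDepth::Eight);
    let mut writer = encoder.write_header()?;
    writer.write_image_data(pixels)?;
    // Eagerly flushes and reports errors that `Drop` would swallow.
    writer.finish()
}
```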
## 0.17.1
* Fix panic in adaptive filter method `sum_buffer`
## 0.17.0
* Increased MSRV to 1.46.0
* Rework output info usage
* Implement APNG encoding
* Improve ergonomics of encoder set_palette and set_trns methods
* Make Info struct non-exhaustive
* Make encoder a core feature
* Default Transformations to Identity
* Add Adaptive filtering method for encoding
* Fix SCREAM_CASE on ColorType variants
* Forbid unsafe code
## 0.16.7
* Added `Encoder::set_trns` to register a transparency table to be written.
## 0.16.6
* Fixed silent integer overflows in buffer size calculation, resulting in
panics from assertions and out-of-bounds accesses when actually decoding.
This improves the stability of 32-bit and 16-bit targets and makes decoding
run as stably as on 64-bit.
* Reject invalid color/depth combinations. Some would lead to mismatched output
buffer size and panics during decoding.
* Add `Clone` impl for `Info` struct.
## 0.16.5
* Decoding of APNG subframes is now officially supported and specified. Note
that dispose ops and positioning in the image need to be done by the caller.
* Added encoding of indexed data.
* Switched to `miniz_oxide` for decompressing image data, with 30%-50% speedup
in common cases and up to 200% in special ones.
* Images are now accepted only with consecutive IDAT chunks, which rules out
data loss.
## 0.16.4
* The fdAT frames are no longer inspected when the main image is read. This
would previously be the case for non-interlaced images and could lead to
incorrect failures, e.g. an error of the form `"invalid filter method"`.
* The last IDAT chunk's checksum is now always validated; it was sometimes
ignored.
* Prevent encoding color/bit-depth combinations forbidden by the specification.
* The fixes for APNG/fdAT enable further implementation. The _next_ release is
expected to officially support APNG.
## 0.16.3
* Fix encoding with filtering methods Up, Avg, Paeth
* Optimize decoding throughput by up to +30%
## 0.16.2
* Added method constructing an owned stream encoder.
## 0.16.1
* Addressed files bloating the packed crate
## 0.16.0
* Fix a bug compressing images with deflate
* Address use of deprecated error interfaces
## 0.15.3
* Fix panic while trying to encode empty images. Such images are no longer
accepted and now error when calling `write_header`, before any data has been
written. The specification does not permit empty images.
## 0.15.2
* Fix `EXPAND` transformation to leave bit depths above 8 unchanged
## 0.15.1
* Fix encoding writing invalid chunks. Images written can be corrected: see
https://github.com/image-rs/image/issues/1074 for a recovery.
* Fix a panic in bit unpacking with checked arithmetic (e.g. in debug builds)
* Added better fuzzer integration
* Update `term`, `rand` dev-dependency
* Note: The `show` example program requires a newer compiler than 1.34.2 on
some targets due to depending on `glium`. This is not considered a breaking
bug.
## 0.15
Begin of changelog

vendor/png/Cargo.lock generated vendored Normal file

@@ -0,0 +1,929 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "adler2"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstream"
version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"is_terminal_polyfill",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
[[package]]
name = "anstyle-parse"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2"
dependencies = [
"windows-sys",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a"
dependencies = [
"anstyle",
"once_cell_polyfill",
"windows-sys",
]
[[package]]
name = "approx"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6"
dependencies = [
"num-traits",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "bitflags"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
[[package]]
name = "bumpalo"
version = "3.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]]
name = "clap"
version = "4.5.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57"
dependencies = [
"clap_builder",
"clap_derive",
]
[[package]]
name = "clap_builder"
version = "4.5.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41"
dependencies = [
"anstream",
"anstyle",
"clap_lex",
"strsim",
]
[[package]]
name = "clap_derive"
version = "4.5.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "clap_lex"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
[[package]]
name = "colorchoice"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "crc32fast"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
dependencies = [
"cfg-if",
]
[[package]]
name = "criterion"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1c047a62b0cc3e145fa84415a3191f628e980b194c2755aa12300a4e6cbd928"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"itertools",
"num-traits",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b1bcc0dc7dfae599d84ad0b1a55f80cde8af3725da8313b528da95ef783e338"
dependencies = [
"cast",
"itertools",
]
[[package]]
name = "crossbeam-channel"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa"
dependencies = [
"cfg-if",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-deque"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e"
dependencies = [
"cfg-if",
"crossbeam-epoch",
"crossbeam-utils",
]
[[package]]
name = "crossbeam-epoch"
version = "0.9.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c00d6d2ea26e8b151d99093005cb442fb9a37aeaca582a03ec70946f49ab5ed9"
dependencies = [
"cfg-if",
"crossbeam-utils",
"lazy_static",
"memoffset",
"scopeguard",
]
[[package]]
name = "crossbeam-utils"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5e5bed1f1c269533fa816a0a5492b3545209a205ca1a54842be180eb63a16a6"
dependencies = [
"cfg-if",
"lazy_static",
]
[[package]]
name = "crunchy"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929"
[[package]]
name = "either"
version = "1.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
[[package]]
name = "fdeflate"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e6853b52649d4ac5c0bd02320cddc5ba956bdb407c4b75a2c6b75bf51500f8c"
dependencies = [
"simd-adler32",
]
[[package]]
name = "flate2"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece"
dependencies = [
"crc32fast",
"libz-rs-sys",
"miniz_oxide",
]
[[package]]
name = "getrandom"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
dependencies = [
"cfg-if",
"libc",
"r-efi",
"wasi",
]
[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "half"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "459196ed295495a68f7d7fe1d84f6c4b7ff0e21fe3017b2f283c6fac3ad803c9"
dependencies = [
"cfg-if",
"crunchy",
]
[[package]]
name = "heck"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "is_terminal_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
[[package]]
name = "itertools"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
[[package]]
name = "itoa"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35"
[[package]]
name = "js-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f"
dependencies = [
"once_cell",
"wasm-bindgen",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.167"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09d6582e104315a817dff97f75133544b2e094ee22447d2acf4a74e189ba06fc"
[[package]]
name = "libz-rs-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6489ca9bd760fe9642d7644e827b0c9add07df89857b0416ee15c1cc1a3b8c5a"
dependencies = [
"zlib-rs",
]
[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "miniz_oxide"
version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a"
dependencies = [
"adler2",
"simd-adler32",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "once_cell"
version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "once_cell_polyfill"
version = "1.70.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
[[package]]
name = "oorandom"
version = "11.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]]
name = "png"
version = "0.18.0"
dependencies = [
"approx",
"bitflags",
"byteorder",
"clap",
"crc32fast",
"criterion",
"fdeflate",
"flate2",
"glob",
"miniz_oxide",
"rand",
]
[[package]]
name = "ppv-lite86"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
[[package]]
name = "proc-macro2"
version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
dependencies = [
"proc-macro2",
]
[[package]]
name = "r-efi"
version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
[[package]]
name = "rand"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
dependencies = [
"rand_chacha",
"rand_core",
]
[[package]]
name = "rand_chacha"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb"
dependencies = [
"ppv-lite86",
"rand_core",
]
[[package]]
name = "rand_core"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
dependencies = [
"getrandom",
]
[[package]]
name = "rayon"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90"
dependencies = [
"autocfg",
"crossbeam-deque",
"either",
"rayon-core",
]
[[package]]
name = "rayon-core"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e"
dependencies = [
"crossbeam-channel",
"crossbeam-deque",
"crossbeam-utils",
"lazy_static",
"num_cpus",
]
[[package]]
name = "regex"
version = "1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
dependencies = [
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
[[package]]
name = "rustversion"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "ryu"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "serde"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.143"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "simd-adler32"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
[[package]]
name = "strsim"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
[[package]]
name = "syn"
version = "2.0.106"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]]
name = "unicode-ident"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "utf8parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "walkdir"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
dependencies = [
"same-file",
"winapi",
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.14.2+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3"
dependencies = [
"wit-bindgen-rt",
]
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5"
dependencies = [
"cfg-if",
"once_cell",
"rustversion",
"wasm-bindgen-macro",
]
[[package]]
name = "wasm-bindgen-backend"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
dependencies = [
"bumpalo",
"log",
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
]
[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de"
dependencies = [
"proc-macro2",
"quote",
"syn",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
version = "0.2.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d"
dependencies = [
"unicode-ident",
]
[[package]]
name = "web-sys"
version = "0.3.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
dependencies = [
"winapi",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-link"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.53.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
[[package]]
name = "windows_i686_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
[[package]]
name = "windows_i686_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
[[package]]
name = "wit-bindgen-rt"
version = "0.39.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1"
dependencies = [
"bitflags",
]
[[package]]
name = "zlib-rs"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "868b928d7949e09af2f6086dfc1e01936064cc7a819253bce650d4e2a2d63ba8"

vendor/png/Cargo.toml vendored Normal file

@@ -0,0 +1,130 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.73"
name = "png"
version = "0.18.0"
authors = ["The image-rs Developers"]
build = false
include = [
"/LICENSE-MIT",
"/LICENSE-APACHE",
"/README.md",
"/CHANGES.md",
"/src/",
"/examples/",
"/benches/",
]
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "PNG decoding and encoding library in pure Rust"
readme = "README.md"
categories = ["multimedia::images"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/image-rs/image-png"
[features]
benchmarks = []
unstable = ["crc32fast/nightly"]
zlib-rs = ["flate2/zlib-rs"]
[lib]
name = "png"
path = "src/lib.rs"
[[example]]
name = "change-png-info"
path = "examples/change-png-info.rs"
[[example]]
name = "corpus-bench"
path = "examples/corpus-bench.rs"
[[example]]
name = "png-generate"
path = "examples/png-generate.rs"
[[example]]
name = "pngcheck"
path = "examples/pngcheck.rs"
[[bench]]
name = "adam7"
path = "benches/adam7.rs"
harness = false
required-features = ["benchmarks"]
[[bench]]
name = "decoder"
path = "benches/decoder.rs"
harness = false
[[bench]]
name = "expand_paletted"
path = "benches/expand_paletted.rs"
harness = false
required-features = ["benchmarks"]
[[bench]]
name = "unfilter"
path = "benches/unfilter.rs"
harness = false
required-features = ["benchmarks"]
[dependencies.bitflags]
version = "2.0"
[dependencies.crc32fast]
version = "1.2.0"
[dependencies.fdeflate]
version = "0.3.3"
[dependencies.flate2]
version = "1.0.35"
[dependencies.miniz_oxide]
version = "0.8"
features = ["simd"]
[dev-dependencies.approx]
version = "0.5.1"
[dev-dependencies.byteorder]
version = "1.5.0"
[dev-dependencies.clap]
version = "4.0"
features = ["derive"]
[dev-dependencies.criterion]
version = "0.7.0"
features = ["cargo_bench_support"]
default-features = false
[dev-dependencies.glob]
version = "0.3"
[dev-dependencies.rand]
version = "0.9.2"
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies.criterion]
version = "0.7.0"
[lints.rust.unexpected_cfgs]
level = "warn"
priority = 0
check-cfg = ["cfg(fuzzing)"]

vendor/png/LICENSE-APACHE vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/png/LICENSE-MIT vendored Normal file

@@ -0,0 +1,25 @@
Copyright (c) 2015 nwin
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

vendor/png/README.md vendored Normal file

@@ -0,0 +1,32 @@
# PNG Decoder/Encoder
[![Build Status](https://github.com/image-rs/image-png/workflows/Rust%20CI/badge.svg)](https://github.com/image-rs/image-png/actions)
[![Documentation](https://docs.rs/png/badge.svg)](https://docs.rs/png)
[![Crates.io](https://img.shields.io/crates/v/png.svg)](https://crates.io/crates/png)
[![License](https://img.shields.io/crates/l/png.svg)](https://github.com/image-rs/image-png)
Robust and performant PNG decoder/encoder in pure Rust. Also supports [APNG](https://en.wikipedia.org/wiki/APNG).
No `unsafe` code, battle-tested, and fuzzed on [OSS-fuzz](https://github.com/google/oss-fuzz).
## Performance
Performance is typically on par with or better than libpng.
Includes a fast encoding mode powered by [fdeflate](https://crates.io/crates/fdeflate) that is dramatically faster than the fastest mode of libpng while *simultaneously* providing a better compression ratio.
On a nightly Rust compiler you can slightly speed up decoding of some images by enabling the `unstable` feature of this crate.
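A minimal decoding sketch (error handling is condensed and the file name is a placeholder):

```rust
use std::fs::File;
use std::io::BufReader;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let decoder = png::Decoder::new(BufReader::new(File::open("image.png")?));
    let mut reader = decoder.read_info()?;
    let mut buf = vec![0u8; reader.output_buffer_size().ok_or("image too large")?];
    let info = reader.next_frame(&mut buf)?;
    let pixels = &buf[..info.buffer_size()];
    println!("{}x{} {:?}", info.width, info.height, info.color_type);
    Ok(())
}
```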
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or https://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or https://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.

vendor/png/benches/README.md vendored Normal file

@@ -0,0 +1,6 @@
# Getting started with benchmarking
To run the benchmarks you need a nightly Rust toolchain. Then launch them with

    rustup run nightly cargo bench --features=benchmarks

vendor/png/benches/adam7.rs vendored Normal file

@@ -0,0 +1,41 @@
//! Usage example:
//!
//! ```
//! $ alias bench="rustup run nightly cargo bench"
//! $ bench --bench=adam7 --features=benchmarks -- --save-baseline my_baseline
//! ... tweak something, say the expansion of 8-bit ...
//! $ bench --bench=adam7 --features=benchmarks -- bpp=8 --baseline my_baseline
//! ```
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use png::benchable_apis::adam7;
fn expand_all(c: &mut Criterion) {
let expand_bpp = [1, 2, 4, 8, 16, 24, 32];
let expand_size = [1 << 4, 1 << 8, 1 << 12];
for &bpp in expand_bpp.iter() {
for &sz in expand_size.iter() {
bench_group_expand_full(c, sz, bpp);
}
}
}
criterion_group!(benches, expand_all);
criterion_main!(benches);
fn bench_group_expand_full(c: &mut Criterion, sz: u32, bpp: u8) {
let mut group = c.benchmark_group("expand-adam7");
group.throughput(Throughput::Bytes(
u64::from(sz) * (u64::from(sz) * u64::from(bpp)).div_ceil(8),
));
let row_bytes = (sz as usize * usize::from(bpp)).div_ceil(8);
let buffer = vec![0u8; (sz as usize) * row_bytes];
let buffer = core::cell::RefCell::new(buffer);
let rowdata = core::hint::black_box(vec![0u8; row_bytes]);
group.bench_with_input(format!("size={sz:?}/bpp={bpp}"), &buffer, |b, img| {
b.iter(|| adam7(&mut *img.borrow_mut(), &rowdata, sz, sz, bpp));
});
}

vendor/png/benches/decoder.rs vendored Normal file

@@ -0,0 +1,106 @@
use std::{fs, io::Cursor};
use criterion::{
criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, Throughput,
};
use png::{Decoder, Reader, Transformations};
#[path = "../src/test_utils.rs"]
mod test_utils;
fn load_all(c: &mut Criterion) {
let mut g = c.benchmark_group("decode");
for entry in fs::read_dir("tests/benches/").unwrap().flatten() {
match entry.path().extension() {
Some(st) if st == "png" => {}
_ => continue,
}
let data = fs::read(entry.path()).unwrap();
bench_file(&mut g, data, entry.file_name().into_string().unwrap());
}
g.finish();
// Small IDATS
let mut g = c.benchmark_group("generated-noncompressed-4k-idat");
bench_noncompressed_png(&mut g, 8, 4096); // 256 B
bench_noncompressed_png(&mut g, 128, 4096); // 64 KB
bench_noncompressed_png(&mut g, 2048, 4096); // 16 MB
bench_noncompressed_png(&mut g, 12288, 4096); // 576 MB
g.finish();
// Normal IDATS
let mut g = c.benchmark_group("generated-noncompressed-64k-idat");
bench_noncompressed_png(&mut g, 128, 65536); // 64 KB
bench_noncompressed_png(&mut g, 2048, 65536); // 16 MB
bench_noncompressed_png(&mut g, 12288, 65536); // 576 MB
g.finish();
// Large IDATS
let mut g = c.benchmark_group("generated-noncompressed-2g-idat");
bench_noncompressed_png(&mut g, 2048, 0x7fffffff); // 16 MB
bench_noncompressed_png(&mut g, 12288, 0x7fffffff); // 576 MB
g.finish();
// Incremental decoding via `read_row`
let mut g = c.benchmark_group("row-by-row");
let mut data = Vec::new();
test_utils::write_noncompressed_png(&mut data, 128, 4096);
bench_read_row(&mut g, data, "128x128-4k-idat");
g.finish();
}
criterion_group! {benches, load_all}
criterion_main!(benches);
fn bench_noncompressed_png(g: &mut BenchmarkGroup<WallTime>, size: u32, idat_bytes: usize) {
let mut data = Vec::new();
test_utils::write_noncompressed_png(&mut data, size, idat_bytes);
bench_file(g, data, format!("{size}x{size}.png"));
}
/// This benchmarks decoding via a call to `Reader::next_frame`.
fn bench_file(g: &mut BenchmarkGroup<WallTime>, data: Vec<u8>, name: String) {
if data.len() > 1_000_000 {
g.sample_size(10);
}
let mut reader = create_reader(data.as_slice());
let mut image = vec![0; reader.output_buffer_size().unwrap()];
let info = reader.next_frame(&mut image).unwrap();
g.throughput(Throughput::Bytes(info.buffer_size() as u64));
g.bench_with_input(name, &data, |b, data| {
b.iter(|| {
let mut reader = create_reader(data.as_slice());
reader.next_frame(&mut image).unwrap();
})
});
}
/// This benchmarks decoding via a sequence of `Reader::read_row` calls.
fn bench_read_row(g: &mut BenchmarkGroup<WallTime>, data: Vec<u8>, name: &str) {
let reader = create_reader(data.as_slice());
let mut image = vec![0; reader.output_buffer_size().unwrap()];
let bytes_per_row = reader.output_line_size(reader.info().width).unwrap();
g.throughput(Throughput::Bytes(image.len() as u64));
g.bench_with_input(name, &data, |b, data| {
b.iter(|| {
let mut reader = create_reader(data.as_slice());
for output_row in image.chunks_exact_mut(bytes_per_row) {
reader.read_row(output_row).unwrap().unwrap();
}
})
});
}
fn create_reader(data: &[u8]) -> Reader<Cursor<&[u8]>> {
let mut decoder = Decoder::new(Cursor::new(data));
// Cover default transformations used by the `image` crate when constructing
// `image::codecs::png::PngDecoder`.
decoder.set_transformations(Transformations::EXPAND);
decoder.read_info().unwrap()
}

155
vendor/png/benches/expand_paletted.rs vendored Normal file

@@ -0,0 +1,155 @@
//! Usage example:
//!
//! ```
//! $ alias bench="rustup run nightly cargo bench"
//! $ bench --bench=expand_paletted --features=benchmarks -- --save-baseline my_baseline
//! ... tweak something ...
//! $ bench --bench=expand_paletted --features=benchmarks -- --baseline my_baseline
//! ```
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use png::benchable_apis::{create_info_from_plte_trns_bitdepth, create_transform_fn, TransformFn};
use png::{Info, Transformations};
use rand::Rng;
use std::fmt::{self, Display};
#[derive(Clone, Copy)]
enum TrnsPresence {
Present,
Absent,
}
impl Display for TrnsPresence {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
TrnsPresence::Present => write!(f, "trns=yes"),
TrnsPresence::Absent => write!(f, "trns=no"),
}
}
}
fn expand_paletted_all(c: &mut Criterion) {
let trns_options = [TrnsPresence::Absent, TrnsPresence::Present];
let bit_depths = [4, 8];
let input_size = {
let typical_l1_cache_size = 32 * 1024;
let mut factor = 1; // input
factor += 4; // RGBA output
factor += 1; // other data
typical_l1_cache_size / factor
};
for trns in trns_options.iter().copied() {
for bit_depth in bit_depths.iter().copied() {
bench_expand_palette(c, trns, bit_depth, input_size);
}
}
bench_create_fn(c, 256, 256); // Full PLTE and trNS
bench_create_fn(c, 224, 32); // Partial PLTE and trNS
bench_create_fn(c, 16, 1); // Guess: typical for small images?
}
criterion_group!(benches, expand_paletted_all);
criterion_main!(benches);
fn get_random_bytes<R: Rng>(rng: &mut R, n: usize) -> Vec<u8> {
use rand::Fill;
let mut result = vec![0u8; n];
result.as_mut_slice().try_fill(rng).unwrap();
result
}
struct Input {
palette: Vec<u8>,
trns: Option<Vec<u8>>,
src: Vec<u8>,
src_bit_depth: u8,
}
impl Input {
fn new(trns: TrnsPresence, src_bit_depth: u8, input_size_in_bytes: usize) -> Self {
let mut rng = rand::thread_rng();
// We provide RGB entries for 192 out of 256 possible indices and Alpha/Transparency
// entries for 32 out of 256 possible indices. Rationale for these numbers:
// * Oftentimes only a handful of colors at the edges of an icon need transparency
// * In general, code needs to handle out-of-bounds indices, so it seems desirable
// to explicitly test this.
let palette = get_random_bytes(&mut rng, 192.min(input_size_in_bytes) * 3);
let trns = match trns {
TrnsPresence::Absent => None,
TrnsPresence::Present => Some(get_random_bytes(&mut rng, 32.min(input_size_in_bytes))),
};
let src = get_random_bytes(&mut rng, input_size_in_bytes);
Self {
palette,
trns,
src,
src_bit_depth,
}
}
fn output_size_in_bytes(&self) -> usize {
let output_bytes_per_input_sample = match self.trns {
None => 3,
Some(_) => 4,
};
let samples_count_per_byte = (8 / self.src_bit_depth) as usize;
let samples_count = self.src.len() * samples_count_per_byte;
samples_count * output_bytes_per_input_sample
}
fn to_info(&self) -> Info {
create_info_from_plte_trns_bitdepth(&self.palette, self.trns.as_deref(), self.src_bit_depth)
}
}
#[inline(always)]
fn create_expand_palette_fn(info: &Info) -> TransformFn {
create_transform_fn(info, Transformations::EXPAND).unwrap()
}
fn bench_create_fn(c: &mut Criterion, plte_size: usize, trns_size: usize) {
let mut group = c.benchmark_group("expand_paletted(ctor)");
group.sample_size(1000);
let mut rng = rand::thread_rng();
let plte = get_random_bytes(&mut rng, 3 * plte_size as usize);
let trns = get_random_bytes(&mut rng, trns_size as usize);
let info = create_info_from_plte_trns_bitdepth(&plte, Some(&trns), 8);
group.bench_with_input(
format!("plte={plte_size}/trns={trns_size:?}"),
&info,
|b, info| {
b.iter(|| create_expand_palette_fn(info));
},
);
}
fn bench_expand_palette(
c: &mut Criterion,
trns: TrnsPresence,
src_bit_depth: u8,
input_size_in_bytes: usize,
) {
let mut group = c.benchmark_group("expand_paletted(exec)");
let input = Input::new(trns, src_bit_depth, input_size_in_bytes);
let transform_fn = create_expand_palette_fn(&input.to_info());
group.throughput(Throughput::Bytes(input.output_size_in_bytes() as u64));
group.sample_size(500);
group.bench_with_input(
format!("{trns}/src_bits={src_bit_depth}/src_size={input_size_in_bytes}"),
&input,
|b, input| {
let mut output = vec![0; input.output_size_in_bytes()];
let info = input.to_info();
b.iter(|| {
transform_fn(input.src.as_slice(), output.as_mut_slice(), &info);
});
},
);
}

51
vendor/png/benches/unfilter.rs vendored Normal file

@@ -0,0 +1,51 @@
//! Usage example:
//!
//! ```
//! $ alias bench="rustup run nightly cargo bench"
//! $ bench --bench=unfilter --features=benchmarks -- --save-baseline my_baseline
//! ... tweak something, say the Sub filter ...
//! $ bench --bench=unfilter --features=benchmarks -- filter=Sub --baseline my_baseline
//! ```
use criterion::{criterion_group, criterion_main, Criterion, Throughput};
use png::benchable_apis::unfilter;
use png::Filter;
use rand::Rng;
fn unfilter_all(c: &mut Criterion) {
let bpps = [1, 2, 3, 4, 6, 8];
let filters = [Filter::Sub, Filter::Up, Filter::Avg, Filter::Paeth];
for &filter in filters.iter() {
for &bpp in bpps.iter() {
bench_unfilter(c, filter, bpp);
}
}
}
criterion_group!(benches, unfilter_all);
criterion_main!(benches);
fn bench_unfilter(c: &mut Criterion, filter: Filter, bpp: u8) {
let mut group = c.benchmark_group("unfilter");
fn get_random_bytes<R: Rng>(rng: &mut R, n: usize) -> Vec<u8> {
use rand::Fill;
let mut result = vec![0u8; n];
result.as_mut_slice().try_fill(rng).unwrap();
result
}
let mut rng = rand::thread_rng();
let row_size = 4096 * (bpp as usize);
let two_rows = get_random_bytes(&mut rng, row_size * 2);
group.throughput(Throughput::Bytes(row_size as u64));
group.bench_with_input(
format!("filter={filter:?}/bpp={bpp}"),
&two_rows,
|b, two_rows| {
let (prev_row, curr_row) = two_rows.split_at(row_size);
let mut curr_row = curr_row.to_vec();
b.iter(|| unfilter(filter, bpp, prev_row, curr_row.as_mut_slice()));
},
);
}

53
vendor/png/examples/change-png-info.rs vendored Normal file

@@ -0,0 +1,53 @@
//! Tests "editing"/re-encoding of an image:
//! decoding, editing, re-encoding
use std::fs::File;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use png::DecodingError::LimitsExceeded;
pub type BoxResult<T> = Result<T, Box<dyn std::error::Error + Send + Sync>>;
fn main() -> BoxResult<()> {
// # Decode
// Read test image from pngsuite
let path_in = Path::new(r"./tests/pngsuite/basi0g01.png");
// The decoder is a builder for a reader and can be used to set various decoding options
// via `Transformations`. The default output transformation is `Transformations::IDENTITY`.
let decoder = png::Decoder::new(BufReader::new(File::open(path_in)?));
let mut reader = decoder.read_info()?;
// Allocate the output buffer.
let png_info = reader.info();
let mut buf = vec![0; reader.output_buffer_size().ok_or(LimitsExceeded)?];
println!("{png_info:?}");
// # Encode
let path_out = Path::new(r"./target/test_modified.png");
let file = File::create(path_out)?;
let w = &mut BufWriter::new(file);
// Get defaults for interlaced parameter.
let mut info_out = png_info.clone();
let info_default = png::Info::default();
// Edit previous info
info_out.interlaced = info_default.interlaced;
let mut encoder = png::Encoder::with_info(w, info_out)?;
encoder.set_depth(png_info.bit_depth);
// Edit some attribute
encoder.add_text_chunk(
"Testing tEXt".to_string(),
"This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
)?;
// Save picture with changed info
let mut writer = encoder.write_header()?;
let mut counter = 0u8;
while let Ok(info) = reader.next_frame(&mut buf) {
let bytes = &buf[..info.buffer_size()];
writer.write_image_data(bytes)?;
counter += 1;
println!("Written frame: {}", counter);
}
Ok(())
}

194
vendor/png/examples/corpus-bench.rs vendored Normal file

@@ -0,0 +1,194 @@
use std::{fs, io::Cursor, path::PathBuf};
use clap::Parser;
use png::Decoder;
#[derive(clap::ValueEnum, Clone)]
enum Speed {
Fast,
Default,
Best,
}
#[derive(clap::ValueEnum, Clone)]
enum Filter {
None,
Sub,
Up,
Average,
Paeth,
Adaptive,
}
#[derive(clap::Parser)]
struct Args {
directory: Option<PathBuf>,
#[clap(short, long, value_enum, default_value_t = Speed::Fast)]
speed: Speed,
#[clap(short, long, value_enum, default_value_t = Filter::Adaptive)]
filter: Filter,
}
#[inline(never)]
fn run_encode(
args: &Args,
dimensions: (u32, u32),
color_type: png::ColorType,
bit_depth: png::BitDepth,
image: &[u8],
) -> Vec<u8> {
let mut reencoded = Vec::new();
let mut encoder = png::Encoder::new(&mut reencoded, dimensions.0, dimensions.1);
encoder.set_color(color_type);
encoder.set_depth(bit_depth);
encoder.set_compression(match args.speed {
Speed::Fast => png::Compression::Fast,
Speed::Default => png::Compression::Balanced,
Speed::Best => png::Compression::High,
});
encoder.set_filter(match args.filter {
Filter::None => png::Filter::NoFilter,
Filter::Sub => png::Filter::Sub,
Filter::Up => png::Filter::Up,
Filter::Average => png::Filter::Avg,
Filter::Paeth => png::Filter::Paeth,
Filter::Adaptive => png::Filter::Adaptive,
});
let mut encoder = encoder.write_header().unwrap();
encoder.write_image_data(image).unwrap();
encoder.finish().unwrap();
reencoded
}
#[inline(never)]
fn run_decode(image: &[u8], output: &mut [u8]) {
let mut reader = Decoder::new(Cursor::new(image)).read_info().unwrap();
reader.next_frame(output).unwrap();
}
fn main() {
let mut total_uncompressed = 0;
let mut total_compressed = 0;
let mut total_pixels = 0;
let mut total_encode_time = 0;
let mut total_decode_time = 0;
let args = Args::parse();
println!(
"{:45} Ratio Encode Decode",
"Directory"
);
println!(
"{:45}------- -------------------- --------------------",
"---------"
);
let mut image2 = Vec::new();
let mut pending = vec![args.directory.clone().unwrap_or(PathBuf::from("."))];
while let Some(directory) = pending.pop() {
let mut dir_uncompressed = 0;
let mut dir_compressed = 0;
let mut dir_pixels = 0;
let mut dir_encode_time = 0;
let mut dir_decode_time = 0;
for entry in fs::read_dir(&directory).unwrap().flatten() {
if entry.file_type().unwrap().is_dir() {
pending.push(entry.path());
continue;
}
match entry.path().extension() {
Some(st) if st == "png" => {}
_ => continue,
}
// Parse
let data = fs::read(entry.path()).unwrap();
let mut decoder = Decoder::new(Cursor::new(&*data));
if decoder.read_header_info().ok().map(|h| h.color_type)
== Some(png::ColorType::Indexed)
{
decoder.set_transformations(
png::Transformations::EXPAND | png::Transformations::STRIP_16,
);
}
let mut reader = match decoder.read_info() {
Ok(reader) => reader,
Err(_) => continue,
};
let mut image = vec![0; reader.output_buffer_size().unwrap()];
let info = match reader.next_frame(&mut image) {
Ok(info) => info,
Err(_) => continue,
};
let (width, height) = (info.width, info.height);
let bit_depth = info.bit_depth;
let mut color_type = info.color_type;
// qoibench expands grayscale to RGB, so we do the same.
if bit_depth == png::BitDepth::Eight {
if color_type == png::ColorType::Grayscale {
image = image.into_iter().flat_map(|v| [v, v, v, 255]).collect();
color_type = png::ColorType::Rgba;
} else if color_type == png::ColorType::GrayscaleAlpha {
image = image
.chunks_exact(2)
.flat_map(|v| [v[0], v[0], v[0], v[1]])
.collect();
color_type = png::ColorType::Rgba;
}
}
// Re-encode
let start = std::time::Instant::now();
let reencoded = run_encode(&args, (width, height), color_type, bit_depth, &image);
let elapsed = start.elapsed().as_nanos() as u64;
// And decode again
image2.resize(image.len(), 0);
let start2 = std::time::Instant::now();
run_decode(&reencoded, &mut image2);
let elapsed2 = start2.elapsed().as_nanos() as u64;
assert_eq!(image, image2);
// Stats
dir_uncompressed += image.len();
dir_compressed += reencoded.len();
dir_pixels += (width * height) as u64;
dir_encode_time += elapsed;
dir_decode_time += elapsed2;
}
if dir_uncompressed > 0 {
println!(
"{:45}{:6.2}%{:8} mps {:6.2} GiB/s {:8} mps {:6.2} GiB/s",
directory.display(),
100.0 * dir_compressed as f64 / dir_uncompressed as f64,
dir_pixels * 1000 / dir_encode_time,
dir_uncompressed as f64 / (dir_encode_time as f64 * 1e-9 * (1 << 30) as f64),
dir_pixels * 1000 / dir_decode_time,
dir_uncompressed as f64 / (dir_decode_time as f64 * 1e-9 * (1 << 30) as f64)
);
}
total_uncompressed += dir_uncompressed;
total_compressed += dir_compressed;
total_pixels += dir_pixels;
total_encode_time += dir_encode_time;
total_decode_time += dir_decode_time;
}
println!();
println!(
"{:44}{:7.3}%{:8} mps {:6.3} GiB/s {:8} mps {:6.3} GiB/s",
"Total",
100.0 * total_compressed as f64 / total_uncompressed as f64,
total_pixels * 1000 / total_encode_time,
total_uncompressed as f64 / (total_encode_time as f64 * 1e-9 * (1 << 30) as f64),
total_pixels * 1000 / total_decode_time,
total_uncompressed as f64 / (total_decode_time as f64 * 1e-9 * (1 << 30) as f64)
);
}

55
vendor/png/examples/png-generate.rs vendored Normal file

@@ -0,0 +1,55 @@
// For reading and opening files
use png::text_metadata::{ITXtChunk, ZTXtChunk};
use std::env;
use std::fs::File;
use std::io::BufWriter;
fn main() {
let path = env::args()
.nth(1)
.expect("Expected a filename to output to.");
let file = File::create(path).unwrap();
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
encoder.set_color(png::ColorType::Rgba);
encoder.set_depth(png::BitDepth::Eight);
// Adding text chunks to the header
encoder
.add_text_chunk(
"Testing tEXt".to_string(),
"This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
)
.unwrap();
encoder
.add_ztxt_chunk(
"Testing zTXt".to_string(),
"This is a zTXt chunk that is compressed in the png file.".to_string(),
)
.unwrap();
encoder
.add_itxt_chunk(
"Testing iTXt".to_string(),
"iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
)
.unwrap();
let mut writer = encoder.write_header().unwrap();
let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing a RGBA sequence. First pixel is red and second pixel is black.
writer.write_image_data(&data).unwrap(); // Save
// We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
let tail_ztxt_chunk = ZTXtChunk::new(
"Comment".to_string(),
"A zTXt chunk after the image data.".to_string(),
);
writer.write_text_chunk(&tail_ztxt_chunk).unwrap();
// The fields of the text chunk are public, so they can be mutated before being written to the file.
let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
tail_itxt_chunk.compressed = true;
tail_itxt_chunk.language_tag = "hi".to_string();
tail_itxt_chunk.translated_keyword = "लेखक".to_string();
writer.write_text_chunk(&tail_itxt_chunk).unwrap();
}

295
vendor/png/examples/pngcheck.rs vendored Normal file

@@ -0,0 +1,295 @@
#![allow(non_upper_case_globals)]
use std::fs::File;
use std::io;
use std::io::prelude::*;
use std::path::{Path, PathBuf};
use clap::Parser;
use png::chunk;
#[derive(Parser)]
#[command(about, version)]
struct Config {
/// test quietly (output only errors)
#[arg(short, long)]
quiet: bool,
/// test verbosely (print most chunk data)
#[arg(short, long)]
verbose: bool,
/// print contents of tEXt/zTXt/iTXt chunks (can be used with -q)
#[arg(short, long)]
text: bool,
paths: Vec<PathBuf>,
}
fn display_interlaced(i: bool) -> &'static str {
if i {
"interlaced"
} else {
"non-interlaced"
}
}
fn display_image_type(bits: u8, color: png::ColorType) -> String {
use png::ColorType::*;
format!(
"{}-bit {}",
bits,
match color {
Grayscale => "grayscale",
Rgb => "RGB",
Indexed => "palette",
GrayscaleAlpha => "grayscale+alpha",
Rgba => "RGB+alpha",
}
)
}
// channels after expansion of tRNS
fn final_channels(c: png::ColorType, trns: bool) -> u8 {
use png::ColorType::*;
match c {
Grayscale => 1 + u8::from(trns),
Rgb => 3,
Indexed => 3 + u8::from(trns),
GrayscaleAlpha => 2,
Rgba => 4,
}
}
fn check_image<P: AsRef<Path>>(c: &Config, fname: P) -> io::Result<()> {
// TODO improve performance by reusing allocations from decoder
use png::Decoded::*;
let data = &mut vec![0; 10 * 1024][..];
let mut reader = io::BufReader::new(File::open(&fname)?);
let fname = fname.as_ref().to_string_lossy();
let n = reader.read(data)?;
let mut buf = &data[..n];
let mut pos = 0;
let mut decoder = png::StreamingDecoder::new();
// Image data
let mut width = 0;
let mut height = 0;
let mut color = png::ColorType::Grayscale;
let mut bits = 0;
let mut trns = false;
let mut interlaced = false;
let mut compressed_size = 0;
let mut n_chunks = 0;
let mut have_idat = false;
macro_rules! c_ratio(
// TODO add palette entries to compressed_size
() => ({
compressed_size as f32/(
height as u64 *
(width as u64 * final_channels(color, trns) as u64 * bits as u64 + 7)>>3
) as f32
});
);
let display_error = |err| -> Result<_, io::Error> {
if c.verbose {
println!(": {}", err);
print!("ERRORS DETECTED");
println!(" in {}", fname);
} else {
if !c.quiet {
println!("ERROR: {}", fname)
}
print!("{}: ", fname);
println!("{}", err);
}
Ok(())
};
if c.verbose {
print!("File: ");
print!("{}", fname);
print!(" ({}) bytes", data.len())
}
loop {
if buf.is_empty() {
// circumvent borrow checker
assert!(!data.is_empty());
let n = reader.read(data)?;
// EOF
if n == 0 {
println!("ERROR: premature end of file {}", fname);
break;
}
buf = &data[..n];
}
match decoder.update(buf, None) {
Ok((_, ChunkComplete(chunk::IEND))) => {
if !have_idat {
// This isn't beautiful. But it works.
display_error(png::DecodingError::IoError(io::Error::new(
io::ErrorKind::InvalidData,
"IDAT chunk missing",
)))?;
break;
}
if !c.verbose && !c.quiet {
print!("OK: {}", fname);
println!(
" ({}x{}, {}{}, {}, {:.1}%)",
width,
height,
display_image_type(bits, color),
(if trns { "+trns" } else { "" }),
display_interlaced(interlaced),
100.0 * (1.0 - c_ratio!())
)
} else if !c.quiet {
println!();
print!("No errors detected ");
println!(
"in {} ({} chunks, {:.1}% compression)",
fname,
n_chunks,
100.0 * (1.0 - c_ratio!()),
)
}
break;
}
Ok((n, res)) => {
buf = &buf[n..];
pos += n;
match res {
ChunkBegin(len, type_str) => {
n_chunks += 1;
if c.verbose {
let chunk = type_str;
println!();
print!(" chunk ");
print!("{:?}", chunk);
print!(
" at offset {:#07x}, length {}",
pos - 4, // subtract chunk name length
len
)
}
match type_str {
chunk::IDAT => {
have_idat = true;
compressed_size += len
}
chunk::tRNS => {
trns = true;
}
_ => (),
}
}
ChunkComplete(chunk::IHDR) => {
let info = decoder.info().unwrap();
width = info.width;
height = info.height;
bits = info.bit_depth as u8;
color = info.color_type;
interlaced = info.interlaced;
if c.verbose {
println!();
print!(
" {} x {} image, {}{}, {}",
width,
height,
display_image_type(bits, color),
(if trns { "+trns" } else { "" }),
display_interlaced(interlaced),
);
}
}
ChunkComplete(chunk::acTL) => {
let actl = decoder.info().unwrap().animation_control.unwrap();
println!();
print!(" {} frames, {} plays", actl.num_frames, actl.num_plays,);
}
ChunkComplete(chunk::fdAT) => {
let fctl = decoder.info().unwrap().frame_control.unwrap();
println!();
println!(
" sequence #{}, {} x {} pixels @ ({}, {})",
fctl.sequence_number,
fctl.width,
fctl.height,
fctl.x_offset,
fctl.y_offset,
/*fctl.delay_num,
fctl.delay_den,
fctl.dispose_op,
fctl.blend_op,*/
);
print!(
" {}/{} s delay, dispose: {}, blend: {}",
fctl.delay_num,
if fctl.delay_den == 0 {
100
} else {
fctl.delay_den
},
fctl.dispose_op,
fctl.blend_op,
);
}
ImageData => {
//println!("got {} bytes of image data", data.len())
}
_ => (),
}
//println!("{} {:?}", n, res)
}
Err(err) => {
let _ = display_error(err);
break;
}
}
}
if c.text {
println!("Parsed tEXt chunks:");
for text_chunk in &decoder.info().unwrap().uncompressed_latin1_text {
println!("{:#?}", text_chunk);
}
println!("Parsed zTXt chunks:");
for text_chunk in &decoder.info().unwrap().compressed_latin1_text {
let mut cloned_text_chunk = text_chunk.clone();
cloned_text_chunk.decompress_text()?;
println!("{:#?}", cloned_text_chunk);
}
println!("Parsed iTXt chunks:");
for text_chunk in &decoder.info().unwrap().utf8_text {
let mut cloned_text_chunk = text_chunk.clone();
cloned_text_chunk.decompress_text()?;
println!("{:#?}", cloned_text_chunk);
}
}
Ok(())
}
fn main() {
let config = Config::parse();
for file in &config.paths {
let result = if let Some(glob) = file.to_str().filter(|n| n.contains('*')) {
glob::glob(glob)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.and_then(|mut glob| {
glob.try_for_each(|entry| {
entry
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
.and_then(|file| check_image(&config, file))
})
})
} else {
check_image(&config, &file)
};
result.unwrap_or_else(|err| {
println!("{}: {}", file.display(), err);
std::process::exit(1)
});
}
}

1023
vendor/png/src/adam7.rs vendored Normal file

File diff suppressed because it is too large

48
vendor/png/src/benchable_apis.rs vendored Normal file

@@ -0,0 +1,48 @@
//! Development-time-only helper module for exporting private APIs so that they can be benchmarked.
//! This module is gated behind the "benchmarks" feature.
use crate::adam7::{expand_pass, Adam7Iterator};
use crate::common::BytesPerPixel;
use crate::filter::{Filter, RowFilter};
use crate::{BitDepth, ColorType, Info};
/// Re-exporting `unfilter` to make it easier to benchmark, despite some items being only
/// `pub(crate)`: `fn unfilter`, `enum BytesPerPixel`.
pub fn unfilter(filter: Filter, tbpp: u8, previous: &[u8], current: &mut [u8]) {
let filter = RowFilter::from_method(filter).unwrap(); // RowFilter type is private
let tbpp = BytesPerPixel::from_usize(tbpp as usize);
crate::filter::unfilter(filter, tbpp, previous, current)
}
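// A minimal sketch (not part of the original file) of what the wrapper above
// computes for the Sub filter: each output byte adds back the byte `bpp`
// positions earlier, so with bpp = 1 unfiltering is a running sum.
#[cfg(test)]
mod unfilter_sketch {
    use crate::Filter;

    #[test]
    fn sub_is_a_running_sum_for_bpp_1() {
        let previous = [0u8; 4]; // the Sub filter ignores the previous row
        let mut current = [1u8, 2, 3, 4];
        super::unfilter(Filter::Sub, 1, &previous, &mut current);
        assert_eq!(current, [1, 3, 6, 10]);
    }
}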
pub fn adam7(img: &mut [u8], buffer: &[u8], width: u32, height: u32, bpp: u8) {
fn bytes_of_width(width: u32, bpp: u8) -> usize {
let total = (u64::from(width) * u64::from(bpp)).div_ceil(8);
usize::try_from(total).unwrap()
}
let img_row_stride = bytes_of_width(width, bpp);
for adam7 in Adam7Iterator::new(width, height) {
// We use the same buffer for all interlace passes to avoid counting its creation time in
// the benchmark. But the expansion expects a slice of the correct length, so make sure we
// pass the right one. As of this writing the implementation is not sensitive to this, but
// it may become so.
let used_bytes = bytes_of_width(adam7.width, bpp);
expand_pass(img, img_row_stride, &buffer[..used_bytes], &adam7, bpp);
}
}
pub use crate::decoder::transform::{create_transform_fn, TransformFn};
pub fn create_info_from_plte_trns_bitdepth<'a>(
plte: &'a [u8],
trns: Option<&'a [u8]>,
bit_depth: u8,
) -> Info<'a> {
Info {
color_type: ColorType::Indexed,
bit_depth: BitDepth::from_u8(bit_depth).unwrap(),
palette: Some(plte.into()),
trns: trns.map(Into::into),
..Info::default()
}
}

108
vendor/png/src/chunk.rs vendored Normal file

@@ -0,0 +1,108 @@
//! Chunk types and functions
#![allow(dead_code)]
#![allow(non_upper_case_globals)]
use core::fmt;
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct ChunkType(pub [u8; 4]);
// -- Critical chunks --
/// Image header
pub const IHDR: ChunkType = ChunkType(*b"IHDR");
/// Palette
pub const PLTE: ChunkType = ChunkType(*b"PLTE");
/// Image data
pub const IDAT: ChunkType = ChunkType(*b"IDAT");
/// Image trailer
pub const IEND: ChunkType = ChunkType(*b"IEND");
// -- Ancillary chunks --
/// Transparency
pub const tRNS: ChunkType = ChunkType(*b"tRNS");
/// Background colour
pub const bKGD: ChunkType = ChunkType(*b"bKGD");
/// Image last-modification time
pub const tIME: ChunkType = ChunkType(*b"tIME");
/// Physical pixel dimensions
pub const pHYs: ChunkType = ChunkType(*b"pHYs");
/// Source system's pixel chromaticities
pub const cHRM: ChunkType = ChunkType(*b"cHRM");
/// Source system's gamma value
pub const gAMA: ChunkType = ChunkType(*b"gAMA");
/// sRGB color space chunk
pub const sRGB: ChunkType = ChunkType(*b"sRGB");
/// ICC profile chunk
pub const iCCP: ChunkType = ChunkType(*b"iCCP");
/// Coding-independent code points for video signal type identification chunk
pub const cICP: ChunkType = ChunkType(*b"cICP");
/// Mastering Display Color Volume chunk
pub const mDCV: ChunkType = ChunkType(*b"mDCV");
/// Content Light Level Information chunk
pub const cLLI: ChunkType = ChunkType(*b"cLLI");
/// EXIF metadata chunk
pub const eXIf: ChunkType = ChunkType(*b"eXIf");
/// Latin-1 uncompressed textual data
pub const tEXt: ChunkType = ChunkType(*b"tEXt");
/// Latin-1 compressed textual data
pub const zTXt: ChunkType = ChunkType(*b"zTXt");
/// UTF-8 textual data
pub const iTXt: ChunkType = ChunkType(*b"iTXt");
// Significant bits
pub const sBIT: ChunkType = ChunkType(*b"sBIT");
// -- Extension chunks --
/// Animation control
pub const acTL: ChunkType = ChunkType(*b"acTL");
/// Frame control
pub const fcTL: ChunkType = ChunkType(*b"fcTL");
/// Frame data
pub const fdAT: ChunkType = ChunkType(*b"fdAT");
// -- Chunk type determination --
/// Returns true if the chunk is critical.
pub fn is_critical(ChunkType(type_): ChunkType) -> bool {
type_[0] & 32 == 0
}
/// Returns true if the chunk is private.
pub fn is_private(ChunkType(type_): ChunkType) -> bool {
type_[1] & 32 != 0
}
/// Checks whether the reserved bit of the chunk name is set.
/// If it is set the chunk name is invalid.
pub fn reserved_set(ChunkType(type_): ChunkType) -> bool {
type_[2] & 32 != 0
}
/// Returns true if the chunk is safe to copy if unknown.
pub fn safe_to_copy(ChunkType(type_): ChunkType) -> bool {
type_[3] & 32 != 0
}
impl fmt::Debug for ChunkType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
struct DebugType([u8; 4]);
impl fmt::Debug for DebugType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
for &c in &self.0[..] {
write!(f, "{}", char::from(c).escape_debug())?;
}
Ok(())
}
}
f.debug_struct("ChunkType")
.field("type", &DebugType(self.0))
.field("critical", &is_critical(*self))
.field("private", &is_private(*self))
.field("reserved", &reserved_set(*self))
.field("safecopy", &safe_to_copy(*self))
.finish()
}
}
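// A minimal sketch (not part of the original file) of the property-bit rules
// above: the case of each byte in a chunk name encodes one flag.
#[cfg(test)]
mod property_bit_sketch {
    use super::*;

    #[test]
    fn ihdr_is_critical_public_and_unsafe_to_copy() {
        assert!(is_critical(IHDR)); // 'I' is uppercase
        assert!(!is_private(IHDR)); // 'H' is uppercase
        assert!(!reserved_set(IHDR)); // 'D' is uppercase
        assert!(!safe_to_copy(IHDR)); // 'R' is uppercase
    }

    #[test]
    fn text_is_ancillary_and_safe_to_copy() {
        assert!(!is_critical(tEXt)); // leading 't' is lowercase
        assert!(safe_to_copy(tEXt)); // trailing 't' is lowercase
    }
}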

971
vendor/png/src/common.rs vendored Normal file

@@ -0,0 +1,971 @@
//! Common types shared between the encoder and decoder
use crate::text_metadata::{ITXtChunk, TEXtChunk, ZTXtChunk};
#[allow(unused_imports)] // used by doc comments only
use crate::Filter;
use crate::{chunk, encoder};
use io::Write;
use std::{borrow::Cow, convert::TryFrom, fmt, io};
/// Describes how a pixel is encoded.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum ColorType {
/// 1 grayscale sample.
Grayscale = 0,
/// 1 red sample, 1 green sample, 1 blue sample.
Rgb = 2,
/// 1 sample for the palette index.
Indexed = 3,
/// 1 grayscale sample, then 1 alpha sample.
GrayscaleAlpha = 4,
/// 1 red sample, 1 green sample, 1 blue sample, and finally, 1 alpha sample.
Rgba = 6,
}
impl ColorType {
/// Returns the number of samples used per pixel encoded in this way.
pub fn samples(self) -> usize {
self.samples_u8().into()
}
pub(crate) fn samples_u8(self) -> u8 {
use self::ColorType::*;
match self {
Grayscale | Indexed => 1,
Rgb => 3,
GrayscaleAlpha => 2,
Rgba => 4,
}
}
/// u8 -> Self. Temporary solution until Rust provides a canonical one.
pub fn from_u8(n: u8) -> Option<ColorType> {
match n {
0 => Some(ColorType::Grayscale),
2 => Some(ColorType::Rgb),
3 => Some(ColorType::Indexed),
4 => Some(ColorType::GrayscaleAlpha),
6 => Some(ColorType::Rgba),
_ => None,
}
}
pub(crate) fn checked_raw_row_length(self, depth: BitDepth, width: u32) -> Option<usize> {
// No overflow can occur in 64 bits, we multiply 32-bit with 5 more bits.
let bits = u64::from(width) * u64::from(self.samples_u8()) * u64::from(depth.into_u8());
TryFrom::try_from(1 + (bits + 7) / 8).ok()
}
pub(crate) fn raw_row_length_from_width(self, depth: BitDepth, width: u32) -> usize {
let samples = width as usize * self.samples();
1 + match depth {
BitDepth::Sixteen => samples * 2,
BitDepth::Eight => samples,
subbyte => {
let samples_per_byte = 8 / subbyte as usize;
let whole = samples / samples_per_byte;
let fract = usize::from(samples % samples_per_byte > 0);
whole + fract
}
}
}
pub(crate) fn is_combination_invalid(self, bit_depth: BitDepth) -> bool {
// Section 11.2.2 of the PNG standard disallows several combinations
// of bit depth and color type
((bit_depth == BitDepth::One || bit_depth == BitDepth::Two || bit_depth == BitDepth::Four)
&& (self == ColorType::Rgb
|| self == ColorType::GrayscaleAlpha
|| self == ColorType::Rgba))
|| (bit_depth == BitDepth::Sixteen && self == ColorType::Indexed)
}
pub(crate) fn bits_per_pixel(&self, bit_depth: BitDepth) -> usize {
self.samples() * bit_depth as usize
}
pub(crate) fn bytes_per_pixel(&self, bit_depth: BitDepth) -> usize {
// If adjusting this for expansion or other transformation passes, remember to keep the old
// implementation for bpp_in_prediction, which is internal to the png specification.
self.samples() * ((bit_depth as usize + 7) >> 3)
}
}
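// A small sketch (not part of the original file) of the row-length math above:
// one filter byte plus ceil(width * samples * bit_depth / 8) data bytes.
#[cfg(test)]
mod raw_row_length_sketch {
    use super::{BitDepth, ColorType};

    #[test]
    fn rows_include_the_filter_byte() {
        // 3 RGB pixels at 8 bits: 9 data bytes + 1 filter byte.
        assert_eq!(ColorType::Rgb.raw_row_length_from_width(BitDepth::Eight, 3), 10);
        // 10 grayscale pixels at 1 bit: 10 bits round up to 2 bytes, + 1 filter byte.
        assert_eq!(ColorType::Grayscale.raw_row_length_from_width(BitDepth::One, 10), 3);
    }
}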
/// Bit depth of the PNG file.
/// Specifies the number of bits per sample.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BitDepth {
One = 1,
Two = 2,
Four = 4,
Eight = 8,
Sixteen = 16,
}
/// Internal count of bytes per pixel.
/// This is used for filtering which never uses sub-byte units. This essentially reduces the number
/// of possible byte chunk lengths to a very small set of values appropriate to be defined as an
/// enum.
#[derive(Debug, Clone, Copy)]
#[repr(u8)]
pub(crate) enum BytesPerPixel {
One = 1,
Two = 2,
Three = 3,
Four = 4,
Six = 6,
Eight = 8,
}
impl BitDepth {
/// u8 -> Self. Temporary solution until Rust provides a canonical one.
pub fn from_u8(n: u8) -> Option<BitDepth> {
match n {
1 => Some(BitDepth::One),
2 => Some(BitDepth::Two),
4 => Some(BitDepth::Four),
8 => Some(BitDepth::Eight),
16 => Some(BitDepth::Sixteen),
_ => None,
}
}
pub(crate) fn into_u8(self) -> u8 {
self as u8
}
}
/// Pixel dimensions information
#[derive(Clone, Copy, Debug)]
pub struct PixelDimensions {
/// Pixels per unit, X axis
pub xppu: u32,
/// Pixels per unit, Y axis
pub yppu: u32,
/// Either *Meter* or *Unspecified*
pub unit: Unit,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
/// Physical unit of the pixel dimensions
pub enum Unit {
Unspecified = 0,
Meter = 1,
}
impl Unit {
/// u8 -> Self. Temporary solution until Rust provides a canonical one.
pub fn from_u8(n: u8) -> Option<Unit> {
match n {
0 => Some(Unit::Unspecified),
1 => Some(Unit::Meter),
_ => None,
}
}
}
/// How to reset buffer of an animated png (APNG) at the end of a frame.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum DisposeOp {
/// Leave the buffer unchanged.
None = 0,
/// Clear buffer with the background color.
Background = 1,
/// Reset the buffer to the state before the current frame.
Previous = 2,
}
impl DisposeOp {
/// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
pub fn from_u8(n: u8) -> Option<DisposeOp> {
match n {
0 => Some(DisposeOp::None),
1 => Some(DisposeOp::Background),
2 => Some(DisposeOp::Previous),
_ => None,
}
}
}
impl fmt::Display for DisposeOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let name = match *self {
DisposeOp::None => "DISPOSE_OP_NONE",
DisposeOp::Background => "DISPOSE_OP_BACKGROUND",
DisposeOp::Previous => "DISPOSE_OP_PREVIOUS",
};
write!(f, "{}", name)
}
}
/// How pixels are written into the buffer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum BlendOp {
/// Pixels overwrite the value at their position.
Source = 0,
/// The new pixels are blended into the current state based on alpha.
Over = 1,
}
impl BlendOp {
/// u8 -> Self. Using enum_primitive or transmute is probably the right thing but this will do for now.
pub fn from_u8(n: u8) -> Option<BlendOp> {
match n {
0 => Some(BlendOp::Source),
1 => Some(BlendOp::Over),
_ => None,
}
}
}
impl fmt::Display for BlendOp {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let name = match *self {
BlendOp::Source => "BLEND_OP_SOURCE",
BlendOp::Over => "BLEND_OP_OVER",
};
write!(f, "{}", name)
}
}
/// Frame control information
#[derive(Clone, Copy, Debug)]
pub struct FrameControl {
/// Sequence number of the animation chunk, starting from 0
pub sequence_number: u32,
/// Width of the following frame
pub width: u32,
/// Height of the following frame
pub height: u32,
/// X position at which to render the following frame
pub x_offset: u32,
/// Y position at which to render the following frame
pub y_offset: u32,
/// Frame delay fraction numerator
pub delay_num: u16,
/// Frame delay fraction denominator
pub delay_den: u16,
/// Type of frame area disposal to be done after rendering this frame
pub dispose_op: DisposeOp,
/// Type of frame area rendering for this frame
pub blend_op: BlendOp,
}
impl Default for FrameControl {
fn default() -> FrameControl {
FrameControl {
sequence_number: 0,
width: 0,
height: 0,
x_offset: 0,
y_offset: 0,
delay_num: 1,
delay_den: 30,
dispose_op: DisposeOp::None,
blend_op: BlendOp::Source,
}
}
}
impl FrameControl {
pub fn set_seq_num(&mut self, s: u32) {
self.sequence_number = s;
}
pub fn inc_seq_num(&mut self, i: u32) {
self.sequence_number += i;
}
pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
let mut data = [0u8; 26];
data[..4].copy_from_slice(&self.sequence_number.to_be_bytes());
data[4..8].copy_from_slice(&self.width.to_be_bytes());
data[8..12].copy_from_slice(&self.height.to_be_bytes());
data[12..16].copy_from_slice(&self.x_offset.to_be_bytes());
data[16..20].copy_from_slice(&self.y_offset.to_be_bytes());
data[20..22].copy_from_slice(&self.delay_num.to_be_bytes());
data[22..24].copy_from_slice(&self.delay_den.to_be_bytes());
data[24] = self.dispose_op as u8;
data[25] = self.blend_op as u8;
encoder::write_chunk(w, chunk::fcTL, &data)
}
}
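// A minimal sketch (not part of the original file): `encode` writes one complete
// fcTL chunk, i.e. 4 length bytes + 4 type bytes + 26 data bytes + 4 CRC bytes.
#[cfg(test)]
mod frame_control_sketch {
    use super::FrameControl;

    #[test]
    fn fctl_chunk_layout() {
        let mut out = Vec::new();
        FrameControl::default().encode(&mut out).unwrap();
        assert_eq!(out.len(), 4 + 4 + 26 + 4);
        assert_eq!(&out[4..8], b"fcTL");
    }
}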
/// Animation control information
#[derive(Clone, Copy, Debug)]
pub struct AnimationControl {
/// Number of frames
pub num_frames: u32,
/// Number of times to loop this APNG. 0 indicates infinite looping.
pub num_plays: u32,
}
impl AnimationControl {
pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
let mut data = [0; 8];
data[..4].copy_from_slice(&self.num_frames.to_be_bytes());
data[4..].copy_from_slice(&self.num_plays.to_be_bytes());
encoder::write_chunk(w, chunk::acTL, &data)
}
}
/// The type and strength of applied compression.
///
/// This is a simple, high-level interface that will automatically choose
/// the appropriate DEFLATE compression mode and PNG filter.
///
/// If you need more control over the encoding parameters,
/// you can set the [DeflateCompression] and [Filter] manually.
#[derive(Debug, Clone, Copy)]
#[non_exhaustive]
pub enum Compression {
/// No compression whatsoever. Fastest, but results in large files.
NoCompression,
/// Extremely fast but light compression.
///
/// Note: When used in streaming mode, this compression level can actually result in files
/// *larger* than would be produced by `NoCompression` on incompressible data because
/// it doesn't do any buffering of the output stream to detect whether the data is being compressed or not.
Fastest,
/// Extremely fast compression with a decent compression ratio.
///
/// Significantly outperforms libpng and other popular encoders by using a [specialized DEFLATE
/// implementation tuned for PNG](https://crates.io/crates/fdeflate), while still providing
/// better compression ratio than the fastest modes of other encoders.
///
/// Like `Compression::Fastest` this can currently produce files larger than `NoCompression` in
/// streaming mode when given incompressible data. This may change in the future.
Fast,
/// Balances encoding speed and compression ratio.
Balanced,
/// Spend much more time to produce a slightly smaller file than with `Balanced`.
High,
}
impl Default for Compression {
fn default() -> Self {
Self::Balanced
}
}
/// Advanced compression settings with more customization options than [Compression].
///
/// Note that this setting only affects DEFLATE compression.
/// Another setting that influences the compression ratio and lets you choose
/// between encoding speed and compression ratio is the [Filter].
///
/// ### Stability guarantees
///
/// The implementation details of DEFLATE compression may evolve over time,
/// even without a semver-breaking change to the version of the `png` crate.
///
/// If a certain compression setting is superseded by other options,
/// it may be marked deprecated and remapped to a different option.
/// You will see a deprecation notice when compiling code relying on such options.
#[non_exhaustive]
#[derive(Debug, Clone, Copy)]
pub enum DeflateCompression {
/// Do not compress the data at all.
///
/// Useful for incompressible images, or when speed is paramount and you don't care about size
/// at all.
///
/// This mode also disables filters, forcing [Filter::NoFilter].
NoCompression,
/// Excellent for creating lightly compressed PNG images very quickly.
///
/// Uses the [fdeflate](https://crates.io/crates/fdeflate) crate under the hood to achieve
/// speeds far exceeding what libpng is capable of while still providing a decent compression
/// ratio.
///
/// Note: When used in streaming mode, this compression level can actually result in files
/// *larger* than would be produced by `NoCompression` because it doesn't do any buffering of
/// the output stream to detect whether the data is being compressed or not.
FdeflateUltraFast,
/// Compression level between 1 and 9, where higher values mean better compression at the cost of
/// speed.
///
/// This is currently implemented via [flate2](https://crates.io/crates/flate2) crate
/// by passing through the [compression level](flate2::Compression::new).
///
/// The implementation details and the exact meaning of each level may change in the future,
/// including in semver-compatible releases.
Level(u8),
// Other variants can be added in the future
}
impl Default for DeflateCompression {
fn default() -> Self {
Self::from_simple(Compression::Balanced)
}
}
impl DeflateCompression {
pub(crate) fn from_simple(value: Compression) -> Self {
match value {
Compression::NoCompression => Self::NoCompression,
Compression::Fastest => Self::FdeflateUltraFast,
Compression::Fast => Self::FdeflateUltraFast,
Compression::Balanced => Self::Level(flate2::Compression::default().level() as u8),
Compression::High => Self::Level(flate2::Compression::best().level() as u8),
}
}
}
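// A small sketch (not part of the original file) of the simple-to-advanced
// mapping above: both fast modes select the fdeflate-based encoder.
#[cfg(test)]
mod compression_mapping_sketch {
    use super::{Compression, DeflateCompression};

    #[test]
    fn fast_modes_map_to_fdeflate() {
        assert!(matches!(
            DeflateCompression::from_simple(Compression::Fastest),
            DeflateCompression::FdeflateUltraFast
        ));
        assert!(matches!(
            DeflateCompression::from_simple(Compression::Fast),
            DeflateCompression::FdeflateUltraFast
        ));
    }
}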
/// An unsigned integer scaled version of a floating point value,
/// equivalent to an integer quotient with fixed denominator (100_000).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ScaledFloat(u32);
impl ScaledFloat {
const SCALING: f32 = 100_000.0;
/// Gets whether the value is within the clamped range of this type.
pub fn in_range(value: f32) -> bool {
value >= 0.0 && (value * Self::SCALING).floor() <= u32::MAX as f32
}
/// Gets whether the value can be exactly converted in round-trip.
#[allow(clippy::float_cmp)] // Stupid tool, the exact float compare is _the entire point_.
pub fn exact(value: f32) -> bool {
let there = Self::forward(value);
let back = Self::reverse(there);
value == back
}
fn forward(value: f32) -> u32 {
(value.max(0.0) * Self::SCALING).floor() as u32
}
fn reverse(encoded: u32) -> f32 {
encoded as f32 / Self::SCALING
}
/// Slightly inaccurate scaling and quantization.
/// Clamps the value into the representable range if it is negative or too large.
pub fn new(value: f32) -> Self {
Self(Self::forward(value))
}
/// Fully accurate construction from a value scaled as per specification.
pub fn from_scaled(val: u32) -> Self {
Self(val)
}
/// Get the accurate encoded value.
pub fn into_scaled(self) -> u32 {
self.0
}
/// Get the unscaled value as a floating point.
pub fn into_value(self) -> f32 {
Self::reverse(self.0)
}
pub(crate) fn encode_gama<W: Write>(self, w: &mut W) -> encoder::Result<()> {
encoder::write_chunk(w, chunk::gAMA, &self.into_scaled().to_be_bytes())
}
}
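// A minimal sketch (not part of the original file) of the fixed-point scheme
// above: values are stored as floor(value * 100_000).
#[cfg(test)]
mod scaled_float_sketch {
    use super::ScaledFloat;

    #[test]
    fn gamma_round_trip() {
        // A 1/2.2 gamma as it would appear in a gAMA chunk.
        let g = ScaledFloat::from_scaled(45_455);
        assert_eq!(g.into_scaled(), 45_455);
        assert!((g.into_value() - 0.45455).abs() < 1e-6);
    }
}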
/// Chromaticities of the color space primaries
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SourceChromaticities {
pub white: (ScaledFloat, ScaledFloat),
pub red: (ScaledFloat, ScaledFloat),
pub green: (ScaledFloat, ScaledFloat),
pub blue: (ScaledFloat, ScaledFloat),
}
impl SourceChromaticities {
pub fn new(white: (f32, f32), red: (f32, f32), green: (f32, f32), blue: (f32, f32)) -> Self {
SourceChromaticities {
white: (ScaledFloat::new(white.0), ScaledFloat::new(white.1)),
red: (ScaledFloat::new(red.0), ScaledFloat::new(red.1)),
green: (ScaledFloat::new(green.0), ScaledFloat::new(green.1)),
blue: (ScaledFloat::new(blue.0), ScaledFloat::new(blue.1)),
}
}
#[rustfmt::skip]
pub fn to_be_bytes(self) -> [u8; 32] {
let white_x = self.white.0.into_scaled().to_be_bytes();
let white_y = self.white.1.into_scaled().to_be_bytes();
let red_x = self.red.0.into_scaled().to_be_bytes();
let red_y = self.red.1.into_scaled().to_be_bytes();
let green_x = self.green.0.into_scaled().to_be_bytes();
let green_y = self.green.1.into_scaled().to_be_bytes();
let blue_x = self.blue.0.into_scaled().to_be_bytes();
let blue_y = self.blue.1.into_scaled().to_be_bytes();
[
white_x[0], white_x[1], white_x[2], white_x[3],
white_y[0], white_y[1], white_y[2], white_y[3],
red_x[0], red_x[1], red_x[2], red_x[3],
red_y[0], red_y[1], red_y[2], red_y[3],
green_x[0], green_x[1], green_x[2], green_x[3],
green_y[0], green_y[1], green_y[2], green_y[3],
blue_x[0], blue_x[1], blue_x[2], blue_x[3],
blue_y[0], blue_y[1], blue_y[2], blue_y[3],
]
}
pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
encoder::write_chunk(w, chunk::cHRM, &self.to_be_bytes())
}
}
/// The rendering intent for an sRGB image.
///
/// Presence of this data also indicates that the image conforms to the sRGB color space.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SrgbRenderingIntent {
/// For images preferring good adaptation to the output device gamut at the expense of colorimetric accuracy, such as photographs.
Perceptual = 0,
/// For images requiring colour appearance matching (relative to the output device white point), such as logos.
RelativeColorimetric = 1,
/// For images preferring preservation of saturation at the expense of hue and lightness, such as charts and graphs.
Saturation = 2,
/// For images requiring preservation of absolute colorimetry, such as previews of images destined for a different output device (proofs).
AbsoluteColorimetric = 3,
}
impl SrgbRenderingIntent {
pub(crate) fn into_raw(self) -> u8 {
self as u8
}
pub(crate) fn from_raw(raw: u8) -> Option<Self> {
match raw {
0 => Some(SrgbRenderingIntent::Perceptual),
1 => Some(SrgbRenderingIntent::RelativeColorimetric),
2 => Some(SrgbRenderingIntent::Saturation),
3 => Some(SrgbRenderingIntent::AbsoluteColorimetric),
_ => None,
}
}
pub fn encode<W: Write>(self, w: &mut W) -> encoder::Result<()> {
encoder::write_chunk(w, chunk::sRGB, &[self.into_raw()])
}
}
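// A small sketch (not part of the original file): the rendering intent maps to
// and from the single raw byte stored in the sRGB chunk.
#[cfg(test)]
mod srgb_intent_sketch {
    use super::SrgbRenderingIntent;

    #[test]
    fn raw_byte_round_trip() {
        for raw in 0..=3u8 {
            assert_eq!(SrgbRenderingIntent::from_raw(raw).unwrap().into_raw(), raw);
        }
        assert!(SrgbRenderingIntent::from_raw(4).is_none());
    }
}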
/// Coding-independent code points (cICP) specify the color space (primaries),
/// transfer function, matrix coefficients and scaling factor of the image using
/// the code points specified in [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273).
///
/// See https://www.w3.org/TR/png-3/#cICP-chunk for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct CodingIndependentCodePoints {
/// Id number of the color primaries defined in
/// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) in "Table 2 -
/// Interpretation of colour primaries (ColourPrimaries) value".
pub color_primaries: u8,
/// Id number of the transfer characteristics defined in
/// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) in "Table 3 -
/// Interpretation of transfer characteristics (TransferCharacteristics)
/// value".
pub transfer_function: u8,
/// Id number of the matrix coefficients defined in
/// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) in "Table 4 -
/// Interpretation of matrix coefficients (MatrixCoefficients) value".
///
/// This field is included to faithfully replicate the base
/// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) specification, but matrix coefficients
/// will always be set to 0, because RGB is currently the only supported color mode in PNG.
pub matrix_coefficients: u8,
/// Whether the image is
/// [a full range image](https://www.w3.org/TR/png-3/#dfn-full-range-image)
/// or
/// [a narrow range image](https://www.w3.org/TR/png-3/#dfn-narrow-range-image).
///
/// This field is included to faithfully replicate the base
/// [ITU-T-H.273](https://www.itu.int/rec/T-REC-H.273) specification, but it has limited
/// practical application to PNG images, because narrow-range images are [quite
/// rare](https://github.com/w3c/png/issues/312#issuecomment-2327349614) in practice.
pub is_video_full_range_image: bool,
}
/// Mastering Display Color Volume (mDCV) used at the point of content creation,
/// as specified in [SMPTE-ST-2086](https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8353899).
///
/// See https://www.w3.org/TR/png-3/#mDCV-chunk for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct MasteringDisplayColorVolume {
/// Mastering display chromaticities.
pub chromaticities: SourceChromaticities,
/// Mastering display maximum luminance.
///
/// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
/// is set to `10000000` then it indicates 1000 cd/m^2.
pub max_luminance: u32,
/// Mastering display minimum luminance.
///
/// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
/// is set to `10000000` then it indicates 1000 cd/m^2.
pub min_luminance: u32,
}
/// Content light level information of HDR content.
///
/// See https://www.w3.org/TR/png-3/#cLLI-chunk for more details.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct ContentLightLevelInfo {
/// Maximum Content Light Level indicates the maximum light level of any
/// single pixel (in cd/m^2, also known as nits) of the entire playback
/// sequence.
///
/// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
/// is set to `10000000` then it indicates 1000 cd/m^2.
///
/// A value of zero means that the value is unknown or not currently calculable.
pub max_content_light_level: u32,
/// Maximum Frame Average Light Level indicates the maximum value of the
/// frame average light level (in cd/m^2, also known as nits) of the entire
/// playback sequence. It is calculated by first averaging the decoded
/// luminance values of all the pixels in each frame, and then using the
/// value for the frame with the highest value.
///
/// The value is expressed in units of 0.0001 cd/m^2 - for example if this field
/// is set to `10000000` then it indicates 1000 cd/m^2.
///
/// A value of zero means that the value is unknown or not currently calculable.
pub max_frame_average_light_level: u32,
}
/// PNG info struct
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct Info<'a> {
pub width: u32,
pub height: u32,
pub bit_depth: BitDepth,
/// How colors are stored in the image.
pub color_type: ColorType,
pub interlaced: bool,
/// The image's `sBIT` chunk, if present; contains significant bits of the sample.
pub sbit: Option<Cow<'a, [u8]>>,
/// The image's `tRNS` chunk, if present; contains the alpha channel of the image's palette, 1 byte per entry.
pub trns: Option<Cow<'a, [u8]>>,
pub pixel_dims: Option<PixelDimensions>,
/// The image's `PLTE` chunk, if present; contains the RGB channels (in that order) of the image's palette, 3 bytes per entry (1 per channel).
pub palette: Option<Cow<'a, [u8]>>,
/// The contents of the image's gAMA chunk, if present.
/// Prefer `source_gamma` to also get the derived replacement gamma from sRGB chunks.
pub gama_chunk: Option<ScaledFloat>,
/// The contents of the image's `cHRM` chunk, if present.
/// Prefer `source_chromaticities` to also get the derived replacements from sRGB chunks.
pub chrm_chunk: Option<SourceChromaticities>,
/// The contents of the image's `bKGD` chunk, if present.
pub bkgd: Option<Cow<'a, [u8]>>,
pub frame_control: Option<FrameControl>,
pub animation_control: Option<AnimationControl>,
/// Gamma of the source system.
/// Set by both `gAMA` as well as to a replacement by `sRGB` chunk.
pub source_gamma: Option<ScaledFloat>,
/// Chromaticities of the source system.
/// Set by both `cHRM` as well as to a replacement by `sRGB` chunk.
pub source_chromaticities: Option<SourceChromaticities>,
/// The rendering intent of an sRGB image.
///
/// Presence of this value also indicates that the image conforms to the sRGB color space.
pub srgb: Option<SrgbRenderingIntent>,
/// The ICC profile for the image.
pub icc_profile: Option<Cow<'a, [u8]>>,
/// The coding-independent code points for video signal type identification of the image.
pub coding_independent_code_points: Option<CodingIndependentCodePoints>,
/// The mastering display color volume for the image.
pub mastering_display_color_volume: Option<MasteringDisplayColorVolume>,
/// The content light information for the image.
pub content_light_level: Option<ContentLightLevelInfo>,
/// The EXIF metadata for the image.
pub exif_metadata: Option<Cow<'a, [u8]>>,
/// tEXt field
pub uncompressed_latin1_text: Vec<TEXtChunk>,
/// zTXt field
pub compressed_latin1_text: Vec<ZTXtChunk>,
/// iTXt field
pub utf8_text: Vec<ITXtChunk>,
}
impl Default for Info<'_> {
fn default() -> Info<'static> {
Info {
width: 0,
height: 0,
bit_depth: BitDepth::Eight,
color_type: ColorType::Grayscale,
interlaced: false,
palette: None,
sbit: None,
trns: None,
gama_chunk: None,
chrm_chunk: None,
bkgd: None,
pixel_dims: None,
frame_control: None,
animation_control: None,
source_gamma: None,
source_chromaticities: None,
srgb: None,
icc_profile: None,
coding_independent_code_points: None,
mastering_display_color_volume: None,
content_light_level: None,
exif_metadata: None,
uncompressed_latin1_text: Vec::new(),
compressed_latin1_text: Vec::new(),
utf8_text: Vec::new(),
}
}
}
impl Info<'_> {
/// A utility constructor for a default info with width and height.
pub fn with_size(width: u32, height: u32) -> Self {
Info {
width,
height,
..Default::default()
}
}
/// Size of the image, width then height.
pub fn size(&self) -> (u32, u32) {
(self.width, self.height)
}
/// Returns true if the image is an APNG image.
pub fn is_animated(&self) -> bool {
self.frame_control.is_some() && self.animation_control.is_some()
}
/// Returns the frame control information of the image.
pub fn animation_control(&self) -> Option<&AnimationControl> {
self.animation_control.as_ref()
}
/// Returns the frame control information of the current frame
pub fn frame_control(&self) -> Option<&FrameControl> {
self.frame_control.as_ref()
}
/// Returns the number of bits per pixel.
pub fn bits_per_pixel(&self) -> usize {
self.color_type.bits_per_pixel(self.bit_depth)
}
/// Returns the number of bytes per pixel.
pub fn bytes_per_pixel(&self) -> usize {
// If adjusting this for expansion or other transformation passes, remember to keep the old
// implementation for bpp_in_prediction, which is internal to the png specification.
self.color_type.bytes_per_pixel(self.bit_depth)
}
/// Return the number of bytes for this pixel used in prediction.
///
/// Some filters use prediction over the raw bytes of a scanline. Where a previous pixel is
/// required for such forms, the specification instead references previous bytes. That is, for
/// a gray pixel of bit depth 2, the pixel used in prediction is actually 4 pixels prior. This
/// has the consequence that the number of possible values is rather small. To make this fact
/// more obvious in the type system and the optimizer we use an explicit enum here.
pub(crate) fn bpp_in_prediction(&self) -> BytesPerPixel {
BytesPerPixel::from_usize(self.bytes_per_pixel())
}
/// Returns the number of bytes needed for one deinterlaced image.
pub fn raw_bytes(&self) -> usize {
self.height as usize * self.raw_row_length()
}
/// Returns the number of bytes needed for one deinterlaced row.
pub fn raw_row_length(&self) -> usize {
self.raw_row_length_from_width(self.width)
}
pub(crate) fn checked_raw_row_length(&self) -> Option<usize> {
self.color_type
.checked_raw_row_length(self.bit_depth, self.width)
}
/// Returns the number of bytes needed for one deinterlaced row of width `width`.
pub fn raw_row_length_from_width(&self, width: u32) -> usize {
self.color_type
.raw_row_length_from_width(self.bit_depth, width)
}
/// Gamma dependent on sRGB chunk
pub fn gamma(&self) -> Option<ScaledFloat> {
if self.srgb.is_some() {
Some(crate::srgb::substitute_gamma())
} else {
self.gama_chunk
}
}
/// Chromaticities dependent on sRGB chunk
pub fn chromaticities(&self) -> Option<SourceChromaticities> {
if self.srgb.is_some() {
Some(crate::srgb::substitute_chromaticities())
} else {
self.chrm_chunk
}
}
/// Mark the image data as conforming to the sRGB color space with the specified rendering intent.
///
/// Any ICC profiles will be ignored.
///
/// Source gamma and chromaticities will be written only if they're set to fallback
/// values specified in [11.3.2.5](https://www.w3.org/TR/png-3/#sRGB-gAMA-cHRM).
pub(crate) fn set_source_srgb(&mut self, rendering_intent: SrgbRenderingIntent) {
self.srgb = Some(rendering_intent);
self.icc_profile = None;
}
}
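// A minimal sketch (not part of the original file) tying the size helpers
// above together for a default (8-bit grayscale) image.
#[cfg(test)]
mod info_size_sketch {
    use super::Info;

    #[test]
    fn raw_bytes_counts_filter_bytes() {
        let info = Info::with_size(4, 2);
        assert_eq!(info.size(), (4, 2));
        // Each row is 4 one-byte samples plus 1 leading filter byte.
        assert_eq!(info.raw_row_length(), 5);
        assert_eq!(info.raw_bytes(), 10);
    }
}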
impl BytesPerPixel {
pub(crate) fn from_usize(bpp: usize) -> Self {
match bpp {
1 => BytesPerPixel::One,
2 => BytesPerPixel::Two,
3 => BytesPerPixel::Three,
4 => BytesPerPixel::Four,
6 => BytesPerPixel::Six, // Only rgb×16bit
8 => BytesPerPixel::Eight, // Only rgba×16bit
_ => unreachable!("Not a possible byte rounded pixel width"),
}
}
pub(crate) fn into_usize(self) -> usize {
self as usize
}
}
bitflags::bitflags! {
/// Output transformations
///
/// Many flags from libpng are not yet supported. A PR discussing/adding them would be nice.
///
#[doc = "
```c
/// Discard the alpha channel
const STRIP_ALPHA = 0x0002; // read only
/// Expand 1; 2 and 4-bit samples to bytes
const PACKING = 0x0004; // read and write
/// Change order of packed pixels to LSB first
const PACKSWAP = 0x0008; // read and write
/// Invert monochrome images
const INVERT_MONO = 0x0020; // read and write
/// Normalize pixels to the sBIT depth
const SHIFT = 0x0040; // read and write
/// Flip RGB to BGR; RGBA to BGRA
const BGR = 0x0080; // read and write
/// Flip RGBA to ARGB or GA to AG
const SWAP_ALPHA = 0x0100; // read and write
/// Byte-swap 16-bit samples
const SWAP_ENDIAN = 0x0200; // read and write
/// Change alpha from opacity to transparency
const INVERT_ALPHA = 0x0400; // read and write
const STRIP_FILLER = 0x0800; // write only
const STRIP_FILLER_BEFORE = 0x0800; // write only
const STRIP_FILLER_AFTER = 0x1000; // write only
const GRAY_TO_RGB = 0x2000; // read only
const EXPAND_16 = 0x4000; // read only
/// Similar to STRIP_16, but in libpng it additionally considers gamma?
/// Not entirely sure: the documentation says it is more accurate,
/// but doesn't say precisely how.
const SCALE_16 = 0x8000; // read only
```
"]
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Transformations: u32 {
/// No transformation
const IDENTITY = 0x00000; // read and write
/// Strip 16-bit samples to 8 bits
const STRIP_16 = 0x00001; // read only
/// Expand paletted images to RGB; expand grayscale images of
/// less than 8-bit depth to 8-bit depth; and expand tRNS chunks
/// to alpha channels.
const EXPAND = 0x00010; // read only
/// Expand paletted images to include an alpha channel. Implies `EXPAND`.
const ALPHA = 0x10000; // read only
}
}
impl Transformations {
/// Transform every input to 8bit grayscale or color.
///
/// This sets `EXPAND` and `STRIP_16` which is similar to the default transformation used by
/// this library prior to `0.17`.
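///
/// A small sketch of the resulting flag set:
///
/// ```
/// use png::Transformations;
/// let t = Transformations::normalize_to_color8();
/// assert!(t.contains(Transformations::EXPAND | Transformations::STRIP_16));
/// ```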
pub fn normalize_to_color8() -> Transformations {
Transformations::EXPAND | Transformations::STRIP_16
}
}
/// Instantiate the default transformations, the identity transform.
impl Default for Transformations {
fn default() -> Transformations {
Transformations::IDENTITY
}
}
#[derive(Debug)]
pub struct ParameterError {
inner: ParameterErrorKind,
}
#[derive(Debug)]
pub(crate) enum ParameterErrorKind {
/// A provided buffer must have the exact size to hold the image data. Where the buffer can
/// be allocated by the caller, they must ensure that it has the minimum size hinted previously.
/// Even though the size is calculated from image data, this counts as a parameter error
/// because the caller must react to a value produced by this library, which may have been
/// subjected to limits.
ImageBufferSize { expected: usize, actual: usize },
/// A bit like returning `None` from an iterator.
/// We use it to differentiate between failing to seek to the next image in a sequence and the
/// absence of a next image. This is an error of the caller because they should have checked
/// the number of images by inspecting the header data returned when opening the image. This
/// library will perform the checks necessary to ensure that data was accurate or error with a
/// format error otherwise.
PolledAfterEndOfImage,
/// Attempt to continue decoding after a fatal, non-resumable error was reported (e.g. after
/// [`DecodingError::Format`]). The only case when it is possible to resume after an error
/// is an `UnexpectedEof` scenario - see [`DecodingError::IoError`].
PolledAfterFatalError,
}
impl From<ParameterErrorKind> for ParameterError {
fn from(inner: ParameterErrorKind) -> Self {
ParameterError { inner }
}
}
impl fmt::Display for ParameterError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use ParameterErrorKind::*;
match self.inner {
ImageBufferSize { expected, actual } => {
write!(fmt, "wrong data size, expected {} got {}", expected, actual)
}
PolledAfterEndOfImage => write!(fmt, "End of image has been reached"),
PolledAfterFatalError => {
write!(fmt, "A fatal decoding error has been encounted earlier")
}
}
}
}

128
vendor/png/src/decoder/interlace_info.rs vendored Normal file

@@ -0,0 +1,128 @@
use std::ops::Range;
use crate::adam7::{Adam7Info, Adam7Iterator};
/// Describes which interlacing algorithm applies to a decoded row.
///
/// PNG (2003) specifies two interlace modes, but reserves future extensions.
///
/// See also [Reader.next_interlaced_row](crate::Reader::next_interlaced_row).
#[derive(Clone, Copy, Debug)]
pub enum InterlaceInfo {
/// The `null` method means no interlacing.
Null(NullInfo),
/// [The `Adam7` algorithm](https://en.wikipedia.org/wiki/Adam7_algorithm) derives its name
/// from doing 7 passes over the image, only decoding a subset of all pixels in each pass.
/// The following table shows pictorially what parts of each 8x8 area of the image is found in
/// each pass:
///
/// ```txt
/// 1 6 4 6 2 6 4 6
/// 7 7 7 7 7 7 7 7
/// 5 6 5 6 5 6 5 6
/// 7 7 7 7 7 7 7 7
/// 3 6 4 6 3 6 4 6
/// 7 7 7 7 7 7 7 7
/// 5 6 5 6 5 6 5 6
/// 7 7 7 7 7 7 7 7
/// ```
Adam7(Adam7Info),
}
#[derive(Clone, Copy, Debug)]
pub struct NullInfo {
line: u32,
}
impl InterlaceInfo {
pub(crate) fn line_number(&self) -> u32 {
match self {
InterlaceInfo::Null(NullInfo { line }) => *line,
InterlaceInfo::Adam7(Adam7Info { line, .. }) => *line,
}
}
pub(crate) fn get_adam7_info(&self) -> Option<&Adam7Info> {
match self {
InterlaceInfo::Null(_) => None,
InterlaceInfo::Adam7(adam7info) => Some(adam7info),
}
}
}
pub(crate) struct InterlaceInfoIter(IterImpl);
impl InterlaceInfoIter {
pub fn empty() -> Self {
Self(IterImpl::None(0..0))
}
pub fn new(width: u32, height: u32, interlaced: bool) -> Self {
if interlaced {
Self(IterImpl::Adam7(Adam7Iterator::new(width, height)))
} else {
Self(IterImpl::None(0..height))
}
}
}
impl Iterator for InterlaceInfoIter {
type Item = InterlaceInfo;
fn next(&mut self) -> Option<InterlaceInfo> {
match self.0 {
IterImpl::Adam7(ref mut adam7) => Some(InterlaceInfo::Adam7(adam7.next()?)),
IterImpl::None(ref mut height) => Some(InterlaceInfo::Null(NullInfo {
line: height.next()?,
})),
}
}
}
enum IterImpl {
None(Range<u32>),
Adam7(Adam7Iterator),
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn null() {
assert_eq!(
InterlaceInfoIter::new(8, 8, false)
.map(|info| info.line_number())
.collect::<Vec<_>>(),
vec![0, 1, 2, 3, 4, 5, 6, 7],
);
}
#[test]
fn adam7() {
assert_eq!(
InterlaceInfoIter::new(8, 8, true)
.map(|info| info.line_number())
.collect::<Vec<_>>(),
vec![
0, // pass 1
0, // pass 2
0, // pass 3
0, 1, // pass 4
0, 1, // pass 5
0, 1, 2, 3, // pass 6
0, 1, 2, 3, // pass 7
],
);
}
#[test]
fn empty() {
assert_eq!(
InterlaceInfoIter::empty()
.map(|info| info.line_number())
.collect::<Vec<_>>(),
vec![],
);
}
}

732
vendor/png/src/decoder/mod.rs vendored Normal file

@@ -0,0 +1,732 @@
mod interlace_info;
mod read_decoder;
pub(crate) mod stream;
pub(crate) mod transform;
mod unfiltering_buffer;
mod zlib;
use self::read_decoder::{ImageDataCompletionStatus, ReadDecoder};
use self::stream::{DecodeOptions, DecodingError, FormatErrorInner};
use self::transform::{create_transform_fn, TransformFn};
use self::unfiltering_buffer::UnfilteringBuffer;
use std::io::{BufRead, Seek};
use std::mem;
use crate::adam7::Adam7Info;
use crate::common::{
BitDepth, BytesPerPixel, ColorType, Info, ParameterErrorKind, Transformations,
};
use crate::FrameControl;
pub use zlib::{UnfilterBuf, UnfilterRegion};
pub use interlace_info::InterlaceInfo;
use interlace_info::InterlaceInfoIter;
/*
pub enum InterlaceHandling {
/// Outputs the raw rows
RawRows,
/// Fill missing the pixels from the existing ones
Rectangle,
/// Only fill the needed pixels
Sparkle
}
*/
/// Output info.
///
/// This describes one particular frame of the image that was written into the output buffer.
#[derive(Debug, PartialEq, Eq)]
pub struct OutputInfo {
/// The pixel width of this frame.
pub width: u32,
/// The pixel height of this frame.
pub height: u32,
/// The chosen output color type.
pub color_type: ColorType,
/// The chosen output bit depth.
pub bit_depth: BitDepth,
/// The byte count of each scan line in the image.
pub line_size: usize,
}
impl OutputInfo {
/// Returns the size needed to hold a decoded frame.
///
/// If the output buffer was larger, then bytes after this count should be ignored. They may
/// still have been changed.
pub fn buffer_size(&self) -> usize {
self.line_size * self.height as usize
}
}
#[derive(Clone, Copy, Debug)]
/// Limits on the resources the `Decoder` is allowed to use.
pub struct Limits {
/// Maximum number of bytes the decoder is allowed to allocate; the default is 64 MiB.
pub bytes: usize,
}
impl Limits {
pub(crate) fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
if self.bytes >= bytes {
self.bytes -= bytes;
Ok(())
} else {
Err(DecodingError::LimitsExceeded)
}
}
}
impl Default for Limits {
fn default() -> Limits {
Limits {
bytes: 1024 * 1024 * 64,
}
}
}
/// PNG Decoder
pub struct Decoder<R: BufRead + Seek> {
read_decoder: ReadDecoder<R>,
/// Output transformations
transform: Transformations,
}
/// A row of data with interlace information attached.
#[derive(Clone, Copy, Debug)]
pub struct InterlacedRow<'data> {
data: &'data [u8],
interlace: InterlaceInfo,
}
impl<'data> InterlacedRow<'data> {
pub fn data(&self) -> &'data [u8] {
self.data
}
pub fn interlace(&self) -> &InterlaceInfo {
&self.interlace
}
}
/// A row of data without interlace information.
#[derive(Clone, Copy, Debug)]
pub struct Row<'data> {
data: &'data [u8],
}
impl<'data> Row<'data> {
pub fn data(&self) -> &'data [u8] {
self.data
}
}
impl<R: BufRead + Seek> Decoder<R> {
/// Create a new decoder configuration with default limits.
pub fn new(r: R) -> Decoder<R> {
Decoder::new_with_limits(r, Limits::default())
}
/// Create a new decoder configuration with custom limits.
pub fn new_with_limits(r: R, limits: Limits) -> Decoder<R> {
let mut read_decoder = ReadDecoder::new(r);
read_decoder.set_limits(limits);
Decoder {
read_decoder,
transform: Transformations::IDENTITY,
}
}
/// Create a new decoder configuration with custom [`DecodeOptions`].
pub fn new_with_options(r: R, decode_options: DecodeOptions) -> Decoder<R> {
let mut read_decoder = ReadDecoder::with_options(r, decode_options);
read_decoder.set_limits(Limits::default());
Decoder {
read_decoder,
transform: Transformations::IDENTITY,
}
}
/// Limit resource usage.
///
/// Note that your allocations, e.g. when reading into a pre-allocated buffer, are __NOT__
/// considered part of the limits. Nevertheless, required intermediate buffers, such as those
/// for individual lines, are checked against the limit.
///
/// Note that this is a best-effort basis.
///
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::{Decoder, Limits};
/// // This image is 32×32, 1 bit per pixel. The reader buffers one row which requires 4 bytes.
/// let mut limits = Limits::default();
/// limits.bytes = 3;
/// let mut decoder = Decoder::new_with_limits(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()), limits);
/// assert!(decoder.read_info().is_err());
///
/// // This image is 32x32 pixels, so the decoder will allocate less than 10 KiB.
/// let mut limits = Limits::default();
/// limits.bytes = 10*1024;
/// let mut decoder = Decoder::new_with_limits(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()), limits);
/// assert!(decoder.read_info().is_ok());
/// ```
pub fn set_limits(&mut self, limits: Limits) {
self.read_decoder.set_limits(limits);
}
/// Read the PNG header and return the information contained within.
///
/// Most image metadata will not be read until `read_info` is called, so those fields will be
/// None or empty.
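///
/// For example (a minimal sketch, using the same test image as the other examples in this
/// module):
///
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let mut decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
/// let info = decoder.read_header_info().unwrap();
/// assert_eq!((info.width, info.height), (32, 32));
/// ```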
pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
self.read_decoder.read_header_info()
}
/// Reads all metadata until the first IDAT chunk.
pub fn read_info(mut self) -> Result<Reader<R>, DecodingError> {
let info = self.read_header_info()?;
let unfiltering_buffer = UnfilteringBuffer::new(info);
let mut reader = Reader {
decoder: self.read_decoder,
bpp: BytesPerPixel::One,
subframe: SubframeInfo::not_yet_init(),
remaining_frames: 0, // Temporary value - fixed below after reading `acTL` and `fcTL`.
unfiltering_buffer,
transform: self.transform,
transform_fn: None,
scratch_buffer: Vec::new(),
finished: false,
};
// Check if the decoding buffer of a single raw line has a valid size.
//
// FIXME: this check and the next can be delayed until processing image data. This would
// allow usage where only the metadata is processed, or where the image is processed
// line-by-line even on targets that cannot fit the whole image into their address space.
// We should strive for a balance between implementation complexity (still ensuring that the
// no-overflow preconditions are met for internal calculations) and usage possibilities.
if reader.info().checked_raw_row_length().is_none() {
return Err(DecodingError::LimitsExceeded);
}
// Check if the output buffer has a valid size.
//
// FIXME: see above and
// <https://github.com/image-rs/image-png/pull/608#issuecomment-3003576956>
if reader.output_buffer_size().is_none() {
return Err(DecodingError::LimitsExceeded);
}
reader.read_until_image_data()?;
reader.remaining_frames = match reader.info().animation_control.as_ref() {
None => 1, // No `acTL` => only expecting `IDAT` frame.
Some(animation) => {
let mut num_frames = animation.num_frames as usize;
if reader.info().frame_control.is_none() {
// No `fcTL` before `IDAT` => `IDAT` is not part of the animation, but
// represents an *extra*, default frame for non-APNG-aware decoders.
num_frames += 1;
}
num_frames
}
};
Ok(reader)
}
/// Set the allowed and performed transformations.
///
/// A transformation is a pre-processing step on the raw image data that modifies its content or encoding.
/// Many options have an impact on memory or CPU usage during decoding.
pub fn set_transformations(&mut self, transform: Transformations) {
self.transform = transform;
}
/// Set the decoder to ignore all text chunks while parsing.
///
/// For example:
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let mut decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
/// decoder.set_ignore_text_chunk(true);
/// assert!(decoder.read_info().is_ok());
/// ```
pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
self.read_decoder.set_ignore_text_chunk(ignore_text_chunk);
}
/// Set the decoder to ignore `iCCP` chunks while parsing.
///
/// For example:
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let mut decoder = Decoder::new(BufReader::new(File::open("tests/iccp/broken_iccp.png").unwrap()));
/// decoder.set_ignore_iccp_chunk(true);
/// assert!(decoder.read_info().is_ok());
/// ```
pub fn set_ignore_iccp_chunk(&mut self, ignore_iccp_chunk: bool) {
self.read_decoder.set_ignore_iccp_chunk(ignore_iccp_chunk);
}
/// Set the decoder to ignore and not verify the Adler-32 checksum
/// and CRC code.
pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
self.read_decoder.ignore_checksums(ignore_checksums);
}
}
/// PNG reader (mostly high-level interface)
///
/// Provides a high-level interface that iterates over lines or whole images.
pub struct Reader<R: BufRead + Seek> {
decoder: ReadDecoder<R>,
bpp: BytesPerPixel,
subframe: SubframeInfo,
/// How many frames remain to be decoded. Decremented after each `IDAT` or `fdAT` sequence.
remaining_frames: usize,
/// Buffer with not-yet-`unfilter`-ed image rows
unfiltering_buffer: UnfilteringBuffer,
/// Output transformations
transform: Transformations,
/// Function that can transform decompressed, unfiltered rows into final output.
/// See the `transform.rs` module for more details.
transform_fn: Option<TransformFn>,
/// This buffer is only used so that `next_row` and `next_interlaced_row` can return a reference
/// to a byte slice. In a future version of this library, this buffer will be removed and
/// `next_row` and `next_interlaced_row` will write directly into a user provided output buffer.
scratch_buffer: Vec<u8>,
/// Whether `ImageEnd` was already reached by `fn finish`.
finished: bool,
}
/// The subframe specific information.
///
/// In APNG the frames are constructed by combining the previous frame and a new subframe (through
/// a combination of `dispose_op` and `blend_op`). These subframes specify individual dimension
/// information and reuse the global interlace options. This struct encapsulates the state of where
/// in a particular IDAT-frame or subframe we are.
struct SubframeInfo {
width: u32,
height: u32,
rowlen: usize,
current_interlace_info: Option<InterlaceInfo>,
interlace_info_iter: InterlaceInfoIter,
consumed_and_flushed: bool,
}
impl<R: BufRead + Seek> Reader<R> {
/// Advances to the start of the next animation frame and
/// returns a reference to the `FrameControl` info that describes it.
/// Skips and discards the image data of the previous frame if necessary.
///
/// Returns a [`ParameterError`] when there are no more animation frames.
/// To avoid this the caller can check if [`Info::animation_control`] exists
/// and consult [`AnimationControl::num_frames`].
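///
/// A hedged sketch of an animation loop (`tests/animated.png` is a hypothetical fixture
/// path, hence `no_run`):
///
/// ```no_run
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let decoder = Decoder::new(BufReader::new(File::open("tests/animated.png").unwrap()));
/// let mut reader = decoder.read_info().unwrap();
/// let mut buf = vec![0; reader.output_buffer_size().unwrap()];
/// reader.next_frame(&mut buf).unwrap(); // default image (or first frame)
/// // Copy the dimensions out so the borrow of `reader` ends before `next_frame`.
/// while let Ok((_w, _h)) = reader.next_frame_info().map(|fc| (fc.width, fc.height)) {
///     reader.next_frame(&mut buf).unwrap();
/// }
/// ```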
pub fn next_frame_info(&mut self) -> Result<&FrameControl, DecodingError> {
let remaining_frames = if self.subframe.consumed_and_flushed {
self.remaining_frames
} else {
// One remaining frame will be consumed by the `finish_decoding` call below.
self.remaining_frames - 1
};
if remaining_frames == 0 {
return Err(DecodingError::Parameter(
ParameterErrorKind::PolledAfterEndOfImage.into(),
));
}
if !self.subframe.consumed_and_flushed {
self.subframe.current_interlace_info = None;
self.finish_decoding()?;
}
self.read_until_image_data()?;
// The PNG standard (and `StreamingDecoder`) guarantees that there is an `fcTL` chunk
// before the start of image data in a sequence of `fdAT` chunks. Therefore `unwrap`
// below is guaranteed to not panic.
Ok(self.info().frame_control.as_ref().unwrap())
}
/// Reads all metadata until the next frame data starts.
/// Requires IHDR before the IDAT and fcTL before fdAT.
fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
self.decoder.read_until_image_data()?;
self.subframe = SubframeInfo::new(self.info());
self.bpp = self.info().bpp_in_prediction();
self.unfiltering_buffer.reset_all();
// Allocate output buffer.
let buflen = self.unguarded_output_line_size(self.subframe.width);
self.decoder.reserve_bytes(buflen)?;
Ok(())
}
/// Get information on the image.
///
/// The structure will change as new frames of an animated image are decoded.
pub fn info(&self) -> &Info<'static> {
self.decoder.info().unwrap()
}
/// Decodes the next frame into `buf`.
///
/// Note that this decodes raw subframes that need to be mixed according to blend-op and
/// dispose-op by the caller.
///
/// The caller must always provide a buffer large enough to hold a complete frame (the APNG
/// specification restricts subframes to the dimensions given in the image header). The region
/// that has been written can be checked afterwards by calling `info` after a successful call and
/// inspecting the `frame_control` data. This requirement may be lifted in a later version of
/// `png`.
///
/// Output lines will be written as a row-major, packed matrix with the width and height of the
/// read frame (or subframe); all samples are in big-endian byte order where this matters.
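///
/// A minimal sketch of decoding one full frame (reusing the `tests/pngsuite/basi0g01.png`
/// image referenced by other examples in this module):
///
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
/// let mut reader = decoder.read_info().unwrap();
/// let mut buf = vec![0; reader.output_buffer_size().unwrap()];
/// let info = reader.next_frame(&mut buf).unwrap();
/// // Only the first `info.buffer_size()` bytes hold this frame's data.
/// let frame = &buf[..info.buffer_size()];
/// assert!(!frame.is_empty());
/// ```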
pub fn next_frame(&mut self, buf: &mut [u8]) -> Result<OutputInfo, DecodingError> {
if self.remaining_frames == 0 {
return Err(DecodingError::Parameter(
ParameterErrorKind::PolledAfterEndOfImage.into(),
));
} else if self.subframe.consumed_and_flushed {
// Advance until the next `fdAT`
// (along the way we should encounter the fcTL for this frame).
self.read_until_image_data()?;
}
// Note that we only check if the buffer size calculation holds when decoding a frame.
// Consequently, we can represent the `Info` and do frameless decoding even when the
// target architecture's address space is too small for a frame. However, reading an
// actual frame then fails below with `LimitsExceeded`.
let required_len = self
.output_buffer_size()
.ok_or(DecodingError::LimitsExceeded)?;
if buf.len() < required_len {
return Err(DecodingError::Parameter(
ParameterErrorKind::ImageBufferSize {
expected: required_len,
actual: buf.len(),
}
.into(),
));
}
let (color_type, bit_depth) = self.output_color_type();
let output_info = OutputInfo {
width: self.subframe.width,
height: self.subframe.height,
color_type,
bit_depth,
line_size: self.unguarded_output_line_size(self.subframe.width),
};
if self.info().interlaced {
let stride = self.unguarded_output_line_size(self.info().width);
let samples = color_type.samples() as u8;
let bits_pp = samples * (bit_depth as u8);
let expand = crate::adam7::expand_pass;
while let Some(InterlacedRow {
data: row,
interlace,
..
}) = self.next_interlaced_row()?
{
// `unwrap` won't panic, because we checked `self.info().interlaced` above.
let adam7info = interlace.get_adam7_info().unwrap();
expand(buf, stride, row, adam7info, bits_pp);
}
} else {
let current_interlace_info = self.subframe.current_interlace_info.as_ref();
let already_done_rows = current_interlace_info
.map(|info| info.line_number())
.unwrap_or(self.subframe.height);
for row in buf
.chunks_exact_mut(output_info.line_size)
.take(self.subframe.height as usize)
.skip(already_done_rows as usize)
{
self.next_interlaced_row_impl(self.subframe.rowlen, row)?;
}
}
// Advance over the rest of data for this (sub-)frame.
self.finish_decoding()?;
Ok(output_info)
}
fn mark_subframe_as_consumed_and_flushed(&mut self) {
assert!(self.remaining_frames > 0);
self.remaining_frames -= 1;
self.subframe.consumed_and_flushed = true;
}
/// Advance over the rest of data for this (sub-)frame.
/// Called after decoding the last row of a frame.
fn finish_decoding(&mut self) -> Result<(), DecodingError> {
// Double-check that all rows of this frame have been decoded (i.e. that the potential
// `finish_decoding` call below won't be discarding any data).
assert!(self.subframe.current_interlace_info.is_none());
// Discard the remaining data in the current sequence of `IDAT` or `fdAT` chunks.
if !self.subframe.consumed_and_flushed {
self.decoder.finish_decoding_image_data()?;
self.mark_subframe_as_consumed_and_flushed();
}
Ok(())
}
/// Returns the next processed row of the image (discarding `InterlaceInfo`).
///
/// See also [`Reader.read_row`], which reads into a caller-provided buffer.
pub fn next_row(&mut self) -> Result<Option<Row<'_>>, DecodingError> {
self.next_interlaced_row()
.map(|v| v.map(|v| Row { data: v.data }))
}
/// Returns the next processed row of the image.
///
/// See also [`Reader.read_row`], which reads into a caller-provided buffer.
pub fn next_interlaced_row(&mut self) -> Result<Option<InterlacedRow<'_>>, DecodingError> {
let mut output_buffer = mem::take(&mut self.scratch_buffer);
let max_line_size = self
.output_line_size(self.info().width)
.ok_or(DecodingError::LimitsExceeded)?;
output_buffer.resize(max_line_size, 0u8);
let result = self.read_row(&mut output_buffer);
self.scratch_buffer = output_buffer;
result.map(move |option| {
option.map(move |interlace| {
let output_line_size = self.output_line_size_for_interlace_info(&interlace);
InterlacedRow {
data: &self.scratch_buffer[..output_line_size],
interlace,
}
})
})
}
/// Reads the next row of the image into the provided `output_buffer`.
/// `Ok(None)` will be returned if the current image frame has no more rows.
///
/// `output_buffer` needs to be long enough to accommodate [`Reader.output_line_size`] for
/// [`Info.width`] (initial interlaced rows may need less than that).
///
/// See also [`Reader.next_row`] and [`Reader.next_interlaced_row`], which read into a
/// `Reader`-owned buffer.
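///
/// A minimal row-loop sketch (same test image as the other examples in this module):
///
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
/// let mut reader = decoder.read_info().unwrap();
/// let width = reader.info().width;
/// let mut row = vec![0; reader.output_line_size(width).unwrap()];
/// while let Some(_interlace) = reader.read_row(&mut row).unwrap() {
///     // `row` now holds the decoded (and transformed) pixels of this row.
/// }
/// ```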
pub fn read_row(
&mut self,
output_buffer: &mut [u8],
) -> Result<Option<InterlaceInfo>, DecodingError> {
let interlace = match self.subframe.current_interlace_info.as_ref() {
None => {
self.finish_decoding()?;
return Ok(None);
}
Some(interlace) => *interlace,
};
if interlace.line_number() == 0 {
self.unfiltering_buffer.reset_prev_row();
}
let rowlen = match interlace {
InterlaceInfo::Null(_) => self.subframe.rowlen,
InterlaceInfo::Adam7(Adam7Info { samples: width, .. }) => {
self.info().raw_row_length_from_width(width)
}
};
let output_line_size = self.output_line_size_for_interlace_info(&interlace);
let output_buffer = &mut output_buffer[..output_line_size];
self.next_interlaced_row_impl(rowlen, output_buffer)?;
Ok(Some(interlace))
}
fn output_line_size_for_interlace_info(&self, interlace: &InterlaceInfo) -> usize {
let width = match interlace {
InterlaceInfo::Adam7(Adam7Info { samples: width, .. }) => *width,
InterlaceInfo::Null(_) => self.subframe.width,
};
self.unguarded_output_line_size(width)
}
/// Reads the rest of the image and its chunks and finishes up, including text chunks and others.
///
/// This will discard the rest of the image if it has not already been read with
/// [`Reader::next_frame`], [`Reader::next_row`] or [`Reader::next_interlaced_row`].
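///
/// For example (a minimal sketch that skips the pixel data of the same test image used
/// elsewhere in this module):
///
/// ```
/// use std::fs::File;
/// use std::io::BufReader;
/// use png::Decoder;
/// let decoder = Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
/// let mut reader = decoder.read_info().unwrap();
/// reader.finish().unwrap();
/// // A second call reports `PolledAfterEndOfImage` as a parameter error.
/// assert!(reader.finish().is_err());
/// ```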
pub fn finish(&mut self) -> Result<(), DecodingError> {
if self.finished {
return Err(DecodingError::Parameter(
ParameterErrorKind::PolledAfterEndOfImage.into(),
));
}
self.remaining_frames = 0;
self.unfiltering_buffer.reset_all();
self.decoder.read_until_end_of_input()?;
self.finished = true;
Ok(())
}
/// Fetch the next interlaced row and filter it according to our own transformations.
fn next_interlaced_row_impl(
&mut self,
rowlen: usize,
output_buffer: &mut [u8],
) -> Result<(), DecodingError> {
self.next_raw_interlaced_row(rowlen)?;
let row = self.unfiltering_buffer.prev_row();
assert_eq!(row.len(), rowlen - 1);
// Apply transformations and write resulting data to buffer.
let transform_fn = {
if self.transform_fn.is_none() {
self.transform_fn = Some(create_transform_fn(self.info(), self.transform)?);
}
self.transform_fn.as_deref().unwrap()
};
transform_fn(row, output_buffer, self.info());
self.subframe.current_interlace_info = self.subframe.interlace_info_iter.next();
Ok(())
}
/// Returns the color type and the number of bits per sample
/// of the data returned by `Reader::next_row` and `Reader::next_frame`.
pub fn output_color_type(&self) -> (ColorType, BitDepth) {
use crate::common::ColorType::*;
let t = self.transform;
let info = self.info();
if t == Transformations::IDENTITY {
(info.color_type, info.bit_depth)
} else {
let bits = match info.bit_depth as u8 {
16 if t.intersects(Transformations::STRIP_16) => 8,
n if n < 8
&& (t.contains(Transformations::EXPAND)
|| t.contains(Transformations::ALPHA)) =>
{
8
}
n => n,
};
let color_type =
if t.contains(Transformations::EXPAND) || t.contains(Transformations::ALPHA) {
let has_trns = info.trns.is_some() || t.contains(Transformations::ALPHA);
match info.color_type {
Grayscale if has_trns => GrayscaleAlpha,
Rgb if has_trns => Rgba,
Indexed if has_trns => Rgba,
Indexed => Rgb,
ct => ct,
}
} else {
info.color_type
};
(color_type, BitDepth::from_u8(bits).unwrap())
}
}
/// Return the number of bytes required to hold a deinterlaced image frame that is decoded
/// using the given input transformations.
///
/// Returns `None` if the output buffer does not fit into the memory space of the machine,
/// otherwise returns the byte length in `Some`. The length is smaller than [`isize::MAX`].
pub fn output_buffer_size(&self) -> Option<usize> {
let (width, height) = self.info().size();
let (color, depth) = self.output_color_type();
// The subtraction should always work, but we do this for consistency. Also note that by
// calling `checked_raw_row_length` the row buffer is guaranteed to work, whereas if we
// ran another function that didn't include the filter byte, it could later fail on an image
// that is `1xN`...
let linelen = color.checked_raw_row_length(depth, width)?.checked_sub(1)?;
let height = usize::try_from(height).ok()?;
let imglen = linelen.checked_mul(height)?;
// Ensure that it fits into address space not only `usize` to allocate.
(imglen <= isize::MAX as usize).then_some(imglen)
}
/// Returns the number of bytes required to hold a deinterlaced row.
pub(crate) fn unguarded_output_line_size(&self, width: u32) -> usize {
let (color, depth) = self.output_color_type();
color.raw_row_length_from_width(depth, width) - 1
}
/// Returns the number of bytes required to hold a deinterlaced row.
///
/// Returns `None` if the output buffer does not fit into the memory space of the machine,
/// otherwise returns the byte length in `Some`. The length is smaller than [`isize::MAX`].
pub fn output_line_size(&self, width: u32) -> Option<usize> {
let (color, depth) = self.output_color_type();
let length = color.checked_raw_row_length(depth, width)?.checked_sub(1)?;
// Ensure that it fits into address space not only `usize` to allocate.
(length <= isize::MAX as usize).then_some(length)
}
/// Unfilter the next raw interlaced row into `self.unfiltering_buffer`.
fn next_raw_interlaced_row(&mut self, rowlen: usize) -> Result<(), DecodingError> {
// Read image data until we have at least one full row (but possibly more than one).
while self.unfiltering_buffer.curr_row_len() < rowlen {
if self.subframe.consumed_and_flushed {
return Err(DecodingError::Format(
FormatErrorInner::NoMoreImageData.into(),
));
}
let mut buffer = self.unfiltering_buffer.as_unfilled_buffer();
match self.decoder.decode_image_data(Some(&mut buffer))? {
ImageDataCompletionStatus::ExpectingMoreData => (),
ImageDataCompletionStatus::Done => self.mark_subframe_as_consumed_and_flushed(),
}
}
self.unfiltering_buffer.unfilter_curr_row(rowlen, self.bpp)
}
}
impl SubframeInfo {
fn not_yet_init() -> Self {
SubframeInfo {
width: 0,
height: 0,
rowlen: 0,
current_interlace_info: None,
interlace_info_iter: InterlaceInfoIter::empty(),
consumed_and_flushed: false,
}
}
fn new(info: &Info) -> Self {
// The APNG `fcTL` chunk overrides width and height.
// All other data is set by the main info struct.
let (width, height) = if let Some(fc) = info.frame_control {
(fc.width, fc.height)
} else {
(info.width, info.height)
};
let mut interlace_info_iter = InterlaceInfoIter::new(width, height, info.interlaced);
let current_interlace_info = interlace_info_iter.next();
SubframeInfo {
width,
height,
rowlen: info.raw_row_length_from_width(width),
current_interlace_info,
interlace_info_iter,
consumed_and_flushed: false,
}
}
}

153
vendor/png/src/decoder/read_decoder.rs vendored Normal file

@@ -0,0 +1,153 @@
use super::stream::{DecodeOptions, Decoded, DecodingError, FormatErrorInner, StreamingDecoder};
use super::zlib::UnfilterBuf;
use super::Limits;
use std::io::{BufRead, ErrorKind, Read, Seek};
use crate::chunk;
use crate::common::Info;
/// Helper for encapsulating reading input from `Read` and feeding it into a `StreamingDecoder`
/// while hiding low-level `Decoded` events and only exposing a few high-level reading operations
/// like:
///
/// * `read_header_info` - reading until `IHDR` chunk
/// * `read_until_image_data` - reading until `IDAT` / `fdAT` sequence
/// * `decode_image_data` - reading from `IDAT` / `fdAT` sequence into `Vec<u8>`
/// * `finish_decoding_image_data()` - discarding remaining data from `IDAT` / `fdAT` sequence
/// * `read_until_end_of_input()` - reading until `IEND` chunk
pub(crate) struct ReadDecoder<R: Read> {
reader: R,
decoder: StreamingDecoder,
}
impl<R: BufRead + Seek> ReadDecoder<R> {
pub fn new(r: R) -> Self {
Self {
reader: r,
decoder: StreamingDecoder::new(),
}
}
pub fn with_options(r: R, options: DecodeOptions) -> Self {
let mut decoder = StreamingDecoder::new_with_options(options);
decoder.limits = Limits::default();
Self { reader: r, decoder }
}
pub fn set_limits(&mut self, limits: Limits) {
self.decoder.limits = limits;
}
pub fn reserve_bytes(&mut self, bytes: usize) -> Result<(), DecodingError> {
self.decoder.limits.reserve_bytes(bytes)
}
pub fn set_ignore_text_chunk(&mut self, ignore_text_chunk: bool) {
self.decoder.set_ignore_text_chunk(ignore_text_chunk);
}
pub fn set_ignore_iccp_chunk(&mut self, ignore_iccp_chunk: bool) {
self.decoder.set_ignore_iccp_chunk(ignore_iccp_chunk);
}
pub fn ignore_checksums(&mut self, ignore_checksums: bool) {
self.decoder.set_ignore_adler32(ignore_checksums);
self.decoder.set_ignore_crc(ignore_checksums);
}
/// Returns the next decoded chunk. If the chunk is an ImageData chunk, its contents are written
/// into image_data.
fn decode_next(
&mut self,
image_data: Option<&mut UnfilterBuf<'_>>,
) -> Result<Decoded, DecodingError> {
let (consumed, result) = {
let buf = self.reader.fill_buf()?;
if buf.is_empty() {
return Err(DecodingError::IoError(ErrorKind::UnexpectedEof.into()));
}
self.decoder.update(buf, image_data)?
};
self.reader.consume(consumed);
Ok(result)
}
/// Reads until the end of `IHDR` chunk.
///
/// Prerequisite: None (idempotent).
pub fn read_header_info(&mut self) -> Result<&Info<'static>, DecodingError> {
while self.info().is_none() {
if let Decoded::ChunkComplete(chunk::IEND) = self.decode_next(None)? {
unreachable!()
}
}
Ok(self.info().unwrap())
}
/// Reads until the start of the next `IDAT` or `fdAT` chunk.
///
/// Prerequisite: **Not** within `IDAT` / `fdAT` chunk sequence.
pub fn read_until_image_data(&mut self) -> Result<(), DecodingError> {
loop {
match self.decode_next(None)? {
Decoded::ChunkBegin(_, chunk::IDAT) | Decoded::ChunkBegin(_, chunk::fdAT) => break,
Decoded::ChunkComplete(chunk::IEND) => {
return Err(DecodingError::Format(
FormatErrorInner::MissingImageData.into(),
))
}
// Ignore all other chunk events. Any other chunk may be between IDAT chunks, fdAT
// chunks and their control chunks.
_ => {}
}
}
Ok(())
}
/// Reads `image_data` and reports whether there may be additional data afterwards (i.e. if it
/// is okay to call `decode_image_data` and/or `finish_decoding_image_data` again).
///
/// Prerequisite: Input is currently positioned within `IDAT` / `fdAT` chunk sequence.
pub fn decode_image_data(
&mut self,
image_data: Option<&mut UnfilterBuf<'_>>,
) -> Result<ImageDataCompletionStatus, DecodingError> {
match self.decode_next(image_data)? {
Decoded::ImageData => Ok(ImageDataCompletionStatus::ExpectingMoreData),
Decoded::ImageDataFlushed => Ok(ImageDataCompletionStatus::Done),
// Ignore other events that may happen within an `IDAT` / `fdAT` chunks sequence.
_ => Ok(ImageDataCompletionStatus::ExpectingMoreData),
}
}
/// Consumes and discards the rest of an `IDAT` / `fdAT` chunk sequence.
///
/// Prerequisite: Input is currently positioned within `IDAT` / `fdAT` chunk sequence.
pub fn finish_decoding_image_data(&mut self) -> Result<(), DecodingError> {
loop {
if let ImageDataCompletionStatus::Done = self.decode_image_data(None)? {
return Ok(());
}
}
}
/// Reads until the `IEND` chunk.
///
/// Prerequisite: `IEND` chunk hasn't been reached yet.
pub fn read_until_end_of_input(&mut self) -> Result<(), DecodingError> {
while !matches!(self.decode_next(None)?, Decoded::ChunkComplete(chunk::IEND)) {}
Ok(())
}
pub fn info(&self) -> Option<&Info<'static>> {
self.decoder.info.as_ref()
}
}
#[derive(Debug, Eq, PartialEq)]
pub(crate) enum ImageDataCompletionStatus {
ExpectingMoreData,
Done,
}

3213
vendor/png/src/decoder/stream.rs vendored Normal file

File diff suppressed because it is too large

203
vendor/png/src/decoder/transform.rs vendored Normal file

@@ -0,0 +1,203 @@
//! Transforming a decompressed, unfiltered row into the final output.
mod palette;
use crate::{BitDepth, ColorType, DecodingError, Info, Transformations};
use super::stream::FormatErrorInner;
/// Type of a function that can transform a decompressed, unfiltered row (the
/// 1st argument) into the final pixels (the 2nd argument), optionally using
/// image metadata (e.g. PLTE data can be accessed using the 3rd argument).
///
/// TODO: If some precomputed state is needed (e.g. to make `expand_paletted...`
/// faster) then consider changing this into `Box<dyn Fn(...)>`.
pub type TransformFn = Box<dyn Fn(&[u8], &mut [u8], &Info) + Send + Sync>;
/// Returns a transformation function that should be applied to image rows based
/// on 1) decoded image metadata (`info`) and 2) the transformations requested
/// by the crate client (`transform`).
pub fn create_transform_fn(
info: &Info,
transform: Transformations,
) -> Result<TransformFn, DecodingError> {
let color_type = info.color_type;
let bit_depth = info.bit_depth as u8;
let trns = info.trns.is_some() || transform.contains(Transformations::ALPHA);
let expand =
transform.contains(Transformations::EXPAND) || transform.contains(Transformations::ALPHA);
let strip16 = bit_depth == 16 && transform.contains(Transformations::STRIP_16);
match color_type {
ColorType::Indexed if expand => {
if info.palette.is_none() {
Err(DecodingError::Format(
FormatErrorInner::PaletteRequired.into(),
))
} else if let BitDepth::Sixteen = info.bit_depth {
// This should have been caught earlier but let's check again. Can't hurt.
Err(DecodingError::Format(
FormatErrorInner::InvalidColorBitDepth {
color_type: ColorType::Indexed,
bit_depth: BitDepth::Sixteen,
}
.into(),
))
} else {
Ok(if trns {
palette::create_expansion_into_rgba8(info)
} else {
palette::create_expansion_into_rgb8(info)
})
}
}
ColorType::Grayscale | ColorType::GrayscaleAlpha if bit_depth < 8 && expand => {
Ok(Box::new(if trns {
expand_gray_u8_with_trns
} else {
expand_gray_u8
}))
}
ColorType::Grayscale | ColorType::Rgb if expand && trns => {
Ok(Box::new(if bit_depth == 8 {
expand_trns_line
} else if strip16 {
expand_trns_and_strip_line16
} else {
assert_eq!(bit_depth, 16);
expand_trns_line16
}))
}
ColorType::Grayscale | ColorType::GrayscaleAlpha | ColorType::Rgb | ColorType::Rgba
if strip16 =>
{
Ok(Box::new(transform_row_strip16))
}
_ => Ok(Box::new(copy_row)),
}
}
fn copy_row(row: &[u8], output_buffer: &mut [u8], _: &Info) {
output_buffer.copy_from_slice(row);
}
fn transform_row_strip16(row: &[u8], output_buffer: &mut [u8], _: &Info) {
for i in 0..row.len() / 2 {
output_buffer[i] = row[2 * i];
}
}
#[inline(always)]
fn unpack_bits<F>(input: &[u8], output: &mut [u8], channels: usize, bit_depth: u8, func: F)
where
F: Fn(u8, &mut [u8]),
{
// Only [1, 2, 4, 8] are valid bit depths
assert!(matches!(bit_depth, 1 | 2 | 4 | 8));
// Check that `input` is capable of producing a buffer as long as `output`:
// number of shift lookups per bit depth * channels * input length
assert!((8 / bit_depth as usize * channels).saturating_mul(input.len()) >= output.len());
let mut buf_chunks = output.chunks_exact_mut(channels);
let mut iter = input.iter();
// `shift` iterates through the corresponding bit depth sequence:
// 1 => &[7, 6, 5, 4, 3, 2, 1, 0],
// 2 => &[6, 4, 2, 0],
// 4 => &[4, 0],
// 8 => &[0],
//
// `(0..8).step_by(bit_depth.into()).rev()` doesn't always optimize well so
// shifts are calculated instead. (2023-08, Rust 1.71)
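//
// Worked example (assumed input): with `bit_depth == 2`, the byte `0b11_01_00_10`
// unpacks to the pixel values [3, 1, 0, 2] via shifts [6, 4, 2, 0] and mask 0b11.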
if bit_depth == 8 {
for (&curr, chunk) in iter.zip(&mut buf_chunks) {
func(curr, chunk);
}
} else {
let mask = ((1u16 << bit_depth) - 1) as u8;
// These variables are initialized in the loop
let mut shift = -1;
let mut curr = 0;
for chunk in buf_chunks {
if shift < 0 {
shift = 8 - bit_depth as i32;
curr = *iter.next().expect("input for unpack bits is not empty");
}
let pixel = (curr >> shift) & mask;
func(pixel, chunk);
shift -= bit_depth as i32;
}
}
}
fn expand_trns_line(input: &[u8], output: &mut [u8], info: &Info) {
let channels = info.color_type.samples();
let trns = info.trns.as_deref();
for (input, output) in input
.chunks_exact(channels)
.zip(output.chunks_exact_mut(channels + 1))
{
output[..channels].copy_from_slice(input);
output[channels] = if Some(input) == trns { 0 } else { 0xFF };
}
}
fn expand_trns_line16(input: &[u8], output: &mut [u8], info: &Info) {
let channels = info.color_type.samples();
let trns = info.trns.as_deref();
for (input, output) in input
.chunks_exact(channels * 2)
.zip(output.chunks_exact_mut(channels * 2 + 2))
{
output[..channels * 2].copy_from_slice(input);
if Some(input) == trns {
output[channels * 2] = 0;
output[channels * 2 + 1] = 0
} else {
output[channels * 2] = 0xFF;
output[channels * 2 + 1] = 0xFF
};
}
}
fn expand_trns_and_strip_line16(input: &[u8], output: &mut [u8], info: &Info) {
let channels = info.color_type.samples();
let trns = info.trns.as_deref();
for (input, output) in input
.chunks_exact(channels * 2)
.zip(output.chunks_exact_mut(channels + 1))
{
for i in 0..channels {
output[i] = input[i * 2];
}
output[channels] = if Some(input) == trns { 0 } else { 0xFF };
}
}
fn expand_gray_u8(row: &[u8], buffer: &mut [u8], info: &Info) {
let scaling_factor = (255) / ((1u16 << info.bit_depth as u8) - 1) as u8;
unpack_bits(row, buffer, 1, info.bit_depth as u8, |val, chunk| {
chunk[0] = val * scaling_factor
});
}
fn expand_gray_u8_with_trns(row: &[u8], buffer: &mut [u8], info: &Info) {
let scaling_factor = (255) / ((1u16 << info.bit_depth as u8) - 1) as u8;
let trns = info.trns.as_deref();
unpack_bits(row, buffer, 2, info.bit_depth as u8, |pixel, chunk| {
chunk[1] = if let Some(trns) = trns {
if pixel == trns[0] {
0
} else {
0xFF
}
} else {
0xFF
};
chunk[0] = pixel * scaling_factor
});
}

361
vendor/png/src/decoder/transform/palette.rs vendored Normal file

@@ -0,0 +1,361 @@
//! Helpers for taking a slice of indices (indices into `PLTE` and/or `trNS`
//! entries) and transforming this into RGB or RGBA output.
//!
//! # Memoization
//!
//! To achieve higher throughput, `create_rgba_palette` combines entries from
//! `PLTE` and `trNS` chunks into a single lookup table. This is based on the
//! ideas explored in <https://crbug.com/706134>.
//!
//! Memoization is a trade-off:
//! * On one hand, memoization requires spending X ns before starting to call
//! `expand_paletted_...` functions.
//! * On the other hand, memoization improves the throughput of the
//! `expand_paletted_...` functions - they take Y ns less to process each byte
//!
//! Based on X and Y, we can try to calculate the breakeven point. It seems
//! that memoization is a net benefit for images bigger than around 13x13 pixels.
use super::{unpack_bits, TransformFn};
use crate::{BitDepth, Info};
pub fn create_expansion_into_rgb8(info: &Info) -> TransformFn {
let rgba_palette = create_rgba_palette(info);
if info.bit_depth == BitDepth::Eight {
Box::new(move |input, output, _info| expand_8bit_into_rgb8(input, output, &rgba_palette))
} else {
Box::new(move |input, output, info| expand_into_rgb8(input, output, info, &rgba_palette))
}
}
pub fn create_expansion_into_rgba8(info: &Info) -> TransformFn {
let rgba_palette = create_rgba_palette(info);
Box::new(move |input, output, info| {
expand_paletted_into_rgba8(input, output, info, &rgba_palette)
})
}
fn create_rgba_palette(info: &Info) -> [[u8; 4]; 256] {
let palette = info.palette.as_deref().expect("Caller should verify");
let trns = info.trns.as_deref().unwrap_or(&[]);
// > The tRNS chunk shall not contain more alpha values than there are palette
// entries, but a tRNS chunk may contain fewer values than there are palette
// entries. In this case, the alpha value for all remaining palette entries is
// assumed to be 255.
//
// It seems the accepted reading is to fully *ignore* an invalid tRNS, as if it were
// completely empty / all pixels non-transparent.
let trns = if trns.len() <= palette.len() / 3 {
trns
} else {
&[]
};
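// Worked example (assumed input): palette = [10, 20, 30, 40, 50, 60], trns = [7]
// yields rgba_palette[0] = [10, 20, 30, 7], rgba_palette[1] = [40, 50, 60, 0xFF],
// with all remaining entries left at the default [0, 0, 0, 0xFF].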
// Default to black, opaque entries.
let mut rgba_palette = [[0, 0, 0, 0xFF]; 256];
// Copy `palette` (RGB) entries into `rgba_palette`. This may clobber alpha
// values in `rgba_palette` - we need to fix this later.
{
let mut palette_iter = palette;
let mut rgba_iter = &mut rgba_palette[..];
while palette_iter.len() >= 4 {
// Copying 4 bytes at a time is more efficient than copying 3.
// OTOH, this clobbers the alpha value in `rgba_iter[0][3]` - we
// need to fix this later.
rgba_iter[0].copy_from_slice(&palette_iter[0..4]);
palette_iter = &palette_iter[3..];
rgba_iter = &mut rgba_iter[1..];
}
if !palette_iter.is_empty() {
rgba_iter[0][0..3].copy_from_slice(&palette_iter[0..3]);
}
}
// Copy `trns` (alpha) entries into `rgba_palette`. `trns.len()` may be
// smaller than `palette.len()` and therefore this is not sufficient to fix
// all the clobbered alpha values.
for (alpha, rgba) in trns.iter().copied().zip(rgba_palette.iter_mut()) {
rgba[3] = alpha;
}
// Unclobber the remaining alpha values.
for rgba in rgba_palette[trns.len()..(palette.len() / 3)].iter_mut() {
rgba[3] = 0xFF;
}
rgba_palette
}
fn expand_8bit_into_rgb8(mut input: &[u8], mut output: &mut [u8], rgba_palette: &[[u8; 4]; 256]) {
while output.len() >= 4 {
// Copying 4 bytes at a time is more efficient than 3.
let rgba = &rgba_palette[input[0] as usize];
output[0..4].copy_from_slice(rgba);
input = &input[1..];
output = &mut output[3..];
}
if !output.is_empty() {
let rgba = &rgba_palette[input[0] as usize];
output[0..3].copy_from_slice(&rgba[0..3]);
}
}
fn expand_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info, rgba_palette: &[[u8; 4]; 256]) {
unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
let rgba = &rgba_palette[i as usize];
chunk[0] = rgba[0];
chunk[1] = rgba[1];
chunk[2] = rgba[2];
})
}
fn expand_paletted_into_rgba8(
row: &[u8],
buffer: &mut [u8],
info: &Info,
rgba_palette: &[[u8; 4]; 256],
) {
unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
chunk.copy_from_slice(&rgba_palette[i as usize]);
});
}
#[cfg(test)]
mod test {
use crate::{BitDepth, ColorType, Info, Transformations};
/// Old, non-memoized version of the code is used as a test oracle.
fn oracle_expand_paletted_into_rgb8(row: &[u8], buffer: &mut [u8], info: &Info) {
let palette = info.palette.as_deref().expect("Caller should verify");
let black = [0, 0, 0];
super::unpack_bits(row, buffer, 3, info.bit_depth as u8, |i, chunk| {
let rgb = palette
.get(3 * i as usize..3 * i as usize + 3)
.unwrap_or(&black);
chunk[0] = rgb[0];
chunk[1] = rgb[1];
chunk[2] = rgb[2];
})
}
/// Old, non-memoized version of the code is used as a test oracle.
fn oracle_expand_paletted_into_rgba8(row: &[u8], buffer: &mut [u8], info: &Info) {
let palette = info.palette.as_deref().expect("Caller should verify");
let trns = info.trns.as_deref().unwrap_or(&[]);
let black = [0, 0, 0];
// > The tRNS chunk shall not contain more alpha values than there are palette
// entries, but a tRNS chunk may contain fewer values than there are palette
// entries. In this case, the alpha value for all remaining palette entries is
// assumed to be 255.
//
// It seems the accepted reading is to fully *ignore* an invalid tRNS, as if it were
// completely empty / all pixels non-transparent.
let trns = if trns.len() <= palette.len() / 3 {
trns
} else {
&[]
};
super::unpack_bits(row, buffer, 4, info.bit_depth as u8, |i, chunk| {
let (rgb, a) = (
palette
.get(3 * i as usize..3 * i as usize + 3)
.unwrap_or(&black),
*trns.get(i as usize).unwrap_or(&0xFF),
);
chunk[0] = rgb[0];
chunk[1] = rgb[1];
chunk[2] = rgb[2];
chunk[3] = a;
});
}
fn create_info<'a>(src_bit_depth: u8, palette: &'a [u8], trns: Option<&'a [u8]>) -> Info<'a> {
Info {
color_type: ColorType::Indexed,
bit_depth: BitDepth::from_u8(src_bit_depth).unwrap(),
palette: Some(palette.into()),
trns: trns.map(Into::into),
..Info::default()
}
}
fn expand_paletted(
src: &[u8],
src_bit_depth: u8,
palette: &[u8],
trns: Option<&[u8]>,
) -> Vec<u8> {
let info = create_info(src_bit_depth, palette, trns);
let output_bytes_per_input_sample = match trns {
None => 3,
Some(_) => 4,
};
let samples_count_per_byte = (8 / src_bit_depth) as usize;
let samples_count = src.len() * samples_count_per_byte;
let mut dst = vec![0; samples_count * output_bytes_per_input_sample];
let transform_fn =
super::super::create_transform_fn(&info, Transformations::EXPAND).unwrap();
transform_fn(src, dst.as_mut_slice(), &info);
{
// Compare the memoization-based calculations with the old, non-memoized code.
let mut simple_dst = vec![0; samples_count * output_bytes_per_input_sample];
if trns.is_none() {
oracle_expand_paletted_into_rgb8(src, &mut simple_dst, &info)
} else {
oracle_expand_paletted_into_rgba8(src, &mut simple_dst, &info)
}
assert_eq!(&dst, &simple_dst);
}
dst
}
#[test]
fn test_expand_paletted_rgba_8bit() {
let actual = expand_paletted(
&[0, 1, 2, 3], // src
8, // src_bit_depth
&[
// palette
0, 1, 2, // entry #0
4, 5, 6, // entry #1
8, 9, 10, // entry #2
12, 13, 14, // entry #3
],
Some(&[3, 7, 11, 15]), // trns
);
assert_eq!(actual, (0..16).collect::<Vec<u8>>());
}
#[test]
fn test_expand_paletted_rgb_8bit() {
let actual = expand_paletted(
&[0, 1, 2, 3], // src
8, // src_bit_depth
&[
// palette
0, 1, 2, // entry #0
3, 4, 5, // entry #1
6, 7, 8, // entry #2
9, 10, 11, // entry #3
],
None, // trns
);
assert_eq!(actual, (0..12).collect::<Vec<u8>>());
}
#[test]
fn test_expand_paletted_rgba_4bit() {
let actual = expand_paletted(
&[0x01, 0x23], // src
4, // src_bit_depth
&[
// palette
0, 1, 2, // entry #0
4, 5, 6, // entry #1
8, 9, 10, // entry #2
12, 13, 14, // entry #3
],
Some(&[3, 7, 11, 15]), // trns
);
assert_eq!(actual, (0..16).collect::<Vec<u8>>());
}
#[test]
fn test_expand_paletted_rgb_4bit() {
let actual = expand_paletted(
&[0x01, 0x23], // src
4, // src_bit_depth
&[
// palette
0, 1, 2, // entry #0
3, 4, 5, // entry #1
6, 7, 8, // entry #2
9, 10, 11, // entry #3
],
None, // trns
);
assert_eq!(actual, (0..12).collect::<Vec<u8>>());
}
#[test]
fn test_expand_paletted_rgba_8bit_more_trns_entries_than_palette_entries() {
let actual = expand_paletted(
&[0, 1, 2, 3], // src
8, // src_bit_depth
&[
// palette
0, 1, 2, // entry #0
4, 5, 6, // entry #1
8, 9, 10, // entry #2
12, 13, 14, // entry #3
],
Some(&[123; 5]), // trns
);
// Invalid (too-long) `trns` means that we'll use 0xFF / opaque alpha everywhere.
assert_eq!(
actual,
vec![0, 1, 2, 0xFF, 4, 5, 6, 0xFF, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
);
}
#[test]
fn test_expand_paletted_rgba_8bit_less_trns_entries_than_palette_entries() {
let actual = expand_paletted(
&[0, 1, 2, 3], // src
8, // src_bit_depth
&[
// palette
0, 1, 2, // entry #0
4, 5, 6, // entry #1
8, 9, 10, // entry #2
12, 13, 14, // entry #3
],
Some(&[3, 7]), // trns
);
// Too-short `trns` is treated differently from too-long - only missing entries are
// replaced with 0xFF / opaque.
assert_eq!(
actual,
vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0xFF, 12, 13, 14, 0xFF],
);
}
#[test]
fn test_create_rgba_palette() {
fn create_expected_rgba_palette(plte: &[u8], trns: &[u8]) -> [[u8; 4]; 256] {
let mut rgba = [[1, 2, 3, 4]; 256];
for (i, rgba) in rgba.iter_mut().enumerate() {
rgba[0] = plte.get(i * 3 + 0).map(|&r| r).unwrap_or(0);
rgba[1] = plte.get(i * 3 + 1).map(|&g| g).unwrap_or(0);
rgba[2] = plte.get(i * 3 + 2).map(|&b| b).unwrap_or(0);
rgba[3] = trns.get(i * 1 + 0).map(|&a| a).unwrap_or(0xFF);
}
rgba
}
for plte_len in 1..=32 {
for trns_len in 0..=plte_len {
let plte: Vec<u8> = (0..plte_len * 3).collect();
let trns: Vec<u8> = (0..trns_len).map(|alpha| alpha + 200).collect();
let info = create_info(8, &plte, Some(&trns));
let expected = create_expected_rgba_palette(&plte, &trns);
let actual = super::create_rgba_palette(&info);
assert_eq!(actual, expected);
}
}
}
}

230
vendor/png/src/decoder/unfiltering_buffer.rs vendored Normal file

@@ -0,0 +1,230 @@
use super::stream::{DecodingError, FormatErrorInner};
use super::zlib::UnfilterBuf;
use crate::common::BytesPerPixel;
use crate::filter::{unfilter, RowFilter};
use crate::Info;
// Buffer for temporarily holding decompressed, not-yet-`unfilter`-ed rows.
pub(crate) struct UnfilteringBuffer {
/// Vec containing the uncompressed image data currently being processed.
data_stream: Vec<u8>,
/// Index in `data_stream` where the previous row starts.
/// This excludes the filter type byte - it points at the first byte of actual pixel data.
/// The pixel data is already-`unfilter`-ed.
///
/// If `prev_start == current_start` then it means that there is no previous row.
prev_start: usize,
/// Index in `data_stream` where the current row starts.
/// This points at the filter type byte of the current row (i.e. the actual pixel data starts at `current_start + 1`)
/// The pixel data is not-yet-`unfilter`-ed.
///
/// `current_start` can wrap around the length.
current_start: usize,
/// Logical length of data that must be preserved.
filled: usize,
/// Length of data that can be modified.
available: usize,
/// The number of bytes before we shift the buffer back.
shift_back_limit: usize,
}
impl UnfilteringBuffer {
pub const GROWTH_BYTES: usize = 8 * 1024;
/// Asserts in debug builds that all the invariants hold. No-op in release
/// builds. Intended to be called after creating or mutating `self` to
/// ensure that the final state preserves the invariants.
fn debug_assert_invariants(&self) {
debug_assert!(self.prev_start <= self.current_start);
debug_assert!(self.current_start <= self.available);
debug_assert!(self.available <= self.filled);
debug_assert!(self.filled <= self.data_stream.len());
}
/// Create a buffer tuned for filtering rows of the image type.
pub fn new(info: &Info<'_>) -> Self {
// We don't need all of `info` here so if that becomes a structural problem then these
// derived constants can be extracted into a parameter struct. For instance they may be
// adjusted according to platform hardware such as cache sizes.
let data_stream_capacity = {
let max_data = info
.checked_raw_row_length()
// In the current state this is really dependent on IDAT sizes and the compression
// settings. We aim to avoid overallocation here, but that occurs in part due to
// the algorithm for draining the buffer, which at the time of writing is at each
// individual IDAT chunk boundary. So this is set for a square image roughly
// fitting into a single 4k chunk after compression. A very arbitrary choice made
// from (probably overfitting) a benchmark of that image size. With a different
// algorithm we may come to different buffer uses and have to re-evaluate.
.and_then(|v| v.checked_mul(info.height.min(128) as usize))
// In the worst case this is additional room for use of unmasked SIMD moves. But
// the other idea here is that the allocator generally aligns the buffer.
.and_then(|v| checked_next_multiple_of(v, 256))
.unwrap_or(usize::MAX);
// We do not want to pre-allocate too much in case of a faulty image (no DOS by
// pretending to be very very large) and also we want to avoid allocating more data
// than we need for the image itself.
max_data.min(128 * 1024)
};
let shift_back_limit = {
// Prefer shifting by powers of two and only after having done some number of
// lines that then become free at the end of the buffer.
let rowlen_pot = info
.checked_raw_row_length()
// Ensure some number of rows are actually present before shifting back, i.e. next
// time around we want to be able to decode them without reallocating the buffer.
.and_then(|v| v.checked_mul(4))
// And also, we should be able to use an aligned memcpy on the whole thing. Well, at
// least that is the idea, but the parameter comes down to benchmarking. Higher numbers
// did not result in performance gains, but neither did lower ones, so this is fickle. Maybe
// our shift-back behavior cannot be tuned very well.
.and_then(|v| checked_next_multiple_of(v, 64))
.unwrap_or(isize::MAX as usize);
// But never shift back before we have a number of pages freed.
rowlen_pot.max(128 * 1024)
};
let result = Self {
data_stream: Vec::with_capacity(data_stream_capacity),
prev_start: 0,
current_start: 0,
filled: 0,
available: 0,
shift_back_limit,
};
result.debug_assert_invariants();
result
}
/// Called to indicate that there is no previous row (e.g. when the current
/// row is the first scanline of a given Adam7 pass).
pub fn reset_prev_row(&mut self) {
self.prev_start = self.current_start;
self.debug_assert_invariants();
}
pub fn reset_all(&mut self) {
self.data_stream.clear();
self.prev_start = 0;
self.current_start = 0;
self.filled = 0;
self.available = 0;
}
/// Returns the previous (already `unfilter`-ed) row.
pub fn prev_row(&self) -> &[u8] {
&self.data_stream[self.prev_start..self.current_start]
}
/// Returns how many bytes of the current row are present in the buffer.
pub fn curr_row_len(&self) -> usize {
self.available - self.current_start
}
/// Returns an [`UnfilterBuf`] view of the internal buffer, suitable for passing to
/// `ReadDecoder.decode_image_data` or `StreamingDecoder.update`.
///
/// Invariants of `self` depend on the assumption that the caller will only
/// append new bytes to the returned vector (which is indeed the behavior of
/// `ReadDecoder` and `StreamingDecoder`). TODO: Consider protecting the
/// invariants by returning an append-only view of the vector
/// (`FnMut(&[u8])`??? or maybe `std::io::Write`???).
pub fn as_unfilled_buffer(&mut self) -> UnfilterBuf<'_> {
if self.prev_start >= self.shift_back_limit
// Avoid the shift back if the buffer is still very empty. Consider how we got here: a
// previous decompression filled the buffer, then we unfiltered, we're now refilling
// the buffer again. The condition implies, the previous decompression filled at most
// half the buffer. Likely the same will happen again so the following decompression
// attempt will not yet be limited by the buffer length.
&& self.filled >= self.data_stream.len() / 2
{
// We have to relocate the data to the start of the buffer. Benchmarking suggests that
// the codegen for an unbounded range is better / different than the one for a bounded
// range. We prefer the former if the data overhead is not too high. `16` was
// determined experimentally and might be system (memory) dependent. There's also the
// question of whether we could be a little smarter and avoid crossing page boundaries when
// that is not required. Alas, microbenchmarking TBD.
if let Some(16..) = self.data_stream.len().checked_sub(self.filled) {
self.data_stream
.copy_within(self.prev_start..self.filled, 0);
} else {
self.data_stream.copy_within(self.prev_start.., 0);
}
// The data kept its position relative to `filled`, which now lands exactly at
// the old distance between `prev_start` and `filled`.
self.current_start -= self.prev_start;
self.available -= self.prev_start;
self.filled -= self.prev_start;
self.prev_start = 0;
}
if self.filled + Self::GROWTH_BYTES > self.data_stream.len() {
self.data_stream.resize(self.filled + Self::GROWTH_BYTES, 0);
}
UnfilterBuf {
buffer: &mut self.data_stream,
filled: &mut self.filled,
available: &mut self.available,
}
}
/// Runs `unfilter` on the current row, and then shifts rows so that the current row becomes the previous row.
///
/// Will panic if `self.curr_row_len() < rowlen`.
pub fn unfilter_curr_row(
&mut self,
rowlen: usize,
bpp: BytesPerPixel,
) -> Result<(), DecodingError> {
debug_assert!(rowlen >= 2); // 1 byte for `FilterType` and at least 1 byte of pixel data.
let (prev, row) = self.data_stream.split_at_mut(self.current_start);
let prev: &[u8] = &prev[self.prev_start..];
debug_assert!(prev.is_empty() || prev.len() == (rowlen - 1));
// Get the filter type.
let filter = RowFilter::from_u8(row[0]).ok_or(DecodingError::Format(
FormatErrorInner::UnknownFilterMethod(row[0]).into(),
))?;
let row = &mut row[1..rowlen];
unfilter(filter, bpp, prev, row);
self.prev_start = self.current_start + 1;
self.current_start += rowlen;
self.debug_assert_invariants();
Ok(())
}
}
fn checked_next_multiple_of(val: usize, factor: usize) -> Option<usize> {
if factor == 0 {
return None;
}
let remainder = val % factor;
if remainder > 0 {
val.checked_add(factor - remainder)
} else {
Some(val)
}
}
#[test]
fn next_multiple_of_backport_testsuite() {
assert_eq!(checked_next_multiple_of(1, 0), None);
assert_eq!(checked_next_multiple_of(2, 0), None);
assert_eq!(checked_next_multiple_of(1, 2), Some(2));
assert_eq!(checked_next_multiple_of(2, 2), Some(2));
assert_eq!(checked_next_multiple_of(2, 5), Some(5));
assert_eq!(checked_next_multiple_of(1, usize::MAX), Some(usize::MAX));
assert_eq!(checked_next_multiple_of(usize::MAX, 2), None);
}

213
vendor/png/src/decoder/zlib.rs vendored Normal file
View File

@@ -0,0 +1,213 @@
use super::{stream::FormatErrorInner, unfiltering_buffer::UnfilteringBuffer, DecodingError};
use fdeflate::Decompressor;
/// An in-place buffer for decompression and filtering of PNG rowlines.
///
/// The underlying data structure is a vector, with additional markers denoting a region of bytes
/// that are utilized by the decompression but not yet available to arbitrary modifications. The
/// caller can still shift around data between calls to the stream decompressor as long as the data
/// in the marked region is not modified and the indices adjusted accordingly. See
/// [`UnfilterRegion`] that contains these markers.
///
/// Violating the invariants, i.e. modifying bytes in the marked region, results in absurdly wacky
/// decompression output or panics, but not undefined behavior.
pub struct UnfilterBuf<'data> {
/// The data container. Starts with arbitrary data unrelated to the decoder, then a slice of
/// decoder-private data, followed by free space for further decoder output. The regions are delimited
/// by `filled` and `available` which must be updated accordingly.
pub(crate) buffer: &'data mut Vec<u8>,
/// Where we record changes to the out position.
pub(crate) filled: &'data mut usize,
/// Where we record changes to the number of available bytes.
pub(crate) available: &'data mut usize,
}
/// A region into a buffer utilized as an [`UnfilterBuf`].
///
/// The span of data denoted by `available..filled` is the region of bytes that must be preserved
/// for use by the decompression algorithm. It may be moved, e.g. by subtracting the same amount
/// from both of these fields. Always ensure that `available <= filled`; the library does not
/// violate this invariant when modifying this struct as an [`UnfilterBuf`].
#[derive(Default, Clone, Copy)]
pub struct UnfilterRegion {
/// The past-the-end index of bytes that are allowed to be modified.
pub available: usize,
/// The past-the-end index of bytes that have been written to.
pub filled: usize,
}
/// Ergonomics wrapper around `fdeflate::Decompressor` for zlib compressed data.
pub(super) struct ZlibStream {
/// Current decoding state.
state: Box<fdeflate::Decompressor>,
/// If there has been a call to decompress already.
started: bool,
/// Ignore and do not calculate the Adler-32 checksum. Defaults to `true`.
///
/// This flag should not be modified after decompression has started.
ignore_adler32: bool,
}
impl ZlibStream {
// [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#10Compression) says that
// "deflate/inflate compression with a sliding window (which is an upper bound on the
// distances appearing in the deflate stream) of at most 32768 bytes".
//
// `fdeflate` requires that we keep this many most recently decompressed bytes in the
// `out_buffer` - this allows referring back to them when handling "length and
// distance codes" in the deflate stream.
const LOOKBACK_SIZE: usize = 32768;
pub(crate) fn new() -> Self {
ZlibStream {
state: Box::new(Decompressor::new()),
started: false,
ignore_adler32: true,
}
}
pub(crate) fn reset(&mut self) {
self.started = false;
*self.state = Decompressor::new();
}
/// Set the `ignore_adler32` flag and return `true` if the flag was
/// successfully set.
///
/// The default is `true`.
///
/// This flag cannot be modified after decompression has started until the
/// [ZlibStream] is reset.
pub(crate) fn set_ignore_adler32(&mut self, flag: bool) -> bool {
if !self.started {
self.ignore_adler32 = flag;
true
} else {
false
}
}
/// Return the `ignore_adler32` flag.
pub(crate) fn ignore_adler32(&self) -> bool {
self.ignore_adler32
}
/// Fill the decoded buffer as far as possible from `data`.
/// On success returns the number of consumed input bytes.
pub(crate) fn decompress(
&mut self,
data: &[u8],
image_data: &mut UnfilterBuf<'_>,
) -> Result<usize, DecodingError> {
// There may be more data past the adler32 checksum at the end of the deflate stream. We
// match libpng's default behavior and ignore any trailing data. In the future we may want
// to add a flag to control this behavior.
if self.state.is_done() {
return Ok(data.len());
}
if !self.started && self.ignore_adler32 {
self.state.ignore_adler32();
}
let (buffer, filled) = image_data.borrow_mut();
let output_limit = (filled + UnfilteringBuffer::GROWTH_BYTES).min(buffer.len());
let (in_consumed, out_consumed) = self
.state
.read(data, &mut buffer[..output_limit], filled, false)
.map_err(|err| {
DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
})?;
self.started = true;
let filled = filled + out_consumed;
image_data.filled(filled);
if self.state.is_done() {
image_data.commit(filled);
} else {
// See [`Self::LOOKBACK_SIZE`].
image_data.commit(filled.saturating_sub(Self::LOOKBACK_SIZE));
}
Ok(in_consumed)
}
/// Called after all consecutive IDAT chunks were handled.
///
/// The compressed stream can be split on arbitrary byte boundaries. This enables some cleanup
/// within the decompressor and flushes additional data which may have been kept back in case
/// more data was going to be passed to it.
pub(crate) fn finish_compressed_chunks(
&mut self,
image_data: &mut UnfilterBuf<'_>,
) -> Result<(), DecodingError> {
if !self.started {
return Ok(());
}
if self.state.is_done() {
// We can end up here only after the [`decompress`] call above has detected the state
// to be done, too. In this case the filled and committed amount of data are already
// equal to each other. So neither of them needs to be touched in any way.
return Ok(());
}
let (_, mut filled) = image_data.borrow_mut();
while !self.state.is_done() {
let (buffer, _) = image_data.borrow_mut();
let (_in_consumed, out_consumed) =
self.state.read(&[], buffer, filled, true).map_err(|err| {
DecodingError::Format(FormatErrorInner::CorruptFlateStream { err }.into())
})?;
filled += out_consumed;
if !self.state.is_done() {
image_data.flush_allocate();
}
}
image_data.filled(filled);
image_data.commit(filled);
Ok(())
}
}
impl UnfilterRegion {
/// Use this region to decompress new filtered rowline data.
///
/// Pass the wrapped buffer to
/// [`StreamingDecoder::update`][`super::stream::StreamingDecoder::update`] to fill it with
/// data and update the region indices.
pub fn as_buf<'data>(&'data mut self, buffer: &'data mut Vec<u8>) -> UnfilterBuf<'data> {
UnfilterBuf {
buffer,
filled: &mut self.filled,
available: &mut self.available,
}
}
}
impl UnfilterBuf<'_> {
pub(crate) fn borrow_mut(&mut self) -> (&mut [u8], usize) {
(self.buffer, *self.filled)
}
pub(crate) fn filled(&mut self, filled: usize) {
*self.filled = filled;
}
pub(crate) fn commit(&mut self, howmany: usize) {
*self.available = howmany;
}
pub(crate) fn flush_allocate(&mut self) {
let len = self.buffer.len() + 32 * 1024;
self.buffer.resize(len, 0);
}
}
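// A minimal, test-only sketch (not part of the upstream crate) of the region/buffer
// protocol documented above: the caller owns the `Vec<u8>` and an `UnfilterRegion`,
// wraps both into an `UnfilterBuf` for every decompression call, and afterwards may
// read the committed bytes below `available`. It assumes `fdeflate::compress_to_vec`
// emits a complete zlib stream.
#[test]
fn unfilter_region_roundtrip_sketch() {
    let payload = vec![42u8; 1024];
    let compressed = fdeflate::compress_to_vec(&payload);
    let mut stream = ZlibStream::new();
    let mut buffer = Vec::new();
    let mut region = UnfilterRegion::default();
    let mut input = &compressed[..];
    loop {
        let mut buf = region.as_buf(&mut buffer);
        // Ensure there is room for more output before each call; the real decoder
        // does this via `UnfilteringBuffer::as_unfilled_buffer`.
        buf.flush_allocate();
        let consumed = stream.decompress(input, &mut buf).unwrap();
        input = &input[consumed..];
        if input.is_empty() {
            break;
        }
    }
    // Flush the lookback window kept by the decompressor (see `LOOKBACK_SIZE`).
    stream
        .finish_compressed_chunks(&mut region.as_buf(&mut buffer))
        .unwrap();
    // Everything below `available` is committed output.
    assert_eq!(&buffer[..region.available], &payload[..]);
}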

2559
vendor/png/src/encoder.rs vendored Normal file

File diff suppressed because it is too large

1000
vendor/png/src/filter.rs vendored Normal file

File diff suppressed because it is too large

96
vendor/png/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,96 @@
//! # PNG encoder and decoder
//!
//! This crate contains a PNG encoder and decoder. It supports reading of single lines or whole frames.
//!
//! ## The decoder
//!
//! The most important types for decoding purposes are [`Decoder`] and
//! [`Reader`]. They both wrap a [`std::io::Read`].
//! `Decoder` serves as a builder for `Reader`. Calling [`Decoder::read_info`] reads from the `Read` until the
//! image data is reached.
//!
//! ### Using the decoder
//! ```
//! use std::fs::File;
//! use std::io::BufReader;
//! // The decoder is a builder for `Reader` and can be used to set various decoding options
//! // via `Transformations`. The default output transformation is `Transformations::IDENTITY`.
//! let decoder = png::Decoder::new(BufReader::new(File::open("tests/pngsuite/basi0g01.png").unwrap()));
//! let mut reader = decoder.read_info().unwrap();
//! // Allocate the output buffer.
//! let mut buf = vec![0; reader.output_buffer_size().unwrap()];
//! // Read the next frame. An APNG might contain multiple frames.
//! let info = reader.next_frame(&mut buf).unwrap();
//! // Grab the bytes of the image.
//! let bytes = &buf[..info.buffer_size()];
//! // Inspect more details of the last read frame.
//! let in_animation = reader.info().frame_control.is_some();
//! ```
//!
//! ## Encoder
//! ### Using the encoder
//!
//! ```no_run
//! // For reading and opening files
//! use std::path::Path;
//! use std::fs::File;
//! use std::io::BufWriter;
//!
//! let path = Path::new(r"/path/to/image.png");
//! let file = File::create(path).unwrap();
//! let ref mut w = BufWriter::new(file);
//!
//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
//! encoder.set_color(png::ColorType::Rgba);
//! encoder.set_depth(png::BitDepth::Eight);
//! encoder.set_source_gamma(png::ScaledFloat::from_scaled(45455)); // 1.0 / 2.2, scaled by 100000
//! encoder.set_source_gamma(png::ScaledFloat::new(1.0 / 2.2)); // 1.0 / 2.2, unscaled, but rounded
//! let source_chromaticities = png::SourceChromaticities::new( // Using unscaled instantiation here
//! (0.31270, 0.32900),
//! (0.64000, 0.33000),
//! (0.30000, 0.60000),
//! (0.15000, 0.06000)
//! );
//! encoder.set_source_chromaticities(source_chromaticities);
//! let mut writer = encoder.write_header().unwrap();
//!
//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing an RGBA sequence. First pixel is red and second pixel is black.
//! writer.write_image_data(&data).unwrap(); // Save
//! ```
//!
#![forbid(unsafe_code)]
// Silence certain clippy warnings until our MSRV is higher.
//
// The #[default] attribute was stabilized in Rust 1.62.0.
#![allow(clippy::derivable_impls)]
// IIUC format args capture was stabilized in Rust 1.58.1.
#![allow(clippy::uninlined_format_args)]
mod adam7;
pub mod chunk;
mod common;
mod decoder;
mod encoder;
mod filter;
mod srgb;
pub mod text_metadata;
mod traits;
pub use crate::adam7::{
expand_pass as expand_interlaced_row, expand_pass_splat as splat_interlaced_row,
};
pub use crate::adam7::{Adam7Info, Adam7Variant};
pub use crate::common::*;
pub use crate::decoder::stream::{DecodeOptions, Decoded, DecodingError, StreamingDecoder};
pub use crate::decoder::{Decoder, InterlaceInfo, InterlacedRow, Limits, OutputInfo, Reader};
pub use crate::decoder::{UnfilterBuf, UnfilterRegion};
pub use crate::encoder::{Encoder, EncodingError, StreamWriter, Writer};
pub use crate::filter::Filter;
#[cfg(test)]
pub(crate) mod test_utils;
#[cfg(feature = "benchmarks")]
pub mod benchable_apis;

30
vendor/png/src/srgb.rs vendored Normal file
View File

@@ -0,0 +1,30 @@
use crate::{ScaledFloat, SourceChromaticities};
/// Get the gamma that should be substituted for images conforming to the sRGB color space.
pub fn substitute_gamma() -> ScaledFloat {
// Value taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
ScaledFloat::from_scaled(45455)
}
/// Get the chromaticities that should be substituted for images conforming to the sRGB color space.
pub fn substitute_chromaticities() -> SourceChromaticities {
// Values taken from https://www.w3.org/TR/2003/REC-PNG-20031110/#11sRGB
SourceChromaticities {
white: (
ScaledFloat::from_scaled(31270),
ScaledFloat::from_scaled(32900),
),
red: (
ScaledFloat::from_scaled(64000),
ScaledFloat::from_scaled(33000),
),
green: (
ScaledFloat::from_scaled(30000),
ScaledFloat::from_scaled(60000),
),
blue: (
ScaledFloat::from_scaled(15000),
ScaledFloat::from_scaled(6000),
),
}
}
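// A small test-only sketch: the substituted sRGB gamma corresponds to the scaled
// integer 45455 (i.e. 1/2.2 scaled by 100000). `ScaledFloat::into_scaled` is
// assumed here to be the accessor returning that raw scaled value.
#[test]
fn srgb_substitute_values_sketch() {
    assert_eq!(substitute_gamma().into_scaled(), 45455);
    let chroma = substitute_chromaticities();
    assert_eq!(chroma.white.0.into_scaled(), 31270);
}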

117
vendor/png/src/test_utils.rs vendored Normal file
View File

@@ -0,0 +1,117 @@
//! A set of test utilities.
//!
//! There is some overlap between this module and `src/encoder.rs` module, but:
//!
//! * This module (unlike `src/encoder.rs`) performs no validation of the data being written - this
//! allows building testcases that use arbitrary, potentially invalid PNGs as input.
//! * This module can be reused from `benches/decoder.rs` (a separate crate).
use byteorder::WriteBytesExt;
use std::io::Write;
/// Generates a store-only, non-compressed image:
///
/// * `00` compression mode (i.e. `BTYPE` = `00` = no compression) is used
/// * No filter is applied to the image rows
///
/// Currently the image always has the following properties:
///
/// * Single `IDAT` chunk
/// * Zlib chunks of maximum possible size
/// * 8-bit RGBA
///
/// These images are somewhat artificial, but may be useful for benchmarking performance of parts
/// outside of the `fdeflate` crate and/or the `unfilter` function (e.g. these images were originally
/// used to evaluate changes to minimize copying of image pixels between various buffers - see
/// [this
/// discussion](https://github.com/image-rs/image-png/discussions/416#discussioncomment-7436871)
/// for more details).
pub fn write_noncompressed_png(w: &mut impl Write, size: u32, idat_bytes: usize) {
write_png_sig(w);
write_rgba8_ihdr_with_width(w, size);
write_rgba8_idats(w, size, idat_bytes);
write_iend(w);
}
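// A test-only sketch of the helper above: the store-only PNG it produces should
// decode back to a `size` x `size` RGBA8 image via the crate's own `Decoder`,
// reusing the decoding steps shown in the crate-level docs.
#[test]
fn noncompressed_png_roundtrip_sketch() {
    let mut png = Vec::new();
    write_noncompressed_png(&mut png, 8, 1024);
    let decoder = crate::Decoder::new(std::io::Cursor::new(png));
    let mut reader = decoder.read_info().unwrap();
    let mut buf = vec![0; reader.output_buffer_size().unwrap()];
    let info = reader.next_frame(&mut buf).unwrap();
    assert_eq!((info.width, info.height), (8, 8));
}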
/// Writes PNG signature.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Structure.html#PNG-file-signature
pub fn write_png_sig(w: &mut impl Write) {
const SIG: [u8; 8] = [137, 80, 78, 71, 13, 10, 26, 10];
w.write_all(&SIG).unwrap();
}
/// Writes an arbitrary PNG chunk.
pub fn write_chunk(w: &mut impl Write, chunk_type: &[u8], data: &[u8]) {
assert_eq!(chunk_type.len(), 4);
let crc = {
let input = chunk_type
.iter()
.copied()
.chain(data.iter().copied())
.collect::<Vec<_>>();
crc32fast::hash(input.as_slice())
};
w.write_u32::<byteorder::BigEndian>(data.len() as u32)
.unwrap();
w.write_all(chunk_type).unwrap();
w.write_all(data).unwrap();
w.write_u32::<byteorder::BigEndian>(crc).unwrap();
}
/// Writes an IHDR chunk that describes a non-interlaced RGBA8 image whose height equals its
/// `width`. See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IHDR
pub fn write_rgba8_ihdr_with_width(w: &mut impl Write, width: u32) {
let mut data = Vec::new();
data.write_u32::<byteorder::BigEndian>(width).unwrap();
data.write_u32::<byteorder::BigEndian>(width).unwrap(); // height
data.write_u8(8).unwrap(); // bit depth = always 8-bits per channel
data.write_u8(6).unwrap(); // color type = color + alpha
data.write_u8(0).unwrap(); // compression method (0 is the only allowed value)
data.write_u8(0).unwrap(); // filter method (0 is the only allowed value)
data.write_u8(0).unwrap(); // interlace method = no interlacing
write_chunk(w, b"IHDR", &data);
}
/// Generates an RGBA8 `width` x `height` image and wraps it in a store-only zlib container.
pub fn generate_rgba8_with_width_and_height(width: u32, height: u32) -> Vec<u8> {
// Generate arbitrary test pixels.
let image_pixels = {
let mut row = Vec::new();
row.write_u8(0).unwrap(); // filter = no filter
let row_pixels = (0..width).flat_map(|i| {
let color: u8 = (i * 255 / width) as u8;
let alpha: u8 = 0xff;
[color, 255 - color, color / 2, alpha]
});
row.extend(row_pixels);
std::iter::repeat(row)
.take(height as usize)
.flatten()
.collect::<Vec<_>>()
};
let mut zlib_data = Vec::new();
let mut store_only_compressor =
fdeflate::StoredOnlyCompressor::new(std::io::Cursor::new(&mut zlib_data)).unwrap();
store_only_compressor.write_data(&image_pixels).unwrap();
store_only_compressor.finish().unwrap();
zlib_data
}
/// Writes an IDAT chunk.
pub fn write_rgba8_idats(w: &mut impl Write, size: u32, idat_bytes: usize) {
let data = generate_rgba8_with_width_and_height(size, size);
for chunk in data.chunks(idat_bytes) {
write_chunk(w, b"IDAT", chunk);
}
}
/// Writes an IEND chunk.
/// See http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html#C.IEND
pub fn write_iend(w: &mut impl Write) {
write_chunk(w, b"IEND", &[]);
}
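// A test-only sketch of the chunk layout `write_chunk` produces: a big-endian
// length, the 4-byte chunk type, the payload, and a CRC-32 over type + payload.
// For the empty IEND chunk that CRC is the well-known constant 0xAE426082.
#[test]
fn write_chunk_iend_layout_sketch() {
    let mut out = Vec::new();
    write_iend(&mut out);
    assert_eq!(
        out,
        [0, 0, 0, 0, b'I', b'E', b'N', b'D', 0xAE, 0x42, 0x60, 0x82]
    );
}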

582
vendor/png/src/text_metadata.rs vendored Normal file
View File

@@ -0,0 +1,582 @@
//! # Text chunks (tEXt/zTXt/iTXt) structs and functions
//!
//! The [PNG spec](https://www.w3.org/TR/2003/REC-PNG-20031110/#11textinfo) optionally allows for
//! embedded text chunks in the file. They may appear either before or after the image data
//! chunks. There are three kinds of text chunks.
//! - `tEXt`: This has a `keyword` and `text` field, and is ISO 8859-1 encoded.
//! - `zTXt`: This is semantically the same as `tEXt`, i.e. it has the same fields and
//! encoding, but the `text` field is compressed before being written into the PNG file.
//! - `iTXt`: This chunk allows for its `text` field to be any valid UTF-8, and supports
//! compression of the text field as well.
//!
//! The `ISO 8859-1` encoding technically doesn't allow any control characters
//! to be used, but in practice these values are encountered anyway. This can
//! either be the extended `ISO-8859-1` encoding with control characters or the
//! `Windows-1252` encoding. This crate assumes the `ISO-8859-1` encoding is
//! used.
//!
//! ## Reading text chunks
//!
//! As a PNG is decoded, any text chunk encountered is appended to the
//! [`Info`](`crate::common::Info`) struct, in the `uncompressed_latin1_text`,
//! `compressed_latin1_text`, and the `utf8_text` fields depending on whether the encountered
//! chunk is `tEXt`, `zTXt`, or `iTXt`.
//!
//! ```
//! use std::fs::File;
//! use std::io::BufReader;
//! use std::iter::FromIterator;
//! use std::path::PathBuf;
//!
//! // Opening a png file that has a zTXt chunk
//! let decoder = png::Decoder::new(
//! BufReader::new(File::open("tests/text_chunk_examples/ztxt_example.png").unwrap())
//! );
//! let mut reader = decoder.read_info().unwrap();
//! // If the text chunk is before the image data frames, `reader.info()` already contains the text.
//! for text_chunk in &reader.info().compressed_latin1_text {
//! println!("{:?}", text_chunk.keyword); // Prints the keyword
//! println!("{:#?}", text_chunk); // Prints out the text chunk.
//! // To get the uncompressed text, use the `get_text` method.
//! println!("{}", text_chunk.get_text().unwrap());
//! }
//! ```
//!
//! ## Writing text chunks
//!
//! There are two ways to write text chunks: the first is to add the appropriate text structs directly to the encoder header before the header is written to the file;
//! the second is to use the `write_text_chunk` method, which can add a text chunk at any point in the stream.
//!
//! ```
//! # use png::text_metadata::{ITXtChunk, ZTXtChunk};
//! # use std::env;
//! # use std::fs::File;
//! # use std::io::BufWriter;
//! # use std::iter::FromIterator;
//! # use std::path::PathBuf;
//! # let file = File::create(PathBuf::from_iter(["target", "text_chunk.png"])).unwrap();
//! # let ref mut w = BufWriter::new(file);
//! let mut encoder = png::Encoder::new(w, 2, 1); // Width is 2 pixels and height is 1.
//! encoder.set_color(png::ColorType::Rgba);
//! encoder.set_depth(png::BitDepth::Eight);
//! // Adding text chunks to the header
//! encoder
//! .add_text_chunk(
//! "Testing tEXt".to_string(),
//! "This is a tEXt chunk that will appear before the IDAT chunks.".to_string(),
//! )
//! .unwrap();
//! encoder
//! .add_ztxt_chunk(
//! "Testing zTXt".to_string(),
//! "This is a zTXt chunk that is compressed in the png file.".to_string(),
//! )
//! .unwrap();
//! encoder
//! .add_itxt_chunk(
//! "Testing iTXt".to_string(),
//! "iTXt chunks support all of UTF8. Example: हिंदी.".to_string(),
//! )
//! .unwrap();
//!
//! let mut writer = encoder.write_header().unwrap();
//!
//! let data = [255, 0, 0, 255, 0, 0, 0, 255]; // An array containing an RGBA sequence. First pixel is red and second pixel is black.
//! writer.write_image_data(&data).unwrap(); // Save
//!
//! // We can add a tEXt/zTXt/iTXt at any point before the encoder is dropped from scope. These chunks will be at the end of the png file.
//! let tail_ztxt_chunk = ZTXtChunk::new("Comment".to_string(), "A zTXt chunk after the image data.".to_string());
//! writer.write_text_chunk(&tail_ztxt_chunk).unwrap();
//!
//! // The fields of the text chunk are public, so they can be mutated before being written to the file.
//! let mut tail_itxt_chunk = ITXtChunk::new("Author".to_string(), "सायंतन खान".to_string());
//! tail_itxt_chunk.compressed = true;
//! tail_itxt_chunk.language_tag = "hi".to_string();
//! tail_itxt_chunk.translated_keyword = "लेखक".to_string();
//! writer.write_text_chunk(&tail_itxt_chunk).unwrap();
//! ```
#![warn(missing_docs)]
use crate::{chunk, encoder, DecodingError, EncodingError};
use fdeflate::BoundedDecompressionError;
use flate2::write::ZlibEncoder;
use flate2::Compression;
use std::{convert::TryFrom, io::Write};
/// Default decompression limit for compressed text chunks.
pub const DECOMPRESSION_LIMIT: usize = 2097152; // 2 MiB
/// Text encoding error that is wrapped by the standard EncodingError type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextEncodingError {
/// Unrepresentable characters in string
Unrepresentable,
/// Keyword longer than 79 bytes or empty
InvalidKeywordSize,
/// Error encountered while compressing text
CompressionError,
}
/// Text decoding error that is wrapped by the standard DecodingError type
#[derive(Debug, Clone, Copy)]
pub(crate) enum TextDecodingError {
/// Unrepresentable characters in string
Unrepresentable,
/// Keyword longer than 79 bytes or empty
InvalidKeywordSize,
/// Missing null separator
MissingNullSeparator,
/// Compressed text cannot be uncompressed
InflationError,
/// Needs more space to decompress
OutOfDecompressionSpace,
/// Using an unspecified value for the compression method
InvalidCompressionMethod,
/// Using a byte that is not 0 or 255 as compression flag in iTXt chunk
InvalidCompressionFlag,
/// Missing the compression flag
MissingCompressionFlag,
}
/// A generalized text chunk trait
pub trait EncodableTextChunk {
/// Encode text chunk as `Vec<u8>` to a `Write`
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError>;
}
/// Struct representing a tEXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TEXtChunk {
/// Keyword field of the tEXt chunk. Needs to be between 1 and 79 bytes when encoded as Latin-1.
pub keyword: String,
/// Text field of tEXt chunk. Can be at most 2GB.
pub text: String,
}
fn decode_iso_8859_1(text: &[u8]) -> String {
text.iter().map(|&b| b as char).collect()
}
pub(crate) fn encode_iso_8859_1(text: &str) -> Result<Vec<u8>, TextEncodingError> {
encode_iso_8859_1_iter(text).collect()
}
fn encode_iso_8859_1_into(buf: &mut Vec<u8>, text: &str) -> Result<(), TextEncodingError> {
for b in encode_iso_8859_1_iter(text) {
buf.push(b?);
}
Ok(())
}
fn encode_iso_8859_1_iter(text: &str) -> impl Iterator<Item = Result<u8, TextEncodingError>> + '_ {
text.chars()
.map(|c| u8::try_from(c as u32).map_err(|_| TextEncodingError::Unrepresentable))
}
fn decode_ascii(text: &[u8]) -> Result<&str, TextDecodingError> {
if text.is_ascii() {
// `from_utf8` cannot panic because we've already checked that `text` is ASCII-7.
// And this is the only safe way to get an ASCII-7 string from `&[u8]`.
Ok(std::str::from_utf8(text).expect("unreachable"))
} else {
Err(TextDecodingError::Unrepresentable)
}
}
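// A test-only sketch of the Latin-1 helpers above: ISO 8859-1 maps each byte to
// the Unicode code point of the same value, so the full 0x00..=0xFF range round
// trips losslessly, while characters above U+00FF are rejected as unrepresentable.
#[test]
fn latin1_roundtrip_sketch() {
    let bytes: Vec<u8> = (0u8..=255).collect();
    let text = decode_iso_8859_1(&bytes);
    assert_eq!(encode_iso_8859_1(&text).unwrap(), bytes);
    assert!(encode_iso_8859_1("हिंदी").is_err());
}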
impl TEXtChunk {
/// Constructs a new TEXtChunk.
/// Not sure whether it should take &str or String.
pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
Self {
keyword: keyword.into(),
text: text.into(),
}
}
/// Decodes a slice of bytes to a String using Latin-1 decoding.
/// The decoder runs in strict mode, and any decoding errors are passed along to the caller.
pub(crate) fn decode(
keyword_slice: &[u8],
text_slice: &[u8],
) -> Result<Self, TextDecodingError> {
if keyword_slice.is_empty() || keyword_slice.len() > 79 {
return Err(TextDecodingError::InvalidKeywordSize);
}
Ok(Self {
keyword: decode_iso_8859_1(keyword_slice),
text: decode_iso_8859_1(text_slice),
})
}
}
impl EncodableTextChunk for TEXtChunk {
/// Encodes TEXtChunk to a Writer. The keyword and text are separated by a null byte.
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
let mut data = encode_iso_8859_1(&self.keyword)?;
if data.is_empty() || data.len() > 79 {
return Err(TextEncodingError::InvalidKeywordSize.into());
}
data.push(0);
encode_iso_8859_1_into(&mut data, &self.text)?;
encoder::write_chunk(w, chunk::tEXt, &data)
}
}
/// Struct representing a zTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ZTXtChunk {
/// Keyword field of the tEXt chunk. Needs to be between 1 and 79 bytes when encoded as Latin-1.
pub keyword: String,
/// Text field of zTXt chunk. It is compressed by default, but can be uncompressed if necessary.
text: OptCompressed,
}
/// Private enum encoding the compressed and uncompressed states of zTXt/iTXt text field.
#[derive(Clone, Debug, PartialEq, Eq)]
enum OptCompressed {
/// Compressed version of text field. Can be at most 2GB.
Compressed(Vec<u8>),
/// Uncompressed text field.
Uncompressed(String),
}
impl ZTXtChunk {
/// Creates a new ZTXt chunk.
pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
Self {
keyword: keyword.into(),
text: OptCompressed::Uncompressed(text.into()),
}
}
pub(crate) fn decode(
keyword_slice: &[u8],
compression_method: u8,
text_slice: &[u8],
) -> Result<Self, TextDecodingError> {
if keyword_slice.is_empty() || keyword_slice.len() > 79 {
return Err(TextDecodingError::InvalidKeywordSize);
}
if compression_method != 0 {
return Err(TextDecodingError::InvalidCompressionMethod);
}
Ok(Self {
keyword: decode_iso_8859_1(keyword_slice),
text: OptCompressed::Compressed(text_slice.to_vec()),
})
}
/// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
}
/// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
match &self.text {
OptCompressed::Compressed(v) => {
let uncompressed_raw = match fdeflate::decompress_to_vec_bounded(&v[..], limit) {
Ok(s) => s,
Err(BoundedDecompressionError::OutputTooLarge { .. }) => {
return Err(DecodingError::from(
TextDecodingError::OutOfDecompressionSpace,
));
}
Err(_) => {
return Err(DecodingError::from(TextDecodingError::InflationError));
}
};
self.text = OptCompressed::Uncompressed(decode_iso_8859_1(&uncompressed_raw));
}
OptCompressed::Uncompressed(_) => {}
};
Ok(())
}
/// Decompresses the inner text, and returns it as a `String`.
/// If the decompressed text exceeds 2 MiB, first call `decompress_text_with_limit`, and then this method.
pub fn get_text(&self) -> Result<String, DecodingError> {
match &self.text {
OptCompressed::Compressed(v) => {
let uncompressed_raw = fdeflate::decompress_to_vec(v)
.map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
Ok(decode_iso_8859_1(&uncompressed_raw))
}
OptCompressed::Uncompressed(s) => Ok(s.clone()),
}
}
/// Compresses the inner text, mutating its own state.
pub fn compress_text(&mut self) -> Result<(), EncodingError> {
match &self.text {
OptCompressed::Uncompressed(s) => {
let uncompressed_raw = encode_iso_8859_1(s)?;
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
encoder
.write_all(&uncompressed_raw)
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
self.text = OptCompressed::Compressed(
encoder
.finish()
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
);
}
OptCompressed::Compressed(_) => {}
}
Ok(())
}
}
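// A test-only sketch: compressing and then decompressing a zTXt payload should be
// a lossless round trip observable through `get_text`.
#[test]
fn ztxt_compress_roundtrip_sketch() {
    let mut chunk = ZTXtChunk::new("Comment", "Hello, world!");
    chunk.compress_text().unwrap();
    chunk.decompress_text().unwrap();
    assert_eq!(chunk.get_text().unwrap(), "Hello, world!");
}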
impl EncodableTextChunk for ZTXtChunk {
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
let mut data = encode_iso_8859_1(&self.keyword)?;
if data.is_empty() || data.len() > 79 {
return Err(TextEncodingError::InvalidKeywordSize.into());
}
// Null separator
data.push(0);
// Compression method: the only valid value is 0, as of 2021.
data.push(0);
match &self.text {
OptCompressed::Compressed(v) => {
data.extend_from_slice(&v[..]);
}
OptCompressed::Uncompressed(s) => {
// This code may have a bug. Check for correctness.
let uncompressed_raw = encode_iso_8859_1(s)?;
let mut encoder = ZlibEncoder::new(data, Compression::fast());
encoder
.write_all(&uncompressed_raw)
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
data = encoder
.finish()
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
}
};
encoder::write_chunk(w, chunk::zTXt, &data)
}
}
/// Struct encoding an iTXt chunk
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ITXtChunk {
/// The keyword field. This needs to be between 1 and 79 bytes when encoded as Latin-1.
pub keyword: String,
/// Indicates whether the text will be (or was) compressed in the PNG.
pub compressed: bool,
/// A hyphen-separated list of languages that the keyword is translated to. This is ASCII-7 encoded.
pub language_tag: String,
/// Translated keyword. This is UTF-8 encoded.
pub translated_keyword: String,
/// Text field of iTXt chunk. It is compressed by default, but can be uncompressed if necessary.
text: OptCompressed,
}
impl ITXtChunk {
/// Constructs a new iTXt chunk. Leaves all but keyword and text to default values.
pub fn new(keyword: impl Into<String>, text: impl Into<String>) -> Self {
Self {
keyword: keyword.into(),
compressed: false,
language_tag: "".to_string(),
translated_keyword: "".to_string(),
text: OptCompressed::Uncompressed(text.into()),
}
}
pub(crate) fn decode(
keyword_slice: &[u8],
compression_flag: u8,
compression_method: u8,
language_tag_slice: &[u8],
translated_keyword_slice: &[u8],
text_slice: &[u8],
) -> Result<Self, TextDecodingError> {
if keyword_slice.is_empty() || keyword_slice.len() > 79 {
return Err(TextDecodingError::InvalidKeywordSize);
}
let keyword = decode_iso_8859_1(keyword_slice);
let compressed = match compression_flag {
0 => false,
1 => true,
_ => return Err(TextDecodingError::InvalidCompressionFlag),
};
if compressed && compression_method != 0 {
return Err(TextDecodingError::InvalidCompressionMethod);
}
let language_tag = decode_ascii(language_tag_slice)?.to_owned();
let translated_keyword = std::str::from_utf8(translated_keyword_slice)
.map_err(|_| TextDecodingError::Unrepresentable)?
.to_string();
let text = if compressed {
OptCompressed::Compressed(text_slice.to_vec())
} else {
OptCompressed::Uncompressed(
String::from_utf8(text_slice.to_vec())
.map_err(|_| TextDecodingError::Unrepresentable)?,
)
};
Ok(Self {
keyword,
compressed,
language_tag,
translated_keyword,
text,
})
}
/// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `DECOMPRESSION_LIMIT` bytes.
pub fn decompress_text(&mut self) -> Result<(), DecodingError> {
self.decompress_text_with_limit(DECOMPRESSION_LIMIT)
}
/// Decompresses the inner text, mutating its own state. Can only handle decompressed text up to `limit` bytes.
pub fn decompress_text_with_limit(&mut self, limit: usize) -> Result<(), DecodingError> {
match &self.text {
OptCompressed::Compressed(v) => {
let uncompressed_raw = match fdeflate::decompress_to_vec_bounded(v, limit) {
Ok(s) => s,
Err(BoundedDecompressionError::OutputTooLarge { .. }) => {
return Err(DecodingError::from(
TextDecodingError::OutOfDecompressionSpace,
));
}
Err(_) => {
return Err(DecodingError::from(TextDecodingError::InflationError));
}
};
self.text = OptCompressed::Uncompressed(
String::from_utf8(uncompressed_raw)
.map_err(|_| TextDecodingError::Unrepresentable)?,
);
}
OptCompressed::Uncompressed(_) => {}
};
Ok(())
}
/// Decompresses the inner text, and returns it as a `String`.
/// If decompression takes more than 2 MiB, try `decompress_text_with_limit` followed by this method.
pub fn get_text(&self) -> Result<String, DecodingError> {
match &self.text {
OptCompressed::Compressed(v) => {
let uncompressed_raw = fdeflate::decompress_to_vec(v)
.map_err(|_| DecodingError::from(TextDecodingError::InflationError))?;
String::from_utf8(uncompressed_raw)
.map_err(|_| TextDecodingError::Unrepresentable.into())
}
OptCompressed::Uncompressed(s) => Ok(s.clone()),
}
}
/// Compresses the inner text, mutating its own state.
pub fn compress_text(&mut self) -> Result<(), EncodingError> {
match &self.text {
OptCompressed::Uncompressed(s) => {
let uncompressed_raw = s.as_bytes();
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::fast());
encoder
.write_all(uncompressed_raw)
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
self.text = OptCompressed::Compressed(
encoder
.finish()
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?,
);
}
OptCompressed::Compressed(_) => {}
}
Ok(())
}
}
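// A test-only sketch of the flag validation in `ITXtChunk::decode`: the iTXt
// compression flag may only be 0 or 1, so any other byte is rejected.
#[test]
fn itxt_compression_flag_sketch() {
    assert!(ITXtChunk::decode(b"Key", 2, 0, b"", b"", b"").is_err());
}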
impl EncodableTextChunk for ITXtChunk {
fn encode<W: Write>(&self, w: &mut W) -> Result<(), EncodingError> {
// Keyword
let mut data = encode_iso_8859_1(&self.keyword)?;
if data.is_empty() || data.len() > 79 {
return Err(TextEncodingError::InvalidKeywordSize.into());
}
// Null separator
data.push(0);
// Compression flag
if self.compressed {
data.push(1);
} else {
data.push(0);
}
// Compression method
data.push(0);
// Language tag
if !self.language_tag.is_ascii() {
return Err(EncodingError::from(TextEncodingError::Unrepresentable));
}
data.extend(self.language_tag.as_bytes());
// Null separator
data.push(0);
// Translated keyword
data.extend_from_slice(self.translated_keyword.as_bytes());
// Null separator
data.push(0);
// Text
if self.compressed {
match &self.text {
OptCompressed::Compressed(v) => {
data.extend_from_slice(&v[..]);
}
OptCompressed::Uncompressed(s) => {
let uncompressed_raw = s.as_bytes();
let mut encoder = ZlibEncoder::new(data, Compression::fast());
encoder
.write_all(uncompressed_raw)
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
data = encoder
.finish()
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
}
}
} else {
match &self.text {
OptCompressed::Compressed(v) => {
let uncompressed_raw = fdeflate::decompress_to_vec(v)
.map_err(|_| EncodingError::from(TextEncodingError::CompressionError))?;
data.extend_from_slice(&uncompressed_raw[..]);
}
OptCompressed::Uncompressed(s) => {
data.extend_from_slice(s.as_bytes());
}
}
}
encoder::write_chunk(w, chunk::iTXt, &data)
}
}

43
vendor/png/src/traits.rs vendored Normal file
View File

@@ -0,0 +1,43 @@
use std::io;
macro_rules! read_bytes_ext {
($output_type:ty) => {
impl<W: io::Read + ?Sized> ReadBytesExt<$output_type> for W {
#[inline]
fn read_be(&mut self) -> io::Result<$output_type> {
let mut bytes = [0u8; std::mem::size_of::<$output_type>()];
self.read_exact(&mut bytes)?;
Ok(<$output_type>::from_be_bytes(bytes))
}
}
};
}
macro_rules! write_bytes_ext {
($input_type:ty) => {
impl<W: io::Write + ?Sized> WriteBytesExt<$input_type> for W {
#[inline]
fn write_be(&mut self, n: $input_type) -> io::Result<()> {
self.write_all(&n.to_be_bytes())
}
}
};
}
/// Read extension to read big endian data
pub trait ReadBytesExt<T>: io::Read {
/// Read `T` from a bytes stream. Most significant byte first.
fn read_be(&mut self) -> io::Result<T>;
}
/// Write extension to write big endian data
pub trait WriteBytesExt<T>: io::Write {
/// Writes `T` to a bytes stream. Most significant byte first.
fn write_be(&mut self, _: T) -> io::Result<()>;
}
read_bytes_ext!(u8);
read_bytes_ext!(u16);
read_bytes_ext!(u32);
write_bytes_ext!(u32);
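// A test-only sketch of the extension traits above: `write_be` followed by
// `read_be` should agree with the standard library's big-endian byte order.
#[test]
fn byte_order_roundtrip_sketch() {
    let mut out = Vec::new();
    out.write_be(0xDEADBEEFu32).unwrap();
    assert_eq!(out, 0xDEADBEEFu32.to_be_bytes());
    let mut cursor: &[u8] = &out;
    let back: u32 = cursor.read_be().unwrap();
    assert_eq!(back, 0xDEADBEEF);
}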