Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

2318
vendor/miniz_oxide/src/inflate/core.rs vendored Normal file

File diff suppressed because it is too large Load Diff

364
vendor/miniz_oxide/src/inflate/mod.rs vendored Normal file
View File

@@ -0,0 +1,364 @@
//! This module contains functionality for decompression.
#[cfg(feature = "with-alloc")]
use crate::alloc::{boxed::Box, vec, vec::Vec};
#[cfg(all(feature = "std", feature = "with-alloc"))]
use std::error::Error;
pub mod core;
mod output_buffer;
#[cfg(not(feature = "rustc-dep-of-std"))]
pub mod stream;
#[cfg(not(feature = "rustc-dep-of-std"))]
use self::core::*;
// Raw integer decompression status codes. Negative values are failures;
// `TINFLStatus` below is the typed wrapper converted from these values.
const TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS: i32 = -4;
const TINFL_STATUS_BAD_PARAM: i32 = -3;
const TINFL_STATUS_ADLER32_MISMATCH: i32 = -2;
const TINFL_STATUS_FAILED: i32 = -1;
const TINFL_STATUS_DONE: i32 = 0;
const TINFL_STATUS_NEEDS_MORE_INPUT: i32 = 1;
const TINFL_STATUS_HAS_MORE_OUTPUT: i32 = 2;
#[cfg(feature = "block-boundary")]
const TINFL_STATUS_BLOCK_BOUNDARY: i32 = 3;
/// Return status codes.
#[repr(i8)]
#[cfg_attr(not(feature = "rustc-dep-of-std"), derive(Hash, Debug))]
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum TINFLStatus {
    /// More input data was expected, but the caller indicated that there was no more data, so the
    /// input stream is likely truncated.
    ///
    /// This can't happen if you have provided the
    /// [`TINFL_FLAG_HAS_MORE_INPUT`][core::inflate_flags::TINFL_FLAG_HAS_MORE_INPUT] flag to the
    /// decompression. By setting that flag, you indicate more input exists but is not provided,
    /// and so reaching the end of the input data without finding the end of the compressed stream
    /// would instead return a [`NeedsMoreInput`][Self::NeedsMoreInput] status.
    FailedCannotMakeProgress = TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS as i8,
    /// The output buffer is an invalid size; consider the `flags` parameter.
    BadParam = TINFL_STATUS_BAD_PARAM as i8,
    /// The decompression went fine, but the adler32 checksum did not match the one
    /// provided in the header.
    Adler32Mismatch = TINFL_STATUS_ADLER32_MISMATCH as i8,
    /// Failed to decompress due to invalid data.
    Failed = TINFL_STATUS_FAILED as i8,
    /// Finished decompression without issues.
    ///
    /// This indicates the end of the compressed stream has been reached.
    Done = TINFL_STATUS_DONE as i8,
    /// The decompressor needs more input data to continue decompressing.
    ///
    /// This occurs when there's no more consumable input, but the end of the stream hasn't been
    /// reached, and you have supplied the
    /// [`TINFL_FLAG_HAS_MORE_INPUT`][core::inflate_flags::TINFL_FLAG_HAS_MORE_INPUT] flag to the
    /// decompressor. Had you not supplied that flag (which would mean you were asserting that you
    /// believed all the data was available) you would have gotten a
    /// [`FailedCannotMakeProgress`][Self::FailedCannotMakeProgress] instead.
    NeedsMoreInput = TINFL_STATUS_NEEDS_MORE_INPUT as i8,
    /// There is still pending data that didn't fit in the output buffer.
    HasMoreOutput = TINFL_STATUS_HAS_MORE_OUTPUT as i8,
    /// Reached the end of a deflate block, and the start of the next block.
    ///
    /// At this point, you can suspend decompression and later resume with a new `DecompressorOxide`.
    /// The only state that must be preserved is [`DecompressorOxide::block_boundary_state()`],
    /// plus the last 32KiB of the output buffer (or less if you know the stream was compressed with
    /// a smaller window size).
    ///
    /// This is only returned if you use the
    /// [`TINFL_FLAG_STOP_ON_BLOCK_BOUNDARY`][core::inflate_flags::TINFL_FLAG_STOP_ON_BLOCK_BOUNDARY] flag.
    #[cfg(feature = "block-boundary")]
    BlockBoundary = TINFL_STATUS_BLOCK_BOUNDARY as i8,
}
impl TINFLStatus {
pub fn from_i32(value: i32) -> Option<TINFLStatus> {
use self::TINFLStatus::*;
match value {
TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS => Some(FailedCannotMakeProgress),
TINFL_STATUS_BAD_PARAM => Some(BadParam),
TINFL_STATUS_ADLER32_MISMATCH => Some(Adler32Mismatch),
TINFL_STATUS_FAILED => Some(Failed),
TINFL_STATUS_DONE => Some(Done),
TINFL_STATUS_NEEDS_MORE_INPUT => Some(NeedsMoreInput),
TINFL_STATUS_HAS_MORE_OUTPUT => Some(HasMoreOutput),
#[cfg(feature = "block-boundary")]
TINFL_STATUS_BLOCK_BOUNDARY => Some(BlockBoundary),
_ => None,
}
}
}
/// Struct returned when the `decompress_to_vec` family of functions fail.
#[cfg(feature = "with-alloc")]
#[derive(Debug)]
pub struct DecompressError {
    /// Decompressor status on failure. See [TINFLStatus] for details.
    pub status: TINFLStatus,
    /// The currently decompressed data if any.
    pub output: Vec<u8>,
}
#[cfg(feature = "with-alloc")]
impl alloc::fmt::Display for DecompressError {
    // Error formatting is off the hot path, so keep it out of the way.
    #[cold]
    fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
        // Map the failure status to a short human-readable message.
        let message = match self.status {
            TINFLStatus::FailedCannotMakeProgress => "Truncated input stream",
            TINFLStatus::BadParam => "Invalid output buffer size",
            TINFLStatus::Adler32Mismatch => "Adler32 checksum mismatch",
            TINFLStatus::Failed => "Invalid input data",
            // A `Done` status is never stored in a `DecompressError`.
            TINFLStatus::Done => "",
            TINFLStatus::NeedsMoreInput => "Truncated input stream",
            TINFLStatus::HasMoreOutput => "Output size exceeded the specified limit",
            #[cfg(feature = "block-boundary")]
            TINFLStatus::BlockBoundary => "Reached end of a deflate block",
        };
        f.write_str(message)
    }
}
/// Implement the `Error` trait only when the `std` feature is enabled,
/// since `std::error::Error` is not available in `no_std` builds.
#[cfg(all(feature = "std", feature = "with-alloc"))]
impl Error for DecompressError {}
/// Build the `Err` variant carrying the failure `status` together with
/// whatever partial output had been produced before the failure.
#[cfg(feature = "with-alloc")]
fn decompress_error(status: TINFLStatus, output: Vec<u8>) -> Result<Vec<u8>, DecompressError> {
    let error = DecompressError { status, output };
    Err(error)
}
/// Decompress the raw deflate-encoded data in `input` to a vector.
///
/// NOTE: The output is not bounded in any way, so a sufficiently large input can cause an
/// out-of-memory condition. Prefer the `_with_limit` variants, or streaming decompression via
/// the [flate2](https://github.com/alexcrichton/flate2-rs) library, for anything other than
/// test programs.
///
/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a
/// [struct][DecompressError] containing the status and the data decompressed so far (if any)
/// on failure.
#[inline]
#[cfg(feature = "with-alloc")]
pub fn decompress_to_vec(input: &[u8]) -> Result<Vec<u8>, DecompressError> {
    // No wrapper parsing flags and no cap on the output size.
    let no_limit = usize::MAX;
    decompress_to_vec_inner(input, 0, no_limit)
}
/// Decompress the deflate-encoded data (with a zlib wrapper) in `input` to a vector.
///
/// NOTE: The output is not bounded in any way, so a sufficiently large input can cause an
/// out-of-memory condition. Prefer the `_with_limit` variants, or streaming decompression via
/// the [flate2](https://github.com/alexcrichton/flate2-rs) library, for anything other than
/// test programs.
///
/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a
/// [struct][DecompressError] containing the status and the data decompressed so far (if any)
/// on failure.
#[inline]
#[cfg(feature = "with-alloc")]
pub fn decompress_to_vec_zlib(input: &[u8]) -> Result<Vec<u8>, DecompressError> {
    // Ask the decompressor to parse (and validate) the zlib header.
    let flags = inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER;
    decompress_to_vec_inner(input, flags, usize::MAX)
}
/// Decompress the raw deflate-encoded data in `input` to a vector, bounded by `max_size`.
///
/// The vector is grown to at most `max_size` bytes; if the data does not fit in that size,
/// the error [struct][DecompressError] will contain the status [`TINFLStatus::HasMoreOutput`]
/// together with the data that was decompressed before the limit was hit.
///
/// As this function tries to decompress everything in one go, it's not ideal for general use
/// outside of tests or cases where the output size is expected to be small. Prefer streaming
/// decompression via the [flate2](https://github.com/alexcrichton/flate2-rs) library otherwise.
///
/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a
/// [struct][DecompressError] on failure.
#[inline]
#[cfg(feature = "with-alloc")]
pub fn decompress_to_vec_with_limit(
    input: &[u8],
    max_size: usize,
) -> Result<Vec<u8>, DecompressError> {
    // Raw deflate: no wrapper flags, output capped at `max_size`.
    decompress_to_vec_inner(input, 0, max_size)
}
/// Decompress the deflate-encoded data (with a zlib wrapper) in `input` to a vector,
/// bounded by `max_size`.
///
/// The vector is grown to at most `max_size` bytes; if the data does not fit in that size,
/// the error [struct][DecompressError] will contain the status [`TINFLStatus::HasMoreOutput`]
/// together with the data that was decompressed before the limit was hit.
///
/// As this function tries to decompress everything in one go, it's not ideal for general use
/// outside of tests or cases where the output size is expected to be small. Prefer streaming
/// decompression via the [flate2](https://github.com/alexcrichton/flate2-rs) library otherwise.
///
/// Returns a [`Result`] containing the [`Vec`] of decompressed data on success, and a
/// [struct][DecompressError] on failure.
#[inline]
#[cfg(feature = "with-alloc")]
pub fn decompress_to_vec_zlib_with_limit(
    input: &[u8],
    max_size: usize,
) -> Result<Vec<u8>, DecompressError> {
    let flags = inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER;
    decompress_to_vec_inner(input, flags, max_size)
}
/// Backend of various to-[`Vec`] decompressions.
///
/// Repeatedly calls [`decompress`], doubling the output buffer (up to
/// `max_output_size`) whenever the decompressor reports it has more output.
///
/// Returns [`Vec`] of decompressed data on success and the [error struct][DecompressError] with details on failure.
#[cfg(feature = "with-alloc")]
fn decompress_to_vec_inner(
    mut input: &[u8],
    flags: u32,
    max_output_size: usize,
) -> Result<Vec<u8>, DecompressError> {
    let flags = flags | inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
    // Initial guess: twice the input size, capped at the caller's limit.
    let mut ret: Vec<u8> = vec![0; input.len().saturating_mul(2).min(max_output_size)];
    let mut decomp = Box::<DecompressorOxide>::default();

    let mut out_pos = 0;
    loop {
        // Wrap the whole output slice so we know we have enough of the
        // decompressed data for matches.
        let (status, in_consumed, out_consumed) =
            decompress(&mut decomp, input, &mut ret, out_pos, flags);
        out_pos += out_consumed;

        match status {
            TINFLStatus::Done => {
                // Trim the unused tail of the buffer before returning.
                ret.truncate(out_pos);
                return Ok(ret);
            }

            TINFLStatus::HasMoreOutput => {
                // in_consumed is not expected to be out of bounds,
                // but the check eliminates a panicking code path
                if in_consumed > input.len() {
                    return decompress_error(TINFLStatus::HasMoreOutput, ret);
                }
                input = &input[in_consumed..];

                // if the buffer has already reached the size limit, return an error
                if ret.len() >= max_output_size {
                    return decompress_error(TINFLStatus::HasMoreOutput, ret);
                }
                // calculate the new length, capped at `max_output_size`
                let new_len = ret.len().saturating_mul(2).min(max_output_size);
                ret.resize(new_len, 0);
            }

            // Any other status is a failure; hand back what we decompressed so far.
            _ => return decompress_error(status, ret),
        }
    }
}
/// Decompress one or more source slices from an iterator into the output slice.
///
/// * On success, returns the number of bytes that were written.
/// * On failure, returns the failure status code.
///
/// This will fail if the output buffer is not large enough, but in that case
/// the output buffer will still contain the partial decompression.
///
/// * `out` the output buffer.
/// * `it` the iterator of input slices.
/// * `zlib_header` if the first slice out of the iterator is expected to have a
///   Zlib header. Otherwise the slices are assumed to be the deflate data only.
/// * `ignore_adler32` if the adler32 checksum should be calculated or not.
#[cfg(not(feature = "rustc-dep-of-std"))]
pub fn decompress_slice_iter_to_slice<'out, 'inp>(
    out: &'out mut [u8],
    it: impl Iterator<Item = &'inp [u8]>,
    zlib_header: bool,
    ignore_adler32: bool,
) -> Result<usize, TINFLStatus> {
    use self::core::inflate_flags::*;

    // Peekable so we can tell the decompressor whether more input follows
    // the slice currently being processed.
    let mut it = it.peekable();
    let r = &mut DecompressorOxide::new();
    let mut out_pos = 0;
    while let Some(in_buf) = it.next() {
        let has_more = it.peek().is_some();
        // Rebuild the flag set for every slice: `HAS_MORE_INPUT` must only be
        // set while at least one more input slice remains.
        let flags = {
            let mut f = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
            if zlib_header {
                f |= TINFL_FLAG_PARSE_ZLIB_HEADER;
            }
            if ignore_adler32 {
                f |= TINFL_FLAG_IGNORE_ADLER32;
            }
            if has_more {
                f |= TINFL_FLAG_HAS_MORE_INPUT;
            }
            f
        };
        let (status, _input_read, bytes_written) = decompress(r, in_buf, out, out_pos, flags);
        out_pos += bytes_written;
        match status {
            TINFLStatus::NeedsMoreInput => continue,
            TINFLStatus::Done => return Ok(out_pos),
            e => return Err(e),
        }
    }
    // If we ran out of source slices without getting a `Done` from the
    // decompression we can call it a failure.
    Err(TINFLStatus::FailedCannotMakeProgress)
}
#[cfg(all(test, feature = "with-alloc"))]
mod test {
    use super::{
        decompress_slice_iter_to_slice, decompress_to_vec_zlib, decompress_to_vec_zlib_with_limit,
        DecompressError, TINFLStatus,
    };

    // Zlib-wrapped deflate stream that decompresses to the 12 bytes b"Hello, zlib!".
    const ENCODED: [u8; 20] = [
        120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
    ];

    #[test]
    fn decompress_vec() {
        let res = decompress_to_vec_zlib(&ENCODED[..]).unwrap();
        assert_eq!(res.as_slice(), &b"Hello, zlib!"[..]);
    }

    #[test]
    fn decompress_vec_with_high_limit() {
        // A limit far above the decompressed size must not interfere.
        let res = decompress_to_vec_zlib_with_limit(&ENCODED[..], 100_000).unwrap();
        assert_eq!(res.as_slice(), &b"Hello, zlib!"[..]);
    }

    #[test]
    fn fail_to_decompress_with_limit() {
        // The decompressed data is 12 bytes, so an 8-byte limit must fail.
        let res = decompress_to_vec_zlib_with_limit(&ENCODED[..], 8);
        match res {
            Err(DecompressError {
                status: TINFLStatus::HasMoreOutput,
                ..
            }) => (), // expected result
            _ => panic!("Decompression output size limit was not enforced"),
        }
    }

    #[test]
    fn test_decompress_slice_iter_to_slice() {
        // one slice
        let mut out = [0_u8; 12_usize];
        let r =
            decompress_slice_iter_to_slice(&mut out, Some(&ENCODED[..]).into_iter(), true, false);
        assert_eq!(r, Ok(12));
        assert_eq!(&out[..12], &b"Hello, zlib!"[..]);

        // some chunks at a time
        for chunk_size in 1..13 {
            // Note: because of https://github.com/Frommi/miniz_oxide/issues/110 our
            // out buffer needs to have +1 byte available when the chunk size cuts
            // the adler32 data off from the last actual data.
            let mut out = [0_u8; 12_usize + 1];
            let r =
                decompress_slice_iter_to_slice(&mut out, ENCODED.chunks(chunk_size), true, false);
            assert_eq!(r, Ok(12));
            assert_eq!(&out[..12], &b"Hello, zlib!"[..]);
        }

        // output buffer too small
        let mut out = [0_u8; 3_usize];
        let r = decompress_slice_iter_to_slice(&mut out, ENCODED.chunks(7), true, false);
        assert!(r.is_err());
    }
}

View File

@@ -0,0 +1,111 @@
/// A wrapper for the output slice used when decompressing.
///
/// Using this rather than `Cursor` lets us implement the writing methods directly on
/// the buffer and lets us use a usize rather than u64 for the position which helps with
/// performance on 32-bit systems.
pub struct OutputBuffer<'a> {
    /// The backing output slice.
    data: &'a mut [u8],
    /// Next write position within `data`.
    pos: usize,
}

impl<'a> OutputBuffer<'a> {
    /// Wrap `slice` with the write cursor placed at `position`.
    #[inline]
    pub fn from_slice_and_pos(slice: &'a mut [u8], position: usize) -> OutputBuffer<'a> {
        OutputBuffer {
            data: slice,
            pos: position,
        }
    }

    /// Current write position.
    #[inline(always)]
    pub const fn position(&self) -> usize {
        self.pos
    }

    /// Move the write cursor to `position`.
    #[inline(always)]
    pub fn set_position(&mut self, position: usize) {
        self.pos = position;
    }

    /// Write a byte to the current position and increment
    ///
    /// Assumes that there is space.
    #[inline]
    pub fn write_byte(&mut self, byte: u8) {
        self.data[self.pos] = byte;
        self.pos += 1;
    }

    /// Write a slice to the current position and increment
    ///
    /// Assumes that there is space.
    #[inline]
    pub fn write_slice(&mut self, data: &[u8]) {
        let end = self.pos + data.len();
        self.data[self.pos..end].copy_from_slice(data);
        self.pos = end;
    }

    /// Number of bytes remaining after the current position.
    #[inline]
    pub const fn bytes_left(&self) -> usize {
        self.data.len() - self.pos
    }

    /// Borrow the whole underlying slice.
    #[inline(always)]
    pub const fn get_ref(&self) -> &[u8] {
        self.data
    }

    /// Mutably borrow the whole underlying slice.
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut [u8] {
        self.data
    }
}
/// A thin wrapper around the remaining input bytes during decompression.
///
/// The original doc comment here was copy-pasted from `OutputBuffer`; this type
/// wraps the *input* slice, providing small read helpers that consume bytes
/// from the front of the slice.
#[derive(Copy, Clone)]
pub struct InputWrapper<'a> {
    /// The bytes not yet consumed.
    bytes: &'a [u8],
}

impl<'a> InputWrapper<'a> {
    /// Borrow the remaining, unconsumed bytes.
    #[inline(always)]
    pub const fn as_slice(&self) -> &[u8] {
        self.bytes
    }

    /// Wrap `slice` so it can be consumed from the front.
    #[inline(always)]
    pub const fn from_slice(slice: &'a [u8]) -> InputWrapper<'a> {
        InputWrapper { bytes: slice }
    }

    /// Discard `steps` bytes from the front. Panics if fewer bytes remain.
    #[inline(always)]
    pub fn advance(&mut self, steps: usize) {
        self.bytes = &self.bytes[steps..];
    }

    /// Consume and return the next byte, or `None` when the input is exhausted.
    #[inline]
    pub fn read_byte(&mut self) -> Option<u8> {
        match self.bytes.split_first() {
            Some((&byte, rest)) => {
                self.bytes = rest;
                Some(byte)
            }
            None => None,
        }
    }

    /// Consume four bytes and return them as a little-endian `u32`.
    ///
    /// Panics if fewer than four bytes remain.
    #[inline]
    #[cfg(target_pointer_width = "64")]
    pub fn read_u32_le(&mut self) -> u32 {
        let four_bytes: [u8; 4] = self.bytes[..4].try_into().unwrap_or_default();
        self.advance(4);
        u32::from_le_bytes(four_bytes)
    }

    /// Number of unconsumed bytes remaining.
    #[inline(always)]
    pub const fn bytes_left(&self) -> usize {
        self.bytes.len()
    }
}

505
vendor/miniz_oxide/src/inflate/stream.rs vendored Normal file
View File

@@ -0,0 +1,505 @@
//! Extra streaming decompression functionality.
//!
//! As of now this is mainly intended for use to build a higher-level wrapper.
#[cfg(feature = "with-alloc")]
use crate::alloc::boxed::Box;
use core::{cmp, mem};
use crate::inflate::core::{decompress, inflate_flags, DecompressorOxide, TINFL_LZ_DICT_SIZE};
use crate::inflate::TINFLStatus;
use crate::{DataFormat, MZError, MZFlush, MZResult, MZStatus, StreamResult};
/// Tag that determines the reset policy of [InflateState](struct.InflateState.html).
///
/// Implementors decide how much of the state is cleared; see [`MinReset`],
/// [`ZeroReset`] and [`FullReset`].
pub trait ResetPolicy {
    /// Performs the reset on the given state.
    fn reset(&self, state: &mut InflateState);
}
/// Resets state, without performing expensive ops (e.g. zeroing buffer)
///
/// Note that not zeroing buffer can lead to security issues when dealing with untrusted input.
pub struct MinReset;

impl ResetPolicy for MinReset {
    fn reset(&self, state: &mut InflateState) {
        // Re-initialize the inner decompressor and the streaming bookkeeping,
        // but leave the dictionary buffer contents untouched.
        state.decompressor().init();
        state.dict_ofs = 0;
        state.dict_avail = 0;
        state.first_call = true;
        state.has_flushed = false;
        // `NeedsMoreInput` is the status of a freshly created state (see `Default`).
        state.last_status = TINFLStatus::NeedsMoreInput;
    }
}
/// Resets state and zero memory, continuing to use the same data format.
pub struct ZeroReset;

impl ResetPolicy for ZeroReset {
    #[inline]
    fn reset(&self, state: &mut InflateState) {
        // Do the cheap reset first, then additionally clear the dictionary buffer.
        MinReset.reset(state);
        state.dict = [0; TINFL_LZ_DICT_SIZE];
    }
}
/// Full reset of the state, including zeroing memory.
///
/// Requires to provide new data format.
pub struct FullReset(pub DataFormat);

impl ResetPolicy for FullReset {
    #[inline]
    fn reset(&self, state: &mut InflateState) {
        // Zeroing reset plus switching to the caller-provided data format.
        ZeroReset.reset(state);
        state.data_format = self.0;
    }
}
/// A struct that combines a decompressor with extra data for streaming decompression.
#[derive(Clone)]
pub struct InflateState {
    /// Inner decompressor struct
    decomp: DecompressorOxide,

    /// Buffer of input bytes for matches.
    /// TODO: Could probably do this a bit cleaner with some
    /// Cursor-like class.
    /// We may also look into whether we need to keep a buffer here, or just one in the
    /// decompressor struct.
    dict: [u8; TINFL_LZ_DICT_SIZE],
    /// Where in the buffer are we currently at?
    dict_ofs: usize,
    /// How many bytes of data to be flushed is there currently in the buffer?
    dict_avail: usize,

    /// Whether this is the first call to `inflate` since creation/reset.
    first_call: bool,
    /// Whether `inflate` has been called with `MZFlush::Finish`.
    has_flushed: bool,

    /// Whether the input data is wrapped in a zlib header and checksum.
    /// TODO: This should be stored in the decompressor.
    data_format: DataFormat,
    /// Status returned by the last call to `inflate`.
    last_status: TINFLStatus,
}
impl Default for InflateState {
    /// A fresh state: raw data format, empty zeroed dictionary, and
    /// `NeedsMoreInput` as the initial status.
    fn default() -> Self {
        InflateState {
            decomp: DecompressorOxide::default(),
            dict: [0; TINFL_LZ_DICT_SIZE],
            dict_ofs: 0,
            dict_avail: 0,
            first_call: true,
            has_flushed: false,
            data_format: DataFormat::Raw,
            last_status: TINFLStatus::NeedsMoreInput,
        }
    }
}
impl InflateState {
    /// Create a new state.
    ///
    /// Note that this struct is quite large due to internal buffers, and as such storing it on
    /// the stack is not recommended.
    ///
    /// # Parameters
    /// `data_format`: Determines whether the compressed data is assumed to be wrapped with zlib
    /// metadata.
    pub fn new(data_format: DataFormat) -> InflateState {
        InflateState {
            data_format,
            ..Default::default()
        }
    }

    /// Create a new state on the heap.
    ///
    /// # Parameters
    /// `data_format`: Determines whether the compressed data is assumed to be wrapped with zlib
    /// metadata.
    #[cfg(feature = "with-alloc")]
    pub fn new_boxed(data_format: DataFormat) -> Box<InflateState> {
        let mut b: Box<InflateState> = Box::default();
        b.data_format = data_format;
        b
    }

    /// Access the inner decompressor.
    pub fn decompressor(&mut self) -> &mut DecompressorOxide {
        &mut self.decomp
    }

    /// Return the status of the last call to `inflate` with this `InflateState`.
    pub const fn last_status(&self) -> TINFLStatus {
        self.last_status
    }

    /// Create a new state using miniz/zlib style window bits parameter.
    ///
    /// The decompressor does not support different window sizes. As such,
    /// any positive (>0) value will set the zlib header flag, while a negative one
    /// will not.
    #[cfg(feature = "with-alloc")]
    pub fn new_boxed_with_window_bits(window_bits: i32) -> Box<InflateState> {
        let mut b: Box<InflateState> = Box::default();
        b.data_format = DataFormat::from_window_bits(window_bits);
        b
    }

    #[inline]
    /// Reset the decompressor without re-allocating memory, using the given
    /// data format.
    pub fn reset(&mut self, data_format: DataFormat) {
        self.reset_as(FullReset(data_format));
    }

    #[inline]
    /// Resets the state according to specified policy.
    pub fn reset_as<T: ResetPolicy>(&mut self, policy: T) {
        policy.reset(self)
    }
}
/// Try to decompress from `input` to `output` with the given [`InflateState`]
///
/// # `flush`
///
/// Generally, the various [`MZFlush`] flags have meaning only on the compression side. They can be
/// supplied here, but the only one that has any semantic meaning is [`MZFlush::Finish`], which is a
/// signal that the stream is expected to finish, and failing to do so is an error. It isn't
/// necessary to specify it when the stream ends; you'll still get returned a
/// [`MZStatus::StreamEnd`] anyway. Other values either have no effect or cause errors. It's
/// likely that you'll almost always just want to use [`MZFlush::None`].
///
/// # Errors
///
/// Returns [`MZError::Buf`] if the size of the `output` slice is empty or no progress was made due
/// to lack of expected input data, or if called with [`MZFlush::Finish`] and input wasn't all
/// consumed.
///
/// Returns [`MZError::Data`] if this or a previous call failed with an error return from
/// [`TINFLStatus`]; probably indicates corrupted data.
///
/// Returns [`MZError::Stream`] when called with [`MZFlush::Full`] (meaningless on
/// decompression), or when called without [`MZFlush::Finish`] after an earlier call with
/// [`MZFlush::Finish`] has been made.
pub fn inflate(
    state: &mut InflateState,
    input: &[u8],
    output: &mut [u8],
    flush: MZFlush,
) -> StreamResult {
    let mut bytes_consumed = 0;
    let mut bytes_written = 0;
    let mut next_in = input;
    let mut next_out = output;

    // A full flush is only meaningful for compression.
    if flush == MZFlush::Full {
        return StreamResult::error(MZError::Stream);
    }

    // Only compute the adler32 checksum when a zlib wrapper with checksum is expected.
    let mut decomp_flags = if state.data_format == DataFormat::Zlib {
        inflate_flags::TINFL_FLAG_COMPUTE_ADLER32
    } else {
        inflate_flags::TINFL_FLAG_IGNORE_ADLER32
    };

    if (state.data_format == DataFormat::Zlib)
        | (state.data_format == DataFormat::ZLibIgnoreChecksum)
    {
        decomp_flags |= inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER;
    }

    let first_call = state.first_call;
    state.first_call = false;

    // A previous call already failed; keep reporting the matching error.
    if state.last_status == TINFLStatus::FailedCannotMakeProgress {
        return StreamResult::error(MZError::Buf);
    }
    if (state.last_status as i32) < 0 {
        return StreamResult::error(MZError::Data);
    }

    // Once `Finish` has been requested, every following call must also use `Finish`.
    if state.has_flushed && (flush != MZFlush::Finish) {
        return StreamResult::error(MZError::Stream);
    }
    state.has_flushed |= flush == MZFlush::Finish;

    if (flush == MZFlush::Finish) && first_call {
        decomp_flags |= inflate_flags::TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        // The caller is indicating that they want to finish the compression and this is the first call with the current stream
        // so we can simply write directly to the output buffer.
        // If there is not enough space for all of the decompressed data we will end up with a failure regardless.
        let status = decompress(&mut state.decomp, next_in, next_out, 0, decomp_flags);
        let in_bytes = status.1;
        let out_bytes = status.2;
        let status = status.0;

        state.last_status = status;

        bytes_consumed += in_bytes;
        bytes_written += out_bytes;

        let ret_status = {
            if status == TINFLStatus::FailedCannotMakeProgress {
                Err(MZError::Buf)
            } else if (status as i32) < 0 {
                Err(MZError::Data)
            } else if status != TINFLStatus::Done {
                // Anything other than `Done` here means we ran out of output space.
                state.last_status = TINFLStatus::Failed;
                Err(MZError::Buf)
            } else {
                Ok(MZStatus::StreamEnd)
            }
        };
        return StreamResult {
            bytes_consumed,
            bytes_written,
            status: ret_status,
        };
    }

    if flush != MZFlush::Finish {
        decomp_flags |= inflate_flags::TINFL_FLAG_HAS_MORE_INPUT;
    }

    // Flush any data still pending in the internal dictionary buffer before
    // decompressing anything new.
    if state.dict_avail != 0 {
        bytes_written += push_dict_out(state, &mut next_out);
        return StreamResult {
            bytes_consumed,
            bytes_written,
            status: Ok(
                if (state.last_status == TINFLStatus::Done) && (state.dict_avail == 0) {
                    MZStatus::StreamEnd
                } else {
                    MZStatus::Ok
                },
            ),
        };
    }

    let status = inflate_loop(
        state,
        &mut next_in,
        &mut next_out,
        &mut bytes_consumed,
        &mut bytes_written,
        decomp_flags,
        flush,
    );
    StreamResult {
        bytes_consumed,
        bytes_written,
        status,
    }
}
/// Inner decompression loop for [`inflate`].
///
/// Decompresses into the state's internal dictionary buffer and flushes it to
/// `next_out` after each step, updating `total_in`/`total_out` and the slices
/// in place until the stream ends, an error occurs, or no progress can be made.
fn inflate_loop(
    state: &mut InflateState,
    next_in: &mut &[u8],
    next_out: &mut &mut [u8],
    total_in: &mut usize,
    total_out: &mut usize,
    decomp_flags: u32,
    flush: MZFlush,
) -> MZResult {
    let orig_in_len = next_in.len();
    loop {
        // Decompress into the dictionary buffer, starting at the current offset.
        let status = decompress(
            &mut state.decomp,
            next_in,
            &mut state.dict,
            state.dict_ofs,
            decomp_flags,
        );

        let in_bytes = status.1;
        let out_bytes = status.2;
        let status = status.0;

        state.last_status = status;

        *next_in = &next_in[in_bytes..];
        *total_in += in_bytes;

        state.dict_avail = out_bytes;
        *total_out += push_dict_out(state, next_out);

        // Finish was requested but we didn't end on an end block.
        if status == TINFLStatus::FailedCannotMakeProgress {
            return Err(MZError::Buf);
        }
        // The stream was corrupted, and decompression failed.
        else if (status as i32) < 0 {
            return Err(MZError::Data);
        }

        // The decompressor has flushed all its data and is waiting for more input, but
        // there was no more input provided.
        if (status == TINFLStatus::NeedsMoreInput) && orig_in_len == 0 {
            return Err(MZError::Buf);
        }

        if flush == MZFlush::Finish {
            if status == TINFLStatus::Done {
                // There is not enough space in the output buffer to flush the remaining
                // decompressed data in the internal buffer.
                return if state.dict_avail != 0 {
                    Err(MZError::Buf)
                } else {
                    Ok(MZStatus::StreamEnd)
                };
            // No more space in the output buffer, but we're not done.
            } else if next_out.is_empty() {
                return Err(MZError::Buf);
            }
        } else {
            // We're not expected to finish, so it's fine if we can't flush everything yet.
            let empty_buf = next_in.is_empty() || next_out.is_empty();
            if (status == TINFLStatus::Done) || empty_buf || (state.dict_avail != 0) {
                return if (status == TINFLStatus::Done) && (state.dict_avail == 0) {
                    // No more data left, we're done.
                    Ok(MZStatus::StreamEnd)
                } else {
                    // Ok for now, still waiting for more input data or output space.
                    Ok(MZStatus::Ok)
                };
            }
        }
    }
}
/// Copy as much pending dictionary data as fits into `next_out`.
///
/// Advances `next_out` past the copied bytes, updates the dictionary read
/// offset (wrapping at `TINFL_LZ_DICT_SIZE`, which the mask requires to be a
/// power of two), and returns the number of bytes copied.
fn push_dict_out(state: &mut InflateState, next_out: &mut &mut [u8]) -> usize {
    // Never copy more than is pending or than the output can hold.
    let count = cmp::min(state.dict_avail, next_out.len());
    let source = &state.dict[state.dict_ofs..state.dict_ofs + count];
    next_out[..count].copy_from_slice(source);
    // Shrink the output window past the bytes we just wrote.
    *next_out = &mut mem::take(next_out)[count..];
    state.dict_avail -= count;
    state.dict_ofs = (state.dict_ofs + count) & (TINFL_LZ_DICT_SIZE - 1);
    count
}
#[cfg(all(test, feature = "with-alloc"))]
mod test {
    use super::{inflate, InflateState};
    use crate::{DataFormat, MZFlush, MZStatus};
    use alloc::vec;

    #[test]
    fn test_state() {
        // Zlib-wrapped stream decompressing to b"Hello, zlib!".
        let encoded = [
            120u8, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4,
            19,
        ];
        let mut out = vec![0; 50];
        let mut state = InflateState::new_boxed(DataFormat::Zlib);
        let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish);
        let status = res.status.expect("Failed to decompress!");
        assert_eq!(status, MZStatus::StreamEnd);
        assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]);
        assert_eq!(res.bytes_consumed, encoded.len());

        // After a zeroing reset the same stream should decompress again.
        state.reset_as(super::ZeroReset);
        out.iter_mut().map(|x| *x = 0).count();
        let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish);
        let status = res.status.expect("Failed to decompress!");
        assert_eq!(status, MZStatus::StreamEnd);
        assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]);
        assert_eq!(res.bytes_consumed, encoded.len());

        // A minimal reset (dictionary left as-is) should work as well.
        state.reset_as(super::MinReset);
        out.iter_mut().map(|x| *x = 0).count();
        let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish);
        let status = res.status.expect("Failed to decompress!");
        assert_eq!(status, MZStatus::StreamEnd);
        assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]);
        assert_eq!(res.bytes_consumed, encoded.len());
        assert_eq!(state.decompressor().adler32(), Some(459605011));

        // Test state when not computing adler.
        state = InflateState::new_boxed(DataFormat::ZLibIgnoreChecksum);
        out.iter_mut().map(|x| *x = 0).count();
        let res = inflate(&mut state, &encoded, &mut out, MZFlush::Finish);
        let status = res.status.expect("Failed to decompress!");
        assert_eq!(status, MZStatus::StreamEnd);
        assert_eq!(out[..res.bytes_written as usize], b"Hello, zlib!"[..]);
        assert_eq!(res.bytes_consumed, encoded.len());
        // Not computed, so should be Some(1)
        assert_eq!(state.decompressor().adler32(), Some(1));
        // Should still have the checksum read from the header file.
        assert_eq!(state.decompressor().adler32_header(), Some(459605011))
    }

    #[test]
    fn test_partial_continue() {
        let encoded = [
            120u8, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4,
            19,
        ];

        // Feed input bytes one at a time to the decompressor
        let mut out = vec![0; 50];
        let mut state = InflateState::new_boxed(DataFormat::Zlib);
        let mut part_in = 0;
        let mut part_out = 0;
        for i in 1..=encoded.len() {
            let res = inflate(
                &mut state,
                &encoded[part_in..i],
                &mut out[part_out..],
                MZFlush::None,
            );
            let status = res.status.expect("Failed to decompress!");
            if i == encoded.len() {
                assert_eq!(status, MZStatus::StreamEnd);
            } else {
                assert_eq!(status, MZStatus::Ok);
            }
            part_out += res.bytes_written as usize;
            part_in += res.bytes_consumed;
        }

        assert_eq!(out[..part_out as usize], b"Hello, zlib!"[..]);
        assert_eq!(part_in, encoded.len());
        assert_eq!(state.decompressor().adler32(), Some(459605011));
    }

    // Inflate part of a stream and clone the inflate state.
    // Discard the original state and resume the stream from the clone.
    #[test]
    fn test_rewind_and_resume() {
        let encoded = [
            120u8, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4,
            19,
        ];
        let decoded = b"Hello, zlib!";

        // Feed partial input bytes to the decompressor
        let mut out = vec![0; 50];
        let mut state = InflateState::new_boxed(DataFormat::Zlib);
        let res1 = inflate(&mut state, &encoded[..10], &mut out, MZFlush::None);
        let status = res1.status.expect("Failed to decompress!");
        assert_eq!(status, MZStatus::Ok);

        // Clone the state and discard the original
        let mut resume = state.clone();
        drop(state);

        // Resume the stream using the cloned state
        let res2 = inflate(
            &mut resume,
            &encoded[res1.bytes_consumed..],
            &mut out[res1.bytes_written..],
            MZFlush::Finish,
        );
        let status = res2.status.expect("Failed to decompress!");
        assert_eq!(status, MZStatus::StreamEnd);

        assert_eq!(res1.bytes_consumed + res2.bytes_consumed, encoded.len());
        assert_eq!(res1.bytes_written + res2.bytes_written, decoded.len());
        assert_eq!(
            &out[..res1.bytes_written + res2.bytes_written as usize],
            decoded
        );
        assert_eq!(resume.decompressor().adler32(), Some(459605011));
    }
}
}