Vendor dependencies for 0.3.0 release

This commit is contained in:
2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

View File

@@ -0,0 +1,66 @@
//! Buffer wrappers implementing default so we can allocate the buffers with `Box::default()`
//! to avoid stack copies. Box::new() doesn't at the moment, and using a vec means we would lose
//! static length info.
use crate::deflate::core::{LZ_DICT_SIZE, MAX_MATCH_LEN};
use alloc::boxed::Box;
use alloc::vec;
/// Size of the buffer of lz77 encoded data.
pub const LZ_CODE_BUF_SIZE: usize = 64 * 1024;
/// Mask for wrapping indices into the lz77 code buffer (its size is a power of two).
pub const LZ_CODE_BUF_MASK: usize = LZ_CODE_BUF_SIZE - 1;
/// Size of the output buffer.
// 1.3x the lz77 code buffer size.
pub const OUT_BUF_SIZE: usize = (LZ_CODE_BUF_SIZE * 13) / 10;
/// Size of the dictionary buffer, including extra space past `LZ_DICT_SIZE` so the
/// start of the dictionary can be mirrored after the end (see the duplicate write in
/// `stored.rs`, which avoids wrap-around reads when matching).
pub const LZ_DICT_FULL_SIZE: usize = LZ_DICT_SIZE + MAX_MATCH_LEN - 1 + 1;
/// Size (in bits) of hash values in the hash chains.
pub const LZ_HASH_BITS: i32 = 15;
/// How many bits to shift when updating the current hash value.
pub const LZ_HASH_SHIFT: i32 = (LZ_HASH_BITS + 2) / 3;
/// Size of the chained hash tables.
pub const LZ_HASH_SIZE: usize = 1 << LZ_HASH_BITS;
/// Roll the next input byte into the running hash value, keeping the result
/// within the hash table range.
#[inline]
pub const fn update_hash(current_hash: u16, byte: u8) -> u16 {
    let shifted = current_hash << LZ_HASH_SHIFT;
    let mixed = shifted ^ byte as u16;
    mixed & ((LZ_HASH_SIZE - 1) as u16)
}
/// Heap-allocated dictionary and hash-chain buffers used by the compressor.
pub struct HashBuffers {
    /// Dictionary of recently seen input bytes. Includes extra space past
    /// `LZ_DICT_SIZE` where the start of the buffer is mirrored (see `stored.rs`).
    pub dict: Box<[u8; LZ_DICT_FULL_SIZE]>,
    /// Per-position links forming the hash chains.
    /// NOTE(review): role inferred from the insertion code in `stored.rs` — each
    /// entry points at the previous position with the same hash; confirm in core.rs.
    pub next: Box<[u16; LZ_DICT_SIZE]>,
    /// Head of the hash chain for each hash value.
    pub hash: Box<[u16; LZ_DICT_SIZE]>,
}
impl HashBuffers {
#[inline]
pub fn reset(&mut self) {
self.dict.fill(0);
self.next.fill(0);
self.hash.fill(0);
}
}
impl Default for HashBuffers {
    /// Allocate all three buffers zero-initialized, directly on the heap.
    ///
    /// Going through `Vec` -> boxed slice -> boxed array avoids creating the
    /// large arrays on the stack first (see the module docs).
    fn default() -> HashBuffers {
        let dict: Box<[u8; LZ_DICT_FULL_SIZE]> = vec![0u8; LZ_DICT_FULL_SIZE]
            .into_boxed_slice()
            .try_into()
            .unwrap();
        let next: Box<[u16; LZ_DICT_SIZE]> = vec![0u16; LZ_DICT_SIZE]
            .into_boxed_slice()
            .try_into()
            .unwrap();
        let hash: Box<[u16; LZ_DICT_SIZE]> = vec![0u16; LZ_DICT_SIZE]
            .into_boxed_slice()
            .try_into()
            .unwrap();
        HashBuffers { dict, next, hash }
    }
}
/// Wrapper for the fixed-size output buffer, so it can be heap-allocated via
/// `Box::default()` without an intermediate stack copy (see the module docs).
pub struct LocalBuf {
    // Raw output bytes.
    pub b: [u8; OUT_BUF_SIZE],
}
impl Default for LocalBuf {
    /// Create a zero-initialized output buffer.
    fn default() -> LocalBuf {
        let b = [0u8; OUT_BUF_SIZE];
        LocalBuf { b }
    }
}

2491
vendor/miniz_oxide/src/deflate/core.rs vendored Normal file

File diff suppressed because it is too large Load Diff

237
vendor/miniz_oxide/src/deflate/mod.rs vendored Normal file
View File

@@ -0,0 +1,237 @@
//! This module contains functionality for compression.
use crate::alloc::vec;
use crate::alloc::vec::Vec;
mod buffer;
pub mod core;
mod stored;
pub mod stream;
mod zlib;
use self::core::*;
/// How much processing the compressor should do to compress the data.
/// `NoCompression` and `BestSpeed` have special meanings, the other levels determine the number
/// of checks for matches in the hash chains and whether to use lazy or greedy parsing.
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum CompressionLevel {
    /// Don't do any compression, only output uncompressed (stored) blocks.
    NoCompression = 0,
    /// Fast compression. Uses a special compression routine that is optimized for speed.
    BestSpeed = 1,
    /// Slow/high compression. Do a lot of checks to try to find good matches.
    BestCompression = 9,
    /// Even more checks, can be very slow.
    UberCompression = 10,
    /// Default compromise between speed and compression.
    DefaultLevel = 6,
    /// Use the default compression level.
    DefaultCompression = -1,
}
// Missing safe rust analogue (this and mem-to-mem are quite similar)
/*
fn tdefl_compress(
d: Option<&mut CompressorOxide>,
in_buf: *const c_void,
in_size: Option<&mut usize>,
out_buf: *mut c_void,
out_size: Option<&mut usize>,
flush: TDEFLFlush,
) -> TDEFLStatus {
let res = match d {
None => {
in_size.map(|size| *size = 0);
out_size.map(|size| *size = 0);
(TDEFLStatus::BadParam, 0, 0)
},
Some(compressor) => {
let callback_res = CallbackOxide::new(
compressor.callback_func.clone(),
in_buf,
in_size,
out_buf,
out_size,
);
if let Ok(mut callback) = callback_res {
let res = compress(compressor, &mut callback, flush);
callback.update_size(Some(res.1), Some(res.2));
res
} else {
(TDEFLStatus::BadParam, 0, 0)
}
}
};
res.0
}*/
// Missing safe rust analogue
/*
fn tdefl_init(
d: Option<&mut CompressorOxide>,
put_buf_func: PutBufFuncPtr,
put_buf_user: *mut c_void,
flags: c_int,
) -> TDEFLStatus {
if let Some(d) = d {
*d = CompressorOxide::new(
put_buf_func.map(|func|
CallbackFunc { put_buf_func: func, put_buf_user: put_buf_user }
),
flags as u32,
);
TDEFLStatus::Okay
} else {
TDEFLStatus::BadParam
}
}*/
// Missing safe rust analogue (though maybe best served by flate2 front-end instead)
/*
fn tdefl_compress_mem_to_output(
buf: *const c_void,
buf_len: usize,
put_buf_func: PutBufFuncPtr,
put_buf_user: *mut c_void,
flags: c_int,
) -> bool*/
// Missing safe Rust analogue
/*
fn tdefl_compress_mem_to_mem(
out_buf: *mut c_void,
out_buf_len: usize,
src_buf: *const c_void,
src_buf_len: usize,
flags: c_int,
) -> usize*/
/// Compress the input data to a vector, using the specified compression level (0-10),
/// without any wrapper (raw deflate stream).
///
/// # Panics
/// Panics if the internal compressor reports an unexpected status
/// (not supposed to happen unless there is a bug).
pub fn compress_to_vec(input: &[u8], level: u8) -> Vec<u8> {
    compress_to_vec_inner(input, level, 0, 0)
}
/// Compress the input data to a vector, using the specified compression level (0-10), and with a
/// zlib wrapper (the wrapper is requested by passing a positive `window_bits`).
///
/// # Panics
/// Panics if the internal compressor reports an unexpected status
/// (not supposed to happen unless there is a bug).
pub fn compress_to_vec_zlib(input: &[u8], level: u8) -> Vec<u8> {
    compress_to_vec_inner(input, level, 1, 0)
}
/// Simple function to compress data to a vec.
///
/// `window_bits` > 0 requests a zlib wrapper; `strategy` is forwarded to
/// `create_comp_flags_from_zip_params` (a `CompressionStrategy` value as i32).
fn compress_to_vec_inner(mut input: &[u8], level: u8, window_bits: i32, strategy: i32) -> Vec<u8> {
    // The comp flags function sets the zlib flag if the window_bits parameter is > 0.
    let flags = create_comp_flags_from_zip_params(level.into(), window_bits, strategy);
    let mut compressor = CompressorOxide::new(flags);
    // Start with an output buffer of half the input size (at least 2 bytes);
    // it is grown below whenever the compressor runs out of space.
    let mut output = vec![0; ::core::cmp::max(input.len() / 2, 2)];
    let mut out_pos = 0;
    loop {
        let (status, bytes_in, bytes_out) = compress(
            &mut compressor,
            input,
            &mut output[out_pos..],
            TDEFLFlush::Finish,
        );
        out_pos += bytes_out;
        match status {
            TDEFLStatus::Done => {
                // Trim unused trailing space before returning.
                output.truncate(out_pos);
                break;
            }
            TDEFLStatus::Okay if bytes_in <= input.len() => {
                // Advance past the bytes the compressor consumed this round.
                input = &input[bytes_in..];
                // We need more space, so resize the vector.
                if output.len().saturating_sub(out_pos) < 30 {
                    output.resize(output.len() * 2, 0)
                }
            }
            // Not supposed to happen unless there is a bug.
            _ => panic!("Bug! Unexpectedly failed to compress!"),
        }
    }
    output
}
#[cfg(test)]
mod test {
    use super::{compress_to_vec, compress_to_vec_inner, CompressionStrategy};
    use crate::inflate::decompress_to_vec;
    use alloc::vec;
    /// Test deflate example.
    ///
    /// Check if the encoder produces the same code as the example given by Mark Adler here:
    /// https://stackoverflow.com/questions/17398931/deflate-encoding-with-static-huffman-codes/17415203
    #[test]
    fn compress_small() {
        let test_data = b"Deflate late";
        let check = [
            0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00,
        ];
        let res = compress_to_vec(test_data, 1);
        assert_eq!(&check[..], res.as_slice());
        // The high compression level should produce the same output for this tiny input.
        let res = compress_to_vec(test_data, 9);
        assert_eq!(&check[..], res.as_slice());
    }
    /// The Huffman-only strategy (no match searching) should still round-trip.
    #[test]
    fn compress_huff_only() {
        let test_data = b"Deflate late";
        let res = compress_to_vec_inner(test_data, 1, 0, CompressionStrategy::HuffmanOnly as i32);
        let d = decompress_to_vec(res.as_slice()).expect("Failed to decompress!");
        assert_eq!(test_data, d.as_slice());
    }
    /// The RLE strategy should still round-trip.
    #[test]
    fn compress_rle() {
        let test_data = b"Deflate late";
        let res = compress_to_vec_inner(test_data, 1, 0, CompressionStrategy::RLE as i32);
        let d = decompress_to_vec(res.as_slice()).expect("Failed to decompress!");
        assert_eq!(test_data, d.as_slice());
    }
    /// Test that a raw block compresses fine.
    #[test]
    fn compress_raw() {
        let text = b"Hello, zlib!";
        // Expected encoding: final stored-block marker (1), little-endian LEN,
        // little-endian NLEN (ones' complement of LEN), then the raw bytes.
        let encoded = {
            let len = text.len();
            let notlen = !len;
            let mut encoded = vec![
                1,
                len as u8,
                (len >> 8) as u8,
                notlen as u8,
                (notlen >> 8) as u8,
            ];
            encoded.extend_from_slice(&text[..]);
            encoded
        };
        let res = compress_to_vec(text, 0);
        assert_eq!(encoded, res.as_slice());
    }
    #[test]
    fn short() {
        let test_data = [10, 10, 10, 10, 10, 55];
        let c = compress_to_vec(&test_data, 9);
        let d = decompress_to_vec(c.as_slice()).expect("Failed to decompress!");
        assert_eq!(&test_data, d.as_slice());
        // Check that a static block is used here, rather than a raw block
        // , so the data is actually compressed.
        // (The optimal compressed length would be 5, but neither miniz nor zlib manages that either
        // as neither checks matches against the byte at index 0.)
        assert!(c.len() <= 6);
    }
}

305
vendor/miniz_oxide/src/deflate/stored.rs vendored Normal file
View File

@@ -0,0 +1,305 @@
use crate::deflate::buffer::{update_hash, LZ_HASH_SHIFT, LZ_HASH_SIZE};
use crate::deflate::core::{
flush_block, CallbackOxide, CompressorOxide, TDEFLFlush, TDEFLStatus, LZ_DICT_SIZE,
LZ_DICT_SIZE_MASK, MAX_MATCH_LEN, MIN_MATCH_LEN,
};
use core::cmp;
/// Compress the input into raw/stored (uncompressed) deflate blocks, while still
/// filling the dictionary and hash chains so a later switch to a compressing
/// level can find matches against this data.
///
/// Returns `false` if flushing a block failed, `true` otherwise.
pub(crate) fn compress_stored(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
    let in_buf = match callback.buf() {
        // No input buffer: nothing to do.
        None => return true,
        Some(in_buf) => in_buf,
    };
    // Make sure this is cleared in case compression level is switched later.
    // TODO: It's possible we don't need this or could do this elsewhere later
    // but just do this here to avoid causing issues for now.
    d.params.saved_match_len = 0;
    let mut bytes_written = d.lz.total_bytes;
    let mut src_pos = d.params.src_pos;
    let mut lookahead_size = d.dict.lookahead_size;
    let mut lookahead_pos = d.dict.lookahead_pos;
    // Process input while any remains, or, when flushing, while buffered
    // lookahead data still has to be pushed out.
    while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) {
        let src_buf_left = in_buf.len() - src_pos;
        let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size);
        if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1
            && num_bytes_to_process > 0
        {
            // Fast path: enough context is buffered to keep a rolling hash going
            // while copying bytes into the dictionary.
            let dictb = &mut d.dict.b;
            let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
            let mut ins_pos = lookahead_pos + lookahead_size - 2;
            // Start the hash value from the first two bytes
            let mut hash = update_hash(
                u16::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK]),
                dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK],
            );
            lookahead_size += num_bytes_to_process;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                // Add byte to input buffer.
                dictb.dict[dst_pos] = c;
                if dst_pos < MAX_MATCH_LEN - 1 {
                    // Mirror the start of the dictionary past LZ_DICT_SIZE so match
                    // reads never have to wrap around.
                    dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
                }
                // Generate hash from the current byte,
                hash = update_hash(hash, c);
                dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
                // and insert it into the hash chain.
                dictb.hash[hash as usize] = ins_pos as u16;
                dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK;
                ins_pos += 1;
            }
            src_pos += num_bytes_to_process;
        } else {
            // Startup path: not enough context yet; insert bytes one at a time and
            // only start hashing once MIN_MATCH_LEN bytes are available.
            let dictb = &mut d.dict.b;
            for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
                let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
                dictb.dict[dst_pos] = c;
                if dst_pos < MAX_MATCH_LEN - 1 {
                    // Same mirroring as in the fast path above.
                    dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
                }
                lookahead_size += 1;
                if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() {
                    let ins_pos = lookahead_pos + lookahead_size - 3;
                    // Hash of the three bytes starting at ins_pos, computed from scratch.
                    let hash = ((u32::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK])
                        << (LZ_HASH_SHIFT * 2))
                        ^ ((u32::from(dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK])
                            << LZ_HASH_SHIFT)
                            ^ u32::from(c)))
                        & (LZ_HASH_SIZE as u32 - 1);
                    dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
                    dictb.hash[hash as usize] = ins_pos as u16;
                }
            }
            src_pos += num_bytes_to_process;
        }
        d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size);
        if d.params.flush == TDEFLFlush::None && lookahead_size < MAX_MATCH_LEN {
            // Not flushing and the lookahead isn't full yet; wait for more input.
            break;
        }
        // Stored blocks emit no matches, so the window always advances one byte.
        let len_to_move = 1;
        bytes_written += 1;
        lookahead_pos += len_to_move;
        assert!(lookahead_size >= len_to_move);
        lookahead_size -= len_to_move;
        d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE);
        if bytes_written > 31 * 1024 {
            // Enough data accumulated for a block; flush it out.
            d.lz.total_bytes = bytes_written;
            d.params.src_pos = src_pos;
            // These values are used in flush_block, so we need to write them back here.
            d.dict.lookahead_size = lookahead_size;
            d.dict.lookahead_pos = lookahead_pos;
            let n = flush_block(d, callback, TDEFLFlush::None)
                .unwrap_or(TDEFLStatus::PutBufFailed as i32);
            if n != 0 {
                // Nonzero means we should stop: positive is success-so-far, negative
                // is failure. NOTE(review): inferred from the `n > 0` mapping here;
                // confirm against flush_block's contract in core.rs.
                return n > 0;
            }
            bytes_written = d.lz.total_bytes;
        }
    }
    // Write the updated state back to the compressor before returning.
    d.lz.total_bytes = bytes_written;
    d.params.src_pos = src_pos;
    d.dict.lookahead_size = lookahead_size;
    d.dict.lookahead_pos = lookahead_pos;
    true
}
/*
fn compress_rle(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool {
let mut src_pos = d.params.src_pos;
let in_buf = match callback.in_buf {
None => return true,
Some(in_buf) => in_buf,
};
let mut lookahead_size = d.dict.lookahead_size;
let mut lookahead_pos = d.dict.lookahead_pos;
let mut saved_lit = d.params.saved_lit;
let mut saved_match_dist = d.params.saved_match_dist;
let mut saved_match_len = d.params.saved_match_len;
while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) {
let src_buf_left = in_buf.len() - src_pos;
let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size);
if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1
&& num_bytes_to_process > 0
{
let dictb = &mut d.dict.b;
let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
let mut ins_pos = lookahead_pos + lookahead_size - 2;
// Start the hash value from the first two bytes
let mut hash = update_hash(
u16::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK]),
dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK],
);
lookahead_size += num_bytes_to_process;
for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
// Add byte to input buffer.
dictb.dict[dst_pos] = c;
if dst_pos < MAX_MATCH_LEN - 1 {
dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
}
// Generate hash from the current byte,
hash = update_hash(hash, c);
dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
// and insert it into the hash chain.
dictb.hash[hash as usize] = ins_pos as u16;
dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK;
ins_pos += 1;
}
src_pos += num_bytes_to_process;
} else {
let dictb = &mut d.dict.b;
for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] {
let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK;
dictb.dict[dst_pos] = c;
if dst_pos < MAX_MATCH_LEN - 1 {
dictb.dict[LZ_DICT_SIZE + dst_pos] = c;
}
lookahead_size += 1;
if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() {
let ins_pos = lookahead_pos + lookahead_size - 3;
let hash = ((u32::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK])
<< (LZ_HASH_SHIFT * 2))
^ ((u32::from(dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK])
<< LZ_HASH_SHIFT)
^ u32::from(c)))
& (LZ_HASH_SIZE as u32 - 1);
dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize];
dictb.hash[hash as usize] = ins_pos as u16;
}
}
src_pos += num_bytes_to_process;
}
d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size);
if d.params.flush == TDEFLFlush::None && lookahead_size < MAX_MATCH_LEN {
break;
}
let mut len_to_move = 1;
let mut cur_match_dist = 0;
let mut cur_match_len = if saved_match_len != 0 {
saved_match_len
} else {
u32::from(MIN_MATCH_LEN) - 1
};
let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK;
// If TDEFL_RLE_MATCHES is set, we only look for repeating sequences of the current byte.
if d.dict.size != 0 && d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS == 0 {
let c = d.dict.b.dict[(cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK];
cur_match_len = d.dict.b.dict[cur_pos..(cur_pos + lookahead_size)]
.iter()
.take_while(|&x| *x == c)
.count() as u32;
if cur_match_len < MIN_MATCH_LEN.into() {
cur_match_len = 0
} else {
cur_match_dist = 1
}
}
let far_and_small = cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024;
let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5;
if far_and_small || filter_small || cur_pos == cur_match_dist as usize {
cur_match_dist = 0;
cur_match_len = 0;
}
if saved_match_len != 0 {
if cur_match_len > saved_match_len {
record_literal(&mut d.huff, &mut d.lz, saved_lit);
if cur_match_len >= 128 {
record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
saved_match_len = 0;
len_to_move = cur_match_len as usize;
} else {
saved_lit = d.dict.b.dict[cur_pos];
saved_match_dist = cur_match_dist;
saved_match_len = cur_match_len;
}
} else {
record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist);
len_to_move = (saved_match_len - 1) as usize;
saved_match_len = 0;
}
} else if cur_match_dist == 0 {
record_literal(
&mut d.huff,
&mut d.lz,
d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)],
);
} else if d.params.greedy_parsing
|| (d.params.flags & TDEFL_RLE_MATCHES != 0)
|| cur_match_len >= 128
{
// If we are using lazy matching, check for matches at the next byte if the current
// match was shorter than 128 bytes.
record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist);
len_to_move = cur_match_len as usize;
} else {
saved_lit = d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)];
saved_match_dist = cur_match_dist;
saved_match_len = cur_match_len;
}
lookahead_pos += len_to_move;
assert!(lookahead_size >= len_to_move);
lookahead_size -= len_to_move;
d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE);
let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8;
let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0;
let fat = ((d.lz.code_position * 115) >> 7) >= d.lz.total_bytes as usize;
let fat_or_raw = (d.lz.total_bytes > 31 * 1024) && (fat || raw);
if lz_buf_tight || fat_or_raw {
d.params.src_pos = src_pos;
// These values are used in flush_block, so we need to write them back here.
d.dict.lookahead_size = lookahead_size;
d.dict.lookahead_pos = lookahead_pos;
let n = flush_block(d, callback, TDEFLFlush::None)
.unwrap_or(TDEFLStatus::PutBufFailed as i32);
if n != 0 {
d.params.saved_lit = saved_lit;
d.params.saved_match_dist = saved_match_dist;
d.params.saved_match_len = saved_match_len;
return n > 0;
}
}
}
d.params.src_pos = src_pos;
d.dict.lookahead_size = lookahead_size;
d.dict.lookahead_pos = lookahead_pos;
d.params.saved_lit = saved_lit;
d.params.saved_match_dist = saved_match_dist;
d.params.saved_match_len = saved_match_len;
true
}*/

121
vendor/miniz_oxide/src/deflate/stream.rs vendored Normal file
View File

@@ -0,0 +1,121 @@
//! Extra streaming compression functionality.
//!
//! As of now this is mainly intended for use to build a higher-level wrapper.
//!
//! There is no DeflateState as the needed state is contained in the compressor struct itself.
use crate::deflate::core::{compress, CompressorOxide, TDEFLFlush, TDEFLStatus};
use crate::{MZError, MZFlush, MZStatus, StreamResult};
/// Try to compress from input to output with the given [`CompressorOxide`].
///
/// Returns a [`StreamResult`] containing the number of input bytes consumed,
/// the number of output bytes written, and the resulting status.
///
/// # Errors
///
/// Returns [`MZError::Buf`] If the size of the `output` slice is empty or no progress was made due
/// to lack of expected input data, or if called without [`MZFlush::Finish`] after the compression
/// was already finished.
///
/// Returns [`MZError::Param`] if the compressor parameters are set wrong.
///
/// Returns [`MZError::Stream`] when lower-level decompressor returns a
/// [`TDEFLStatus::PutBufFailed`]; may not actually be possible.
pub fn deflate(
    compressor: &mut CompressorOxide,
    input: &[u8],
    output: &mut [u8],
    flush: MZFlush,
) -> StreamResult {
    if output.is_empty() {
        // No space to write anything at all.
        return StreamResult::error(MZError::Buf);
    }
    if compressor.prev_return_status() == TDEFLStatus::Done {
        // Compression already finished: only a Finish flush is valid now, in
        // which case we just report the stream end again.
        return if flush == MZFlush::Finish {
            StreamResult {
                bytes_written: 0,
                bytes_consumed: 0,
                status: Ok(MZStatus::StreamEnd),
            }
        } else {
            StreamResult::error(MZError::Buf)
        };
    }
    let mut bytes_written = 0;
    let mut bytes_consumed = 0;
    let mut next_in = input;
    let mut next_out = output;
    let status = loop {
        let in_bytes;
        let out_bytes;
        let defl_status = {
            let res = compress(compressor, next_in, next_out, TDEFLFlush::from(flush));
            in_bytes = res.1;
            out_bytes = res.2;
            res.0
        };
        // Advance the windows past what was consumed/produced this round.
        next_in = &next_in[in_bytes..];
        next_out = &mut next_out[out_bytes..];
        bytes_consumed += in_bytes;
        bytes_written += out_bytes;
        // Check if we are done, or compression failed.
        match defl_status {
            TDEFLStatus::BadParam => break Err(MZError::Param),
            // Don't think this can happen as we're not using a custom callback.
            TDEFLStatus::PutBufFailed => break Err(MZError::Stream),
            TDEFLStatus::Done => break Ok(MZStatus::StreamEnd),
            _ => (),
        };
        // All the output space was used, so wait for more.
        if next_out.is_empty() {
            break Ok(MZStatus::Ok);
        }
        if next_in.is_empty() && (flush != MZFlush::Finish) {
            let total_changed = bytes_written > 0 || bytes_consumed > 0;
            break if (flush != MZFlush::None) || total_changed {
                // We wrote or consumed something, and/or did a flush (sync/partial etc.).
                Ok(MZStatus::Ok)
            } else {
                // No more input data, not flushing, and nothing was consumed or written,
                // so couldn't make any progress.
                Err(MZError::Buf)
            };
        }
    };
    StreamResult {
        bytes_consumed,
        bytes_written,
        status,
    }
}
#[cfg(test)]
mod test {
    use super::deflate;
    use crate::deflate::CompressorOxide;
    use crate::inflate::decompress_to_vec_zlib;
    use crate::{MZFlush, MZStatus};
    use alloc::boxed::Box;
    use alloc::vec;
    /// Compress a short message in a single Finish call and check that the
    /// stream is reported as finished, the input is fully consumed, and the
    /// data round-trips through decompression.
    #[test]
    fn test_state() {
        let data = b"Hello zlib!";
        let mut compressed = vec![0; 50];
        let mut compressor = Box::<CompressorOxide>::default();
        let res = deflate(&mut compressor, data, &mut compressed, MZFlush::Finish);
        let status = res.status.expect("Failed to compress!");
        let decomp =
            decompress_to_vec_zlib(&compressed).expect("Failed to decompress compressed data");
        assert_eq!(status, MZStatus::StreamEnd);
        assert_eq!(decomp[..], data[..]);
        assert_eq!(res.bytes_consumed, data.len());
    }
}

112
vendor/miniz_oxide/src/deflate/zlib.rs vendored Normal file
View File

@@ -0,0 +1,112 @@
use crate::deflate::core::deflate_flags::{
TDEFL_FORCE_ALL_RAW_BLOCKS, TDEFL_GREEDY_PARSING_FLAG, TDEFL_RLE_MATCHES,
};
/// Compression method field of CMF: 8 = deflate.
const DEFAULT_CM: u8 = 8;
/// Compression info field of CMF: 7 = 32 KiB window, stored in the high nibble.
const DEFAULT_CINFO: u8 = 7 << 4;
const _DEFAULT_FDICT: u8 = 0;
const DEFAULT_CMF: u8 = DEFAULT_CM | DEFAULT_CINFO;
// CMF used for RLE (technically it uses a window size of 0 but the lowest that can
// be specified in the header corresponds to a window size of 1 << (0 + 8) aka 256.
const MIN_CMF: u8 = DEFAULT_CM; // | 0
/// The 16-bit value consisting of CMF and FLG must be divisible by this to be valid.
const FCHECK_DIVISOR: u8 = 31;
/// Generate FCHECK from CMF and FLG (without FCHECK) so that they are correct according to the
/// specification, i.e. (CMF*256 + FLG) % 31 == 0.
/// Returns `flg` with the FCHECK bits filled in (any existing FCHECK bits are ignored).
#[inline]
fn add_fcheck(cmf: u8, flg: u8) -> u8 {
    // Clear any existing FCHECK bits *before* computing the remainder, so stray
    // low bits in the input can't skew the checksum. (Previously the remainder
    // was taken over the unmasked value, which contradicted the documented
    // behavior whenever FCHECK bits were already set.)
    let flg = flg & 0b11100000;
    let rem = ((usize::from(cmf) * 256) + usize::from(flg)) % usize::from(FCHECK_DIVISOR);
    // Casting is safe as rem can't overflow since it is a value mod 31.
    // We can simply add the value to flg as (31 - rem) will never be above 2^5.
    flg + (FCHECK_DIVISOR - rem as u8)
}
/// Map compression flags back to the approximate zlib header level field (0-3).
#[inline]
const fn zlib_level_from_flags(flags: u32) -> u8 {
    use crate::deflate::core::NUM_PROBES;
    let num_probes = flags & super::MAX_PROBES_MASK;
    if flags & (TDEFL_GREEDY_PARSING_FLAG | TDEFL_RLE_MATCHES) != 0 {
        // Greedy parsing or RLE-only matching corresponds to the two fastest levels.
        if num_probes <= 1 {
            0
        } else {
            1
        }
    } else if num_probes >= NUM_PROBES[9] as u32 {
        3
    } else {
        2
    }
}
/// Pick the CMF byte for the header based on the compression flags.
#[inline]
const fn cmf_from_flags(flags: u32) -> u8 {
    // If we are using RLE encoding or no compression the window bits can be set as the
    // minimum; otherwise advertise the default 32 KiB window.
    if (flags & TDEFL_RLE_MATCHES != 0) || (flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0) {
        MIN_CMF
    } else {
        DEFAULT_CMF
    }
}
/// Get the zlib header for the level using the default window size and no
/// dictionary.
#[inline]
fn header_from_level(level: u8, flags: u32) -> [u8; 2] {
    // The level field occupies the top two bits of FLG; FCHECK fills in the rest.
    let cmf = cmf_from_flags(flags);
    let flg = add_fcheck(cmf, level << 6);
    [cmf, flg]
}
/// Create a zlib header from the given compression flags.
/// Only level is considered.
#[inline]
pub fn header_from_flags(flags: u32) -> [u8; 2] {
    header_from_level(zlib_level_from_flags(flags), flags)
}
#[cfg(test)]
mod test {
    use crate::shared::MZ_DEFAULT_WINDOW_BITS;
    /// Check that compression flags for each level map to the expected zlib
    /// header level field (0-3).
    #[test]
    fn zlib() {
        use super::super::*;
        use super::*;
        let test_level = |level, expected| {
            let flags = create_comp_flags_from_zip_params(
                level,
                MZ_DEFAULT_WINDOW_BITS,
                CompressionStrategy::Default as i32,
            );
            assert_eq!(zlib_level_from_flags(flags), expected);
        };
        assert_eq!(zlib_level_from_flags(DEFAULT_FLAGS), 2);
        test_level(0, 0);
        test_level(1, 0);
        test_level(2, 1);
        test_level(3, 1);
        for i in 4..=8 {
            test_level(i, 2)
        }
        test_level(9, 3);
        test_level(10, 3);
    }
    /// A generated header must satisfy the zlib FCHECK divisibility requirement.
    #[test]
    fn test_header() {
        let header = super::header_from_level(3, 0);
        assert_eq!(
            ((usize::from(header[0]) * 256) + usize::from(header[1])) % 31,
            0
        );
    }
}