Vendor dependencies for 0.3.0 release

2025-09-27 10:29:08 -05:00
parent 0c8d39d483
commit 82ab7f317b
26803 changed files with 16134934 additions and 0 deletions

141
vendor/ogg/src/crc.rs vendored Normal file

@@ -0,0 +1,141 @@
// Ogg decoder and encoder written in Rust
//
// Copyright (c) 2016-2017 est31 <MTest31@outlook.com>
// and contributors. All rights reserved.
// Redistribution or use only under the terms
// specified in the LICENSE file attached to this
// source distribution.
/*!
Implementation of the CRC algorithm with the
vorbis specific parameters and setup
*/
// Lookup table to enable bytewise CRC32 calculation
// Created using the crc32-table-generate example.
//
static CRC_LOOKUP_ARRAY : &[u32] = &[
0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4];
/*
// Const implementation: TODO adopt it once MSRV > 1.46
static CRC_LOOKUP_ARRAY :&[u32] = &lookup_array();
const fn get_tbl_elem(idx :u32) -> u32 {
let mut r :u32 = idx << 24;
let mut i = 0;
while i < 8 {
r = (r << 1) ^ (-(((r >> 31) & 1) as i32) as u32 & 0x04c11db7);
i += 1;
}
return r;
}
const fn lookup_array() -> [u32; 0x100] {
let mut lup_arr :[u32; 0x100] = [0; 0x100];
let mut i = 0;
while i < 0x100 {
lup_arr[i] = get_tbl_elem(i as u32);
i += 1;
}
lup_arr
}
*/
#[cfg(test)]
pub fn vorbis_crc32(array :&[u8]) -> u32 {
return vorbis_crc32_update(0, array);
}
pub fn vorbis_crc32_update(cur :u32, array :&[u8]) -> u32 {
let mut ret :u32 = cur;
for av in array {
ret = (ret << 8) ^ CRC_LOOKUP_ARRAY[(*av as u32 ^ (ret >> 24)) as usize];
}
return ret;
}
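// For reference, a bitwise sketch of the same CRC (MSB-first, polynomial
// 0x04c11db7, initial value 0, no bit reflection, no final XOR), which the
// lookup table above merely accelerates. Not part of the vendored crate;
// kept commented out, like the const fn sketch above, for illustration only.
/*
fn vorbis_crc32_bitwise(array :&[u8]) -> u32 {
	let mut ret :u32 = 0;
	for av in array {
		ret ^= (*av as u32) << 24;
		for _ in 0 .. 8 {
			// Shifting out a set top bit costs one xor with the polynomial.
			let carry = (ret & 0x8000_0000) != 0;
			ret <<= 1;
			if carry {
				ret ^= 0x04c11db7;
			}
		}
	}
	ret
}
*/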
#[test]
fn test_crc32() {
// Test page taken from real Ogg file
let test_arr = &[
0x4f, 0x67, 0x67, 0x53, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0xa3,
0x90, 0x5b, 0x00, 0x00, 0x00, 0x00,
// The spec requires us to zero out the CRC field
/*0x6d, 0x94, 0x4e, 0x3d,*/ 0x00, 0x00, 0x00, 0x00,
0x01, 0x1e, 0x01, 0x76, 0x6f, 0x72,
0x62, 0x69, 0x73, 0x00, 0x00, 0x00, 0x00, 0x02,
0x44, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0xb5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0xb8, 0x01];
println!("");
println!("CRC of \"==!\" calculated as 0x{:08x} (expected 0x9f858776)", vorbis_crc32(&[61,61,33]));
println!("Test page CRC calculated as 0x{:08x} (expected 0x3d4e946d)", vorbis_crc32(test_arr));
assert_eq!(vorbis_crc32(&[61,61,33]), 0x9f858776);
assert_eq!(vorbis_crc32(test_arr), 0x3d4e946d);
assert_eq!(vorbis_crc32(&test_arr[0 .. 27]), 0x7b374db8);
}

103
vendor/ogg/src/lib.rs vendored Normal file

@@ -0,0 +1,103 @@
// Ogg decoder and encoder written in Rust
//
// Copyright (c) 2016 est31 <MTest31@outlook.com>
// and contributors. All rights reserved.
// Redistribution or use only under the terms
// specified in the LICENSE file attached to this
// source distribution.
#![forbid(unsafe_code)]
/*!
Ogg container decoder and encoder
The most interesting structures in this module are `PacketReader` and `PacketWriter`.
*/
extern crate byteorder;
#[cfg(feature = "async")]
extern crate tokio_io;
#[cfg(feature = "async")]
#[macro_use]
extern crate futures;
#[cfg(feature = "async")]
extern crate bytes;
#[cfg(test)]
mod test;
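// Descriptive note (not in the original source): the tri! macro below is a
// local equivalent of the `?` operator / the old `try!` macro. It unwraps an
// Ok value and otherwise early-returns the error, converted via `.into()`.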
macro_rules! tri {
($e:expr) => {
match $e {
Ok(val) => val,
Err(err) => return Err(err.into()),
}
};
}
mod crc;
pub mod reading;
pub mod writing;
pub use writing::{PacketWriter, PacketWriteEndInfo};
pub use reading::{PacketReader, OggReadError};
/**
Ogg packet representation.
In the Ogg format, packets are the smallest logical unit of subdivision.
Every packet belongs to a *logical* bitstream. The *logical* bitstreams are combined into a *physical* bitstream, either by interleaving their pages (grouping) or by concatenating whole streams (chaining).
Every logical bitstream is identified by the serial number stored in its pages. The Packet struct carries that number as well, so one can tell which logical bitstream a Packet belongs to.
*/
pub struct Packet {
/// The data the `Packet` contains
pub data :Vec<u8>,
/// `true` iff this packet is the first one in the page.
first_packet_pg :bool,
/// `true` iff this packet is the first one in the logical bitstream.
first_packet_stream :bool,
/// `true` iff this packet is the last one in the page.
last_packet_pg :bool,
/// `true` iff this packet is the last one in the logical bitstream
last_packet_stream :bool,
/// Absolute granule position of the last page the packet was in.
/// The meaning of the absolute granule position is defined by the codec.
absgp_page :u64,
/// Serial number. Uniquely identifying the logical bitstream.
stream_serial :u32,
/*/// Packet counter
/// Why u64? There are MAX_U32 pages, and every page has up to 128 packets. u32 wouldn't be sufficient here...
pub sequence_num :u64,*/ // TODO perhaps add this later on...
}
impl Packet {
/// Returns whether the packet is the first one starting in the page
pub fn first_in_page(&self) -> bool {
self.first_packet_pg
}
/// Returns whether the packet is the first one of the entire stream
pub fn first_in_stream(&self) -> bool {
self.first_packet_stream
}
/// Returns whether the packet is the last one starting in the page
pub fn last_in_page(&self) -> bool {
self.last_packet_pg
}
/// Returns whether the packet is the last one of the entire stream
pub fn last_in_stream(&self) -> bool {
self.last_packet_stream
}
/// Returns the absolute granule position of the page the packet ended in.
///
/// The meaning of the absolute granule position is defined by the codec.
pub fn absgp_page(&self) -> u64 {
self.absgp_page
}
/// Returns the serial number that uniquely identifies the logical bitstream.
pub fn stream_serial(&self) -> u32 {
self.stream_serial
}
}
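// A minimal usage sketch from a consumer's point of view (not part of the
// vendored crate, added for illustration): read every packet of an in-memory
// Ogg stream and print which logical bitstream it belongs to. The exact
// signature of read_packet lives in reading.rs, whose diff is suppressed in
// this commit view, so the call shape here is an assumption based on how the
// tests use it (a Result wrapping an Option<Packet>).
/*
use std::io::Cursor;
use ogg::{PacketReader, OggReadError};

fn dump_packets(ogg_bytes :Vec<u8>) -> Result<(), OggReadError> {
	let mut rdr = PacketReader::new(Cursor::new(ogg_bytes));
	// read_packet yields Ok(None) once the physical stream is exhausted.
	while let Some(pck) = rdr.read_packet()? {
		println!("serial 0x{:08x}: {} bytes, absgp of page {}",
			pck.stream_serial(), pck.data.len(), pck.absgp_page());
	}
	Ok(())
}
*/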

1154
vendor/ogg/src/reading.rs vendored Normal file

File diff suppressed because it is too large

541
vendor/ogg/src/test.rs vendored Normal file

@@ -0,0 +1,541 @@
// Ogg decoder and encoder written in Rust
//
// Copyright (c) 2016-2017 est31 <MTest31@outlook.com>
// and contributors. All rights reserved.
// Redistribution or use only under the terms
// specified in the LICENSE file attached to this
// source distribution.
use super::*;
use std::io::{Cursor, Seek, SeekFrom};
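// Descriptive note (not in the original source): the macro below compares two
// arrays element by element and panics with the first mismatching index,
// which is far more readable than dumping two multi-kilobyte buffers when a
// generated test packet does not round-trip.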
macro_rules! test_arr_eq {
($a_arr:expr, $b_arr:expr) => {
let a_arr = &$a_arr;
let b_arr = &$b_arr;
for i in 0 .. b_arr.len() {
if a_arr[i] != b_arr[i] {
panic!("Mismatch of values at index {}: {} {}", i, a_arr[i], b_arr[i]);
}
}
}
}
#[test]
fn test_packet_rw() {
let mut c = Cursor::new(Vec::new());
let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr), 0xdeadb33f, np, 0).unwrap();
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p1 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr, *p1.data);
let p2 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
// Now test packets spanning multiple segments
let mut c = Cursor::new(Vec::new());
let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let mut test_arr_2 = [0; 700];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
for (idx, a) in test_arr_2.iter_mut().enumerate() {
*a = (idx as u8) / 4;
}
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr), 0xdeadb33f, np, 0).unwrap();
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(&mut c);
let p1 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr, *p1.data);
let p2 = r.read_packet().unwrap().unwrap();
test_arr_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
// Now test packets spanning multiple pages
let mut c = Cursor::new(Vec::new());
let mut test_arr_2 = [0; 14_000];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
for (idx, a) in test_arr_2.iter_mut().enumerate() {
*a = (idx as u8) / 4;
}
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p2 = r.read_packet().unwrap().unwrap();
test_arr_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
}
#[test]
fn test_page_end_after_first_packet() {
// Test that everything works well if we force a page end
// after the first packet
let mut c = Cursor::new(Vec::new());
let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 0).unwrap();
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p1 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr, *p1.data);
let p2 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
}
#[test]
fn test_packet_write() {
let mut c = Cursor::new(Vec::new());
// Test page taken from real Ogg file
let test_arr_out = [
0x4f, 0x67, 0x67, 0x53, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0xa3,
0x90, 0x5b, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x94,
0x4e, 0x3d, 0x01, 0x1e, 0x01, 0x76, 0x6f, 0x72,
0x62, 0x69, 0x73, 0x00, 0x00, 0x00, 0x00, 0x02,
0x44, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0xb5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0xb8, 0x01u8];
let test_arr_in = [0x01, 0x76, 0x6f, 0x72,
0x62, 0x69, 0x73, 0x00, 0x00, 0x00, 0x00, 0x02,
0x44, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0xb5, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0xb8, 0x01u8];
{
let mut w = PacketWriter::new(&mut c);
w.write_packet(Box::new(test_arr_in), 0x5b90a374,
PacketWriteEndInfo::EndPage, 0).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.get_ref().len(), test_arr_out.len());
let cr = c.get_ref();
test_arr_eq!(cr, test_arr_out);
}
#[test]
fn test_write_large() {
// Test that writing an overlarge packet works,
// i.e. where a new page is forced by the
// first packet in the page.
let mut c = Cursor::new(Vec::new());
// A page can contain at most 255 * 255 = 65025
// bytes of payload packet data.
// A length of 70_000 is guaranteed to create a page break.
let test_arr = gen_pck(1234, 70_000 / 4);
{
let mut w = PacketWriter::new(&mut c);
w.write_packet(test_arr.clone(), 0x5b90a374,
PacketWriteEndInfo::EndPage, 0).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p = r.read_packet().unwrap().unwrap();
test_arr_eq!(test_arr, *p.data);
}
}
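// Descriptive note (not in the original source): XorShift below is a small
// xorshift128-style pseudo-random generator, presumably used so that the
// packet payloads generated for the tests are fully deterministic and
// reproducible without pulling in an external RNG dependency.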
struct XorShift {
state :(u32, u32, u32, u32),
}
impl XorShift {
fn from_two(seed :(u32, u32)) -> Self {
let mut xs = XorShift {
state : (seed.0 ^ 0x2a24a930, seed.1 ^ 0xa9f60227,
!seed.0 ^ 0x68c44d2d, !seed.1 ^ 0xa1f9794a)
};
xs.next();
xs.next();
xs.next();
xs
}
fn next(&mut self) -> u32 {
let mut r = self.state.3;
r ^= r << 11;
r ^= r >> 8;
self.state.3 = self.state.2;
self.state.2 = self.state.1;
self.state.1 = self.state.0;
r ^= self.state.0;
r ^= self.state.0 >> 19;
self.state.0 = r;
r
}
}
fn gen_pck(seed :u32, len_d_four :usize) -> Box<[u8]> {
let mut ret = Vec::with_capacity(len_d_four * 4);
let mut xs = XorShift::from_two((seed, len_d_four as u32));
if len_d_four > 0 {
ret.push(seed as u8);
ret.push((seed >> 8) as u8);
ret.push((seed >> 16) as u8);
ret.push((seed >> 24) as u8);
}
for _ in 1..len_d_four {
let v = xs.next();
ret.push(v as u8);
ret.push((v >> 8) as u8);
ret.push((v >> 16) as u8);
ret.push((v >> 24) as u8);
}
ret.into_boxed_slice()
}
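// Worked example (not in the original source): gen_pck(1234, 3) produces
// 12 bytes, the first four being the little-endian bytes of the seed
// (0xd2, 0x04, 0x00, 0x00) and the remaining eight coming from the XorShift
// stream seeded with (1234, 3). Because the contents are a pure function of
// (seed, len_d_four), the seek tests below can regenerate a packet from its
// absolute granule position and compare it against what was read back.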
macro_rules! test_seek_r {
($r:expr, $absgp:expr) => {
test_seek_r!($r, $absgp, +, 0);
};
($r:expr, $absgp:expr, $o:tt, $m:expr) => {
// First, perform the seek
$r.seek_absgp(None, $absgp).unwrap();
// Then go to the searched packet inside the page
// We know that all groups of three packets form one.
for _ in 0 .. ($absgp % 3) $o $m {
$r.read_packet().unwrap().unwrap();
}
// Now read the actual packet we are interested in and
let pck = $r.read_packet().unwrap().unwrap();
// a) ensure we have a correct absolute granule pos
// for the page and
assert!(($absgp - pck.absgp_page as i64).abs() <= 3);
// b) ensure the packet's content matches the one we
// put in, as an additional sanity check.
test_arr_eq!(pck.data, gen_pck($absgp, &pck.data.len() / 4));
};
}
macro_rules! ensure_continues_r {
($r:expr, $absgp:expr) => {
// Ensure the stream continues normally
let pck = $r.read_packet().unwrap().unwrap();
test_arr_eq!(pck.data, gen_pck($absgp, &pck.data.len() / 4));
let pck = $r.read_packet().unwrap().unwrap();
test_arr_eq!(pck.data, gen_pck($absgp + 1, &pck.data.len() / 4));
let pck = $r.read_packet().unwrap().unwrap();
test_arr_eq!(pck.data, gen_pck($absgp + 2, &pck.data.len() / 4));
let pck = $r.read_packet().unwrap().unwrap();
test_arr_eq!(pck.data, gen_pck($absgp + 3, &pck.data.len() / 4));
};
}
#[test]
fn test_byte_seeking_continued() {
let mut c = Cursor::new(Vec::new());
let off;
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
let ep = PacketWriteEndInfo::EndPage;
let es = PacketWriteEndInfo::EndStream;
w.write_packet(gen_pck(1, 300), 0xdeadb33f, ep, 1).unwrap();
w.write_packet(gen_pck(2, 270_000), 0xdeadb33f, np, 2).unwrap();
off = w.get_current_offs().unwrap();
w.write_packet(gen_pck(3, 270_000), 0xdeadb33f, np, 3).unwrap();
w.write_packet(gen_pck(4, 270_000), 0xdeadb33f, es, 4).unwrap();
}
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
let mut r = PacketReader::new(c);
let pck = r.read_packet().unwrap().unwrap();
assert_eq!(1, pck.absgp_page);
test_arr_eq!(pck.data, gen_pck(1, &pck.data.len() / 4));
// Jump over the second packet
assert_eq!(r.seek_bytes(SeekFrom::Start(off)).unwrap(), off);
let pck = r.read_packet().unwrap().unwrap();
assert_eq!(3, pck.absgp_page);
test_arr_eq!(pck.data, gen_pck(3, &pck.data.len() / 4));
let pck = r.read_packet().unwrap().unwrap();
assert_eq!(4, pck.absgp_page);
test_arr_eq!(pck.data, gen_pck(4, &pck.data.len() / 4));
}
#[test]
fn test_seeking() {
let pck_count = 402;
let mut rng = XorShift::from_two((0x9899eb03, 0x54138143));
let mut c = Cursor::new(Vec::new());
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
let ep = PacketWriteEndInfo::EndPage;
for ctr in 0..pck_count {
w.write_packet(gen_pck(ctr, rng.next() as usize & 127), 0xdeadb33f,
if (ctr + 1) % 3 == 0 { ep } else { np }, ctr as u64).unwrap();
}
}
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
let mut r = PacketReader::new(c);
macro_rules! test_seek {
($absgp:expr) => {
test_seek_r!(r, $absgp)
};
}
macro_rules! ensure_continues {
($absgp:expr) => {
ensure_continues_r!(r, $absgp)
};
}
test_seek!(32);
test_seek!(300);
test_seek!(314);
test_seek!(100);
ensure_continues!(101);
test_seek!(10);
ensure_continues!(11);
// Ensure that if we seek to the same place multiple times, it doesn't
// fill data needlessly.
r.seek_absgp(None, 377).unwrap();
r.seek_absgp(None, 377).unwrap();
test_seek!(377);
ensure_continues!(378);
// Ensure that if we seek to the same place multiple times, it doesn't
// fill data needlessly.
r.seek_absgp(None, 200).unwrap();
r.seek_absgp(None, 200).unwrap();
test_seek!(200);
ensure_continues!(201);
// Ensure the final page can be sought to
test_seek!(401);
// After we sought to the final page, we should be able to seek
// before it again.
test_seek!(250);
}
// TODO add seeking tests for more cases:
// * multiple logical streams
// * seeking to unavailable positions
#[test]
/// Test for pages with -1 absgp (no packet ending there),
/// and generally for continued packets.
fn test_seeking_continued() {
let pck_count = 402;
// Array of length to add to the randomized packet size
// From this array, we take a random index to determine
// the value for the current packet.
let mut pck_len_add = [0; 8];
// One page can contain at most 255 * 255 = 65025
// bytes of payload packet data.
// Therefore, to force a page that contains no
// packet ending, we need more than double that number.
// 65025 * 2 = 130_050.
// 1/4 for large packets that are guaranteed to produce at
// least one -1 absgp page each.
pck_len_add[0] = 133_000;
pck_len_add[1] = 133_000;
// 1/8 for really large packets that produce >= 3
// -1 absgp pages each.
pck_len_add[2] = 270_000;
// 1/4 for big fill packets
// one page is full after a few of them
pck_len_add[3] = 30_000;
pck_len_add[4] = 13_000;
// 3/8 for small fill packets (0-127 bytes)
let mut rng = XorShift::from_two((0x9899eb03, 0x54138143));
let mut c = Cursor::new(Vec::new());
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
let ep = PacketWriteEndInfo::EndPage;
for ctr in 0..pck_count {
let r = rng.next() as usize;
let size = (r & 127) + pck_len_add[(r >> 8) & 7] >> 2;
w.write_packet(gen_pck(ctr, size), 0xdeadb33f,
if (ctr + 1) % 3 == 0 { ep } else { np }, ctr as u64).unwrap();
}
}
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
let mut r = PacketReader::new(c);
macro_rules! test_seek {
($absgp:expr) => {
test_seek_r!(r, $absgp)
};
($absgp:expr, $o:tt, $m:expr) => {
test_seek_r!(r, $absgp, $o, $m)
};
}
macro_rules! ensure_continues {
($absgp:expr) => {
ensure_continues_r!(r, $absgp)
};
}
test_seek!(32);
test_seek!(300,+,2);
test_seek!(314,+,2);
test_seek!(100,-,1);
ensure_continues!(101);
test_seek!(10);
ensure_continues!(11);
// Ensure that if we seek to the same place multiple times, it doesn't
// fill data needlessly.
r.seek_absgp(None, 377).unwrap();
r.seek_absgp(None, 377).unwrap();
test_seek!(377);
ensure_continues!(378);
// Ensure that if we seek to the same place multiple times, it doesn't
// fill data needlessly.
r.seek_absgp(None, 200).unwrap();
r.seek_absgp(None, 200).unwrap();
test_seek!(200);
ensure_continues!(201);
// Ensure the final page can be sought to
test_seek!(401,-,2);
// After we sought to the final page, we should be able to seek
// before it again.
test_seek!(250,-,1);
}
// Regression test for issue 14:
// Have "O" right before the OggS magic.
#[test]
fn test_issue_14() {
let mut c = Cursor::new(Vec::new());
let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
{
use std::io::Write;
c.write_all(&[b'O']).unwrap();
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr), 0xdeadb33f, np, 0).unwrap();
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p1 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr, *p1.data);
let p2 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
// Now test packets spanning multiple segments
let mut c = Cursor::new(Vec::new());
let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let mut test_arr_2 = [0; 700];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
for (idx, a) in test_arr_2.iter_mut().enumerate() {
*a = (idx as u8) / 4;
}
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr), 0xdeadb33f, np, 0).unwrap();
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(&mut c);
let p1 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr, *p1.data);
let p2 = r.read_packet().unwrap().unwrap();
test_arr_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
// Now test packets spanning multiple pages
let mut c = Cursor::new(Vec::new());
let mut test_arr_2 = [0; 14_000];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
for (idx, a) in test_arr_2.iter_mut().enumerate() {
*a = (idx as u8) / 4;
}
{
let mut w = PacketWriter::new(&mut c);
let np = PacketWriteEndInfo::NormalPacket;
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f,
PacketWriteEndInfo::EndPage, 2).unwrap();
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p2 = r.read_packet().unwrap().unwrap();
test_arr_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
}

344
vendor/ogg/src/writing.rs vendored Normal file

@@ -0,0 +1,344 @@
// Ogg decoder and encoder written in Rust
//
// Copyright (c) 2016-2017 est31 <MTest31@outlook.com>
// and contributors. All rights reserved.
// Redistribution or use only under the terms
// specified in the LICENSE file attached to this
// source distribution.
/*!
Writing logic
*/
use std::result;
use std::io::{self, Cursor, Write, Seek, SeekFrom};
use byteorder::{WriteBytesExt, LittleEndian};
use std::collections::HashMap;
use crc::vorbis_crc32_update;
/// Ogg version of the `std::io::Result` type.
///
/// We need `std::result::Result` at other points
/// too, so we can't use `Result` as the name.
type IoResult<T> = result::Result<T, io::Error>;
/**
Writer for packets into an Ogg stream.
Note that the functionality of this struct isn't as well tested as for
the `PacketReader` struct.
*/
pub struct PacketWriter<T :io::Write> {
wtr :T,
page_vals :HashMap<u32, CurrentPageValues>,
}
struct CurrentPageValues {
/// `true` if this page is the first one in the logical bitstream
first_page :bool,
/// Page counter of the current page
/// Increased for every page
sequence_num :u32,
/// Points to the first unwritten position in cur_pg_lacing.
segment_cnt :u8,
cur_pg_lacing :[u8; 255],
/// The data and the absgp's of the packets
cur_pg_data :Vec<(Box<[u8]>, u64)>,
/// Some(offs), if the last packet
/// couldn't make it fully into this page, and
/// has to be continued in the next page.
///
/// `offs` should point to the first idx in
/// cur_pg_data[last] that should NOT be written
/// in this page anymore.
///
/// None if all packets can be written nicely.
pck_this_overflow_idx :Option<usize>,
/// Some(offs), if the first packet
/// couldn't make it fully into the last page, and
/// has to be continued in this page.
///
/// `offs` should point to the first idx in cur_pg_data[0]
/// that hasn't been written.
///
/// None if all packets can be written nicely.
pck_last_overflow_idx :Option<usize>,
}
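// Worked example for the two overflow fields above (not in the original
// source; the numbers match test_write_large in test.rs): writing a single
// 70_000 byte packet onto an empty page fills 255 lacing values of 255 bytes,
// i.e. 65025 bytes of payload, so pck_this_overflow_idx becomes Some(65025)
// and the page is flushed. On the following page, pck_last_overflow_idx is
// Some(65025) and the remaining 4975 bytes (19 full segments plus a final
// 130 byte segment) are written.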
/// Specifies whether to end something with the write of the packet.
///
/// If you want to end a stream you need to inform the Ogg `PacketWriter`
/// about this. This is the enum to do so.
///
/// Also, codecs sometimes have special requirements to put
/// the first packet of the whole stream into its own page.
/// The `EndPage` variant can be used for this.
#[derive(PartialEq)]
#[derive(Clone, Copy)]
pub enum PacketWriteEndInfo {
/// No ends here, just a normal packet
NormalPacket,
/// Force-end the current page
EndPage,
/// End the whole logical stream.
EndStream,
}
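// A minimal usage sketch for this enum (not part of the vendored crate,
// added for illustration): the first packet is forced onto its own page with
// EndPage (for instance, Vorbis requires its identification header to sit on
// a page of its own), middle packets use NormalPacket, and the final packet
// of the stream uses EndStream. The serial number and granule positions are
// placeholders.
/*
use std::io::Cursor;
use ogg::{PacketWriter, PacketWriteEndInfo};

fn write_three(pcks :[Box<[u8]>; 3]) -> std::io::Result<Vec<u8>> {
	let [first, middle, last] = pcks;
	let mut wtr = PacketWriter::new(Cursor::new(Vec::new()));
	wtr.write_packet(first, 0x1234_5678, PacketWriteEndInfo::EndPage, 0)?;
	wtr.write_packet(middle, 0x1234_5678, PacketWriteEndInfo::NormalPacket, 0)?;
	wtr.write_packet(last, 0x1234_5678, PacketWriteEndInfo::EndStream, 1)?;
	// Recover the underlying Cursor, then the written bytes.
	Ok(wtr.into_inner().into_inner())
}
*/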
impl <T :io::Write> PacketWriter<T> {
pub fn new(wtr :T) -> Self {
return PacketWriter {
wtr,
page_vals : HashMap::new(),
};
}
pub fn into_inner(self) -> T {
self.wtr
}
/// Access the interior writer
///
/// This allows access to the writer contained inside.
/// No guarantees are given about the pattern of the writes.
/// They may change in the future.
pub fn inner(&self) -> &T {
&self.wtr
}
/// Access the interior writer mutably
///
/// This allows access to the writer contained inside.
/// No guarantees are given about the pattern of the writes.
/// They may change in the future.
pub fn inner_mut(&mut self) -> &mut T {
&mut self.wtr
}
/// Write a packet.
pub fn write_packet(&mut self, pck_cont :Box<[u8]>, serial :u32,
inf :PacketWriteEndInfo,
/* TODO find a better way to design the API around
passing the absgp to the underlying implementation.
e.g. the caller passes a closure on init which gets
called when we encounter a new page... with the param
the index inside the current page, or something.
*/
absgp :u64) -> IoResult<()> {
let is_end_stream :bool = inf == PacketWriteEndInfo::EndStream;
let pg = self.page_vals.entry(serial).or_insert(
CurrentPageValues {
first_page : true,
sequence_num : 0,
segment_cnt : 0,
cur_pg_lacing :[0; 255],
cur_pg_data :Vec::with_capacity(255),
pck_this_overflow_idx : None,
pck_last_overflow_idx : None,
}
);
let cont_len = pck_cont.len();
pg.cur_pg_data.push((pck_cont, absgp));
let last_data_segment_size = (cont_len % 255) as u8;
let needed_segments :usize = (cont_len / 255) + 1;
let mut segment_in_page_i :u8 = pg.segment_cnt;
let mut at_page_end :bool = false;
for segment_i in 0 .. needed_segments {
at_page_end = false;
if segment_i + 1 < needed_segments {
// For all segments containing 255 pieces of data
pg.cur_pg_lacing[segment_in_page_i as usize] = 255;
} else {
// The last segment must contain < 255 pieces of data
// (including 0)
pg.cur_pg_lacing[segment_in_page_i as usize] = last_data_segment_size;
}
pg.segment_cnt = segment_in_page_i + 1;
segment_in_page_i = (segment_in_page_i + 1) % 255;
if segment_in_page_i == 0 {
if segment_i + 1 < needed_segments {
// We have to flush a page, but we know there are more to come...
pg.pck_this_overflow_idx = Some((segment_i + 1) * 255);
tri!(PacketWriter::write_page(&mut self.wtr, serial, pg,
false));
} else {
// We have to write a page end, and it's the very last
// we need to write
tri!(PacketWriter::write_page(&mut self.wtr,
serial, pg, is_end_stream));
// Not actually required
// (it is always None except if we set it to Some directly
// before we call write_page)
pg.pck_this_overflow_idx = None;
// Required (it could have been Some(offs) before)
pg.pck_last_overflow_idx = None;
}
at_page_end = true;
}
}
if (inf != PacketWriteEndInfo::NormalPacket) && !at_page_end {
// Write a page end
tri!(PacketWriter::write_page(&mut self.wtr, serial, pg,
is_end_stream));
pg.pck_last_overflow_idx = None;
// TODO if inf was PacketWriteEndInfo::EndStream, we have to
// somehow erase pg from the hashmap...
// any ideas? perhaps needs external scope...
}
// All went fine.
Ok(())
}
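// Worked example of the lacing computation in write_packet above (not in the
// original source): a 510 byte packet gives last_data_segment_size = 0 and
// needed_segments = 3, so its lacing values are 255, 255, 0; the trailing 0
// is what marks the packet as finished, as Ogg framing requires for packet
// lengths that are a multiple of 255. An 11 byte test packet is a single
// lacing value of 11.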
fn write_page(wtr :&mut T, serial :u32, pg :&mut CurrentPageValues,
last_page :bool) -> IoResult<()> {
{
// The page header with everything but the lacing values:
let mut hdr_cur = Cursor::new(Vec::with_capacity(27));
tri!(hdr_cur.write_all(&[0x4f, 0x67, 0x67, 0x53, 0x00]));
let mut flags :u8 = 0;
if pg.pck_last_overflow_idx.is_some() { flags |= 0x01; }
if pg.first_page { flags |= 0x02; }
if last_page { flags |= 0x04; }
tri!(hdr_cur.write_u8(flags));
let pck_data = &pg.cur_pg_data;
let mut last_finishing_pck_absgp = (-1i64) as u64;
for (idx, &(_, absgp)) in pck_data.iter().enumerate() {
if !(idx + 1 == pck_data.len() &&
pg.pck_this_overflow_idx.is_some()) {
last_finishing_pck_absgp = absgp;
}
}
tri!(hdr_cur.write_u64::<LittleEndian>(last_finishing_pck_absgp));
tri!(hdr_cur.write_u32::<LittleEndian>(serial));
tri!(hdr_cur.write_u32::<LittleEndian>(pg.sequence_num));
// checksum, calculated later on :)
tri!(hdr_cur.write_u32::<LittleEndian>(0));
tri!(hdr_cur.write_u8(pg.segment_cnt));
let mut hash_calculated :u32;
let pg_lacing = &pg.cur_pg_lacing[0 .. pg.segment_cnt as usize];
hash_calculated = vorbis_crc32_update(0, hdr_cur.get_ref());
hash_calculated = vorbis_crc32_update(hash_calculated, pg_lacing);
for (idx, &(ref pck, _)) in pck_data.iter().enumerate() {
let mut start :usize = 0;
if idx == 0 { if let Some(idx) = pg.pck_last_overflow_idx {
start = idx;
}}
let mut end :usize = pck.len();
if idx + 1 == pck_data.len() {
if let Some(idx) = pg.pck_this_overflow_idx {
end = idx;
}
}
hash_calculated = vorbis_crc32_update(hash_calculated,
&pck[start .. end]);
}
// Go back to enter the checksum
// Don't do excessive checking here (that the seek
// succeeded & we are at the right pos now).
// It's hopefully not required.
tri!(hdr_cur.seek(SeekFrom::Start(22)));
tri!(hdr_cur.write_u32::<LittleEndian>(hash_calculated));
// Now all is done, write the stuff!
tri!(wtr.write_all(hdr_cur.get_ref()));
tri!(wtr.write_all(pg_lacing));
for (idx, &(ref pck, _)) in pck_data.iter().enumerate() {
let mut start :usize = 0;
if idx == 0 { if let Some(idx) = pg.pck_last_overflow_idx {
start = idx;
}}
let mut end :usize = pck.len();
if idx + 1 == pck_data.len() {
if let Some(idx) = pg.pck_this_overflow_idx {
end = idx;
}
}
tri!(wtr.write_all(&pck[start .. end]));
}
}
// Reset the page.
pg.first_page = false;
pg.sequence_num += 1;
pg.segment_cnt = 0;
// If we couldn't fully write the last
// packet, we need to keep it for the next page,
// otherwise just clear everything.
if pg.pck_this_overflow_idx.is_some() {
let d = pg.cur_pg_data.pop().unwrap();
pg.cur_pg_data.clear();
pg.cur_pg_data.push(d);
} else {
pg.cur_pg_data.clear();
}
pg.pck_last_overflow_idx = pg.pck_this_overflow_idx;
pg.pck_this_overflow_idx = None;
return Ok(());
}
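// Byte layout of the 27 byte header assembled by write_page above (reference
// note, not in the original source):
//   0 .. 5    "OggS" capture pattern plus stream structure version 0
//   5         header type flags (0x01 continued, 0x02 first page, 0x04 last page)
//   6 .. 14   absolute granule position, little endian u64
//   14 .. 18  stream serial number
//   18 .. 22  page sequence number
//   22 .. 26  CRC checksum, patched in after hashing (hence the seek to 22)
//   26        number of lacing values (segment count)
// The lacing values and the packet payload bytes follow directly after.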
}
impl<T :io::Seek + io::Write> PacketWriter<T> {
pub fn get_current_offs(&mut self) -> Result<u64, io::Error> {
self.wtr.seek(SeekFrom::Current(0))
}
}
// TODO once 1.18 gets released, move this
// to the test module and make wtr pub(crate).
#[test]
fn test_recapture() {
// Test that we can deal with recapture
// at varying distances.
// This is a regression test
use std::io::Write;
use super::PacketReader;
let mut c = Cursor::new(Vec::new());
let test_arr = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let test_arr_2 = [2, 4, 8, 16, 32, 64, 128, 127, 126, 125, 124];
let test_arr_3 = [3, 5, 9, 17, 33, 65, 129, 129, 127, 126, 125];
{
let np = PacketWriteEndInfo::NormalPacket;
let ep = PacketWriteEndInfo::EndPage;
{
let mut w = PacketWriter::new(&mut c);
w.write_packet(Box::new(test_arr), 0xdeadb33f, ep, 0).unwrap();
// Now, after the end of the page, put in some noise.
w.wtr.write_all(&[0; 38]).unwrap();
w.write_packet(Box::new(test_arr_2), 0xdeadb33f, np, 1).unwrap();
w.write_packet(Box::new(test_arr_3), 0xdeadb33f, ep, 2).unwrap();
}
}
//print_u8_slice(c.get_ref());
assert_eq!(c.seek(SeekFrom::Start(0)).unwrap(), 0);
{
let mut r = PacketReader::new(c);
let p1 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr, *p1.data);
let p2 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_2, *p2.data);
let p3 = r.read_packet().unwrap().unwrap();
assert_eq!(test_arr_3, *p3.data);
}
}