This is a stable release with several nice improvements and bugfixes. In particular initialization of the functable was very slow in earlier versions. You can observe that with this test file:
main.rs
```rust
use std::{
ffi::{c_char, c_int},
mem::MaybeUninit,
};
// use libz_sys as zlib;
use libz_ng_sys as zlib;
/// Repro driver: `cargo run --release -- <level> <path>` compresses the file
/// at `path` with the given zlib compression level and checks for success.
fn main() {
    // Skip argv[0]; the two positional arguments are <level> and <path>.
    let mut args = std::env::args().skip(1);
    let level: i32 = args.next().unwrap().parse().unwrap();
    let path: String = args.next().unwrap();

    let input = std::fs::read(path).unwrap();
    // Output buffer sized like the input; enough for this demonstration.
    let mut output = vec![0u8; input.len()];
    let mut out_len = output.len() as _;

    let status = unsafe {
        compress2(
            output.as_mut_ptr(),
            &mut out_len,
            input.as_ptr(),
            input.len() as _,
            level,
        )
    };
    assert_eq!(status, zlib::Z_OK);
}
const VERSION: *const c_char = "2.3.0\0".as_ptr() as *const c_char;
const STREAM_SIZE: c_int = std::mem::size_of::() as c_int;
/// Hand-rolled equivalent of zlib's `compress2` convenience function, driving
/// `deflateInit_` / `deflate` / `deflateEnd` directly so the chunking of
/// usize/u64 lengths into the 32-bit `avail_in`/`avail_out` fields is explicit.
///
/// On entry `*dest_len` is the capacity of `dest`; on success it is updated to
/// the number of compressed bytes written. Returns `Z_OK` on success or the
/// zlib error code otherwise.
///
/// # Safety
/// `dest` must be valid for writes of `*dest_len` bytes, `source` must be
/// valid for reads of `source_len` bytes, and `dest_len` must be a valid
/// pointer for the duration of the call.
unsafe fn compress2(
    dest: *mut u8,
    dest_len: *mut usize,
    source: *const u8,
    mut source_len: u64,
    level: i32,
) -> i32 {
    // A zeroed z_stream is the conventional starting state: NULL
    // zalloc/zfree/opaque selects the library's default allocator.
    let mut stream = MaybeUninit::zeroed();
    let max: u32 = u32::MAX; // the stream's avail_* fields are only 32 bits wide
    let mut left = *dest_len; // output capacity not yet handed to the stream
    let mut err = zlib::deflateInit_(stream.as_mut_ptr(), level, VERSION, STREAM_SIZE);
    if err != zlib::Z_OK {
        return err;
    }
    // deflateInit_ returned Z_OK, so the stream is now initialized.
    let mut stream = stream.assume_init_mut();
    stream.next_out = dest;
    stream.avail_out = 0; // refilled at the top of the first loop iteration
    stream.next_in = source as *mut u8;
    stream.avail_in = 0; // likewise
    loop {
        // Hand the stream at most u32::MAX bytes of output space at a time.
        if stream.avail_out == 0 {
            stream.avail_out = Ord::min(max as u64, left as u64) as u32;
            left -= stream.avail_out as usize;
        }
        // Same 32-bit chunking on the input side.
        if stream.avail_in == 0 {
            stream.avail_in = Ord::min(max as u64, source_len as u64) as u32;
            source_len -= stream.avail_in as u64;
        }
        // Intentional diagnostics for the repro; shows the chunk sizes fed in.
        dbg!(stream.avail_out, stream.avail_in);
        // Only request Z_FINISH once all input has been handed to the stream.
        err = zlib::deflate(
            stream,
            if source_len > 0 {
                zlib::Z_NO_FLUSH
            } else {
                zlib::Z_FINISH
            },
        );
        // Z_OK means "keep going"; Z_STREAM_END or an error ends the loop.
        if err != zlib::Z_OK {
            break;
        }
    }
    *dest_len = stream.total_out;
    zlib::deflateEnd(stream);
    // Z_STREAM_END is the normal completion signal; map it to Z_OK for callers.
    return if err == zlib::Z_STREAM_END {
        zlib::Z_OK
    } else {
        err
    };
}
```
it gives this very counter-intuitive result where compression level 8 is 3x slower than level 9:
hyperfine "cargo run --release -- 8 /home/folkertdev/rust/zlib-rs/silesia-small.tar" "cargo run --release -- 9 /home/folkertdev/rust/zlib-rs/silesia-small.tar"
Benchmark #1: cargo run --release -- 8 /home/folkertdev/rust/zlib-rs/silesia-small.tar
Time (mean ± σ): 1.897 s ± 0.049 s [User: 1.883 s, System: 0.014 s]
Range (min … max): 1.840 s … 1.988 s 10 runs
Benchmark #2: cargo run --release -- 9 /home/folkertdev/rust/zlib-rs/silesia-small.tar
Time (mean ± σ): 625.5 ms ± 13.8 ms [User: 608.2 ms, System: 17.3 ms]
Range (min … max): 602.7 ms … 651.0 ms 10 runs
Summary
'cargo run --release -- 9 /home/folkertdev/rust/zlib-rs/silesia-small.tar' ran
3.03 ± 0.10 times faster than 'cargo run --release -- 8 /home/folkertdev/rust/zlib-rs/silesia-small.tar'
this is a problem for me, so this version update and subsequent release of libz-ng-sys would be very useful.
updates the zlib-ng submodule to its latest stable version
This is a stable release with several nice improvements and bugfixes. In particular initialization of the functable was very slow in earlier versions. You can observe that with this test file:
main.rs
```rust use std::{ ffi::{c_char, c_int}, mem::MaybeUninit, }; // use libz_sys as zlib; use libz_ng_sys as zlib; fn main() { let mut it = std::env::args(); let _exe = it.next().unwrap(); let level: i32 = it.next().unwrap().parse().unwrap(); let path: String = it.next().unwrap(); let next_in = std::fs::read(path).unwrap(); let mut next_out = vec![0; next_in.len()]; let mut dest_len = next_out.len() as _; let err = unsafe { compress2( next_out.as_mut_ptr(), &mut dest_len, next_in.as_ptr(), next_in.len() as _, level, ) }; assert_eq!(err, zlib::Z_OK); } const VERSION: *const c_char = "2.3.0\0".as_ptr() as *const c_char; const STREAM_SIZE: c_int = std::mem::size_of::<zlib::z_stream>() as c_int; /* … followed by the `compress2` implementation shown in the full listing above … */ ```

it gives this very counter-intuitive result where compression level 8 is 3x slower than level 9:
this is a problem for me, so this version update and subsequent release of
libz-ng-sys
would be very useful.