fix: align_skip

Pascal Engélibert 2023-04-29 16:51:57 +02:00
parent 0b40fe93d9
commit e30791fe44
Signed by: tuxmain
GPG key ID: 3504BC6D362F7DCA
2 changed files with 81 additions and 7 deletions


@@ -80,7 +80,7 @@ impl<T> RingBuf<T> {
            self.write_index = (self.write_index + 1) % self.data.len();
            len += 1;
        }
        if self.available + len < self.data.len() {
        if self.available + len <= self.data.len() {
            self.available += len;
        } else {
            self.read_index = self.write_index;
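
Aside (not part of the diff): the only change in this hunk is `<` becoming `<=`, so a push that fills the buffer exactly now stays on the normal path instead of falling into the branch that resets read_index; what else that branch does lies outside the hunk. A worked example with hypothetical numbers:

fn main() {
    // Hypothetical numbers: a capacity-1024 buffer already holding 1000
    // readable elements, into which 24 more have just been written.
    let (capacity, available, len) = (1024usize, 1000usize, 24usize);

    // The buffer is now exactly full: the old `<` sends this case to the
    // branch that resets read_index, while the new `<=` keeps it on the
    // normal path so `available` simply grows to `capacity`.
    assert_eq!(available + len, capacity);
    assert!(!(available + len < capacity)); // old condition: false, takes the reset branch
    assert!(available + len <= capacity); // new condition: true, stays on the normal path
}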
@@ -210,10 +210,21 @@ impl<T> RingBuf<T> {
    /// Skip a few elements to align the read index, returning the number of skipped elements.
    pub fn align_skip(&mut self, align: usize) -> usize {
        assert_eq!(self.data.len() % align, 0);
        let skip = align - (self.read_index % align);
        self.read_index = (self.read_index + skip) % self.data.len();
        self.available = self.available.saturating_sub(skip);
        skip
        if self.read_index % align != 0 {
            let skip = align - (self.read_index % align);
            self.read_index = (self.read_index + skip) % self.data.len();
            if skip <= self.available {
                self.available -= skip;
                skip
            } else {
                let available = self.available;
                self.available = 0;
                self.write_index = self.read_index;
                available
            }
        } else {
            0
        }
    }
}
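
For readers skimming the diff, here is a minimal standalone sketch of the arithmetic the new version implements. It models only the returned count, the read index and `available` (not the write_index adjustment the real method also makes), and the function name is made up for illustration:

// Standalone sketch of the corrected align_skip arithmetic (not this crate's API):
// skip only when the read index is misaligned, and clamp the skip to the
// readable length so `available` cannot go stale.
fn align_skip_sketch(
    read_index: usize,
    available: usize,
    capacity: usize,
    align: usize,
) -> (usize, usize, usize) {
    assert_eq!(capacity % align, 0);
    if read_index % align == 0 {
        // Already aligned: the old code would still have skipped a full `align` block here.
        return (0, read_index, available);
    }
    let skip = align - (read_index % align);
    let new_read = (read_index + skip) % capacity;
    // Clamp to what is actually readable; the old code returned `skip`
    // unconditionally and only saturated `available`.
    let skipped = skip.min(available);
    (skipped, new_read, available - skipped)
}

fn main() {
    // Mirrors align_skip_overflow below: read index 5, one element left, align 4.
    assert_eq!(align_skip_sketch(5, 1, 8, 4), (1, 0, 0));
    // Mirrors align_skip_basic below: read index 1, three elements left, align 4.
    assert_eq!(align_skip_sketch(1, 3, 8, 4), (3, 4, 0));
}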


@@ -4,20 +4,31 @@ use rand::Rng;
#[test]
fn chunks_exact_mut_random() {
    const MAX_APPEND_LEN: usize = 256;
    const MAX_APPEND_LEN: usize = 1023;
    const CHUNK_SIZE: usize = 32;
    const BUF_SIZE: usize = 1024;
    let mut rng = rand::thread_rng();
    let mut ringbuf = RingBuf::new(1024, 0_u32);
    let mut ringbuf = RingBuf::new(BUF_SIZE, 0_u32);
    let mut current_value = 0;
    for _ in 0_u32..1024 {
        let append_len = rng.gen_range(0..MAX_APPEND_LEN);
        let overwritten = append_len;
        let available = ringbuf.available();
        assert_eq!(
            ringbuf.push_from_iter_overflowing(
                current_value as u32..current_value as u32 + append_len as u32
            ),
            append_len
        );
        assert_eq!((available + append_len).min(BUF_SIZE), ringbuf.available());
        dbg!(append_len);
        dbg!(current_value);
        if append_len + available > BUF_SIZE {
            current_value += append_len - (BUF_SIZE - available);
        }
        dbg!(current_value);
        ringbuf.align_skip(CHUNK_SIZE);
        for (j, chunk) in ringbuf.chunks_exact_mut(CHUNK_SIZE).enumerate() {
            assert_eq!(chunk.len(), CHUNK_SIZE);
            for (k, v) in chunk.iter().enumerate() {
@@ -74,3 +85,55 @@ fn push_from_slice_overflowing() {
        ([4, 5, 6, 7, 8].as_slice(), [9, 10, 11].as_slice())
    );
}

#[test]
fn align_skip_basic() {
    let mut ringbuf = RingBuf::new(8, 0_u32);
    assert_eq!(ringbuf.align_skip(4), 0);
    assert_eq!(ringbuf.available(), 0);
    assert_eq!(ringbuf.read_index, 0);
    assert_eq!(ringbuf.write_index, 0);
    assert!(ringbuf.push(1));
    assert_eq!(ringbuf.align_skip(4), 0);
    assert_eq!(ringbuf.available(), 1);
    assert_eq!(ringbuf.read_index, 0);
    assert_eq!(ringbuf.write_index, 1);
    assert!(ringbuf.push(1));
    assert_eq!(ringbuf.align_skip(4), 0);
    assert_eq!(ringbuf.available(), 2);
    assert_eq!(ringbuf.read_index, 0);
    assert_eq!(ringbuf.write_index, 2);
    assert!(ringbuf.push(1));
    assert_eq!(ringbuf.align_skip(4), 0);
    assert_eq!(ringbuf.available(), 3);
    assert_eq!(ringbuf.read_index, 0);
    assert_eq!(ringbuf.write_index, 3);
    assert!(ringbuf.push(1));
    assert_eq!(ringbuf.align_skip(4), 0);
    assert_eq!(ringbuf.available(), 4);
    assert_eq!(ringbuf.read_index, 0);
    assert_eq!(ringbuf.write_index, 4);
    assert_eq!(ringbuf.read_first_one(), Some(&1));
    assert_eq!(ringbuf.align_skip(4), 3);
    assert_eq!(ringbuf.available(), 0);
    assert_eq!(ringbuf.read_index, 4);
    assert_eq!(ringbuf.write_index, 4);
}

#[test]
fn align_skip_overflow() {
    let mut ringbuf = RingBuf::new(8, 0_u32);
    ringbuf.push_from_slice_overflowing(&[1, 2, 3, 4, 5, 6]);
    assert_eq!(ringbuf.read_first_slices(5).0, &[1, 2, 3, 4, 5]);
    assert_eq!(ringbuf.align_skip(4), 1);
    assert_eq!(ringbuf.available(), 0);
    assert_eq!(ringbuf.read_index, 0);
    assert_eq!(ringbuf.write_index, 0);
}
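
For orientation, here is a sketch of the consumer pattern the random test above exercises: push, re-align, then iterate fixed-size chunks. It assumes the RingBuf API shown in this diff (the import path is not visible here) and is not part of the commit.

fn main() {
    // Capacity must be a multiple of the chunk size, since align_skip asserts that.
    let mut ringbuf = RingBuf::new(8, 0_u32);
    ringbuf.push_from_slice_overflowing(&[1, 2, 3, 4, 5, 6]);

    // Consuming a single element leaves the read index in the middle of a
    // 4-element chunk...
    assert_eq!(ringbuf.read_first_one(), Some(&1));

    // ...so drop the rest of that partial chunk before iterating whole chunks.
    assert_eq!(ringbuf.align_skip(4), 3);
    for chunk in ringbuf.chunks_exact_mut(4) {
        // Only complete 4-element chunks are yielded; a shorter remainder is not.
        assert_eq!(chunk.len(), 4);
    }
}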