Compare commits

...

3 commits

Author             SHA1        Message                 Date
Pascal Engélibert  0b40fe93d9  More useful methods     2022-10-01 15:50:50 +02:00
Pascal Engélibert  c49db676ee  Immutable exact chunks  2022-10-01 10:13:50 +02:00
Pascal Engélibert  60841ec34c  Checked push            2022-10-01 10:10:23 +02:00
3 changed files with 237 additions and 18 deletions

View file

@@ -5,9 +5,10 @@ Yet another Rust ring buffer implementation.
**Early development**: more features will be added soon. Also, more tests. And probably breaking changes.
Features:
* Push element
* Push from iterator
* Mutable exact chunk iterator (read contiguous slices)
* Checked/overflowing element/iterator safe push
* Overflowing slice push
* Mutable/immutable exact chunk iterator (read contiguous slices)
* Other useful methods
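The new bullet points above map onto the methods added in these commits. A minimal usage sketch, assuming `RingBuf` is brought into scope (the crate name is not shown in this diff), with arbitrary sizes and values:

```rust
// Assumes `RingBuf` is in scope; the crate name is not shown in this diff.
fn main() {
    let mut rb = RingBuf::new(8, 0_u32);        // capacity 8, initialised with zeros
    assert!(rb.push(1));                        // checked push: returns false when full
    assert_eq!(rb.push_from_iter(2..5), 3);     // pushes what fits, returns the count written
    rb.push_overflowing(5);                     // always writes, overwriting the oldest element when full
    rb.push_from_slice_overflowing(&[6, 7, 8]); // same, but from a slice
    for chunk in rb.chunks_exact(4) {           // read-only contiguous chunks of 4 elements
        println!("{chunk:?}");
    }
}
```

The checked variants report whether or how much was written, while the `_overflowing` variants always write and discard the oldest unread data instead.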
## License

View file

@@ -13,6 +13,7 @@ impl<T> RingBuf<T> {
where
T: Clone,
{
assert!(size != 0);
Self {
data: vec![value; size],
read_index: 0,
@@ -21,20 +22,130 @@ impl<T> RingBuf<T> {
}
}
pub fn push(&mut self, value: T) {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
self.available = (self.available + 1).min(self.data.len());
/// Total capacity
pub fn capacity(&self) -> usize {
self.data.len()
}
pub fn push_from_iter<I: Iterator<Item = T>>(&mut self, iter: I) {
/// Data written but not yet read
pub fn available(&self) -> usize {
self.available
}
pub fn clear(&mut self) {
self.read_index = self.write_index;
self.available = 0;
}
pub fn push(&mut self, value: T) -> bool {
if self.available < self.data.len() {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
self.available = (self.available + 1).min(self.data.len());
true
} else {
false
}
}
pub fn push_overflowing(&mut self, value: T) {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
if self.available < self.data.len() {
self.available += 1;
} else {
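// Buffer is full: the oldest unread element was just overwritten, so advance the read index past it.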
self.read_index = (self.read_index + 1) % self.data.len();
}
}
pub fn push_from_iter<I: Iterator<Item = T>>(&mut self, mut iter: I) -> usize {
let mut len = 0;
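// Pull from the iterator only while there is free room; returns how many items were written.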
while len < self.data.len() - self.available {
if let Some(value) = iter.next() {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
len += 1;
} else {
break;
}
}
self.available += len;
len
}
pub fn push_from_iter_overflowing<I: Iterator<Item = T>>(&mut self, iter: I) -> usize {
let mut len = 0;
for value in iter {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
len += 1;
}
self.available = (self.available + len).min(self.data.len());
if self.available + len < self.data.len() {
self.available += len;
} else {
self.read_index = self.write_index;
self.available = self.data.len();
}
len
}
pub fn push_from_slice_overflowing(&mut self, values: &[T])
where
T: Clone,
{
assert!(values.len() <= self.data.len());
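// Slots from the write index to the end of the backing buffer; a longer input slice wraps around to the front.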
let split_at = self.data.len() - self.write_index;
if split_at < values.len() {
self.data[self.write_index..].clone_from_slice(&values[0..split_at]);
self.data[0..values.len() - split_at].clone_from_slice(&values[split_at..]);
self.write_index = values.len() - split_at;
} else {
self.data[self.write_index..self.write_index + values.len()].clone_from_slice(values);
self.write_index += values.len();
}
if self.available + values.len() <= self.data.len() {
// We have enough room
self.available += values.len();
} else {
// Overflow!
self.read_index = self.write_index;
self.available = self.data.len();
}
}
/// Returns available data as `(right, left)` slices.
///
/// Does not increment the read index.
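///
/// E.g. with capacity 4, after `push_overflowing(1)` through `push_overflowing(5)` the
/// backing buffer holds `[5, 2, 3, 4]` and `as_slices()` returns `([2, 3, 4], [5])`.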
pub fn as_slices(&self) -> (&[T], &[T]) {
let (left, right) = self.data.split_at(self.read_index);
if self.available < right.len() {
(&right[0..self.available], &[])
} else {
(right, &left[0..self.available - right.len()])
}
}
/// Returns available data as `(right, left)` slices.
///
/// Does not increment the read index.
pub fn as_slices_mut(&mut self) -> (&mut [T], &mut [T]) {
let (left, right) = self.data.split_at_mut(self.read_index);
if self.available < right.len() {
(&mut right[0..self.available], &mut [])
} else {
let right_len = right.len();
(right, &mut left[0..self.available - right_len])
}
}
pub fn chunks_exact(&mut self, chunk_size: usize) -> ChunksExact<T> {
assert_eq!(self.read_index % chunk_size, 0);
assert_eq!(self.data.len() % chunk_size, 0);
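// These asserts guarantee the chunks tile the buffer exactly, so a chunk never wraps past the end.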
ChunksExact {
ringbuf: self,
chunk_size,
}
}
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<T> {
@@ -46,6 +157,86 @@ impl<T> RingBuf<T> {
chunk_size,
}
}
/// Read all the available elements, returned as `(right, left)`.
/// That is, the elements in `.1` come after those in `.0`.
pub fn read_all_slices(&mut self) -> (&[T], &[T]) {
self.read_first_slices(self.available)
}
/// Read the first `number` available elements, returned as `(right, left)`.
/// That is, the elements in `.1` come after those in `.0`.
pub fn read_first_slices(&mut self, number: usize) -> (&[T], &[T]) {
let (left, right) = self.data.split_at(self.read_index);
let len = self.available.min(number);
self.available -= len;
if len < right.len() {
self.read_index += len;
(&right[0..len], &[])
} else {
self.read_index = len - right.len();
(right, &left[0..len - right.len()])
}
}
/// Read the first available element.
pub fn read_first_one(&mut self) -> Option<&T> {
if self.available > 0 {
let ret = &self.data[self.read_index];
self.read_index = (self.read_index + 1) % self.data.len();
self.available -= 1;
Some(ret)
} else {
None
}
}
pub fn as_raw_slices(&self) -> (&[T], &[T]) {
self.data.split_at(self.read_index)
}
pub fn as_raw_slices_mut(&mut self) -> (&mut [T], &mut [T]) {
self.data.split_at_mut(self.read_index)
}
pub fn raw_slice(&self) -> &[T] {
&self.data
}
pub fn raw_slice_mut(&mut self) -> &mut [T] {
&mut self.data
}
/// Skip a few elements to align the read index, returning the number of skipped elements.
pub fn align_skip(&mut self, align: usize) -> usize {
assert_eq!(self.data.len() % align, 0);
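// Elements between the current read index and the next aligned position are dropped unread.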
let skip = align - (self.read_index % align);
self.read_index = (self.read_index + skip) % self.data.len();
self.available = self.available.saturating_sub(skip);
skip
}
}
pub struct ChunksExact<'a, T> {
ringbuf: &'a mut RingBuf<T>,
chunk_size: usize,
}
impl<'a, T> Iterator for ChunksExact<'a, T> {
type Item = &'a [T];
fn next(&mut self) -> Option<Self::Item> {
if self.ringbuf.available >= self.chunk_size {
let read_index = self.ringbuf.read_index;
self.ringbuf.read_index =
(self.ringbuf.read_index + self.chunk_size) % self.ringbuf.data.len();
self.ringbuf.available -= self.chunk_size;
let ret: &[T] = &self.ringbuf.data[read_index..read_index + self.chunk_size];
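// The slice borrows from the RingBuf, which this iterator borrows exclusively for 'a,
// so extending the slice's lifetime to 'a does not let it outlive the underlying data.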
let ret: &'a [T] = unsafe { std::mem::transmute(ret) };
Some(ret)
} else {
None
}
}
}
pub struct ChunksExactMut<'a, T> {

View file

@@ -3,17 +3,21 @@ use crate::*;
use rand::Rng;
#[test]
fn correctness_random() {
fn chunks_exact_mut_random() {
const MAX_APPEND_LEN: usize = 256;
const CHUNK_SIZE: usize = 32;
let mut rng = rand::thread_rng();
let mut ringbuf = RingBuf::new(1024, 0_u32);
let mut buf = [0; 32];
let mut current_value = 0;
for _ in 0_u32..1024 {
let append_len = rng.gen_range(0..MAX_APPEND_LEN);
ringbuf.push_from_iter(current_value as u32..current_value as u32 + append_len as u32);
assert_eq!(
ringbuf.push_from_iter_overflowing(
current_value as u32..current_value as u32 + append_len as u32
),
append_len
);
for (j, chunk) in ringbuf.chunks_exact_mut(CHUNK_SIZE).enumerate() {
assert_eq!(chunk.len(), CHUNK_SIZE);
for (k, v) in chunk.iter().enumerate() {
@@ -28,22 +32,45 @@ fn correctness_random() {
}
#[test]
fn correctness_manual() {
fn chunks_exact_mut_manual() {
let mut ringbuf = RingBuf::new(8, 0_u32);
ringbuf.push_from_iter(1..4);
assert_eq!(ringbuf.push_from_iter(1..4), 3);
assert_eq!(ringbuf.available(), 3);
let mut iter = ringbuf.chunks_exact_mut(4);
assert_eq!(iter.next(), None);
ringbuf.push(4);
assert_eq!(ringbuf.available(), 3);
assert!(ringbuf.push(4));
assert_eq!(ringbuf.available(), 4);
let mut iter = ringbuf.chunks_exact_mut(4);
assert_eq!(iter.next(), Some([1, 2, 3, 4].as_mut_slice()));
assert_eq!(iter.next(), None);
ringbuf.push_from_iter(5..8);
assert_eq!(ringbuf.available(), 0);
assert_eq!(ringbuf.push_from_iter(5..8), 3);
assert_eq!(ringbuf.available(), 3);
let mut iter = ringbuf.chunks_exact_mut(4);
assert_eq!(iter.next(), None);
ringbuf.push_from_iter(8..14);
assert_eq!(ringbuf.push_from_iter_overflowing(8..14), 6);
assert_eq!(ringbuf.available(), 8);
dbg!("{:?}", ringbuf.raw_slice());
assert_eq!(ringbuf.align_skip(4), 3);
let mut iter = ringbuf.chunks_exact_mut(4);
assert_eq!(iter.next(), Some([13, 6, 7, 8].as_mut_slice()));
assert_eq!(iter.next(), Some([9, 10, 11, 12].as_mut_slice()));
assert_eq!(iter.next(), None);
assert_eq!(ringbuf.read_first_one(), Some(&13));
assert_eq!(ringbuf.available(), 0);
}
#[test]
fn push_from_slice_overflowing() {
let mut ringbuf = RingBuf::new(8, 0_u32);
ringbuf.push_from_slice_overflowing(&[1, 2, 3, 4, 5]);
dbg!("{:?}", ringbuf.raw_slice());
ringbuf.push_from_slice_overflowing(&[6, 7, 8, 9, 10, 11]);
dbg!("{:?}", ringbuf.raw_slice());
assert_eq!(
ringbuf.read_all_slices(),
([4, 5, 6, 7, 8].as_slice(), [9, 10, 11].as_slice())
);
}