chunkable-ringbuf/src/lib.rs

#[cfg(test)]
mod test;
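/// A fixed-capacity ring (circular) buffer backed by a `Vec<T>`, supporting
/// element-, iterator- and slice-based writes as well as chunked, consuming reads
/// (see [`chunks_exact`](Self::chunks_exact)).
///
/// # Example
///
/// A minimal sketch of typical use; it assumes the library crate is named
/// `chunkable_ringbuf`, matching the `chunkable-ringbuf/` directory.
///
/// ```
/// use chunkable_ringbuf::RingBuf;
///
/// let mut buf = RingBuf::new(4, 0u8);
/// buf.push_from_slice_overflowing(&[1, 2, 3, 4]);
/// assert_eq!(buf.available(), 4);
///
/// // Consume the contents two elements at a time.
/// let chunks: Vec<Vec<u8>> = buf.chunks_exact(2).map(|c| c.to_vec()).collect();
/// assert_eq!(chunks, vec![vec![1u8, 2], vec![3, 4]]);
/// assert_eq!(buf.available(), 0);
/// ```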
pub struct RingBuf<T> {
data: Vec<T>,
read_index: usize,
write_index: usize,
available: usize,
}
impl<T> RingBuf<T> {
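/// Creates a ring buffer with capacity `size`, pre-filled with clones of `value`.
///
/// Panics if `size` is zero.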
pub fn new(size: usize, value: T) -> Self
where
T: Clone,
{
assert!(size != 0);
Self {
data: vec![value; size],
read_index: 0,
write_index: 0,
available: 0,
}
}
/// Total capacity
pub fn capacity(&self) -> usize {
self.data.len()
}
/// Number of elements written but not yet read
pub fn available(&self) -> usize {
self.available
}
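/// Discards all unread data without touching the underlying storage.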
pub fn clear(&mut self) {
self.read_index = self.write_index;
self.available = 0;
}
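/// Pushes `value` if there is free capacity. Returns `true` if the value was
/// stored, or `false` (dropping the value) if the buffer was full.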
pub fn push(&mut self, value: T) -> bool {
if self.available < self.data.len() {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
self.available += 1;
true
} else {
false
}
}
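/// Pushes `value`, overwriting the oldest unread element when the buffer is full.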
pub fn push_overflowing(&mut self, value: T) {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
if self.available < self.data.len() {
self.available += 1;
} else {
self.read_index = (self.read_index + 1) % self.data.len();
}
}
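/// Pushes elements from `iter` until the buffer is full or the iterator is
/// exhausted, whichever comes first. Returns the number of elements pushed.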
pub fn push_from_iter<I: Iterator<Item = T>>(&mut self, mut iter: I) -> usize {
let mut len = 0;
while len < self.data.len() - self.available {
if let Some(value) = iter.next() {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
len += 1;
} else {
break;
}
}
self.available += len;
len
}
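/// Pushes every element of `iter`, overwriting the oldest unread data once the
/// buffer overflows, so only the most recent `capacity()` elements are retained.
/// Returns the total number of elements pushed.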
pub fn push_from_iter_overflowing<I: Iterator<Item = T>>(&mut self, iter: I) -> usize {
let mut len = 0;
for value in iter {
self.data[self.write_index] = value;
self.write_index = (self.write_index + 1) % self.data.len();
len += 1;
}
if self.available + len <= self.data.len() {
self.available += len;
} else {
self.read_index = self.write_index;
self.available = self.data.len();
}
len
}
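/// Copies all of `values` into the buffer, overwriting the oldest unread data on
/// overflow.
///
/// Panics if `values` is longer than the capacity.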
pub fn push_from_slice_overflowing(&mut self, values: &[T])
where
T: Clone,
{
assert!(values.len() <= self.data.len());
// Number of slots between the write index and the end of the buffer.
let split_at = self.data.len() - self.write_index;
if split_at < values.len() {
// The write wraps: fill the tail of the buffer, then continue at the front.
self.data[self.write_index..].clone_from_slice(&values[0..split_at]);
self.data[0..values.len() - split_at].clone_from_slice(&values[split_at..]);
self.write_index = values.len() - split_at;
} else {
self.data[self.write_index..self.write_index + values.len()].clone_from_slice(values);
// Wrap the write index back to 0 if the write ended exactly at the buffer's end.
self.write_index = (self.write_index + values.len()) % self.data.len();
}
if self.available + values.len() <= self.data.len() {
// We have enough room
self.available += values.len();
} else {
// Overflow!
self.read_index = self.write_index;
self.available = self.data.len();
}
}
/// Returns the available (unread) data as `(right, left)` slices.
/// That is, the elements in `.1` come after those in `.0`.
///
/// Does not advance the read index.
pub fn as_slices(&self) -> (&[T], &[T]) {
let (left, right) = self.data.split_at(self.read_index);
if self.available < right.len() {
(&right[0..self.available], &[])
} else {
(right, &left[0..self.available - right.len()])
}
}
/// Returns the available (unread) data as `(right, left)` mutable slices.
/// That is, the elements in `.1` come after those in `.0`.
///
/// Does not advance the read index.
pub fn as_slices_mut(&mut self) -> (&mut [T], &mut [T]) {
let (left, right) = self.data.split_at_mut(self.read_index);
if self.available < right.len() {
(&mut right[0..self.available], &mut [])
} else {
let right_len = right.len();
(right, &mut left[0..self.available - right_len])
}
}
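/// Returns an iterator over contiguous chunks of exactly `chunk_size` elements,
/// consuming them from the buffer as they are yielded.
///
/// Panics if `chunk_size` is zero or does not divide both the capacity and the
/// current read index.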
pub fn chunks_exact(&mut self, chunk_size: usize) -> ChunksExact<'_, T> {
assert_eq!(self.read_index % chunk_size, 0);
assert_eq!(self.data.len() % chunk_size, 0);
ChunksExact {
ringbuf: self,
chunk_size,
}
}
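/// Returns an iterator over contiguous mutable chunks of exactly `chunk_size`
/// elements, consuming them from the buffer as they are yielded.
///
/// Panics if `chunk_size` is zero or does not divide both the capacity and the
/// current read index.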
pub fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
assert_eq!(self.read_index % chunk_size, 0);
assert_eq!(self.data.len() % chunk_size, 0);
ChunksExactMut {
ringbuf: self,
chunk_size,
}
}
/// Read (consume) all the available elements, returned as `(right, left)`.
/// That is, the elements in `.1` come after those in `.0`.
pub fn read_all_slices(&mut self) -> (&[T], &[T]) {
self.read_first_slices(self.available)
}
/// Read (consume) at most `number` of the available elements, returned as `(right, left)`.
/// That is, the elements in `.1` come after those in `.0`.
pub fn read_first_slices(&mut self, number: usize) -> (&[T], &[T]) {
let (left, right) = self.data.split_at(self.read_index);
let len = self.available.min(number);
self.available -= len;
if len < right.len() {
self.read_index += len;
(&right[0..len], &[])
} else {
self.read_index = len - right.len();
(right, &left[0..len - right.len()])
}
}
/// Read (consume) the oldest available element, or `None` if the buffer is empty.
pub fn read_first_one(&mut self) -> Option<&T> {
if self.available > 0 {
let ret = &self.data[self.read_index];
self.read_index = (self.read_index + 1) % self.data.len();
self.available -= 1;
Some(ret)
} else {
None
}
}
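/// Returns the whole underlying storage split at the read index, as
/// `(before_read, from_read)`, ignoring how much of it is unread.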
pub fn as_raw_slices(&self) -> (&[T], &[T]) {
self.data.split_at(self.read_index)
}
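/// Mutable variant of [`as_raw_slices`](Self::as_raw_slices).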
pub fn as_raw_slices_mut(&mut self) -> (&mut [T], &mut [T]) {
self.data.split_at_mut(self.read_index)
}
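/// Returns the whole underlying storage as a single slice, in storage order.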
pub fn raw_slice(&self) -> &[T] {
&self.data
}
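/// Mutable variant of [`raw_slice`](Self::raw_slice).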
pub fn raw_slice_mut(&mut self) -> &mut [T] {
&mut self.data
}
/// Skip elements so that the read index becomes aligned to `align`, returning the
/// number of available elements that were skipped (at most `align - 1`).
///
/// Panics if `align` is zero or does not evenly divide the capacity.
pub fn align_skip(&mut self, align: usize) -> usize {
assert_eq!(self.data.len() % align, 0);
if self.read_index % align != 0 {
let skip = align - (self.read_index % align);
self.read_index = (self.read_index + skip) % self.data.len();
if skip <= self.available {
self.available -= skip;
skip
} else {
let available = self.available;
self.available = 0;
self.write_index = self.read_index;
available
}
} else {
0
}
}
}
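/// Iterator created by [`RingBuf::chunks_exact`]; yields contiguous chunks of
/// exactly `chunk_size` elements, consuming them from the ring buffer.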
pub struct ChunksExact<'a, T> {
ringbuf: &'a mut RingBuf<T>,
chunk_size: usize,
}
impl<'a, T> Iterator for ChunksExact<'a, T> {
type Item = &'a [T];
fn next(&mut self) -> Option<Self::Item> {
if self.ringbuf.available >= self.chunk_size {
let read_index = self.ringbuf.read_index;
self.ringbuf.read_index =
(self.ringbuf.read_index + self.chunk_size) % self.ringbuf.data.len();
self.ringbuf.available -= self.chunk_size;
let ret: &[T] = &self.ringbuf.data[read_index..read_index + self.chunk_size];
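// SAFETY: the lifetime is extended to `'a`, for which the iterator exclusively
// borrows the ring buffer; `next` only changes the indices, never the element
// storage, so the returned shared slice stays valid for `'a`.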
let ret: &'a [T] = unsafe { std::mem::transmute(ret) };
Some(ret)
} else {
None
}
}
}
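/// Iterator created by [`RingBuf::chunks_exact_mut`]; yields contiguous mutable
/// chunks of exactly `chunk_size` elements, consuming them from the ring buffer.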
pub struct ChunksExactMut<'a, T> {
ringbuf: &'a mut RingBuf<T>,
chunk_size: usize,
}
impl<'a, T> Iterator for ChunksExactMut<'a, T> {
type Item = &'a mut [T];
fn next(&mut self) -> Option<Self::Item> {
if self.ringbuf.available >= self.chunk_size {
let read_index = self.ringbuf.read_index;
self.ringbuf.read_index =
(self.ringbuf.read_index + self.chunk_size) % self.ringbuf.data.len();
self.ringbuf.available -= self.chunk_size;
let ret: &mut [T] = &mut self.ringbuf.data[read_index..read_index + self.chunk_size];
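// SAFETY: the lifetime is extended to `'a`, for which the iterator exclusively
// borrows the ring buffer. `read_index` and `available` were advanced above, so
// every call yields a disjoint region and no two live `&mut` slices overlap.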
let ret: &'a mut [T] = unsafe { std::mem::transmute(ret) };
Some(ret)
} else {
None
}
}
}