engine/collections/ring_buffer.rs

// SPDX-FileCopyrightText: 2025 Jens Pitkänen <jens.pitkanen@helsinki.fi>
//
// SPDX-License-Identifier: GPL-3.0-or-later

mod boxed;
mod slice;

use core::{
    marker::PhantomData,
    mem::{transmute, MaybeUninit},
    sync::atomic::{AtomicUsize, Ordering},
};

use bytemuck::{fill_zeroes, Zeroable};
use platform::Box;

use crate::allocators::LinearAllocator;

pub use boxed::*;
pub use slice::*;

/// Metadata related to a specific allocation from a [`RingBuffer`].
#[derive(Debug)]
pub struct RingAllocationMetadata {
    pub(super) offset: usize,
    pub(super) padding: usize,
    pub(super) buffer_identifier: usize,
}

/// Ring buffer for allocating varying-length slices in a sequential, FIFO
/// fashion.
///
/// Allocations are represented by [`RingSlice`]s, which are lifetimeless
/// handles to this buffer, and can be used to get a `&mut [T]` that holds on
/// to the memory until the [`RingSlice`] is passed into [`RingBuffer::free`].
/// The slices must be freed in the same order they were allocated. As such,
/// dropping a [`RingSlice`] causes its memory to never be reclaimed, which
/// will "clog" the ring buffer.
///
/// The sum of the lengths of the slices allocated from this buffer, when full,
/// may be less than the total capacity, since the individual slices are
/// contiguous and can't span across the end of the backing buffer. These gaps
/// could be prevented with memory mapping trickery in the future.
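///
/// A minimal usage sketch, mirroring the tests at the bottom of this file
/// (the allocator setup and element type are just for illustration):
///
/// ```ignore
/// static ALLOC: &LinearAllocator = static_allocator!(10);
/// let mut ring = RingBuffer::<u8>::new(ALLOC, 10).unwrap();
///
/// let mut slice = ring.allocate(4).unwrap();
/// slice[0] = 123;
/// ring.free(slice).unwrap(); // slices must be freed oldest-first
/// ```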
pub struct RingBuffer<'a, T> {
    buffer_lifetime: PhantomData<&'a mut [T]>,
    buffer_ptr: *mut MaybeUninit<T>,
    buffer_len: usize,
    allocated_offset: usize,
    allocated_len: usize,
    buffer_identifier: usize,
}

// Safety: if the allocations themselves are Sync, then the whole ring buffer
// is Sync, since the only non-Sync part of this struct is buffer_ptr, and
// there are no additional thread safety concerns related to it. This is even
// more straightforward than LinearAllocator's case (which is otherwise
// similar), because allocating from this in general requires a mutable
// borrow, which means that any shared borrows between threads are all just
// reads.
unsafe impl<T: Sync> Sync for RingBuffer<'_, T> {}

fn make_buffer_id() -> usize {
    static BUFFER_ID_COUNTER: AtomicUsize = AtomicUsize::new(0);
    let prev_id = BUFFER_ID_COUNTER.fetch_add(1, Ordering::Relaxed);
    prev_id.checked_add(1).unwrap()
}

impl<T> RingBuffer<'_, T> {
    /// Allocates a new ring buffer with the given capacity.
    pub fn new(
        allocator: &'static LinearAllocator,
        capacity: usize,
    ) -> Option<RingBuffer<'static, T>> {
        let buffer = allocator.try_alloc_uninit_slice(capacity, None)?;
        Some(RingBuffer {
            buffer_lifetime: PhantomData,
            buffer_ptr: buffer.as_mut_ptr(),
            buffer_len: buffer.len(),
            allocated_offset: 0,
            allocated_len: 0,
            buffer_identifier: make_buffer_id(),
        })
    }

    /// Creates a new ring buffer backed by the given buffer.
    ///
    /// ### Safety
    ///
    /// All allocations made from this [`RingBuffer`] must be passed back into
    /// [`RingBuffer::free`] before it is dropped: the backing memory is only
    /// borrowed for its lifetime, and any [`Box`]es handed out by this buffer
    /// would otherwise outlive that borrow.
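    ///
    /// A sketch of borrowing a local buffer (hypothetical sizes; the caller
    /// upholds the safety requirement by freeing every allocation before the
    /// buffers are dropped):
    ///
    /// ```ignore
    /// let mut backing = [MaybeUninit::<u8>::uninit(); 4];
    /// let mut ring = unsafe { RingBuffer::from_mut(&mut backing) };
    /// let slice = ring.allocate(4).unwrap();
    /// ring.free(slice).unwrap(); // freed before `ring` and `backing` drop
    /// ```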
    pub unsafe fn from_mut(buffer: &mut [MaybeUninit<T>]) -> RingBuffer<T> {
        RingBuffer {
            buffer_lifetime: PhantomData,
            buffer_ptr: buffer.as_mut_ptr(),
            buffer_len: buffer.len(),
            allocated_offset: 0,
            allocated_len: 0,
            buffer_identifier: make_buffer_id(),
        }
    }

    /// Returns how many elements can be allocated at maximum without freeing.
    pub fn capacity(&self) -> usize {
        self.buffer_len
    }

    /// If it fits, allocates `len` contiguous elements and returns the offset
    /// and padding of the allocation.
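    ///
    /// A worked example of the two cases (the numbers mirror the
    /// `wraps_when_full` test): with `buffer_len = 10`, `allocated_offset = 4`
    /// and `allocated_len = 4`, a request for `len = 4` computes
    /// `allocated_end = 8`, which doesn't fit before the end of the buffer
    /// (8 + 4 > 10), but since 4 + 2 + 4 <= 10, the slice is placed at the
    /// start of the buffer and `Some((0, 2))` is returned, recording the
    /// 2-element gap at the end as padding.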
    fn allocate_offset(&mut self, len: usize) -> Option<(usize, usize)> {
        let allocated_end = self.allocated_offset + self.allocated_len;
        // The outer modulo maps an allocated span that ends exactly at the
        // end of the buffer to zero padding (instead of a full buffer's
        // worth, which would make the second branch below spuriously fail).
        let padding_to_end =
            (self.buffer_len - (allocated_end % self.buffer_len)) % self.buffer_len;
        if allocated_end + len <= self.buffer_len {
            // The allocation fits between the current allocated slice's end and
            // the end of the buffer
            self.allocated_len += len;
            Some((allocated_end, 0))
        } else if self.allocated_len + padding_to_end + len <= self.buffer_len {
            // The slice fits even with padding added to the end so that the
            // allocated slice starts at the beginning
            self.allocated_len += padding_to_end + len;
            Some((0, padding_to_end))
        } else {
            None
        }
    }
}

impl<T: Zeroable> RingBuffer<'_, T> {
    /// Allocates and zeroes out a slice of the given length if there's enough
    /// contiguous free space.
    pub fn allocate(&mut self, len: usize) -> Option<RingSlice<T>> {
        let (offset, padding) = self.allocate_offset(len)?;

        // Safety: The offset is at most the length of the backing slice (only
        // equal to it for zero-length allocations), so the result stays within
        // or one past the end of the same allocation, which is safe to offset
        // by.
        let ptr = unsafe { self.buffer_ptr.add(offset) };

        // Safety: The offset allocation logic ensures that we create distinct
        // slices, so the slice created here does not alias with any other
        // slice. The pointer is not null since it's from a slice in the
        // constructor.
        let slice: &mut [MaybeUninit<T>] = unsafe { core::slice::from_raw_parts_mut(ptr, len) };

        fill_zeroes(slice);

        // Safety: fill_zeroes above initializes the whole slice.
        let slice = unsafe { transmute::<&mut [MaybeUninit<T>], &mut [T]>(slice) };

        // Safety: the constructors of the RingBuffer ensure that the memory is
        // not freed while this Box exists, and `RingBuffer::allocate_offset`
        // ensures that no aliasing allocations are created.
        let slice = unsafe { Box::from_ptr(&raw mut *slice) };

        Some(RingSlice {
            slice,
            metadata: RingAllocationMetadata {
                offset,
                padding,
                buffer_identifier: self.buffer_identifier,
            },
        })
    }

    /// Reclaims the memory occupied by the given slice. Returns the slice back
    /// in an `Err` if it isn't the current head of the allocated span, in
    /// which case the memory is not reclaimed.
    ///
    /// ### Panics
    ///
    /// Panics if the [`RingSlice`] was allocated from a different
    /// [`RingBuffer`].
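    ///
    /// A sketch of the FIFO requirement (`ring` is hypothetical, with enough
    /// capacity for both slices):
    ///
    /// ```ignore
    /// let first = ring.allocate(2).unwrap();
    /// let second = ring.allocate(2).unwrap();
    /// // `second` is not the head of the allocated span, so it's handed back:
    /// let second = ring.free(second).unwrap_err();
    /// ring.free(first).unwrap();
    /// ring.free(second).unwrap();
    /// ```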
    pub fn free(&mut self, slice: RingSlice<T>) -> Result<(), RingSlice<T>> {
        assert_eq!(
            self.buffer_identifier, slice.metadata.buffer_identifier,
            "this slice was not allocated from this ring buffer",
        );
        let allocated_offset_with_padding =
            (self.allocated_offset + slice.metadata.padding) % self.buffer_len;
        if slice.metadata.padding == 0 && slice.len() * size_of::<T>() == 0 {
            // No memory was reserved, so nothing needs to be done. This case
            // exists because the offset might not match here: a previous free
            // may have reset the offset after the allocated length reached 0.
            Ok(())
        } else if slice.metadata.offset == allocated_offset_with_padding {
            let freed_len = slice.len();
            self.allocated_offset = (self.allocated_offset + freed_len) % self.buffer_len;
            self.allocated_len -= freed_len + slice.metadata.padding;
            if self.allocated_len == 0 {
                self.allocated_offset = 0;
            }
            Ok(())
        } else {
            Err(slice)
        }
    }
}

impl<T> RingBuffer<'_, T> {
    /// Allocates space for one T if there's free space, and boxes it.
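    ///
    /// A minimal sketch (`ring` is a hypothetical `RingBuffer<u8>` with spare
    /// capacity):
    ///
    /// ```ignore
    /// let boxed = ring.allocate_box(123u8).unwrap();
    /// ring.free_box(boxed).unwrap();
    /// ```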
    pub fn allocate_box(&mut self, value: T) -> Result<RingBox<T>, T> {
        let Some((offset, padding)) = self.allocate_offset(1) else {
            return Err(value);
        };

        // Safety: the offset is smaller than the length of the backing slice,
        // so it's definitely safe to offset by.
        let ptr = unsafe { self.buffer_ptr.add(offset) };

        // Safety: as established above, ptr points to a specific element in the
        // slice whose raw pointer `self.buffer_ptr` is, so this is a valid
        // reference.
        let uninit_mut: &mut MaybeUninit<T> = unsafe { &mut *ptr };

        let init_mut = uninit_mut.write(value);

        // Safety: the constructors of the RingBuffer ensure that the memory is
        // not freed while this Box exists, and `RingBuffer::allocate_offset`
        // ensures that no aliasing allocations are created.
        let boxed = unsafe { Box::from_ptr(&raw mut *init_mut) };

        Ok(RingBox {
            boxed,
            metadata: RingAllocationMetadata {
                offset,
                padding,
                buffer_identifier: self.buffer_identifier,
            },
        })
    }

    /// Reclaims the memory occupied by the given box. Returns the box back in
    /// an `Err` if it isn't the current head of the allocated span, in which
    /// case the memory is not reclaimed.
    ///
    /// ### Panics
    ///
    /// Panics if the [`RingBox`] was allocated from a different [`RingBuffer`].
    pub fn free_box(&mut self, boxed: RingBox<T>) -> Result<(), RingBox<T>> {
        assert_eq!(
            self.buffer_identifier, boxed.metadata.buffer_identifier,
            "this box was not allocated from this ring buffer",
        );
        let allocated_offset_with_padding =
            (self.allocated_offset + boxed.metadata.padding) % self.buffer_len;
        if boxed.metadata.padding == 0 && size_of::<T>() == 0 {
            // No memory was reserved, so nothing needs to be done. This case
            // exists because the offset might not match here: a previous free
            // may have reset the offset after the allocated length reached 0.
            Ok(())
        } else if boxed.metadata.offset == allocated_offset_with_padding {
            self.allocated_offset = (self.allocated_offset + 1) % self.buffer_len;
            self.allocated_len -= 1 + boxed.metadata.padding;
            if self.allocated_len == 0 {
                self.allocated_offset = 0;
            }
            Ok(())
        } else {
            Err(boxed)
        }
    }
}

#[cfg(test)]
mod tests {
    use crate::allocators::{static_allocator, LinearAllocator};

    use super::RingBuffer;

    #[test]
    fn works_at_all() {
        static ALLOC: &LinearAllocator = static_allocator!(1);
        let mut ring = RingBuffer::<u8>::new(ALLOC, 1).unwrap();
        let mut slice = ring.allocate(1).unwrap();
        slice[0] = 123;
        ring.free(slice).unwrap();
    }

    #[test]
    fn wraps_when_full() {
        static ALLOC: &LinearAllocator = static_allocator!(10);
        let mut ring = RingBuffer::<u8>::new(ALLOC, 10).unwrap();

        let first_half = ring.allocate(4).unwrap();
        let _second_half = ring.allocate(4).unwrap();
        assert!(ring.allocate(4).is_none(), "ring should be full");

        // Wrap:
        ring.free(first_half).unwrap();
        let _third_half = ring.allocate(4).unwrap();

        assert!(ring.allocate(4).is_none(), "ring should be full");
    }

    #[test]
    #[should_panic]
    fn panics_on_free_with_wrong_buffer_identity() {
        static ALLOC_0: &LinearAllocator = static_allocator!(1);
        static ALLOC_1: &LinearAllocator = static_allocator!(1);

        let mut ring0 = RingBuffer::<u8>::new(ALLOC_0, 1).unwrap();
        let mut ring1 = RingBuffer::<u8>::new(ALLOC_1, 1).unwrap();

        let foo0 = ring0.allocate(1).unwrap();
        let _ = ring1.free(foo0); // should panic
    }
}