engine/allocators/linear_allocator.rs

// SPDX-FileCopyrightText: 2024 Jens Pitkänen <jens.pitkanen@helsinki.fi>
//
// SPDX-License-Identifier: GPL-3.0-or-later

use core::{
    ffi::c_void,
    fmt::Debug,
    marker::PhantomData,
    mem::{transmute, MaybeUninit},
    slice,
    sync::atomic::{AtomicUsize, Ordering},
};

use bytemuck::{fill_zeroes, Zeroable};
use platform::Box;

/// Creates a static [`LinearAllocator`](super::LinearAllocator) with the given
/// amount of bytes of backing memory.
///
/// Note that this creates an allocator backed by a static byte array, i.e. the
/// memory isn't dynamically allocated nor freed, it's just a big static
/// variable, *one for each call of this macro.* Generally this appears at most
/// once per crate; there shouldn't be much of a reason to have multiple of
/// these, if any.
///
/// As such, even though the result can be assigned to a variable (e.g. `let
/// arena = static_allocator!(1);`), that variable is only a borrow of the
/// single static variable that this macro expands to. If a function containing
/// that `let` is called multiple times, `arena` will borrow the same arena
/// every time.
///
/// ### Example
///
/// ```
/// use engine::allocators::{LinearAllocator, static_allocator};
/// static PERSISTENT_ARENA: &LinearAllocator = static_allocator!(1024 * 1024);
/// ```
#[macro_export]
macro_rules! static_allocator {
    ($size:expr) => {{
        static mut MEM: [u8; $size] = [0; $size];
        // Safety (LinearAllocator::from_raw_slice): MEM is only accessible in
        // this scope, and this scope only creates one allocator from it (the
        // allocator is stored in a static variable, so this initializer only
        // runs once), so MEM won't be shared. Since MEM is a static variable,
        // the pointer is valid for 'static.
        static ALLOCATOR: $crate::allocators::LinearAllocator =
            unsafe { $crate::allocators::LinearAllocator::from_raw_slice(&raw mut MEM) };
        &ALLOCATOR
    }};
}

pub use static_allocator;

/// A linear allocator with a constant capacity. It can allocate memory regions
/// with any size or alignment (within the capacity) very fast, but individual
/// allocations can't be freed to make more space while other allocations are
/// still in use.
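///
/// ### Example
///
/// A sketch of the intended usage pattern: individual allocations are cheap
/// bump allocations and can't be individually freed; once the capacity runs
/// out, allocation fails until the whole arena is reset (which requires
/// exclusive access).
///
/// ```
/// use engine::allocators::{LinearAllocator, static_allocator};
/// static ARENA: &LinearAllocator = static_allocator!(64);
/// let _a = ARENA.try_alloc_uninit_slice::<u8>(32, None).unwrap();
/// let _b = ARENA.try_alloc_uninit_slice::<u8>(32, None).unwrap();
/// // The arena is now full, so further allocations fail:
/// assert!(ARENA.try_alloc_uninit_slice::<u8>(1, None).is_none());
/// ```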
pub struct LinearAllocator<'a> {
    backing_mem_lifetime_holder: PhantomData<&'a mut ()>,
    backing_mem_ptr: *mut c_void,
    backing_mem_size: usize,
    /// The amount of bytes allocated starting from `backing_mem_ptr`. Can
    /// exceed `backing_mem_size` when the allocator reaches capacity, but in
    /// such a case, we don't even create a reference to the out-of-bounds area
    /// of memory.
    allocated: AtomicUsize,
}

impl Debug for LinearAllocator<'_> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("LinearAllocator")
            .field("backing_mem_ptr", &self.backing_mem_ptr)
            .field("backing_mem_size", &self.backing_mem_size)
            .field("allocated", &self.allocated)
            .finish_non_exhaustive()
    }
}

/// Safety: the only non-Sync part of [`LinearAllocator`] is the backing memory
/// pointer, which is fine to access from multiple threads simultaneously, as
/// the regions of memory accessed via the pointer are distinct on every
/// access, due to the atomic incrementing of [`LinearAllocator::allocated`].
unsafe impl Sync for LinearAllocator<'_> {}

impl LinearAllocator<'_> {
    /// Creates a new [`LinearAllocator`] with `capacity` bytes of backing
    /// memory. Returns None if allocating the memory fails or if `capacity` is
    /// larger than [`isize::MAX`].
    ///
    /// See [`static_allocator`](super::static_allocator) for bootstrapping one
    /// of these.
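    ///
    /// ### Example
    ///
    /// A minimal sketch, carving a sub-allocator out of a static arena:
    ///
    /// ```
    /// use engine::allocators::{LinearAllocator, static_allocator};
    /// static ARENA: &LinearAllocator = static_allocator!(1024);
    /// let sub_arena = LinearAllocator::new(ARENA, 256).unwrap();
    /// assert_eq!(256, sub_arena.total());
    /// ```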
    pub fn new<'a>(allocator: &'a LinearAllocator, capacity: usize) -> Option<LinearAllocator<'a>> {
        if capacity > isize::MAX as usize {
            // Practically never happens, but asserting this here helps avoid a
            // safety check later.
            return None;
        }

        let buffer: &'a mut [MaybeUninit<u8>] = allocator.try_alloc_uninit_slice(capacity, None)?;

        Some(LinearAllocator {
            backing_mem_lifetime_holder: PhantomData,
            backing_mem_ptr: buffer.as_mut_ptr() as *mut c_void,
            backing_mem_size: buffer.len(),
            allocated: AtomicUsize::new(0),
        })
    }

    /// Creates a new [`LinearAllocator`] with as many bytes of backing memory
    /// as there are in the given slice.
    ///
    /// Only the first [`isize::MAX`] bytes of the slice are used if it's longer
    /// than that.
    ///
    /// ### Safety
    ///
    /// The `backing_slice` pointer must not be shared, nor the memory behind
    /// it, and it must live for as long as this allocator (and any allocations
    /// made from it) live. Consider this function as taking ownership of the
    /// memory it points to for 'static.
    pub const unsafe fn from_raw_slice(backing_slice: *mut [u8]) -> LinearAllocator<'static> {
        LinearAllocator {
            backing_mem_lifetime_holder: PhantomData,
            backing_mem_ptr: (*backing_slice).as_mut_ptr() as *mut c_void,
            backing_mem_size: if backing_slice.len() > isize::MAX as usize {
                isize::MAX as usize
            } else {
                backing_slice.len()
            },
            allocated: AtomicUsize::new(0),
        }
    }

    /// Returns an estimate of the amount of memory currently allocated, in
    /// bytes.
    ///
    /// An "estimate" because the value is read with a relaxed
    /// ([`Ordering::Relaxed`]) atomic load, so allocations made concurrently on
    /// other threads may not be reflected in it yet. Still, the value can be
    /// considered accurate for some point in time.
    pub fn allocated(&self) -> usize {
        self.allocated
            .load(Ordering::Relaxed)
            .min(self.backing_mem_size)
    }

    /// Returns the total (free and allocated) amount of memory owned by this
    /// allocator, in bytes.
    pub fn total(&self) -> usize {
        self.backing_mem_size
    }

    /// Allocates memory for a `T` and returns a boxed version of it.
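    ///
    /// ### Example
    ///
    /// A minimal sketch; the allocator has to be `&'static` so the `Box` can't
    /// outlive the backing memory:
    ///
    /// ```
    /// use engine::allocators::{LinearAllocator, static_allocator};
    /// static ARENA: &LinearAllocator = static_allocator!(1024);
    /// let boxed = ARENA.try_alloc_box(123_u32).unwrap();
    /// // Assuming platform::Box derefs to its contents like std's Box:
    /// assert_eq!(123, *boxed);
    /// ```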
    pub fn try_alloc_box<T>(&'static self, value: T) -> Option<Box<T>> {
        let slice = self.try_alloc_uninit_slice(1, None)?;
        let (allocation, _) = slice.split_first_mut().unwrap();
        let allocation = allocation.write(value);
        Some(Box::from_mut(allocation))
    }

    /// Allocates memory for a `[T]` with `len` elements, zeroes it out, and
    /// returns a boxed version of it.
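    ///
    /// ### Example
    ///
    /// A minimal sketch, allocating a zeroed slice of plain-old-data values:
    ///
    /// ```
    /// use engine::allocators::{LinearAllocator, static_allocator};
    /// static ARENA: &LinearAllocator = static_allocator!(1024);
    /// let zeroed = ARENA.try_alloc_boxed_slice_zeroed::<u32>(8).unwrap();
    /// // Assuming platform::Box<[T]> derefs to [T] like std's Box:
    /// assert_eq!(&[0u32; 8], &zeroed[..]);
    /// ```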
    pub fn try_alloc_boxed_slice_zeroed<T: Zeroable>(
        &'static self,
        len: usize,
    ) -> Option<Box<[T]>> {
        let slice = self.try_alloc_uninit_slice::<T>(len, None)?;
        fill_zeroes(slice);
        // Safety: the whole slice is initialized by the fill_zeroes above.
        let slice = unsafe { transmute::<&mut [MaybeUninit<T>], &mut [T]>(slice) };
        Some(Box::from_mut(slice))
    }

    /// Allocates memory for a `[T]` with `len` elements, fills it by calling
    /// `init`, and returns a boxed version of it.
    ///
    /// If `init` returns None for any invocation, this also returns None. Note
    /// that the already allocated memory isn't freed up in this case (due to
    /// [`LinearAllocator`] being strictly growing for thread-safety reasons).
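    ///
    /// ### Example
    ///
    /// A minimal sketch; the counter-based initializer here is just for
    /// illustration:
    ///
    /// ```
    /// use engine::allocators::{LinearAllocator, static_allocator};
    /// static ARENA: &LinearAllocator = static_allocator!(1024);
    /// let mut counter = 0_u32;
    /// let slice = ARENA
    ///     .try_alloc_boxed_slice_with(
    ///         || {
    ///             counter += 1;
    ///             Some(counter)
    ///         },
    ///         4,
    ///     )
    ///     .unwrap();
    /// // Assuming platform::Box<[T]> derefs to [T] like std's Box:
    /// assert_eq!(&[1, 2, 3, 4], &slice[..]);
    /// ```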
    pub fn try_alloc_boxed_slice_with<T, F: FnMut() -> Option<T>>(
        &'static self,
        mut init: F,
        len: usize,
    ) -> Option<Box<[T]>> {
        let slice = self.try_alloc_uninit_slice::<T>(len, None)?;
        for uninit in &mut *slice {
            uninit.write(init()?);
        }
        // Safety: the whole slice is initialized by the loop above.
        let slice = unsafe { transmute::<&mut [MaybeUninit<T>], &mut [T]>(slice) };
        Some(Box::from_mut(slice))
    }

    /// Allocates memory for a slice of `MaybeUninit<T>`, leaving the contents
    /// of the slice uninitialized, and returns None if there's not enough free
    /// memory.
    ///
    /// Note that an allocation which fails because the allocator is full still
    /// reserves the requested span (including alignment padding), so once this
    /// returns `None`, subsequent allocations will keep failing until
    /// [`LinearAllocator::reset`].
    ///
    /// If `alignment` is Some, it is used instead of `T`'s alignment. It must
    /// be a power of two and a multiple of `T`'s alignment, otherwise this
    /// function panics.
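    ///
    /// ### Example
    ///
    /// A minimal sketch, over-aligning a byte buffer (64 here is just an
    /// assumed alignment requirement, e.g. a cache line):
    ///
    /// ```
    /// use engine::allocators::{LinearAllocator, static_allocator};
    /// static ARENA: &LinearAllocator = static_allocator!(1024);
    /// let buffer = ARENA.try_alloc_uninit_slice::<u8>(128, Some(64)).unwrap();
    /// assert_eq!(128, buffer.len());
    /// assert_eq!(0, buffer.as_ptr() as usize % 64);
    /// ```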
    pub fn try_alloc_uninit_slice<'a, T>(
        &'a self,
        len: usize,
        alignment: Option<usize>,
    ) -> Option<&'a mut [MaybeUninit<T>]> {
        let alignment = if let Some(alignment) = alignment {
            assert_eq!(
                0,
                alignment % align_of::<T>(),
                "invalid manual alignment for allocation",
            );
            alignment
        } else {
            align_of::<T>()
        };

        // Reserve enough bytes for the slice itself plus worst-case alignment
        // padding, bailing out if the total doesn't fit in a usize.
        let size_in_bytes = len.checked_mul(size_of::<T>())?;
        let reserved_bytes = size_in_bytes.checked_add(alignment - 1)?;
        // This is a relaxed fetch_update since we don't really care about the
        // order of allocations, we don't have any other atomic operations to
        // order, all we care about is that we get distinct allocation offsets
        // between different calls to try_alloc_uninit_slice. The checked_add
        // ensures `self.allocated` never wraps around; if it would, the
        // allocator is simply treated as full.
        let allocation_unaligned_offset = self
            .allocated
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |allocated| {
                allocated.checked_add(reserved_bytes)
            })
            .ok()?;

        // Make sure the entire allocation fits in the backing memory.
        if allocation_unaligned_offset + reserved_bytes > self.backing_mem_size {
            return None;
        }

        // Safety:
        // - Due to the check above, we know the offset is less than
        //   `self.backing_mem_size`, which in turn is clamped to `isize::MAX`
        //   in the allocator constructor.
        // - Due to the same check above, we know the offset version of the
        //   pointer is still within the bounds of the allocated object.
        let unaligned_allocation_ptr =
            unsafe { self.backing_mem_ptr.byte_add(allocation_unaligned_offset) };

        // Figure out the properly aligned offset of the new allocation.
        let extra_offset_for_alignment = unaligned_allocation_ptr.align_offset(alignment);
        let allocation_aligned_offset =
            allocation_unaligned_offset.saturating_add(extra_offset_for_alignment);

        // Make sure the *aligned* allocation fits in the backing memory.
        if allocation_aligned_offset + size_in_bytes > self.backing_mem_size {
            return None;
        }

        // Safety: exactly the same pattern and reasoning used for
        // `unaligned_allocation_ptr`, see the safety explanation for that. As a
        // slight addendum, note how the bounds check above takes into account
        // the aligned offset plus `len * size_of::<T>()`, as that is the area
        // of memory we'll be creating a reference to.
        let aligned_allocation_ptr =
            unsafe { unaligned_allocation_ptr.byte_add(extra_offset_for_alignment) };

        let uninit_t_ptr = aligned_allocation_ptr as *mut MaybeUninit<T>;

        // Safety:
        // - `uninit_t_ptr` is non-null and valid for both reads and writes
        //   (which in turn have to follow MaybeUninit semantics, so we're
        //   "passing the unsafety" to the user of the slice).
        //   - The entire memory range of the slice is contained within a single
        //     allocated object, the backing memory set up in the constructor.
        //   - `uninit_t_ptr` is non-null and aligned regardless of the slice
        //     length or the size of T.
        // - `uninit_t_ptr` does point to `len` consecutive properly initialized
        //   values of type `MaybeUninit<T>`, because uninitialized values are
        //   valid for the type.
        // - The memory referenced by this slice is not accessed through any
        //   other pointer for the duration of lifetime 'a, since this pointer
        //   is derived from `self.allocated`, which has been bumped past the
        //   bounds of this slice, and is not reset until self is mutably
        //   borrowable again (i.e. after this slice has been dropped).
        // - `len * size_of::<MaybeUninit<T>>()` is not larger than
        //   `isize::MAX`, because it is not larger than `self.backing_mem_size`
        //   as checked above, and that in turn is checked to be no larger than
        //   `isize::MAX` in the constructor.
        let uninit_t_slice: &'a mut [MaybeUninit<T>] =
            unsafe { slice::from_raw_parts_mut(uninit_t_ptr, len) };

        Some(uninit_t_slice)
    }

    /// Resets the linear allocator, reclaiming all of the backing memory for
    /// future allocations.
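    ///
    /// ### Example
    ///
    /// A minimal sketch; the exclusive borrow guarantees no allocations are
    /// still alive when the memory is reclaimed:
    ///
    /// ```
    /// use engine::allocators::{LinearAllocator, static_allocator};
    /// static ARENA: &LinearAllocator = static_allocator!(1024);
    /// let mut scratch = LinearAllocator::new(ARENA, 256).unwrap();
    /// scratch.try_alloc_uninit_slice::<u8>(200, None).unwrap();
    /// scratch.reset();
    /// assert_eq!(0, scratch.allocated());
    /// ```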
    pub fn reset(&mut self) {
        // Safety: though this is not an unsafe operation, pretty much all the
        // unsafety in this file relies on `self.backing_mem_ptr +
        // self.allocated` not pointing into memory that is already borrowed.
        // Here's why that holds: we have a mutable borrow of self, so there
        // are no other borrows of self, and thus no pointers into the backing
        // memory. (All previous allocations have lifetimes that cannot outlive
        // the related immutable borrow of this allocator.)
        //
        // Additionally, any atomic shenanigans between threads don't need to
        // be accounted for, because the exclusive borrow of self means that
        // self can't currently be shared between threads.
        self.allocated.store(0, Ordering::Release);
    }
}