diff --git a/src/mm/allocator.rs b/src/mm/allocator.rs
deleted file mode 100644
index 941fff33b6..0000000000
--- a/src/mm/allocator.rs
+++ /dev/null
@@ -1,79 +0,0 @@
-//! Implementation of the Hermit Allocator for dynamically allocating heap memory
-//! in the kernel.
-
-use core::alloc::{GlobalAlloc, Layout};
-
-use hermit_sync::RawInterruptTicketMutex;
-use talc::{ErrOnOom, Span, Talc, Talck};
-
-pub struct LockedAllocator(Talck<RawInterruptTicketMutex, ErrOnOom>);
-
-impl LockedAllocator {
-	pub const fn new() -> Self {
-		Self(Talc::new(ErrOnOom).lock())
-	}
-
-	#[inline]
-	fn align_layout(layout: Layout) -> Layout {
-		let align = layout
-			.align()
-			.max(core::mem::align_of::<crossbeam_utils::CachePadded<u8>>());
-		Layout::from_size_align(layout.size(), align).unwrap()
-	}
-
-	pub unsafe fn init(&self, heap_bottom: *mut u8, heap_size: usize) {
-		let arena = Span::from_base_size(heap_bottom, heap_size);
-		unsafe {
-			self.0.lock().claim(arena).unwrap();
-		}
-	}
-}
-
-/// To avoid false sharing, the global memory allocator align
-/// all requests to a cache line.
-unsafe impl GlobalAlloc for LockedAllocator {
-	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-		let layout = Self::align_layout(layout);
-		unsafe { self.0.alloc(layout) }
-	}
-
-	unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-		let layout = Self::align_layout(layout);
-		unsafe { self.0.dealloc(ptr, layout) }
-	}
-
-	unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
-		let layout = Self::align_layout(layout);
-		unsafe { self.0.alloc_zeroed(layout) }
-	}
-
-	unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
-		let layout = Self::align_layout(layout);
-		unsafe { self.0.realloc(ptr, layout, new_size) }
-	}
-}
-
-#[cfg(all(test, not(target_os = "none")))]
-mod tests {
-	use core::mem;
-
-	use super::*;
-
-	#[test]
-	fn empty() {
-		const ARENA_SIZE: usize = 0x1000;
-		let mut arena: [u8; ARENA_SIZE] = [0; ARENA_SIZE];
-		let allocator: LockedAllocator = LockedAllocator::new();
-		unsafe {
-			allocator.init(arena.as_mut_ptr(), ARENA_SIZE);
-		}
-
-		let layout = Layout::from_size_align(1, 1).unwrap();
-		// we have 4 kbyte memory
-		assert!(unsafe { !allocator.alloc(layout.clone()).is_null() });
-
-		let layout = Layout::from_size_align(0x1000, mem::align_of::<usize>()).unwrap();
-		let addr = unsafe { allocator.alloc(layout) };
-		assert!(addr.is_null());
-	}
-}
diff --git a/src/mm/mod.rs b/src/mm/mod.rs
index 79241449dc..d2cf104bac 100644
--- a/src/mm/mod.rs
+++ b/src/mm/mod.rs
@@ -40,7 +40,6 @@
 //! │ │ │ │
 //! ```
 
-pub(crate) mod allocator;
 pub(crate) mod device_alloc;
 pub(crate) mod physicalmem;
 pub(crate) mod virtualmem;
@@ -50,10 +49,10 @@ use core::ops::Range;
 
 use align_address::Align;
 use free_list::{PageLayout, PageRange};
-use hermit_sync::Lazy;
+use hermit_sync::{Lazy, RawInterruptTicketMutex};
 pub use memory_addresses::{PhysAddr, VirtAddr};
+use talc::{ErrOnOom, Span, Talc, Talck};
 
-use self::allocator::LockedAllocator;
 #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
 use crate::arch::mm::paging::HugePageSize;
 pub use crate::arch::mm::paging::virtual_to_physical;
@@ -64,7 +63,7 @@ use crate::{arch, env};
 
 #[cfg(target_os = "none")]
 #[global_allocator]
-pub(crate) static ALLOCATOR: LockedAllocator = LockedAllocator::new();
+pub(crate) static ALLOCATOR: Talck<RawInterruptTicketMutex, ErrOnOom> = Talc::new(ErrOnOom).lock();
 
 /// Physical and virtual address range of the 2 MiB pages that map the kernel.
 static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
@@ -276,11 +275,9 @@ pub(crate) fn init() {
 
 	let heap_end_addr = map_addr;
 
+	let arena = Span::new(heap_start_addr.as_mut_ptr(), heap_end_addr.as_mut_ptr());
 	unsafe {
-		ALLOCATOR.init(
-			heap_start_addr.as_mut_ptr(),
-			(heap_end_addr - heap_start_addr) as usize,
-		);
+		ALLOCATOR.lock().claim(arena).unwrap();
 	}
 
 	info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} Bytes unmapped)");
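For illustration, here is a minimal standalone sketch of driving talc's `Talck` directly, the way the kernel now does instead of going through the removed `LockedAllocator` wrapper. It only uses calls that appear in the diff above (`Talc::new(ErrOnOom).lock()`, `Span::from_base_size`, `lock().claim`, and the `GlobalAlloc` methods); the 4 KiB arena, the local binding, and the flow mirror the removed unit test and are illustrative, not the kernel's actual setup, which uses a `#[global_allocator]` static and the heap mapping established in `mm::init`.

```rust
// Sketch only: assumes `talc` and `hermit_sync` as dependencies, hosted target.
use core::alloc::{GlobalAlloc, Layout};

use hermit_sync::RawInterruptTicketMutex;
use talc::{ErrOnOom, Span, Talc, Talck};

fn main() {
	// The kernel's new `ALLOCATOR` static is built the same way, just in a `const` context.
	let allocator: Talck<RawInterruptTicketMutex, ErrOnOom> = Talc::new(ErrOnOom).lock();

	// Hand the allocator a small arena, as `LockedAllocator::init` used to do for the heap.
	let mut arena = [0u8; 0x1000];
	let span = Span::from_base_size(arena.as_mut_ptr(), arena.len());
	unsafe {
		allocator.lock().claim(span).unwrap();
	}

	// `Talck` implements `GlobalAlloc`, so allocation requests need no wrapper type.
	let layout = Layout::from_size_align(1, 1).unwrap();
	let ptr = unsafe { allocator.alloc(layout) };
	assert!(!ptr.is_null());
	unsafe { allocator.dealloc(ptr, layout) };
}
```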