// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Shared memory management.

use super::dbm::{flush_dirty_range, mark_dirty_block, set_dbm_enabled};
use super::error::MemoryTrackerError;
use super::page_table::{PageTable, MMIO_LAZY_MAP_FLAG};
use super::util::{page_4kb_of, virt_to_phys};
use crate::console;
use crate::dsb;
use crate::exceptions::HandleExceptionError;
use crate::hyp::{self, get_mem_sharer, get_mmio_guard};
use crate::util::unchecked_align_down;
use crate::util::RangeExt as _;
use aarch64_paging::paging::{
    Attributes, Descriptor, MemoryRegion as VaRange, VirtualAddress, PAGE_SIZE,
};
use alloc::alloc::{alloc_zeroed, dealloc, handle_alloc_error};
use alloc::boxed::Box;
use alloc::collections::BTreeSet;
use alloc::vec::Vec;
use buddy_system_allocator::{FrameAllocator, LockedFrameAllocator};
use core::alloc::Layout;
use core::cmp::max;
use core::mem::size_of;
use core::num::NonZeroUsize;
use core::ops::Range;
use core::ptr::NonNull;
use core::result;
use log::{debug, error, trace};
use once_cell::race::OnceBox;
use spin::mutex::SpinMutex;
use tinyvec::ArrayVec;

/// A global static variable representing the system memory tracker, protected by a spin mutex.
pub static MEMORY: SpinMutex<Option<MemoryTracker>> = SpinMutex::new(None);

static SHARED_POOL: OnceBox<LockedFrameAllocator<32>> = OnceBox::new();
static SHARED_MEMORY: SpinMutex<Option<MemorySharer>> = SpinMutex::new(None);

/// Memory range.
pub type MemoryRange = Range<usize>;

fn get_va_range(range: &MemoryRange) -> VaRange {
    VaRange::new(range.start, range.end)
}

type Result<T> = result::Result<T, MemoryTrackerError>;

#[derive(Clone, Copy, Debug, Default, PartialEq)]
enum MemoryType {
    #[default]
    ReadOnly,
    ReadWrite,
}

#[derive(Clone, Debug, Default)]
struct MemoryRegion {
    range: MemoryRange,
    mem_type: MemoryType,
}

/// Tracks non-overlapping slices of main memory.
pub struct MemoryTracker {
    total: MemoryRange,
    page_table: PageTable,
    regions: ArrayVec<[MemoryRegion; MemoryTracker::CAPACITY]>,
    mmio_regions: ArrayVec<[MemoryRange; MemoryTracker::MMIO_CAPACITY]>,
    mmio_range: MemoryRange,
    payload_range: Option<MemoryRange>,
    mmio_sharer: MmioSharer,
}

impl MemoryTracker {
    const CAPACITY: usize = 5;
    const MMIO_CAPACITY: usize = 5;

    /// Creates a new instance from an active page table, covering the maximum RAM size.
    pub fn new(
        mut page_table: PageTable,
        total: MemoryRange,
        mmio_range: MemoryRange,
        payload_range: Option<Range<VirtualAddress>>,
    ) -> Self {
        assert!(
            !total.overlaps(&mmio_range),
            "MMIO space should not overlap with the main memory region."
        );

        // Activate dirty state management first, otherwise we may get permission faults immediately
        // after activating the new page table. This has no effect before the new page table is
        // activated because none of the entries in the initial idmap have the DBM flag.
        set_dbm_enabled(true);

        debug!("Activating dynamic page table...");
        // SAFETY: page_table duplicates the static mappings for everything that the Rust code is
        // aware of so activating it shouldn't have any visible effect.
        unsafe { page_table.activate() }
        debug!("... Success!");

        Self {
            total,
            page_table,
            regions: ArrayVec::new(),
            mmio_regions: ArrayVec::new(),
            mmio_range,
            payload_range: payload_range.map(|r| r.start.0..r.end.0),
            mmio_sharer: MmioSharer::new().unwrap(),
        }
    }
    /// Resizes the tracked RAM to the given total size.
    ///
    /// This function fails if the new range has a different base address, is larger than the
    /// current total, or excludes an already-tracked region.
    pub fn shrink(&mut self, range: &MemoryRange) -> Result<()> {
        if range.start != self.total.start {
            return Err(MemoryTrackerError::DifferentBaseAddress);
        }
        if self.total.end < range.end {
            return Err(MemoryTrackerError::SizeTooLarge);
        }
        if !self.regions.iter().all(|r| r.range.is_within(range)) {
            return Err(MemoryTrackerError::SizeTooSmall);
        }

        self.total = range.clone();
        Ok(())
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
    pub fn alloc_range(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_allocatable(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice.
    ///
    /// # Safety
    ///
    /// Callers of this method need to ensure that the `range` is valid for mapping as read-only
    /// data.
    pub unsafe fn alloc_range_outside_main_memory(
        &mut self,
        range: &MemoryRange,
    ) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadOnly };
        self.check_no_overlap(&region)?;
        self.page_table.map_rodata(&get_va_range(range)).map_err(|e| {
            error!("Error during range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_range_mut(&mut self, range: &MemoryRange) -> Result<MemoryRange> {
        let region = MemoryRegion { range: range.clone(), mem_type: MemoryType::ReadWrite };
        self.check_allocatable(&region)?;
        self.page_table.map_data_dbm(&get_va_range(range)).map_err(|e| {
            error!("Error during mutable range allocation: {e}");
            MemoryTrackerError::FailedToMap
        })?;
        self.add(region)
    }

    /// Allocates the address range for a const slice; returns an error if it fails.
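    ///
    /// A minimal usage sketch (illustrative only, not compiled; assumes the tracker has already
    /// been installed in `MEMORY` and that `base` and `len` describe a readable region of main
    /// memory):
    ///
    /// ```ignore
    /// let size = NonZeroUsize::new(len).unwrap();
    /// let range = MEMORY.lock().as_mut().unwrap().alloc(base, size)?;
    /// // The region is now mapped read-only through the identity mapping.
    /// ```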
    pub fn alloc(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range(&(base..(base + size.get())))
    }

    /// Allocates the address range for a mutable slice; returns an error if it fails.
    pub fn alloc_mut(&mut self, base: usize, size: NonZeroUsize) -> Result<MemoryRange> {
        self.alloc_range_mut(&(base..(base + size.get())))
    }

    /// Checks that the given range of addresses is within the MMIO region, and then maps it
    /// appropriately.
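    ///
    /// When an MMIO guard is present, the mapping is created lazily: the first access to the
    /// range faults and `handle_translation_fault()` resolves it via `handle_mmio_fault()`, which
    /// shares the faulting granule with the host and marks its page table entry valid.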
    pub fn map_mmio_range(&mut self, range: MemoryRange) -> Result<()> {
        if !range.is_within(&self.mmio_range) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        if self.mmio_regions.iter().any(|r| range.overlaps(r)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.mmio_regions.len() == self.mmio_regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }

        if get_mmio_guard().is_some() {
            self.page_table.map_device_lazy(&get_va_range(&range)).map_err(|e| {
                error!("Error during lazy MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        } else {
            self.page_table.map_device(&get_va_range(&range)).map_err(|e| {
                error!("Error during MMIO device mapping: {e}");
                MemoryTrackerError::FailedToMap
            })?;
        }

        if self.mmio_regions.try_push(range).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(())
    }

    /// Checks that the memory region meets the following criteria:
    /// - It is within the range of the `MemoryTracker`.
    /// - It does not overlap with any previously allocated regions.
    /// - The `regions` ArrayVec has sufficient capacity to add it.
    fn check_allocatable(&self, region: &MemoryRegion) -> Result<()> {
        if !region.range.is_within(&self.total) {
            return Err(MemoryTrackerError::OutOfRange);
        }
        self.check_no_overlap(region)
    }

    /// Checks that the given region doesn't overlap with any other previously allocated regions,
    /// and that the regions ArrayVec has capacity to add it.
    fn check_no_overlap(&self, region: &MemoryRegion) -> Result<()> {
        if self.regions.iter().any(|r| region.range.overlaps(&r.range)) {
            return Err(MemoryTrackerError::Overlaps);
        }
        if self.regions.len() == self.regions.capacity() {
            return Err(MemoryTrackerError::Full);
        }
        Ok(())
    }

    fn add(&mut self, region: MemoryRegion) -> Result<MemoryRange> {
        if self.regions.try_push(region).is_some() {
            return Err(MemoryTrackerError::Full);
        }

        Ok(self.regions.last().unwrap().range.clone())
    }

    /// Unshares any MMIO region previously shared with the MMIO guard.
    pub fn unshare_all_mmio(&mut self) -> Result<()> {
        self.mmio_sharer.unshare_all();

        Ok(())
    }

    /// Initializes the shared heap to dynamically share memory from the global allocator.
    pub fn init_dynamic_shared_pool(&mut self, granule: usize) -> Result<()> {
        const INIT_CAP: usize = 10;

        let previous = SHARED_MEMORY.lock().replace(MemorySharer::new(granule, INIT_CAP));
        if previous.is_some() {
            return Err(MemoryTrackerError::SharedMemorySetFailure);
        }

        SHARED_POOL
            .set(Box::new(LockedFrameAllocator::new()))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap from a static region of memory.
    ///
    /// Some hypervisors, such as Gunyah, do not support a MemShare API for the guest to share
    /// its memory with the host. Instead, they allow the host to designate part of guest memory
    /// as "shared" ahead of the guest starting its execution. The shared memory region is
    /// indicated in the swiotlb node. On such platforms, use a separate heap to allocate buffers
    /// that can be shared with the host.
    pub fn init_static_shared_pool(&mut self, range: Range<usize>) -> Result<()> {
        let size = NonZeroUsize::new(range.len()).unwrap();
        let range = self.alloc_mut(range.start, size)?;
        let shared_pool = LockedFrameAllocator::<32>::new();

        shared_pool.lock().insert(range);

        SHARED_POOL
            .set(Box::new(shared_pool))
            .map_err(|_| MemoryTrackerError::SharedPoolSetFailure)?;

        Ok(())
    }

    /// Initializes the shared heap to use heap memory directly.
    ///
    /// When running on "non-protected" hypervisors which permit direct host accesses to guest
    /// memory, there is no need to perform any memory sharing or to allocate buffers from a
    /// dedicated region, so this function instructs the shared pool to use the global allocator.
    pub fn init_heap_shared_pool(&mut self) -> Result<()> {
        // As MemorySharer only calls MEM_SHARE methods if the hypervisor supports them, internally
        // using init_dynamic_shared_pool() on a non-protected platform will make use of the heap
        // without any actual "dynamic memory sharing" taking place and, as such, the granule may
        // be set to that of the global allocator, i.e. a byte.
        self.init_dynamic_shared_pool(size_of::<u8>())
    }
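
    // How a caller might choose between the three shared-pool initializers (an illustrative
    // sketch only; `tracker`, `granule` and `swiotlb_range` are hypothetical names assumed to be
    // provided by the caller and are not defined in this module):
    //
    //     if get_mem_sharer().is_some() {
    //         tracker.init_dynamic_shared_pool(granule)?;
    //     } else if let Some(range) = swiotlb_range {
    //         tracker.init_static_shared_pool(range)?;
    //     } else {
    //         tracker.init_heap_shared_pool()?;
    //     }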

    /// Unshares any memory that may have been shared.
    pub fn unshare_all_memory(&mut self) {
        drop(SHARED_MEMORY.lock().take());
    }

    /// Handles a translation fault on a block flagged for lazy MMIO mapping by sharing it with
    /// the MMIO guard and marking the corresponding page table entry valid. Breaks apart a block
    /// entry if required.
    fn handle_mmio_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        let shared_range = self.mmio_sharer.share(addr)?;
        self.map_lazy_mmio_as_valid(&shared_range)?;

        Ok(())
    }

    /// Modifies the PTEs corresponding to a given range from (invalid) "lazy MMIO" to valid MMIO.
    ///
    /// Returns an error if any PTE in the range is not an invalid lazy MMIO mapping.
    fn map_lazy_mmio_as_valid(&mut self, page_range: &VaRange) -> Result<()> {
        // This must be safe and free from break-before-make (BBM) violations, given that the
        // initial lazy mapping has the valid bit cleared, and each valid descriptor created
        // inside the mapping has the same size and alignment.
        self.page_table
            .modify_range(page_range, &|_: &VaRange, desc: &mut Descriptor, _: usize| {
                let flags = desc.flags().expect("Unsupported PTE flags set");
                if flags.contains(MMIO_LAZY_MAP_FLAG) && !flags.contains(Attributes::VALID) {
                    desc.modify_flags(Attributes::VALID, Attributes::empty());
                    Ok(())
                } else {
                    Err(())
                }
            })
            .map_err(|_| MemoryTrackerError::InvalidPte)
    }

    /// Flushes all memory regions marked as writable-dirty.
    fn flush_dirty_pages(&mut self) -> Result<()> {
        // Collect memory ranges for which dirty state is tracked.
        let writable_regions =
            self.regions.iter().filter(|r| r.mem_type == MemoryType::ReadWrite).map(|r| &r.range);
        // Execute a barrier instruction to ensure all hardware updates to the page table have been
        // observed before reading PTE flags to determine dirty state.
        dsb!("ish");
        // Now flush writable-dirty pages in those regions.
        for range in writable_regions.chain(self.payload_range.as_ref().into_iter()) {
            self.page_table
                .walk_range(&get_va_range(range), &flush_dirty_range)
                .map_err(|_| MemoryTrackerError::FlushRegionFailed)?;
        }
        Ok(())
    }

    /// Handles a permission fault on a read-only block by setting the writable-dirty state.
    /// In general, this should be called from the exception handler when hardware dirty
    /// state management is disabled or unavailable.
    fn handle_permission_fault(&mut self, addr: VirtualAddress) -> Result<()> {
        self.page_table
            .modify_range(&(addr..addr + 1).into(), &mark_dirty_block)
            .map_err(|_| MemoryTrackerError::SetPteDirtyFailed)
    }
}

impl Drop for MemoryTracker {
    fn drop(&mut self) {
        set_dbm_enabled(false);
        self.flush_dirty_pages().unwrap();
        self.unshare_all_memory();
    }
}

struct MmioSharer {
    granule: usize,
    frames: BTreeSet<usize>,
}

impl MmioSharer {
    fn new() -> Result<Self> {
        let granule = Self::get_granule()?;
        let frames = BTreeSet::new();

        // Allows safely calling util::unchecked_align_down().
        assert!(granule.is_power_of_two());

        Ok(Self { granule, frames })
    }

    fn get_granule() -> Result<usize> {
        let Some(mmio_guard) = get_mmio_guard() else {
            return Ok(PAGE_SIZE);
        };
        match mmio_guard.granule()? {
            granule if granule % PAGE_SIZE == 0 => Ok(granule), // For good measure.
            granule => Err(MemoryTrackerError::UnsupportedMmioGuardGranule(granule)),
        }
    }

    /// Share the MMIO region aligned to the granule size containing addr (not validated as MMIO).
    fn share(&mut self, addr: VirtualAddress) -> Result<VaRange> {
        // This can't use virt_to_phys() since 0x0 is a valid MMIO address and we are ID-mapped.
        let phys = addr.0;
        let base = unchecked_align_down(phys, self.granule);

        // TODO(ptosi): Share the UART using this method and remove the hardcoded check.
        if self.frames.contains(&base) || base == page_4kb_of(console::BASE_ADDRESS) {
            return Err(MemoryTrackerError::DuplicateMmioShare(base));
        }

        if let Some(mmio_guard) = get_mmio_guard() {
            mmio_guard.map(base)?;
        }

        let inserted = self.frames.insert(base);
        assert!(inserted);

        let base_va = VirtualAddress(base);
        Ok((base_va..base_va + self.granule).into())
    }

    fn unshare_all(&mut self) {
        let Some(mmio_guard) = get_mmio_guard() else {
            return self.frames.clear();
        };

        while let Some(base) = self.frames.pop_first() {
            mmio_guard.unmap(base).unwrap();
        }
    }
}

impl Drop for MmioSharer {
    fn drop(&mut self) {
        self.unshare_all();
    }
}

/// Allocates a memory range of at least the given size and alignment that is shared with the host.
/// Returns a pointer to the buffer.
pub(crate) fn alloc_shared(layout: Layout) -> hyp::Result<NonNull<u8>> {
    assert_ne!(layout.size(), 0);
    let Some(buffer) = try_shared_alloc(layout) else {
        handle_alloc_error(layout);
    };

    trace!("Allocated shared buffer at {buffer:?} with {layout:?}");
    Ok(buffer)
}

fn try_shared_alloc(layout: Layout) -> Option<NonNull<u8>> {
    let mut shared_pool = SHARED_POOL.get().unwrap().lock();

    if let Some(buffer) = shared_pool.alloc_aligned(layout) {
        Some(NonNull::new(buffer as _).unwrap())
    } else if let Some(shared_memory) = SHARED_MEMORY.lock().as_mut() {
        // Adjusts the layout size to the max of the next power of two and the alignment,
        // as this is the actual size of the memory allocated in `alloc_aligned()`.
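        // For example (illustrative): a request of 5000 bytes with 8-byte alignment refills
        // 8192 bytes, since 5000usize.next_power_of_two() == 8192 and max(8192, 8) == 8192.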
        let size = max(layout.size().next_power_of_two(), layout.align());
        let refill_layout = Layout::from_size_align(size, layout.align()).unwrap();
        shared_memory.refill(&mut shared_pool, refill_layout);
        shared_pool.alloc_aligned(layout).map(|buffer| NonNull::new(buffer as _).unwrap())
    } else {
        None
    }
}

/// Unshares and deallocates a memory range which was previously allocated by `alloc_shared`.
///
/// The layout passed in must be the same layout passed to the original `alloc_shared` call.
///
/// # Safety
///
/// The memory must have been allocated by `alloc_shared` with the same layout, and not yet
/// deallocated.
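///
/// # Example
///
/// A paired usage sketch (illustrative only, not compiled; assumes the shared pool has been
/// initialized and that the caller propagates `hyp::Result`):
///
/// ```ignore
/// let layout = Layout::from_size_align(PAGE_SIZE, PAGE_SIZE).unwrap();
/// let buf = alloc_shared(layout)?;
/// // ... let the host read from or write to the buffer ...
/// // SAFETY: `buf` came from `alloc_shared` with this exact layout and has not been freed yet.
/// unsafe { dealloc_shared(buf, layout)? };
/// ```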
pub(crate) unsafe fn dealloc_shared(vaddr: NonNull<u8>, layout: Layout) -> hyp::Result<()> {
    SHARED_POOL.get().unwrap().lock().dealloc_aligned(vaddr.as_ptr() as usize, layout);

    trace!("Deallocated shared buffer at {vaddr:?} with {layout:?}");
    Ok(())
}

/// Allocates memory on the heap and shares it with the host.
///
/// Unshares all pages when dropped.
struct MemorySharer {
    granule: usize,
    frames: Vec<(usize, Layout)>,
}

impl MemorySharer {
    /// Constructs a new `MemorySharer` instance with the specified granule size and capacity.
    /// `granule` must be a power of 2.
    fn new(granule: usize, capacity: usize) -> Self {
        assert!(granule.is_power_of_two());
        Self { granule, frames: Vec::with_capacity(capacity) }
    }

    /// Gets a granule-aligned region that suits `hint` from the global allocator and shares it.
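    ///
    /// For example (illustrative): with a 4 KiB granule, a 16-byte `hint` is rounded up to a
    /// single zeroed 4 KiB allocation aligned to 4096 bytes.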
    fn refill(&mut self, pool: &mut FrameAllocator<32>, hint: Layout) {
        let layout = hint.align_to(self.granule).unwrap().pad_to_align();
        assert_ne!(layout.size(), 0);
        // SAFETY: layout has non-zero size.
        let Some(shared) = NonNull::new(unsafe { alloc_zeroed(layout) }) else {
            handle_alloc_error(layout);
        };

        let base = shared.as_ptr() as usize;
        let end = base.checked_add(layout.size()).unwrap();

        if let Some(mem_sharer) = get_mem_sharer() {
            trace!("Sharing memory region {:#x?}", base..end);
            for vaddr in (base..end).step_by(self.granule) {
                let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                mem_sharer.share(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
            }
        }

        self.frames.push((base, layout));
        pool.add_frame(base, end);
    }
}

impl Drop for MemorySharer {
    fn drop(&mut self) {
        while let Some((base, layout)) = self.frames.pop() {
            if let Some(mem_sharer) = get_mem_sharer() {
                let end = base.checked_add(layout.size()).unwrap();
                trace!("Unsharing memory region {:#x?}", base..end);
                for vaddr in (base..end).step_by(self.granule) {
                    let vaddr = NonNull::new(vaddr as *mut _).unwrap();
                    mem_sharer.unshare(virt_to_phys(vaddr).try_into().unwrap()).unwrap();
                }
            }

            // SAFETY: The region was obtained from alloc_zeroed() with the recorded layout.
            unsafe { dealloc(base as *mut _, layout) };
        }
    }
}

/// Handles a translation fault with the given fault address register (FAR).
#[inline]
pub fn handle_translation_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_mmio_fault(far)?)
}

/// Handles a permission fault with the given fault address register (FAR).
#[inline]
pub fn handle_permission_fault(far: VirtualAddress) -> result::Result<(), HandleExceptionError> {
    let mut guard = MEMORY.try_lock().ok_or(HandleExceptionError::PageTableUnavailable)?;
    let memory = guard.as_mut().ok_or(HandleExceptionError::PageTableNotInitialized)?;
    Ok(memory.handle_permission_fault(far)?)
}