// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Page table management.

use crate::read_sysreg;
use aarch64_paging::idmap::IdMap;
use aarch64_paging::paging::{Attributes, Constraints, Descriptor, MemoryRegion};
use aarch64_paging::MapError;
use core::result;

/// Software bit used to indicate a device that should be lazily mapped.
pub(super) const MMIO_LAZY_MAP_FLAG: Attributes = Attributes::SWFLAG_0;

// We assume that:
// - MAIR_EL1.Attr0 = "Device-nGnRE memory" (0b0000_0100)
// - MAIR_EL1.Attr1 = "Normal memory, Outer & Inner WB Non-transient, R/W-Allocate" (0b1111_1111)
const MEMORY: Attributes =
    Attributes::VALID.union(Attributes::NORMAL).union(Attributes::NON_GLOBAL);
const DEVICE_LAZY: Attributes =
    MMIO_LAZY_MAP_FLAG.union(Attributes::DEVICE_NGNRE).union(Attributes::EXECUTE_NEVER);
const DEVICE: Attributes = DEVICE_LAZY.union(Attributes::VALID);
const CODE: Attributes = MEMORY.union(Attributes::READ_ONLY);
const DATA: Attributes = MEMORY.union(Attributes::EXECUTE_NEVER);
const RODATA: Attributes = DATA.union(Attributes::READ_ONLY);
const DATA_DBM: Attributes = RODATA.union(Attributes::DBM);

type Result<T> = result::Result<T, MapError>;

/// High-level API for managing MMU mappings.
pub struct PageTable {
    idmap: IdMap,
}

impl From<IdMap> for PageTable {
    fn from(idmap: IdMap) -> Self {
        Self { idmap }
    }
}

impl Default for PageTable {
    fn default() -> Self {
        const TCR_EL1_TG0_MASK: usize = 0x3;
        const TCR_EL1_TG0_SHIFT: u32 = 14;
        const TCR_EL1_TG0_SIZE_4KB: usize = 0b00;

        const TCR_EL1_T0SZ_MASK: usize = 0x3f;
        const TCR_EL1_T0SZ_SHIFT: u32 = 0;
        const TCR_EL1_T0SZ_39_VA_BITS: usize = 64 - 39;

        // Ensure that entry.S wasn't changed without updating the assumptions about TCR_EL1 here.
        let tcr_el1 = read_sysreg!("tcr_el1");
        assert_eq!((tcr_el1 >> TCR_EL1_TG0_SHIFT) & TCR_EL1_TG0_MASK, TCR_EL1_TG0_SIZE_4KB);
        assert_eq!((tcr_el1 >> TCR_EL1_T0SZ_SHIFT) & TCR_EL1_T0SZ_MASK, TCR_EL1_T0SZ_39_VA_BITS);

        IdMap::new(Self::ASID, Self::ROOT_LEVEL).into()
    }
}
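// The function below is an illustrative sketch, not code used by this module:
// it shows how a caller might construct the default table, map the segments of
// a hypothetical image layout, and activate the result. All addresses are
// made-up placeholders, and constructing the table assumes the TCR_EL1
// configuration established by entry.S, as asserted in `default()` above.
#[allow(dead_code)]
fn example_boot_setup() -> Result<()> {
    let mut page_table = PageTable::default();
    // Code must be mapped read-only (but executable), data non-executable.
    page_table.map_code(&MemoryRegion::new(0x8000_0000, 0x8010_0000))?;
    page_table.map_rodata(&MemoryRegion::new(0x8010_0000, 0x8020_0000))?;
    page_table.map_data(&MemoryRegion::new(0x8020_0000, 0x8040_0000))?;
    // Record the MMIO mapping now but leave it to be validated on first access.
    page_table.map_device_lazy(&MemoryRegion::new(0x0900_0000, 0x0900_1000))?;
    // SAFETY: Sound only if the ranges above identity-map the currently
    // executing code, which this sketch assumes.
    unsafe { page_table.activate() };
    Ok(())
}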
impl PageTable {
    /// ASID used for the underlying page table.
    pub const ASID: usize = 1;

    /// Level of the underlying page table's root page.
    const ROOT_LEVEL: usize = 1;

    /// Activates the page table.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the PageTable instance has valid and identical mappings for the
    /// code being currently executed. Otherwise, the Rust execution model (on which the borrow
    /// checker relies) would be violated.
    pub unsafe fn activate(&mut self) {
        // SAFETY: The caller of this unsafe function asserts that switching to a different
        // translation is safe.
        unsafe { self.idmap.activate() }
    }

    /// Maps the given range of virtual addresses to the physical addresses as lazily mapped
    /// nGnRE device memory.
    pub fn map_device_lazy(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, DEVICE_LAZY)
    }

    /// Maps the given range of virtual addresses to the physical addresses as valid
    /// nGnRE device memory.
    pub fn map_device(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, DEVICE)
    }

    /// Maps the given range of virtual addresses to the physical addresses as non-executable
    /// and writable normal memory.
    pub fn map_data(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, DATA)
    }

    /// Maps the given range of virtual addresses to the physical addresses as non-executable,
    /// read-only and writable-clean normal memory.
    pub fn map_data_dbm(&mut self, range: &MemoryRegion) -> Result<()> {
        // Map the region down to pages to minimize the size of the regions that will be marked
        // dirty once a store hits them, but also to ensure that we can clear the read-only
        // attribute while the mapping is live without causing break-before-make (BBM) violations.
        // The latter implies that we must avoid the use of the contiguous hint as well.
        self.idmap.map_range_with_constraints(
            range,
            DATA_DBM,
            Constraints::NO_BLOCK_MAPPINGS | Constraints::NO_CONTIGUOUS_HINT,
        )
    }

    /// Maps the given range of virtual addresses to the physical addresses as read-only,
    /// executable normal memory.
    pub fn map_code(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, CODE)
    }

    /// Maps the given range of virtual addresses to the physical addresses as non-executable
    /// and read-only normal memory.
    pub fn map_rodata(&mut self, range: &MemoryRegion) -> Result<()> {
        self.idmap.map_range(range, RODATA)
    }

    /// Applies the provided updater function to a number of PTEs corresponding to a given memory
    /// range.
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<()>
    where
        F: Fn(&MemoryRegion, &mut Descriptor, usize) -> result::Result<(), ()>,
    {
        self.idmap.modify_range(range, f)
    }

    /// Applies the provided callback function to a number of PTEs corresponding to a given memory
    /// range.
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &F) -> Result<()>
    where
        F: Fn(&MemoryRegion, &Descriptor, usize) -> result::Result<(), ()>,
    {
        let mut callback = |mr: &MemoryRegion, d: &Descriptor, l: usize| f(mr, d, l);
        self.idmap.walk_range(range, &mut callback)
    }
}
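// Illustrative sketch only: how a permission-fault handler might use
// `modify_range` to move a writable-clean PTE created by `map_data_dbm` to the
// writable-dirty state by clearing its read-only attribute in place. The
// surrounding fault handling (fault-address decoding, TLB invalidation) is
// assumed to happen elsewhere, and `fault_range` is a hypothetical single-page
// region derived from the faulting address.
#[allow(dead_code)]
fn example_mark_dirty(page_table: &mut PageTable, fault_range: &MemoryRegion) -> Result<()> {
    page_table.modify_range(fault_range, &|_: &MemoryRegion, desc: &mut Descriptor, _: usize| {
        let flags = desc.flags().ok_or(())?;
        // Only touch PTEs that opted into hardware dirty state management.
        if flags.contains(Attributes::DBM) {
            // Clearing READ_ONLY on a live mapping avoids BBM violations here
            // because map_data_dbm forced page-level, non-contiguous mappings.
            desc.modify_flags(Attributes::empty(), Attributes::READ_ONLY);
            Ok(())
        } else {
            Err(())
        }
    })
}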