// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Low-level entry and exit points of pvmfw.
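//!
//! Boot flow: vmbase's entry code calls start(), which delegates to main_wrapper() to map
//! memory, parse the appended configuration and device tree, and run crate::main(); on
//! success, control is handed over to the payload through jump_to_payload().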

use crate::config;
use crate::fdt;
use crate::memory;
use bssl_sys::CRYPTO_library_init;
use core::arch::asm;
use core::mem::{drop, size_of};
use core::num::NonZeroUsize;
use core::ops::Range;
use core::slice;
use log::debug;
use log::error;
use log::info;
use log::warn;
use log::LevelFilter;
use vmbase::util::RangeExt as _;
use vmbase::{
    configure_heap, console,
    hyp::{get_mem_sharer, get_mmio_guard},
    layout::{self, crosvm},
    main,
    memory::{min_dcache_line_size, MemoryTracker, MEMORY, SIZE_128KB, SIZE_4KB},
    power::reboot,
};
use zeroize::Zeroize;

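/// Reason why the pVM boot was aborted, causing pvmfw to reboot instead of starting the payload.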
#[derive(Debug, Clone)]
pub enum RebootReason {
    /// A malformed BCC was received.
    InvalidBcc,
    /// An invalid configuration was appended to pvmfw.
    InvalidConfig,
    /// An unexpected internal error happened.
    InternalError,
    /// The provided FDT was invalid.
    InvalidFdt,
    /// The provided payload was invalid.
    InvalidPayload,
    /// The provided ramdisk was invalid.
    InvalidRamdisk,
    /// Failed to verify the payload.
    PayloadVerificationError,
    /// DICE layering process failed.
    SecretDerivationError,
}

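// vmbase's entry point calls start() below; configure_heap! gives pvmfw a 128 KiB heap.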
main!(start);
configure_heap!(SIZE_128KB);

/// Entry point for pVM firmware.
pub fn start(fdt_address: u64, payload_start: u64, payload_size: u64, _arg3: u64) {
    // Limitations in this function:
    // - can't access non-pvmfw memory (only statically-mapped memory)
    // - can't access MMIO (therefore, no logging)

    match main_wrapper(fdt_address as usize, payload_start as usize, payload_size as usize) {
        Ok((entry, bcc)) => jump_to_payload(fdt_address, entry.try_into().unwrap(), bcc),
        Err(_) => reboot(), // TODO(b/220071963) propagate the reason back to the host.
    }

    // If we reach this point and return, vmbase::entry::rust_entry() will call power::shutdown().
}

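/// Slices of main memory (FDT, kernel image and optional ramdisk) validated and reserved
/// through the global `MemoryTracker`.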
struct MemorySlices<'a> {
    fdt: &'a mut libfdt::Fdt,
    kernel: &'a [u8],
    ramdisk: Option<&'a [u8]>,
}

impl<'a> MemorySlices<'a> {
    fn new(
        fdt: usize,
        kernel: usize,
        kernel_size: usize,
        vm_dtbo: Option<&mut [u8]>,
        vm_ref_dt: Option<&[u8]>,
    ) -> Result<Self, RebootReason> {
        let fdt_size = NonZeroUsize::new(crosvm::FDT_MAX_SIZE).unwrap();
        // TODO - Only map the FDT as read-only, until we modify it right before jump_to_payload()
        // e.g. by generating a DTBO for a template DT in main() and, on return, re-map DT as RW,
        // overwrite with the template DT and apply the DTBO.
        let range = MEMORY.lock().as_mut().unwrap().alloc_mut(fdt, fdt_size).map_err(|e| {
            error!("Failed to allocate the FDT range: {e}");
            RebootReason::InternalError
        })?;

        // SAFETY: The tracker validated the range to be in main memory, mapped and
        // non-overlapping.
        let fdt = unsafe { slice::from_raw_parts_mut(range.start as *mut u8, range.len()) };

        let info = fdt::sanitize_device_tree(fdt, vm_dtbo, vm_ref_dt)?;
        let fdt = libfdt::Fdt::from_mut_slice(fdt).map_err(|e| {
            error!("Failed to load sanitized FDT: {e}");
            RebootReason::InvalidFdt
        })?;
        debug!("Fdt passed validation!");

        let memory_range = info.memory_range;
        debug!("Resizing MemoryTracker to range {memory_range:#x?}");
        MEMORY.lock().as_mut().unwrap().shrink(&memory_range).map_err(|e| {
            error!("Failed to use memory range value from DT: {memory_range:#x?}: {e}");
            RebootReason::InvalidFdt
        })?;

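        // Prefer the hypervisor's dynamic memory sharing, falling back to the static
        // swiotlb range declared in the DT when no memory sharer is available.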
        if let Some(mem_sharer) = get_mem_sharer() {
            let granule = mem_sharer.granule().map_err(|e| {
                error!("Failed to get memory protection granule: {e}");
                RebootReason::InternalError
            })?;
            MEMORY.lock().as_mut().unwrap().init_dynamic_shared_pool(granule).map_err(|e| {
                error!("Failed to initialize dynamically shared pool: {e}");
                RebootReason::InternalError
            })?;
        } else {
            let range = info.swiotlb_info.fixed_range().ok_or_else(|| {
                error!("Pre-shared pool range not specified in swiotlb node");
                RebootReason::InvalidFdt
            })?;

            MEMORY.lock().as_mut().unwrap().init_static_shared_pool(range).map_err(|e| {
                error!("Failed to initialize pre-shared pool: {e}");
                RebootReason::InvalidFdt
            })?;
        }

        let kernel_range = if let Some(r) = info.kernel_range {
            MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
                error!("Failed to obtain the kernel range with DT range: {e}");
                RebootReason::InternalError
            })?
        } else if cfg!(feature = "legacy") {
            warn!("Failed to find the kernel range in the DT; falling back to legacy ABI");

            let kernel_size = NonZeroUsize::new(kernel_size).ok_or_else(|| {
                error!("Invalid kernel size: {kernel_size:#x}");
                RebootReason::InvalidPayload
            })?;

            MEMORY.lock().as_mut().unwrap().alloc(kernel, kernel_size).map_err(|e| {
                error!("Failed to obtain the kernel range with legacy range: {e}");
                RebootReason::InternalError
            })?
        } else {
            error!("Failed to locate the kernel from the DT");
            return Err(RebootReason::InvalidPayload);
        };

        let kernel = kernel_range.start as *const u8;
        // SAFETY: The tracker validated the range to be in main memory, mapped and
        // non-overlapping.
        let kernel = unsafe { slice::from_raw_parts(kernel, kernel_range.len()) };

        let ramdisk = if let Some(r) = info.initrd_range {
            debug!("Located ramdisk at {r:?}");
            let r = MEMORY.lock().as_mut().unwrap().alloc_range(&r).map_err(|e| {
                error!("Failed to obtain the initrd range: {e}");
                RebootReason::InvalidRamdisk
            })?;

            // SAFETY: The tracker validated the region to be in main memory, mapped and
            // non-overlapping.
            Some(unsafe { slice::from_raw_parts(r.start as *const u8, r.len()) })
        } else {
            info!("Couldn't locate the ramdisk from the device tree");
            None
        };

        Ok(Self { fdt, kernel, ramdisk })
    }
}

/// Sets up the environment for main() and wraps its result for start().
///
/// Provides the abstractions necessary for start() to abort the pVM boot and for main() to run
/// with the assumption that its environment has been properly configured.
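///
/// On success, returns the address of the payload entry point and the region holding the
/// next-stage BCC.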
fn main_wrapper(
    fdt: usize,
    payload: usize,
    payload_size: usize,
) -> Result<(usize, Range<usize>), RebootReason> {
    // Limitations in this function:
    // - only access MMIO once (and while) it has been mapped and configured
    // - only perform logging once the logger has been initialized
    // - only access non-pvmfw memory once (and while) it has been mapped

    log::set_max_level(LevelFilter::Info);
    // TODO(https://crbug.com/boringssl/35): Remove this init when BoringSSL can handle this
    // internally.
    // SAFETY: Configures the internal state of the library - may be called multiple times.
    unsafe {
        CRYPTO_library_init();
    }

    let page_table = memory::init_page_table().map_err(|e| {
        error!("Failed to set up the dynamic page tables: {e}");
        RebootReason::InternalError
    })?;

    // SAFETY: We only get the appended payload from here, once. The region was statically mapped,
    // then remapped by `init_page_table()`.
    let appended_data = unsafe { get_appended_data_slice() };

    let appended = AppendedPayload::new(appended_data).ok_or_else(|| {
        error!("No valid configuration found");
        RebootReason::InvalidConfig
    })?;

    let config_entries = appended.get_entries();

    // Up to this point, we were using the built-in static (from .rodata) page tables.
    MEMORY.lock().replace(MemoryTracker::new(
        page_table,
        crosvm::MEM_START..layout::MAX_VIRT_ADDR,
        crosvm::MMIO_RANGE,
        Some(memory::appended_payload_range()),
    ));

    let slices = MemorySlices::new(
        fdt,
        payload,
        payload_size,
        config_entries.vm_dtbo,
        config_entries.vm_ref_dt,
    )?;

    // This wrapper allows main() to be blissfully ignorant of platform details.
    let next_bcc = crate::main(
        slices.fdt,
        slices.kernel,
        slices.ramdisk,
        config_entries.bcc,
        config_entries.debug_policy,
    )?;

    // Writable-dirty regions will be flushed when MemoryTracker is dropped.
    config_entries.bcc.zeroize();

    info!("Expecting a bug making MMIO_GUARD_UNMAP return NOT_SUPPORTED on success");
    MEMORY.lock().as_mut().unwrap().unshare_all_mmio().map_err(|e| {
        error!("Failed to unshare MMIO ranges: {e}");
        RebootReason::InternalError
    })?;
    // Call unshare_all_memory here (instead of relying on the dtor) while UART is still mapped.
    MEMORY.lock().as_mut().unwrap().unshare_all_memory();
    if let Some(mmio_guard) = get_mmio_guard() {
        mmio_guard.unmap(console::BASE_ADDRESS).map_err(|e| {
            error!("Failed to unshare the UART: {e}");
            RebootReason::InternalError
        })?;
    }

    // Drop MemoryTracker and deactivate page table.
    drop(MEMORY.lock().take());

    Ok((slices.kernel.as_ptr() as usize, next_bcc))
}

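/// Zeroes pvmfw memory that may contain secrets, disables the MMU and the data cache, and
/// branches to the payload entry point, passing the FDT address in x0. Does not return.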
fn jump_to_payload(fdt_address: u64, payload_start: u64, bcc: Range<usize>) -> ! {
    const ASM_STP_ALIGN: usize = size_of::<u64>() * 2;
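    // RES1 (reserved, set-to-1) bits of SCTLR_EL1: bits 29, 28, 22, 20 and 11.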
    const SCTLR_EL1_RES1: u64 = (0b11 << 28) | (0b101 << 20) | (0b1 << 11);
    // Stage 1 instruction access cacheability is unaffected.
    const SCTLR_EL1_I: u64 = 0b1 << 12;
    // SETEND instruction disabled at EL0 in aarch32 mode.
    const SCTLR_EL1_SED: u64 = 0b1 << 8;
    // Various IT instructions are disabled at EL0 in aarch32 mode.
    const SCTLR_EL1_ITD: u64 = 0b1 << 7;

    const SCTLR_EL1_VAL: u64 = SCTLR_EL1_RES1 | SCTLR_EL1_ITD | SCTLR_EL1_SED | SCTLR_EL1_I;

    let scratch = layout::scratch_range();

    assert_ne!(scratch.end - scratch.start, 0, "scratch memory is empty.");
    assert_eq!(scratch.start.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");
    assert_eq!(scratch.end.0 % ASM_STP_ALIGN, 0, "scratch memory is misaligned.");

    assert!(bcc.is_within(&(scratch.start.0..scratch.end.0)));
    assert_eq!(bcc.start % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");
    assert_eq!(bcc.end % ASM_STP_ALIGN, 0, "Misaligned guest BCC.");

    let stack = memory::stack_range();

    assert_ne!(stack.end - stack.start, 0, "stack region is empty.");
    assert_eq!(stack.start.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");
    assert_eq!(stack.end.0 % ASM_STP_ALIGN, 0, "Misaligned stack region.");

    // Zero all memory that could hold secrets and that can't be safely written to from Rust.
    // Disable the exception vector, caches and page table and then jump to the payload at the
    // given address, passing it the given FDT pointer.
    //
    // SAFETY: We're exiting pvmfw by passing the register values we need to a noreturn asm!().
    unsafe {
        asm!(
            "cmp {scratch}, {bcc}",
            "b.hs 1f",

            // Zero .data & .bss until BCC.
            "0: stp xzr, xzr, [{scratch}], 16",
            "cmp {scratch}, {bcc}",
            "b.lo 0b",

            "1:",
            // Skip BCC.
            "mov {scratch}, {bcc_end}",
            "cmp {scratch}, {scratch_end}",
            "b.hs 1f",

            // Keep zeroing .data & .bss.
            "0: stp xzr, xzr, [{scratch}], 16",
            "cmp {scratch}, {scratch_end}",
            "b.lo 0b",

            "1:",
            // Flush d-cache over .data & .bss (including BCC).
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {scratch_end}",
            "b.lo 0b",

            "mov {cache_line}, {stack}",
            // Zero stack region.
            "0: stp xzr, xzr, [{stack}], 16",
            "cmp {stack}, {stack_end}",
            "b.lo 0b",

            // Flush d-cache over stack region.
            "0: dc cvau, {cache_line}",
            "add {cache_line}, {cache_line}, {dcache_line_size}",
            "cmp {cache_line}, {stack_end}",
            "b.lo 0b",

            "msr sctlr_el1, {sctlr_el1_val}",
            "isb",
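            // Scrub the remaining general-purpose registers so no pvmfw state leaks to the
            // payload: x0 holds the FDT address and x30 the payload entry point.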
            "mov x1, xzr",
            "mov x2, xzr",
            "mov x3, xzr",
            "mov x4, xzr",
            "mov x5, xzr",
            "mov x6, xzr",
            "mov x7, xzr",
            "mov x8, xzr",
            "mov x9, xzr",
            "mov x10, xzr",
            "mov x11, xzr",
            "mov x12, xzr",
            "mov x13, xzr",
            "mov x14, xzr",
            "mov x15, xzr",
            "mov x16, xzr",
            "mov x17, xzr",
            "mov x18, xzr",
            "mov x19, xzr",
            "mov x20, xzr",
            "mov x21, xzr",
            "mov x22, xzr",
            "mov x23, xzr",
            "mov x24, xzr",
            "mov x25, xzr",
            "mov x26, xzr",
            "mov x27, xzr",
            "mov x28, xzr",
            "mov x29, xzr",
            "msr ttbr0_el1, xzr",
            // Ensure that CMOs have completed before entering payload.
            "dsb nsh",
            "br x30",
            sctlr_el1_val = in(reg) SCTLR_EL1_VAL,
            bcc = in(reg) u64::try_from(bcc.start).unwrap(),
            bcc_end = in(reg) u64::try_from(bcc.end).unwrap(),
            cache_line = in(reg) u64::try_from(scratch.start.0).unwrap(),
            scratch = in(reg) u64::try_from(scratch.start.0).unwrap(),
            scratch_end = in(reg) u64::try_from(scratch.end.0).unwrap(),
            stack = in(reg) u64::try_from(stack.start.0).unwrap(),
            stack_end = in(reg) u64::try_from(stack.end.0).unwrap(),
            dcache_line_size = in(reg) u64::try_from(min_dcache_line_size()).unwrap(),
            in("x0") fdt_address,
            in("x30") payload_start,
            options(noreturn),
        );
    };
}

/// # Safety
///
/// This must only be called once, since we are returning a mutable reference.
/// The appended data region must be mapped.
unsafe fn get_appended_data_slice() -> &'static mut [u8] {
    let range = memory::appended_payload_range();
    // SAFETY: This region is mapped and the linker script prevents it from overlapping with other
    // objects.
    unsafe { slice::from_raw_parts_mut(range.start.0 as *mut u8, range.end - range.start) }
}

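/// Data appended to the pvmfw binary: either a structured configuration blob or, on legacy
/// Android T-era hosts, a raw BCC.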
enum AppendedPayload<'a> {
    /// Configuration data.
    Config(config::Config<'a>),
    /// Deprecated raw BCC, as used in Android T.
    LegacyBcc(&'a mut [u8]),
}

impl<'a> AppendedPayload<'a> {
    fn new(data: &'a mut [u8]) -> Option<Self> {
        // The borrow checker gets confused about the ownership of data (see inline comments) so we
        // intentionally obfuscate it using a raw pointer; see a similar issue (still not addressed
        // in v1.77) in https://users.rust-lang.org/t/78467.
        let data_ptr = data as *mut [u8];

        // Config::new() borrows data as mutable ...
        match config::Config::new(data) {
            // ... so this branch has a mutable reference to data, from the Ok(Config<'a>). But ...
            Ok(valid) => Some(Self::Config(valid)),
            // ... if Config::new(data).is_err(), the Err holds no ref to data. However ...
            Err(config::Error::InvalidMagic) if cfg!(feature = "legacy") => {
                // ... the borrow checker still complains about a second mutable ref without this.
                // SAFETY: Pointer to a valid mut (not accessed elsewhere), 'a lifetime re-used.
                let data: &'a mut _ = unsafe { &mut *data_ptr };

                const BCC_SIZE: usize = SIZE_4KB;
                warn!("Assuming the appended data at {:?} to be a raw BCC", data.as_ptr());
                Some(Self::LegacyBcc(&mut data[..BCC_SIZE]))
            }
            Err(e) => {
                error!("Invalid configuration data at {data_ptr:?}: {e}");
                None
            }
        }
    }

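    /// Consumes self, returning the contained config entries; for a legacy BCC, only the
    /// `bcc` field is populated.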
    fn get_entries(self) -> config::Entries<'a> {
        match self {
            Self::Config(cfg) => cfg.get_entries(),
            Self::LegacyBcc(bcc) => config::Entries { bcc, ..Default::default() },
        }
    }
}