1 // Copyright 2022, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //     http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 
15 //! Support for the pvmfw configuration data format.
16 
17 use core::fmt;
18 use core::mem;
19 use core::num::NonZeroUsize;
20 use core::ops::Range;
21 use core::result;
22 use log::{info, warn};
23 use static_assertions::const_assert_eq;
24 use vmbase::util::RangeExt;
25 use zerocopy::{FromBytes, FromZeroes};
26 
27 /// Configuration data header.
/// Configuration data header.
///
/// Matches the raw byte layout at the start of the configuration blob: it is
/// parsed in place via zerocopy (`FromBytes`), hence `#[repr(C, packed)]`.
/// Packed fields must be copied to locals before use to avoid unaligned
/// references.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, FromZeroes, FromBytes)]
struct Header {
    /// Magic number; must be `Header::MAGIC`.
    magic: u32,
    /// Version of the header format.
    version: Version,
    /// Total size of the configuration data.
    total_size: u32,
    /// Feature flags; currently reserved and must be zero.
    flags: u32,
}
40 
/// Errors that can occur while parsing the pvmfw configuration data.
#[derive(Debug)]
pub enum Error {
    /// Reserved region can't fit configuration header.
    BufferTooSmall,
    /// Header has the wrong alignment.
    HeaderMisaligned,
    /// Header doesn't contain the expected magic value.
    InvalidMagic,
    /// Version of the header isn't supported.
    UnsupportedVersion(Version),
    /// Header describes configuration data that doesn't fit in the expected buffer.
    InvalidSize(usize),
    /// Header entry is missing.
    MissingEntry(Entry),
    /// Range described by entry does not fit within config data.
    EntryOutOfBounds(Entry, Range<usize>, Range<usize>),
    /// Entries are out of order.
    EntryOutOfOrder,
}
60 
61 impl fmt::Display for Error {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result62     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
63         match self {
64             Self::BufferTooSmall => write!(f, "Reserved region is smaller than config header"),
65             Self::HeaderMisaligned => write!(f, "Reserved region is misaligned"),
66             Self::InvalidMagic => write!(f, "Wrong magic number"),
67             Self::UnsupportedVersion(v) => write!(f, "Version {v} not supported"),
68             Self::InvalidSize(sz) => write!(f, "Total size ({sz:#x}) overflows reserved region"),
69             Self::MissingEntry(entry) => write!(f, "Mandatory {entry:?} entry is missing"),
70             Self::EntryOutOfBounds(entry, range, limits) => {
71                 write!(
72                     f,
73                     "Entry {entry:?} out of bounds: {range:#x?} must be within range {limits:#x?}"
74                 )
75             }
76             Self::EntryOutOfOrder => write!(f, "Entries are out of order"),
77         }
78     }
79 }
80 
81 pub type Result<T> = result::Result<T, Error>;
82 
83 impl Header {
84     const MAGIC: u32 = u32::from_ne_bytes(*b"pvmf");
85     const VERSION_1_0: Version = Version { major: 1, minor: 0 };
86     const VERSION_1_1: Version = Version { major: 1, minor: 1 };
87     const VERSION_1_2: Version = Version { major: 1, minor: 2 };
88 
total_size(&self) -> usize89     pub fn total_size(&self) -> usize {
90         self.total_size as usize
91     }
92 
body_lowest_bound(&self) -> Result<usize>93     pub fn body_lowest_bound(&self) -> Result<usize> {
94         let entries_offset = mem::size_of::<Self>();
95 
96         // Ensure that the entries are properly aligned and do not require padding.
97         const_assert_eq!(mem::align_of::<Header>() % mem::align_of::<HeaderEntry>(), 0);
98         const_assert_eq!(mem::size_of::<Header>() % mem::align_of::<HeaderEntry>(), 0);
99 
100         let entries_size = self.entry_count()?.checked_mul(mem::size_of::<HeaderEntry>()).unwrap();
101 
102         Ok(entries_offset.checked_add(entries_size).unwrap())
103     }
104 
entry_count(&self) -> Result<usize>105     pub fn entry_count(&self) -> Result<usize> {
106         let last_entry = match self.version {
107             Self::VERSION_1_0 => Entry::DebugPolicy,
108             Self::VERSION_1_1 => Entry::VmDtbo,
109             Self::VERSION_1_2 => Entry::VmBaseDtbo,
110             v @ Version { major: 1, .. } => {
111                 const LATEST: Version = Header::VERSION_1_2;
112                 warn!("Parsing unknown config data version {v} as version {LATEST}");
113                 return Ok(Entry::COUNT);
114             }
115             v => return Err(Error::UnsupportedVersion(v)),
116         };
117 
118         Ok(last_entry as usize + 1)
119     }
120 }
121 
/// Identifiers for the entries of the configuration data.
///
/// The discriminant doubles as the entry's index in the header entry table,
/// so the variant order must match the on-disk layout.
#[derive(Clone, Copy, Debug)]
pub enum Entry {
    /// BCC blob; the only mandatory entry (enforced by `Config::new`).
    Bcc,
    /// Optional debug policy blob.
    DebugPolicy,
    /// Optional VM DTBO blob.
    VmDtbo,
    /// Optional base DTBO for the VM; surfaced as `Entries::vm_ref_dt`.
    VmBaseDtbo,
    /// Sentinel used only to count the real variants above.
    #[allow(non_camel_case_types)] // TODO: Use mem::variant_count once stable.
    _VARIANT_COUNT,
}
131 
impl Entry {
    /// Number of real (non-sentinel) entry kinds.
    const COUNT: usize = Self::_VARIANT_COUNT as usize;

    /// All real entries, listed in header-table order.
    const ALL_ENTRIES: [Entry; Self::COUNT] =
        [Self::Bcc, Self::DebugPolicy, Self::VmDtbo, Self::VmBaseDtbo];
}
138 
/// Borrowed slices of the individual configuration blobs, as produced by
/// `Config::get_entries`.
#[derive(Default)]
pub struct Entries<'a> {
    /// BCC blob; always present as the entry is mandatory.
    pub bcc: &'a mut [u8],
    /// Optional debug policy blob.
    pub debug_policy: Option<&'a [u8]>,
    /// Optional VM DTBO blob.
    pub vm_dtbo: Option<&'a mut [u8]>,
    /// Optional VM reference DT (the `Entry::VmBaseDtbo` blob).
    pub vm_ref_dt: Option<&'a [u8]>,
}
146 
/// Location of one entry blob, as stored in the header entry table.
#[repr(packed)]
#[derive(Clone, Copy, Debug, FromZeroes, FromBytes)]
struct HeaderEntry {
    /// Offset of the blob, relative to the start of the configuration data.
    offset: u32,
    /// Size of the blob in bytes; zero marks the entry as absent.
    size: u32,
}
153 
/// Header format version, displayed as `major.minor`.
#[repr(C, packed)]
#[derive(Clone, Copy, Debug, Eq, FromZeroes, FromBytes, PartialEq)]
pub struct Version {
    // NOTE: `minor` is stored first; the field order matches the raw byte
    // layout parsed by zerocopy — do not reorder.
    minor: u16,
    major: u16,
}
160 
161 impl fmt::Display for Version {
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result162     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
163         // Copy the fields to local variables to prevent unaligned access.
164         let (major, minor) = (self.major, self.minor);
165         write!(f, "{}.{}", major, minor)
166     }
167 }
168 
/// Range with non-empty length.
#[derive(Debug, Copy, Clone)]
struct NonEmptyRange {
    start: usize,
    size: NonZeroUsize,
}

impl NonEmptyRange {
    /// Builds `[start, start + size)`, or `None` when `size` is zero.
    ///
    /// Panics if `start + size` overflows `usize`, so `end()` can never wrap.
    pub fn new(start: usize, size: usize) -> Option<Self> {
        // Ensure end() is safe.
        start.checked_add(size).unwrap();

        let size = NonZeroUsize::new(size)?;
        Some(Self { start, size })
    }

    /// Exclusive upper bound of the range.
    fn end(&self) -> usize {
        self.len() + self.start
    }

    /// Length of the range; guaranteed non-zero.
    fn len(&self) -> usize {
        self.size.get()
    }

    /// The equivalent `core::ops::Range`.
    fn as_range(&self) -> Range<usize> {
        self.start..self.end()
    }
}
196 
/// Parsed configuration data: the raw entry bodies plus the validated
/// location of each entry within them.
#[derive(Debug)]
pub struct Config<'a> {
    /// Bytes following the header and entry table (the blob storage area).
    body: &'a mut [u8],
    /// Per-entry range into `body`, indexed by `Entry` discriminant; `None`
    /// when the entry is absent or zero-sized.
    ranges: [Option<NonEmptyRange>; Entry::COUNT],
}
202 
impl<'a> Config<'a> {
    /// Take ownership of a pvmfw configuration consisting of its header and following entries.
    ///
    /// Validates the header (magic, version, total size), the entry table, and
    /// every entry range (bounds and ordering) before returning; the first
    /// inconsistency found is reported as an [`Error`].
    pub fn new(bytes: &'a mut [u8]) -> Result<Self> {
        const HEADER_SIZE: usize = mem::size_of::<Header>();
        if bytes.len() < HEADER_SIZE {
            return Err(Error::BufferTooSmall);
        }

        // Fails if `bytes` isn't suitably aligned for an in-place `Header`.
        let (header, rest) =
            zerocopy::Ref::<_, Header>::new_from_prefix(bytes).ok_or(Error::HeaderMisaligned)?;
        let header = header.into_ref();

        if header.magic != Header::MAGIC {
            return Err(Error::InvalidMagic);
        }

        // Copy the packed field to a local to avoid an unaligned access.
        let header_flags = header.flags;
        if header_flags != 0 {
            // Flags are reserved; unknown bits are tolerated but reported.
            warn!("Ignoring unknown config flags: {header_flags:#x}");
        }

        info!("pvmfw config version: {}", header.version);

        // Validate that we won't get an invalid alignment in the following due to padding to u64.
        const_assert_eq!(HEADER_SIZE % mem::size_of::<u64>(), 0);

        // Ensure that Header::total_size isn't larger than anticipated by the caller and resize
        // the &[u8] to catch OOB accesses to entries/blobs.
        let total_size = header.total_size();
        let rest = if let Some(rest_size) = total_size.checked_sub(HEADER_SIZE) {
            rest.get_mut(..rest_size).ok_or(Error::InvalidSize(total_size))?
        } else {
            // total_size can't even cover the header itself.
            return Err(Error::InvalidSize(total_size));
        };

        // Split the entry table off the front; the remainder is the blob storage.
        let (header_entries, body) =
            zerocopy::Ref::<_, [HeaderEntry]>::new_slice_from_prefix(rest, header.entry_count()?)
                .ok_or(Error::BufferTooSmall)?;

        // Validate that we won't get an invalid alignment in the following due to padding to u64.
        const_assert_eq!(mem::size_of::<HeaderEntry>() % mem::size_of::<u64>(), 0);

        // Ensure entries are in the body.
        let limits = header.body_lowest_bound()?..total_size;
        let mut ranges: [Option<NonEmptyRange>; Entry::COUNT] = [None; Entry::COUNT];
        let mut last_end = 0;
        for entry in Entry::ALL_ENTRIES {
            // Older versions describe fewer entries; missing table slots are skipped.
            let Some(header_entry) = header_entries.get(entry as usize) else { continue };
            let entry_offset = header_entry.offset.try_into().unwrap();
            let entry_size = header_entry.size.try_into().unwrap();
            // A zero-sized entry is treated as absent.
            let Some(range) = NonEmptyRange::new(entry_offset, entry_size) else { continue };
            let range = range.as_range();
            if !range.is_within(&limits) {
                return Err(Error::EntryOutOfBounds(entry, range, limits));
            }

            // Entries must appear in `Entry` order and must not overlap;
            // `get_entries` relies on this to split `body` sequentially.
            if last_end > range.start {
                return Err(Error::EntryOutOfOrder);
            }
            last_end = range.end;

            // Rebase the range onto `body`, i.e. make it relative to the end
            // of the entry table.
            ranges[entry as usize] = NonEmptyRange::new(
                entry_offset - limits.start, // is_within() validates safety of this.
                entry_size,
            );
        }
        // Ensures that BCC exists.
        ranges[Entry::Bcc as usize].ok_or(Error::MissingEntry(Entry::Bcc))?;

        Ok(Self { body, ranges })
    }

    /// Locate the various config entries.
    pub fn get_entries(self) -> Entries<'a> {
        // We require the blobs to be in the same order as the `Entry` enum (and this is checked
        // in `new` above)
        // So we can just work through the body range and split off the parts we are interested in.
        let mut offset = 0;
        let mut body = self.body;

        let mut entries: [Option<&mut [u8]>; Entry::COUNT] = Default::default();
        for (i, range) in self.ranges.iter().enumerate() {
            if let Some(range) = range {
                // Skip any gap before this entry, then carve out its bytes.
                body = &mut body[range.start - offset..];
                let (chunk, rest) = body.split_at_mut(range.len());
                offset = range.end();
                body = rest;
                entries[i] = Some(chunk);
            }
        }
        let [bcc, debug_policy, vm_dtbo, vm_ref_dt] = entries;

        // The platform BCC has always been required.
        let bcc = bcc.unwrap();

        // We have no reason to mutate so drop the `mut`.
        let debug_policy = debug_policy.map(|x| &*x);
        let vm_ref_dt = vm_ref_dt.map(|x| &*x);

        Entries { bcc, debug_policy, vm_dtbo, vm_ref_dt }
    }
}
305