// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{aligned_subslice, read, write_bytes_mut, BlockIo, Result, StorageError};
use core::default::Default;
use core::mem::{align_of, size_of};
use core::num::NonZeroU64;
use crc32fast::Hasher;
use safemath::SafeNum;
use zerocopy::{AsBytes, FromBytes, FromZeroes, Ref};

const GPT_GUID_LEN: usize = 16;
pub const GPT_NAME_LEN_U16: usize = 36;

#[repr(C, packed)]
#[derive(Debug, Default, Copy, Clone, AsBytes, FromBytes, FromZeroes)]
pub struct GptHeader {
    pub magic: u64,
    pub revision: u32,
    pub size: u32,
    pub crc32: u32,
    pub reserved0: u32,
    pub current: u64,
    pub backup: u64,
    pub first: u64,
    pub last: u64,
    pub guid: [u8; GPT_GUID_LEN],
    pub entries: u64,
    pub entries_count: u32,
    pub entries_size: u32,
    pub entries_crc: u32,
}
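
// Illustrative compile-time check (not in the original source): the packed layout above should
// match the 92-byte GPT header size fixed by the UEFI specification.
const _: () = assert!(size_of::<GptHeader>() == 92);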

impl GptHeader {
    /// Cast a byte slice into a GptHeader structure.
    fn from_bytes(bytes: &mut [u8]) -> &mut GptHeader {
        Ref::<_, GptHeader>::new_from_prefix(bytes).unwrap().0.into_mut()
    }

    /// Update the header crc32 value.
    pub fn update_crc(&mut self) {
        self.crc32 = 0;
        self.crc32 = crc32(self.as_bytes());
    }
}

/// GptEntry is the partition entry data structure in the GPT.
#[repr(C)]
#[derive(Debug, Copy, Clone, AsBytes, FromBytes, FromZeroes)]
pub struct GptEntry {
    pub part_type: [u8; GPT_GUID_LEN],
    pub guid: [u8; GPT_GUID_LEN],
    pub first: u64,
    pub last: u64,
    pub flags: u64,
    pub name: [u16; GPT_NAME_LEN_U16],
}

impl GptEntry {
    /// Return the partition entry size in blocks.
    pub fn blocks(&self) -> Result<u64> {
        u64::try_from((SafeNum::from(self.last) - self.first) + 1).map_err(|e| e.into())
    }

    /// Return whether this is a `NULL` entry. The first null entry marks the end of the partition
    /// entries.
    fn is_null(&self) -> bool {
        self.first == 0 && self.last == 0
    }

    /// Decode the partition name into a string.
    ///
    /// Note that a UTF-16 code unit can take up to 3 UTF-8 bytes, so a fully safe size for
    /// `buffer` is 3 * GPT_NAME_LEN_U16 = 108 bytes. The 2 * GPT_NAME_LEN_U16 = 72 bytes used by
    /// callers in this file covers names made of characters below U+0800 (including ASCII); if
    /// `buffer` runs out of space the method returns `Err(StorageError::InvalidInput)`.
    pub fn name_to_str<'a>(&self, buffer: &'a mut [u8]) -> Result<&'a str> {
        let mut index = 0;
        for c in char::decode_utf16(self.name) {
            match c.unwrap_or(char::REPLACEMENT_CHARACTER) {
                '\0' => break,
                c if c.len_utf8() <= buffer[index..].len() => {
                    index += c.encode_utf8(&mut buffer[index..]).len()
                }
                _ => return Err(StorageError::InvalidInput), // Not enough space in `buffer`.
            }
        }
        // SAFETY:
        // _unchecked should be OK here since we wrote each utf8 byte ourselves,
        // but it's just an optimization, checked version would be fine also.
        unsafe { Ok(core::str::from_utf8_unchecked(&buffer[..index])) }
    }
}

impl core::fmt::Display for GptEntry {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        // Format: partition name: "abc", [first, last]: [123, 456]
        let mut name_conversion_buffer = [0u8; GPT_NAME_LEN_U16 * 2];
        let name = self.name_to_str(&mut name_conversion_buffer).map_err(|_| core::fmt::Error)?;
        write!(f, "partition name: \"{}\", [first, last]: [{}, {}]", name, self.first, self.last)
    }
}

// core::mem::offset_of!(GptHeader, crc32) is an unstable feature and rejected by the compiler in
// our settings, so we pre-compute the value here: the crc32 field comes after magic (8 bytes),
// revision (4 bytes) and size (4 bytes).
const GPT_CRC32_OFFSET: u64 = 16;
const GPT_ENTRY_ALIGNMENT: u64 = align_of::<GptEntry>() as u64;
const GPT_ENTRY_SIZE: u64 = size_of::<GptEntry>() as u64;
const GPT_MAX_NUM_ENTRIES: u64 = 128;
const GPT_HEADER_SIZE: u64 = size_of::<GptHeader>() as u64; // 92 bytes.
const GPT_HEADER_SIZE_PADDED: u64 =
    (GPT_HEADER_SIZE + GPT_ENTRY_ALIGNMENT - 1) / GPT_ENTRY_ALIGNMENT * GPT_ENTRY_ALIGNMENT;
pub const GPT_MAGIC: u64 = 0x5452415020494645;
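
// Illustrative sanity checks (not in the original source): `GPT_MAGIC` is the ASCII string
// "EFI PART" read as a little-endian u64, a GPT partition entry is 128 bytes, and the 92-byte
// header rounded up to the 8-byte entry alignment is 96 bytes.
const _: () = assert!(GPT_ENTRY_SIZE == 128 && GPT_HEADER_SIZE_PADDED == 96);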

enum HeaderType {
    Primary,
    Secondary,
}

#[repr(C)]
#[derive(Debug, Default, Copy, Clone, AsBytes, FromBytes, FromZeroes)]
struct GptInfo {
    // The number of valid entries in the entries array.
    // May change as partitions are added or removed.
    num_valid_entries: Option<NonZeroU64>,
    // The maximum number of elements available in the entries array.
    // Note: this is GREATER THAN OR EQUAL TO the number of valid entries
    // and LESS THAN OR EQUAL TO the value of GPT_MAX_NUM_ENTRIES.
    // Values other than GPT_MAX_NUM_ENTRIES are mostly used in unit tests.
    max_entries: u64,
    // Block size of the GPT disk.
    block_size: u64,
}

impl GptInfo {
    fn from_bytes(bytes: &mut [u8]) -> &mut Self {
        Ref::<_, GptInfo>::new_from_prefix(bytes).unwrap().0.into_mut()
    }

    fn num_valid_entries(&self) -> Result<u64> {
        Ok(self.num_valid_entries.ok_or_else(|| StorageError::InvalidInput)?.get())
    }
}

/// An object that contains the GPT header/entries information.
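///
/// The backing buffer is laid out by `from_existing()` as a `GptInfo` bookkeeping struct,
/// followed by the primary header and entries buffers and then the secondary header and
/// entries buffers.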
pub(crate) struct Gpt<'a> {
    info: &'a mut GptInfo,
    /// Raw bytes of primary GPT header.
    primary_header: &'a mut [u8],
    /// Raw bytes of primary GPT entries.
    primary_entries: &'a mut [u8],
    /// Raw bytes of secondary GPT header.
    secondary_header: &'a mut [u8],
    /// Raw bytes of secondary GPT entries.
    secondary_entries: &'a mut [u8],
}

impl<'a> Gpt<'a> {
    /// Create an uninitialized Gpt instance from a provided buffer.
    ///
    /// # Args:
    ///
    /// * `max_entries`: Maximum number of entries allowed.
    ///
    /// * `buffer`: Buffer for creating the object. Must have a size at least
    ///   `Gpt::required_buffer_size(max_entries)`.
    pub(crate) fn new_from_buffer(max_entries: u64, buffer: &'a mut [u8]) -> Result<Gpt<'a>> {
        if max_entries > GPT_MAX_NUM_ENTRIES
            || buffer.len() < Self::required_buffer_size(max_entries)?
        {
            return Err(StorageError::InvalidInput);
        }
        let buffer = aligned_subslice(buffer, GPT_ENTRY_ALIGNMENT)?;
        *GptInfo::from_bytes(buffer) =
            GptInfo { num_valid_entries: None, max_entries, block_size: 0 };
        Self::from_existing(buffer)
    }

    /// Reconstruct an existing Gpt struct from a buffer previously created with `new_from_buffer`.
    ///
    /// The method simply partitions the input buffer and populates the `GptInfo` struct and
    /// primary/secondary header/entries slices. It assumes that the buffer contains a valid
    /// GptInfo struct.
    pub fn from_existing(buffer: &'a mut [u8]) -> Result<Gpt<'a>> {
        let buffer = aligned_subslice(buffer, GPT_ENTRY_ALIGNMENT)?;
        let (info, remain) = Ref::<_, GptInfo>::new_from_prefix(buffer).unwrap();
        let entries_size = SafeNum::from(info.max_entries) * GPT_ENTRY_SIZE;
        let header_size: usize = SafeNum::from(GPT_HEADER_SIZE_PADDED).try_into()?;
        let split_pos = entries_size + header_size;
        let (primary, secondary) = remain.split_at_mut(split_pos.try_into()?);
        let (primary_header, primary_entries) = primary.split_at_mut(header_size);
        let (secondary_header, secondary_entries) = secondary.split_at_mut(header_size);

        Ok(Self {
            info: info.into_mut(),
            primary_header,
            primary_entries,
            secondary_header,
            secondary_entries: &mut secondary_entries[..entries_size.try_into()?],
        })
    }

    /// The minimum buffer size needed for `new_from_buffer()`.
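    ///
    /// For the default `GPT_MAX_NUM_ENTRIES` (128), this works out to two copies of padded
    /// header plus entries, i.e. (96 + 128 * 128) * 2 bytes, plus the `GptInfo` bookkeeping
    /// struct and up to `GPT_ENTRY_ALIGNMENT - 1` bytes of alignment slack: roughly 32 KiB.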
    pub(crate) fn required_buffer_size(max_entries: u64) -> Result<usize> {
        let entries_size = SafeNum::from(max_entries) * GPT_ENTRY_SIZE;
        (((entries_size + GPT_HEADER_SIZE_PADDED) * 2) + size_of::<GptInfo>() + GPT_ENTRY_ALIGNMENT
            - 1)
        .try_into()
        .map_err(|e: safemath::Error| e.into())
    }

    /// Return the list of GPT entries.
    ///
    /// If the object does not contain a valid GPT, the method returns an error.
    pub(crate) fn entries(&self) -> Result<&[GptEntry]> {
        self.check_valid()?;
        Ok(&Ref::<_, [GptEntry]>::new_slice(&self.primary_entries[..]).unwrap().into_slice()
            [..self.info.num_valid_entries()?.try_into()?])
    }

    /// Search for a partition entry.
    ///
    /// If the partition doesn't exist, the method returns `Err(StorageError::NotExist)`.
    ///
    /// If the object does not contain a valid GPT, the method returns an error.
    pub(crate) fn find_partition(&self, part: &str) -> Result<&GptEntry> {
        for entry in self.entries()? {
            let mut name_conversion_buffer = [0u8; GPT_NAME_LEN_U16 * 2];
            if entry.name_to_str(&mut name_conversion_buffer)? != part {
                continue;
            }
            return Ok(entry);
        }
        Err(StorageError::NotExist)
    }

    /// Check whether the Gpt has been initialized.
    fn check_valid(&self) -> Result<()> {
        self.info.num_valid_entries()?;
        Ok(())
    }

    /// Helper function for loading and validating GPT header and entries.
    fn validate_gpt(
        &mut self,
        blk_dev: &mut (impl BlockIo + ?Sized),
        scratch: &mut [u8],
        header_type: HeaderType,
    ) -> Result<bool> {
        let (header_start, header_bytes, entries) = match header_type {
            HeaderType::Primary => {
                (blk_dev.block_size().into(), &mut self.primary_header, &mut self.primary_entries)
            }
            HeaderType::Secondary => (
                (SafeNum::from(blk_dev.num_blocks()) - 1) * blk_dev.block_size(),
                &mut self.secondary_header,
                &mut self.secondary_entries,
            ),
        };
        read(blk_dev, header_start.try_into()?, header_bytes, scratch)?;
        let header =
            Ref::<_, GptHeader>::new_from_prefix(header_bytes.as_bytes()).unwrap().0.into_ref();

        if header.magic != GPT_MAGIC {
            return Ok(false);
        }

        let entries_size = SafeNum::from(header.entries_count) * GPT_ENTRY_SIZE;
        let entries_offset = SafeNum::from(header.entries) * blk_dev.block_size();
        if self.info.max_entries < header.entries_count.into()
            || u64::try_from(entries_size + entries_offset)?
                > ((SafeNum::from(blk_dev.num_blocks()) - 1) * blk_dev.block_size()).try_into()?
        {
            return Ok(false);
        }

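        // Check the header CRC32. Per the UEFI spec the crc32 field itself is treated as zero
        // while hashing, so hash the bytes before it, four zero bytes, and then the remainder.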
        let crc32_offset = SafeNum::from(GPT_CRC32_OFFSET).try_into()?;
        let mut hasher = Hasher::new();
        hasher.update(&header.as_bytes()[..crc32_offset]);
        hasher.update(&[0u8; size_of::<u32>()]);
        hasher.update(&header.as_bytes()[crc32_offset + size_of::<u32>()..]);
        if hasher.finalize() != header.crc32 {
            return Ok(false);
        }

        // Load the entries
        let out = &mut entries[..entries_size.try_into()?];
        read(blk_dev, entries_offset.try_into()?, out, scratch)?;
        // Validate entries crc32.
        Ok(header.entries_crc == crc32(out))
    }

    /// Load and sync GPT from a block device.
    fn load_and_sync(
        &mut self,
        blk_dev: &mut (impl BlockIo + ?Sized),
        scratch: &mut [u8],
    ) -> Result<()> {
        self.info.num_valid_entries = None;

        let block_size = blk_dev.block_size();
        let total_blocks: SafeNum = blk_dev.num_blocks().into();

        let primary_header_blk = 1;
        let primary_header_pos = block_size;
        let secondary_header_blk = total_blocks - 1;
        let secondary_header_pos = secondary_header_blk * block_size;

        // Entries position for restoring.
        let primary_entries_blk = 2;
        let primary_entries_pos = SafeNum::from(primary_entries_blk) * block_size;
        let primary_valid = self.validate_gpt(blk_dev, scratch, HeaderType::Primary)?;
        let secondary_valid = self.validate_gpt(blk_dev, scratch, HeaderType::Secondary)?;

        let primary_header = GptHeader::from_bytes(self.primary_header);
        let secondary_header = GptHeader::from_bytes(self.secondary_header);
        if !primary_valid {
            if !secondary_valid {
                return Err(StorageError::NoValidGpt);
            }
            // Restore to primary
            primary_header.as_bytes_mut().clone_from_slice(secondary_header.as_bytes());
            self.primary_entries.clone_from_slice(&self.secondary_entries);
            primary_header.current = primary_header_blk;
            primary_header.backup = secondary_header_blk.try_into()?;
            primary_header.entries = primary_entries_blk;
            primary_header.update_crc();

            write_bytes_mut(blk_dev, primary_header_pos, primary_header.as_bytes_mut(), scratch)?;
            write_bytes_mut(
                blk_dev,
                primary_entries_pos.try_into()?,
                self.primary_entries,
                scratch,
            )?
        } else if !secondary_valid {
            // Restore to secondary
            let secondary_entries_pos = secondary_header_pos
                - (SafeNum::from(self.info.max_entries) * core::mem::size_of::<GptEntry>());
            let secondary_entries_blk = secondary_entries_pos / block_size;

            secondary_header.as_bytes_mut().clone_from_slice(primary_header.as_bytes());
            self.secondary_entries.clone_from_slice(&self.primary_entries);
            secondary_header.current = secondary_header_blk.try_into()?;
            secondary_header.backup = primary_header_blk;
            secondary_header.entries = secondary_entries_blk.try_into()?;
            secondary_header.update_crc();

            write_bytes_mut(
                blk_dev,
                secondary_header_pos.try_into()?,
                secondary_header.as_bytes_mut(),
                scratch,
            )?;
            write_bytes_mut(
                blk_dev,
                secondary_entries_pos.try_into()?,
                self.secondary_entries,
                scratch,
            )?;
        }

        // Calculate actual number of GPT entries by finding the first invalid entry.
        let entries =
            Ref::<_, [GptEntry]>::new_slice(&self.primary_entries[..]).unwrap().into_slice();
        self.info.num_valid_entries =
            NonZeroU64::new(match entries.iter().position(|e| e.is_null()) {
                Some(idx) => idx as u64,
                _ => self.info.max_entries,
            });
        self.info.block_size = block_size;
        Ok(())
    }
}

/// Wrapper of gpt.load_and_sync(). Library internal helper for AsBlockDevice::sync_gpt().
pub(crate) fn gpt_sync(
    blk_dev: &mut (impl BlockIo + ?Sized),
    gpt: &mut Gpt,
    scratch: &mut [u8],
) -> Result<()> {
    gpt.load_and_sync(blk_dev, scratch)
}

/// Checks if a read/write range into a GPT partition overflows and returns the range's absolute
/// offset in the block device.
pub(crate) fn check_gpt_rw_params(
    gpt_cache_buffer: &mut [u8],
    part_name: &str,
    offset: u64,
    size: usize,
) -> Result<u64> {
    let gpt = Gpt::from_existing(gpt_cache_buffer)?;
    let entry = gpt.find_partition(part_name)?;
    let end: u64 = (SafeNum::from(offset) + size).try_into()?;
    let total_size = SafeNum::from(entry.blocks()?) * gpt.info.block_size;
    match end <= total_size.try_into()? {
        true => Ok((SafeNum::from(entry.first) * gpt.info.block_size + offset).try_into()?),
        false => Err(StorageError::OutOfRange),
    }
}

fn crc32(data: &[u8]) -> u32 {
    let mut hasher = Hasher::new();
    hasher.update(data);
    hasher.finalize()
}

#[cfg(test)]
pub(crate) mod test {
    use super::*;
    use gbl_storage_testlib::{
        alignment_scratch_size, AsBlockDevice, TestBlockDevice, TestBlockDeviceBuilder,
    };

    /// Helper function to extract the gpt header from a test block device.
    /// This function lives here and not as a method of TestBlockDevice so that
    /// the Gpt type doesn't have to be exported.
    fn gpt(dev: &mut TestBlockDevice) -> Gpt {
        let (_, gpt) = dev.scratch.split_at_mut(alignment_scratch_size(&mut dev.io).unwrap());
        Gpt::from_existing(gpt).unwrap()
    }

    #[test]
    fn test_new_from_buffer() {
        let mut dev: TestBlockDevice = include_bytes!("../test/gpt_test_1.bin").as_slice().into();
        dev.sync_gpt().unwrap();

        assert_eq!(dev.partition_iter().count(), 2);
        dev.find_partition("boot_a").unwrap();
        dev.find_partition("boot_b").unwrap();
        assert!(dev.find_partition("boot_c").is_err());
    }

    #[test]
    fn test_gpt_buffer_too_small() {
        let mut dev: TestBlockDevice = include_bytes!("../test/gpt_test_1.bin").as_slice().into();
        dev.scratch = vec![0u8; dev.scratch.len() - 1];
        assert!(dev.sync_gpt().is_err());
    }

    #[test]
    fn test_gpt_too_many_entries() {
        let mut dev = TestBlockDeviceBuilder::new()
            .set_data(include_bytes!("../test/gpt_test_1.bin"))
            .set_max_gpt_entries(129)
            .build();
        assert!(dev.sync_gpt().is_err());
    }

    #[test]
    fn test_load_gpt_primary() {
        let disk = include_bytes!("../test/gpt_test_1.bin");
        let mut dev: TestBlockDevice = disk.as_slice().into();

        // Corrupt secondary.
        dev.io.storage[disk.len() - 512..].fill(0);
        dev.sync_gpt().unwrap();

        assert_eq!(dev.partition_iter().count(), 2);
        dev.find_partition("boot_a").unwrap();
        dev.find_partition("boot_b").unwrap();
        assert!(dev.find_partition("boot_c").is_err());

        // Check that secondary is restored
        assert_eq!(dev.io.storage, disk);
    }


    #[test]
    fn test_load_gpt_secondary() {
        let disk = include_bytes!("../test/gpt_test_1.bin");
        let mut dev: TestBlockDevice = disk.as_slice().into();

        // Corrupt primary.
        dev.io.storage[512..1024].fill(0);
        dev.sync_gpt().unwrap();

        assert_eq!(dev.partition_iter().count(), 2);
        dev.find_partition("boot_a").unwrap();
        dev.find_partition("boot_b").unwrap();

        // Check that primary is restored
        assert_eq!(dev.io.storage, disk);
    }

    #[test]
    fn test_good_gpt_no_repair_write() {
        let mut dev: TestBlockDevice = include_bytes!("../test/gpt_test_1.bin").as_slice().into();
        dev.sync_gpt().unwrap();

        assert_eq!(dev.io.num_writes, 0);
    }

    #[test]
    fn test_load_gpt_incorrect_magic() {
        let disk = include_bytes!("../test/gpt_test_1.bin");
        let mut dev = TestBlockDeviceBuilder::new().set_data(disk).build();
        dev.sync_gpt().unwrap();

        let gpt = gpt(&mut dev);
        let primary_header = &mut gpt.primary_header[..GPT_HEADER_SIZE.try_into().unwrap()];
        let gpt_header = GptHeader::from_bytes(primary_header);
        gpt_header.magic = 0x123456;
        gpt_header.update_crc();
        let primary_header = Vec::from(primary_header);
        dev.io.storage[512..512 + primary_header.len()].clone_from_slice(&primary_header);

        dev.sync_gpt().unwrap();

        // Check that the header with the incorrect magic is restored
        assert_eq!(dev.io.storage, disk);
    }


    #[test]
    fn test_load_gpt_exceeds_max_entries() {
        let mut dev = TestBlockDeviceBuilder::new()
            .set_data(include_bytes!("../test/gpt_test_1.bin"))
            .set_max_gpt_entries(127)
            .build();

        assert!(dev.sync_gpt().is_err());
    }

    #[test]
    fn test_load_gpt_non_max_entries() {
        // Create a header with non-max entries_count
        let disk = include_bytes!("../test/gpt_test_1.bin");
        let mut dev = TestBlockDeviceBuilder::new().set_data(disk).build();
        let block_size: usize = dev.io.block_size.try_into().unwrap();
        dev.sync_gpt().unwrap();

        let gpt = gpt(&mut dev);
        let primary_header = &mut gpt.primary_header[..GPT_HEADER_SIZE.try_into().unwrap()];
        let gpt_header = GptHeader::from_bytes(primary_header);
        gpt_header.entries_count = 2;
        // Update entries crc32
        gpt_header.entries_crc =
            crc32(&gpt.primary_entries[..(2 * GPT_ENTRY_SIZE).try_into().unwrap()]);
        gpt_header.update_crc();
        // Update to primary.
        let primary_header = Vec::from(primary_header);
        dev.io.storage[block_size..block_size + primary_header.len()]
            .clone_from_slice(&primary_header);

        // Corrupt secondary. Sync ok
        dev.io.storage[disk.len() - block_size..].fill(0);
        dev.sync_gpt().unwrap();

        // Corrupt primary. Sync ok
        dev.io.storage[block_size..(block_size * 2)].fill(0);
        dev.sync_gpt().unwrap();
    }

    #[test]
    fn test_uninitialized_gpt() {
        // Load a good GPT first.
        let mut dev = TestBlockDeviceBuilder::new()
            .set_data(include_bytes!("../test/gpt_test_1.bin"))
            .build();
        dev.sync_gpt().unwrap();
        dev.io.storage[..64 * 1024].fill(0);
        // Load a bad GPT. Validate that the valid state is reset.
        assert!(dev.sync_gpt().is_err());
        assert!(dev.find_partition("").is_err());
    }

    #[test]
    fn test_gpt_read() {
        let mut dev = TestBlockDeviceBuilder::new()
            .set_data(include_bytes!("../test/gpt_test_1.bin"))
            .build();
        dev.sync_gpt().unwrap();

        let expect_boot_a = include_bytes!("../test/boot_a.bin");
        let expect_boot_b = include_bytes!("../test/boot_b.bin");

        let mut actual_boot_a = vec![0u8; expect_boot_a.len()];
        let mut actual_boot_b = vec![0u8; expect_boot_b.len()];

        dev.read_gpt_partition("boot_a", 0, &mut actual_boot_a).unwrap();
        assert_eq!(expect_boot_a.to_vec(), actual_boot_a);
        // partial read
        actual_boot_a = actual_boot_a[1..].to_vec();
        dev.read_gpt_partition("boot_a", 1, &mut actual_boot_a).unwrap();
        assert_eq!(expect_boot_a[1..].to_vec(), actual_boot_a);

        dev.read_gpt_partition("boot_b", 0, &mut actual_boot_b).unwrap();
        assert_eq!(expect_boot_b.to_vec(), actual_boot_b);
        // partial read
        actual_boot_b = actual_boot_b[1..].to_vec();
        dev.read_gpt_partition("boot_b", 1, &mut actual_boot_b).unwrap();
        assert_eq!(expect_boot_b[1..].to_vec(), actual_boot_b);
    }

    #[test]
    fn test_gpt_write() {
        let mut dev = TestBlockDeviceBuilder::new()
            .set_data(include_bytes!("../test/gpt_test_1.bin"))
            .build();
        dev.sync_gpt().unwrap();

        let mut expect_boot_a = include_bytes!("../test/boot_a.bin").to_vec();
        expect_boot_a.reverse();
        let mut expect_boot_b = include_bytes!("../test/boot_b.bin").to_vec();
        expect_boot_b.reverse();

        let mut actual_boot_a = vec![0u8; expect_boot_a.len()];
        let mut actual_boot_b = vec![0u8; expect_boot_b.len()];

        // "boot_a" partition
        // Mutable version
        dev.write_gpt_partition("boot_a", 0, expect_boot_a.as_mut_slice()).unwrap();
        dev.read_gpt_partition("boot_a", 0, &mut actual_boot_a).unwrap();
        assert_eq!(expect_boot_a.to_vec(), actual_boot_a);
        // Mutable version, partial write.
        dev.write_gpt_partition("boot_a", 1, expect_boot_a[1..].as_mut()).unwrap();
        dev.read_gpt_partition("boot_a", 1, &mut actual_boot_a[1..]).unwrap();
        assert_eq!(expect_boot_a[1..], actual_boot_a[1..]);

        // "boot_b" partition
        // Mutable version
        dev.write_gpt_partition("boot_b", 0, expect_boot_b.as_mut_slice()).unwrap();
        dev.read_gpt_partition("boot_b", 0, &mut actual_boot_b).unwrap();
        assert_eq!(expect_boot_b.to_vec(), actual_boot_b);
        // Mutable version, partial write.
        dev.write_gpt_partition("boot_b", 1, expect_boot_b[1..].as_mut()).unwrap();
        dev.read_gpt_partition("boot_b", 1, &mut actual_boot_b[1..]).unwrap();
        assert_eq!(expect_boot_b[1..], actual_boot_b[1..]);
    }

    #[test]
    fn test_gpt_rw_overflow() {
        let mut dev = TestBlockDeviceBuilder::new()
            .set_data(include_bytes!("../../libstorage/test/gpt_test_1.bin"))
            .build();
        dev.sync_gpt().unwrap();

        let mut boot_a = [0u8; include_bytes!("../test/boot_a.bin").len()];
        let mut boot_b = [0u8; include_bytes!("../test/boot_b.bin").len()];

        assert!(dev.read_gpt_partition("boot_a", 1, &mut boot_a).is_err());
        assert!(dev.write_gpt_partition("boot_a", 1, boot_a.as_mut_slice()).is_err());

        assert!(dev.read_gpt_partition("boot_b", 1, &mut boot_b).is_err());
        assert!(dev.write_gpt_partition("boot_b", 1, boot_b.as_mut_slice()).is_err());
    }
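
    // The following two tests are illustrative additions, not part of the original test suite.
    // They only exercise helpers defined in this module.

    #[test]
    fn test_crc32_known_value() {
        // 0xCBF43926 is the standard CRC-32 (IEEE) check value for the ASCII string "123456789".
        assert_eq!(crc32(b"123456789"), 0xCBF43926);
    }

    #[test]
    fn test_name_to_str_ascii() {
        // Build a zeroed entry with a short ASCII name; decoding should stop at the first NUL
        // code unit and round-trip the name.
        let mut entry = GptEntry::new_zeroed();
        for (i, c) in "boot_a".encode_utf16().enumerate() {
            entry.name[i] = c;
        }
        let mut buffer = [0u8; GPT_NAME_LEN_U16 * 2];
        assert_eq!(entry.name_to_str(&mut buffer).unwrap(), "boot_a");
    }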
}