1 // Copyright 2023, The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #![cfg_attr(not(any(test, android_dylib)), no_std)]
16
17 //! ZBI Processing Library
18 //!
19 //! This library is meant to be a generic processing library for the ZBI format
20 //! defined in sdk/lib/zbi-format/include/lib/zbi-format/zbi.h.
21 //!
//! Its main interface is [`ZbiContainer`], which can create ([`ZbiContainer::new`]) a valid
//! container in a provided buffer, or parse and verify ([`ZbiContainer::parse`]) an existing
//! container in a buffer. In both cases it provides an iterator to walk through the items in the
//! container.
//!
//! Note: in both cases the provided buffer must be aligned to [`ZBI_ALIGNMENT_USIZE`].
//! [`align_buffer`] can be used to do the alignment for you.
28 //!
29 //! ```
30 //! use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
31 //!
32 //! let mut buffer = [0; 200];
33 //! let mut buffer = align_buffer(&mut buffer[..]).unwrap();
34 //! let mut container = ZbiContainer::new(buffer).unwrap();
35 //! container.create_entry(ZbiType::DebugData, 0, ZbiFlags::default(), 10).unwrap();
36 //! container.create_entry_with_payload(ZbiType::DebugData, 0, ZbiFlags::default(), &[]).unwrap();
37 //!
38 //! assert_eq!(container.iter().count(), 2);
39 //!
40 //! let mut it = container.iter();
41 //! assert_eq!(it.next().unwrap().header.length, 10);
42 //! assert_eq!(it.next().unwrap().header.length, 0);
43 //! assert_eq!(it.next(), None);
44 //! ```
45
46 mod zbi_format;
47
48 use bitflags::bitflags;
49 use core::{
50 fmt::{Debug, Display, Formatter},
51 mem::{size_of, take},
52 ops::DerefMut,
53 };
54 use zbi_format::*;
55 use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, Ref};
56
57 type ZbiResult<T> = Result<T, ZbiError>;
58
/// [`ZbiContainer`] requires the buffer and each entry to be aligned to this number of bytes.
/// [`align_buffer`] can be used to adjust the buffer alignment to match this requirement.
// ZBI_ALIGNMENT is a u32, and it is not productive to `try_into()` usize all the time.
// The value is expected to always fit in both `u32` and `usize`, which is covered by tests.
63 pub const ZBI_ALIGNMENT_USIZE: usize = ZBI_ALIGNMENT as usize;
64
65 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
66 const ZBI_ARCH_KERNEL_TYPE: ZbiType = ZbiType::KernelArm64;
67 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
68 const ZBI_ARCH_KERNEL_TYPE: ZbiType = ZbiType::KernelX64;
69 #[cfg(any(target_arch = "riscv", target_arch = "riscv64"))]
70 const ZBI_ARCH_KERNEL_TYPE: ZbiType = ZbiType::KernelRiscv64;
71
/// Aligns the provided slice to [`ZBI_ALIGNMENT_USIZE`] bytes.
///
/// # Returns
///
/// * `Ok(aligned_slice)` - on success; the aligned slice can have `length == 0`
/// * [`ZbiError::TooBig`] - if there is not enough space to align the slice
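///
/// # Example
///
/// A minimal usage sketch, mirroring the crate-level example (the 100-byte buffer size is
/// arbitrary):
///
/// ```
/// use zbi::{align_buffer, ZBI_ALIGNMENT_USIZE};
///
/// let mut buffer = [0u8; 100];
/// let aligned = align_buffer(&mut buffer[..]).unwrap();
/// assert_eq!(aligned.as_ptr() as usize % ZBI_ALIGNMENT_USIZE, 0);
/// assert!(aligned.len() <= 100);
/// ```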
pub fn align_buffer<B: ByteSlice>(buffer: B) -> ZbiResult<B> {
79 let tail_offset = get_align_buffer_offset(&buffer[..])?;
80 let (_, aligned_buffer) = buffer.split_at(tail_offset);
81 Ok(aligned_buffer)
82 }
83
/// `ZbiItem` is the representation of a single element in a [`ZbiContainer`].
///
/// It consists of a `header` and a `payload`, both of which reference their actual locations in
/// the buffer; the `payload` immediately follows the `header` in the buffer.
///
/// The header must be [`ZBI_ALIGNMENT_USIZE`]-aligned in the buffer.
/// The header's length field specifies the actual payload length and does not include any padding.
/// Since all headers in a [`ZbiContainer`] are [`ZBI_ALIGNMENT_USIZE`]-aligned, a payload may be
/// followed by padding, which is counted in the [`ZbiContainer`] length but not in the individual
/// [`ZbiItem`].
93 #[derive(Debug)]
94 pub struct ZbiItem<B: ByteSlice> {
95 /// ZBI header
96 pub header: Ref<B, ZbiHeader>,
97 /// Payload corresponding to ZBI header
98 pub payload: B,
99 }
100
101 impl<B: ByteSlice, C: ByteSlice> PartialEq<ZbiItem<C>> for ZbiItem<B> {
fn eq(&self, other: &ZbiItem<C>) -> bool {
103 self.header.as_bytes() == other.header.as_bytes()
104 && self.payload.as_bytes() == other.payload.as_bytes()
105 }
106 }
107
108 impl<B: ByteSlice + PartialEq> ZbiItem<B> {
/// Attempts to parse the provided buffer.
///
/// # Arguments
/// * `buffer` - buffer to parse (can be mutable if further changes to the element are required)
///
/// # Returns
///
/// * `Ok((ZbiItem, tail))` - on success, the parsed `ZbiItem` and the unused tail of the buffer.
/// * `Err(ZbiError)` - if parsing fails.
119 ///
120 /// # Example
121 ///
122 /// ```
123 /// use zbi::ZbiItem;
124 ///
125 /// # const LEN: usize = 100;
126 /// # #[repr(align(8))]
127 /// # struct ZbiAligned([u8; LEN]);
128 /// # let buffer = ZbiAligned(core::array::from_fn::<_, LEN, _>(|_| 0u8));
129 /// # let buffer = &buffer.0[..];
130 /// let (zbi_item, tail) = ZbiItem::parse(buffer).unwrap();
131 /// println!("{}", zbi_item.header.type_);
132 /// println!("{}", tail.len());
133 /// assert_eq!(zbi_item.header.length, zbi_item.payload.len() as u32);
134 /// ```
pub fn parse(buffer: B) -> ZbiResult<(ZbiItem<B>, B)> {
136 is_zbi_aligned(&buffer)?;
137
138 let (hdr, payload) = Ref::<B, ZbiHeader>::new_from_prefix(buffer).ok_or(ZbiError::Error)?;
139
140 let item_payload_len =
141 usize::try_from(hdr.length).map_err(|_| ZbiError::PlatformBadLength)?;
142 if payload.len() < item_payload_len {
143 return Err(ZbiError::TooBig);
144 }
145
146 let (item_payload, tail) = payload.split_at(item_payload_len);
147 let item = ZbiItem { header: hdr, payload: item_payload };
148 Ok((item, tail))
149 }
150
151 /// Validates `ZbiItem` header values.
152 ///
153 /// # Example
154 ///
155 /// ```
156 /// use zbi::{ZbiItem, ZbiError};
157 ///
158 /// # const LEN: usize = 100;
159 /// # #[repr(align(8))]
160 /// # struct ZbiAligned([u8; LEN]);
161 /// # let buffer = ZbiAligned(core::array::from_fn::<_, LEN, _>(|_| 0u8));
162 /// # let buffer = &buffer.0[..];
/// // E.g. `header.magic == 0` is an invalid value.
164 /// let (zbi_item, tail) = ZbiItem::parse(buffer).unwrap();
165 /// assert_eq!(zbi_item.header.magic, 0);
166 /// assert_eq!(zbi_item.is_valid(), Err(ZbiError::BadMagic));
167 /// ```
pub fn is_valid(&self) -> ZbiResult<()> {
169 if self.header.magic != ZBI_ITEM_MAGIC {
170 Err(ZbiError::BadMagic)
171 } else if !self.header.get_flags().contains(ZbiFlags::VERSION) {
172 Err(ZbiError::BadVersion)
173 } else if !self.header.get_flags().contains(ZbiFlags::CRC32)
174 && (self.header.crc32 != ZBI_ITEM_NO_CRC32)
175 {
176 Err(ZbiError::BadCrc)
177 } else {
178 Ok(())
179 }
180 }
181 }
182
183 impl<B: ByteSliceMut + PartialEq> ZbiItem<B> {
/// Creates a `ZbiItem` with the provided information and payload length.
///
/// # Returns
///
/// * `(ZbiItem, tail)` - returned on success; the `ZbiItem` has a payload of the requested
///                       length, and `tail` is the remaining, unused part of `buffer`.
/// * `ZbiError::BadAlignment` - if the buffer isn't aligned.
/// * `ZbiError::TooBig` - if the buffer is not long enough to hold
///                        [`ZbiHeader`] + `payload` of `payload_len`.
/// * `ZbiError::PlatformBadLength` - if `payload_len` is bigger than `u32::MAX`
196 ///
197 /// # Example
198 /// ```
199 /// use zbi::{ZbiItem, ZbiFlags, ZbiType};
200 ///
201 /// # const LEN: usize = 100;
202 /// # #[repr(align(8))]
203 /// # struct ZbiAligned([u8; LEN]);
204 /// # let mut buffer = ZbiAligned(core::array::from_fn::<_, LEN, _>(|_| 0u8));
205 /// # let mut buffer = &mut buffer.0[..];
206 /// let (item, _tail) = ZbiItem::new(
207 /// &mut buffer[..],
208 /// ZbiType::KernelX64,
209 /// 0,
210 /// ZbiFlags::default(),
211 /// 2,
212 /// ).unwrap();
213 /// assert_eq!(item.header.length, 2);
214 /// assert_eq!(item.payload.len(), 2);
215 /// ```
pub fn new(
217 buffer: B,
218 type_: ZbiType,
219 extra: u32,
220 flags: ZbiFlags,
221 payload_len: usize,
222 ) -> ZbiResult<(ZbiItem<B>, B)> {
223 if buffer.len() < core::mem::size_of::<ZbiHeader>()
224 || buffer.len() - core::mem::size_of::<ZbiHeader>() < payload_len
225 {
226 return Err(ZbiError::TooBig);
227 }
228
229 is_zbi_aligned(&buffer)?;
230
// Convert payload_len to u32 so it can be stored in the header.
232 let payload_len_u32 =
233 u32::try_from(payload_len).map_err(|_| ZbiError::PlatformBadLength)?;
234
235 let (mut header, item_tail) =
236 Ref::<B, ZbiHeader>::new_from_prefix(buffer).ok_or(ZbiError::Error)?;
237 header.type_ = type_ as u32;
238 header.length = payload_len_u32;
239 header.extra = extra;
240 header.set_flags(&flags);
241 header.reserved0 = 0;
242 header.reserved1 = 0;
243 header.magic = ZBI_ITEM_MAGIC;
244 header.crc32 = ZBI_ITEM_NO_CRC32;
245
// The split is safe because we checked that the input buffer is big enough to contain the
// header and the requested payload size.
248 let (payload, tail) = item_tail.split_at(payload_len);
249
250 Ok((ZbiItem { header, payload }, tail))
251 }
252 }
253
254 /// Main structure to work with ZBI format.
255 ///
/// It allows creating a valid container in a buffer as well as parsing an existing one.
/// In both cases the elements in the container can be iterated over via [`ZbiContainer::iter`] or
/// [`ZbiContainer::iter_mut`].
259 #[derive(Debug, PartialEq)]
260 pub struct ZbiContainer<B: ByteSlice> {
/// Container-specific [`ZbiHeader`], which is the first element of the ZBI buffer.
///
/// `header.length` indicates how many bytes after this header are used for ZBI elements and
/// padding.
///
/// `header.type_` is always [`ZbiType::Container`].
267 pub header: Ref<B, ZbiHeader>,
268
// Same as `header.length`, but kept as `usize` for convenience, to avoid `try_into()` calls and
// returning `ZbiError` whenever it is used.
271 // Use getters and setters to access length:
272 // - set_payload_length_usize()
273 // - get_payload_length_u32()
274 // - get_payload_length_usize()
275 payload_length: usize,
276
277 // Buffer that follows `header`. It contains ZbiItems + padding if any and remaining tail for
278 // possible growth.
279 buffer: B,
280 }
281
282 impl<B: ByteSlice> ZbiContainer<B> {
// Helper to construct a [`ZbiContainer`], keeping `payload_length` in sync with
// `header.length`.
fn construct(header: Ref<B, ZbiHeader>, buffer: B) -> ZbiResult<Self> {
286 Ok(Self {
287 payload_length: usize::try_from(header.length)
288 .map_err(|_| ZbiError::PlatformBadLength)?,
289 header,
290 buffer,
291 })
292 }
293
/// Returns the current container payload length as `u32`. The length doesn't include the
/// container header, only items and padding.
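///
/// A minimal sketch of how the length grows as entries are added (buffer size is arbitrary):
///
/// ```
/// use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
///
/// let mut buffer = [0; 100];
/// let buffer = align_buffer(&mut buffer[..]).unwrap();
/// let mut container = ZbiContainer::new(buffer).unwrap();
/// assert_eq!(container.get_payload_length_u32(), 0);
///
/// container.create_entry_with_payload(ZbiType::DebugData, 0, ZbiFlags::default(), &[1]).unwrap();
/// // The length now covers the new item header plus its padded payload.
/// assert!(container.get_payload_length_u32() > 0);
/// ```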
pub fn get_payload_length_u32(&self) -> u32 {
297 self.header.length
298 }
299
/// Returns the current container payload length as `usize`. The length doesn't include the
/// container header, only items and padding.
pub fn get_payload_length_usize(&self) -> usize {
303 self.payload_length
304 }
305
/// Returns an immutable iterator over ZBI items. The first element is the first ZBI item after
/// the container header; the container header itself is not exposed by the iterator.
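///
/// A minimal sketch (mirroring the crate-level example):
///
/// ```
/// use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
///
/// let mut buffer = [0; 100];
/// let buffer = align_buffer(&mut buffer[..]).unwrap();
/// let mut container = ZbiContainer::new(buffer).unwrap();
/// container.create_entry_with_payload(ZbiType::DebugData, 0, ZbiFlags::default(), &[1, 2]).unwrap();
///
/// for item in container.iter() {
///     assert_eq!(item.header.type_, ZbiType::DebugData as u32);
///     assert_eq!(item.header.length, 2);
/// }
/// ```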
pub fn iter(&self) -> ZbiContainerIterator<impl ByteSlice + Default + Debug + PartialEq + '_> {
309 ZbiContainerIterator {
310 state: Ok(()),
311 buffer: &self.buffer[..self.get_payload_length_usize()],
312 }
313 }
314
315 /// Validates if ZBI is bootable for the target platform.
316 ///
317 /// # Returns
318 ///
319 /// * `Ok(())` - if bootable
/// * Err([`ZbiError::IncompleteKernel`]) - if the first item in the container is not a kernel
///   item bootable on the target platform.
/// * Err([`ZbiError::Truncated`]) - if the container is empty
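///
/// # Example
///
/// A minimal sketch: an empty container is never bootable (the expected kernel item type
/// depends on the target architecture):
///
/// ```
/// use zbi::{ZbiContainer, ZbiError, align_buffer};
///
/// let mut buffer = [0; 100];
/// let buffer = align_buffer(&mut buffer[..]).unwrap();
/// let container = ZbiContainer::new(buffer).unwrap();
/// assert_eq!(container.is_bootable(), Err(ZbiError::Truncated));
/// ```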
pub fn is_bootable(&self) -> ZbiResult<()> {
324 let hdr = &self.header;
325 if hdr.length == 0 {
326 return Err(ZbiError::Truncated);
327 }
328
329 match self.iter().next() {
330 Some(ZbiItem { header, payload: _ }) if header.type_ == ZBI_ARCH_KERNEL_TYPE as u32 => {
331 Ok(())
332 }
333 Some(_) => Err(ZbiError::IncompleteKernel),
334 None => Err(ZbiError::Truncated),
335 }
336 }
337
338 /// Creates `ZbiContainer` from provided buffer.
339 ///
/// The buffer must be aligned to [`ZBI_ALIGNMENT_USIZE`] ([`align_buffer`] can be
/// used for that). If the buffer is mutable, then the container is mutable as well.
///
/// # Returns
///
/// * `Ok(ZbiContainer)` - if the buffer is aligned and contains a valid container.
/// * Err([`ZbiError`]) - if an error occurred.
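///
/// # Example
///
/// A minimal sketch: a freshly created container parses back successfully (the 100-byte buffer
/// size is arbitrary):
///
/// ```
/// use zbi::{ZbiContainer, align_buffer};
///
/// let mut buffer = [0; 100];
/// let buffer = align_buffer(&mut buffer[..]).unwrap();
/// ZbiContainer::new(&mut buffer[..]).unwrap();
///
/// let container = ZbiContainer::parse(&buffer[..]).unwrap();
/// assert_eq!(container.iter().count(), 0);
/// ```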
pub fn parse(buffer: B) -> ZbiResult<Self> {
348 is_zbi_aligned(&buffer)?;
349
350 let (header, payload) =
351 Ref::<B, ZbiHeader>::new_from_prefix(buffer).ok_or(ZbiError::Error)?;
352
353 let length: usize = header.length.try_into().map_err(|_| ZbiError::TooBig)?;
354 if length > payload.len() {
355 return Err(ZbiError::Truncated);
356 }
357
358 if header.type_ != ZbiType::Container as u32 {
359 return Err(ZbiError::BadType);
360 } else if header.extra != ZBI_CONTAINER_MAGIC || header.magic != ZBI_ITEM_MAGIC {
361 return Err(ZbiError::BadMagic);
362 } else if !header.get_flags().contains(ZbiFlags::VERSION) {
363 return Err(ZbiError::BadVersion);
364 } else if !header.get_flags().contains(ZbiFlags::CRC32) && header.crc32 != ZBI_ITEM_NO_CRC32
365 {
366 return Err(ZbiError::BadCrc);
367 }
368
369 let res = Self::construct(header, payload)?;
370 // Compiler thinks it is still borrowed when we reach Ok(res), so adding scope for it
371 {
372 let mut it = res.iter();
373 for b in &mut it {
374 b.is_valid()?;
375 }
376
377 // Check if there were item parsing errors
378 it.state?;
379 }
380 Ok(res)
381 }
382 }
383
384 impl<B: ByteSliceMut + PartialEq> ZbiContainer<B> {
fn set_payload_length_usize(&mut self, len: usize) -> ZbiResult<()> {
386 if self.buffer.len() < len {
387 return Err(ZbiError::Truncated);
388 }
389 self.header.length = u32::try_from(len).map_err(|_| ZbiError::PlatformBadLength)?;
390 self.payload_length = len;
391 Ok(())
392 }
393
394 /// Creates new empty `ZbiContainer` using provided buffer.
395 ///
396 /// # Returns
397 ///
398 /// * `Ok(ZbiContainer)` - on success
399 /// * Err([`ZbiError`]) - on error
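///
/// # Example
///
/// A minimal sketch (the buffer only needs to be aligned and big enough for the container
/// header):
///
/// ```
/// use zbi::{ZbiContainer, align_buffer};
///
/// let mut buffer = [0; 100];
/// let buffer = align_buffer(&mut buffer[..]).unwrap();
/// let container = ZbiContainer::new(buffer).unwrap();
/// assert_eq!(container.get_payload_length_usize(), 0);
/// assert_eq!(container.iter().count(), 0);
/// ```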
pub fn new(buffer: B) -> ZbiResult<Self> {
401 let (item, buffer) =
402 ZbiItem::new(buffer, ZbiType::Container, ZBI_CONTAINER_MAGIC, ZbiFlags::default(), 0)?;
403
404 Self::construct(item.header, buffer)
405 }
406
fn align_tail(&mut self) -> ZbiResult<()> {
408 let length = self.get_payload_length_usize();
409 let align_offset = get_align_buffer_offset(&self.buffer[length..])?;
410 let new_length = length + align_offset;
411 self.set_payload_length_usize(new_length)?;
412 Ok(())
413 }
414
/// Gets the payload slice for the next ZBI entry.
416 ///
417 /// Next entry should be added using [`ZbiContainer::create_entry`].
418 ///
419 /// This is useful when it's non-trivial to determine the length of a payload ahead of time -
420 /// for example, loading a variable-length string from persistent storage.
421 ///
/// Rather than loading the payload into a temporary buffer, determining the length, then
/// copying it into the ZBI, this function allows loading data directly into the ZBI. Since this
/// buffer is a currently unused area, loading data here does not affect the ZBI until
/// [`ZbiContainer::create_entry`] is called.
426 ///
427 /// # Example
428 ///
429 /// ```
430 /// # use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
431 /// #
432 /// # let mut buffer = [0; 100];
433 /// # let mut buffer = align_buffer(&mut buffer[..]).unwrap();
434 /// # let mut container = ZbiContainer::new(buffer).unwrap();
435 /// #
436 /// # let payload_to_use = [1, 2, 3, 4];
437 /// let next_payload = container.get_next_payload().unwrap();
438 /// next_payload[..payload_to_use.len()].copy_from_slice(&payload_to_use[..]);
439 ///
440 /// container
441 /// .create_entry(ZbiType::KernelX64, 0, ZbiFlags::default(), payload_to_use.len())
442 /// .unwrap();
443 ///
444 /// assert_eq!(container.iter().count(), 1);
445 /// assert_eq!(&*container.iter().next().unwrap().payload, &payload_to_use[..]);
446 /// ```
447 ///
/// # Returns
///
/// * `Ok(&mut [u8])` - on success; the slice of the buffer where the next entry's payload will
///   be located.
/// * Err([`ZbiError::TooBig`]) - if the buffer is not big enough for a new element even without
///   a payload.
pub fn get_next_payload(&mut self) -> ZbiResult<&mut [u8]> {
452 let length = self.get_payload_length_usize();
453 let align_payload_offset = length
454 .checked_add(size_of::<ZbiHeader>())
455 .ok_or(ZbiError::LengthOverflow)?
456 .checked_add(get_align_buffer_offset(&self.buffer[length..])?)
457 .ok_or(ZbiError::LengthOverflow)?;
458 if self.buffer.len() < align_payload_offset {
459 return Err(ZbiError::TooBig);
460 }
461 Ok(&mut self.buffer[align_payload_offset..])
462 }
463
464 /// Creates a new ZBI entry with the provided payload.
465 ///
466 /// The new entry is aligned to [`ZBI_ALIGNMENT_USIZE`]. The capacity of the base ZBI must
467 /// be large enough to fit the new entry.
468 ///
469 /// The [`ZbiFlags::VERSION`] is unconditionally set for the new entry.
470 ///
471 /// The [`ZbiFlags::CRC32`] flag yields an error because CRC computation is not yet
472 /// supported.
473 ///
474 /// # Arguments
475 /// * `type_` - The new entry's type
476 /// * `extra` - The new entry's type-specific data
477 /// * `flags` - The new entry's flags
478 /// * `payload` - The payload, copied into the new entry
479 ///
/// # Returns
481 /// * Ok(()) - on success
482 /// * Err([`ZbiError::TooBig`]) - if buffer is not big enough for new element with payload
483 /// * Err([`ZbiError::Crc32NotSupported`]) - if unsupported [`ZbiFlags::CRC32`] is set
484 /// * Err([`ZbiError`]) - if other errors occurred
485 ///
486 /// # Example
487 /// ```
488 /// # use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
489 /// #
490 /// # let mut buffer = [0; 100];
491 /// # let mut buffer = align_buffer(&mut buffer[..]).unwrap();
492 /// # let mut container = ZbiContainer::new(buffer).unwrap();
493 /// #
494 /// container
495 /// .create_entry_with_payload(ZbiType::KernelX64, 0, ZbiFlags::default(), &[1, 2, 3, 4])
496 /// .unwrap();
497 /// assert_eq!(container.iter().count(), 1);
498 /// assert_eq!(&*container.iter().next().unwrap().payload, &[1, 2, 3, 4]);
499 /// ```
pub fn create_entry_with_payload(
501 &mut self,
502 type_: ZbiType,
503 extra: u32,
504 flags: ZbiFlags,
505 payload: &[u8],
506 ) -> ZbiResult<()> {
507 self.get_next_payload()?[..payload.len()].copy_from_slice(payload);
508 self.create_entry(type_, extra, flags, payload.len())
509 }
510
/// Creates a new ZBI entry with a payload of `payload_length` bytes.
512 ///
513 /// The new entry is aligned to [`ZBI_ALIGNMENT_USIZE`]. The capacity of the base ZBI must
514 /// be large enough to fit the new entry.
515 ///
516 /// The [`ZbiFlags::VERSION`] is unconditionally set for the new entry.
517 ///
518 /// The [`ZbiFlags::CRC32`] flag yields an error because CRC computation is not yet
519 /// supported.
520 ///
521 /// # Arguments
522 /// * `type_` - The new entry's type.
523 /// * `extra` - The new entry's type-specific data.
524 /// * `flags` - The new entry's flags.
525 /// * `payload_length` - The length of the new entry's payload.
526 ///
527 /// # Returns
528 /// * Ok(()) - On success.
529 /// * Err([`ZbiError::TooBig`]) - if buffer is not big enough for new element with payload
530 /// * Err([`ZbiError::Crc32NotSupported`]) - if unsupported [`ZbiFlags::CRC32`] is set
531 /// * Err([`ZbiError`]) - if other errors occurred
532 ///
533 /// # Example
534 /// ```
535 /// # use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
536 /// #
537 /// # let mut buffer = [0; 100];
538 /// # let mut buffer = align_buffer(&mut buffer[..]).unwrap();
539 /// # let mut container = ZbiContainer::new(buffer).unwrap();
540 /// #
541 /// # let payload_to_use = [1, 2, 3, 4];
542 /// let next_payload = container.get_next_payload().unwrap();
543 /// next_payload[..payload_to_use.len()].copy_from_slice(&payload_to_use[..]);
544 ///
545 /// container
546 /// .create_entry(ZbiType::KernelX64, 0, ZbiFlags::default(), payload_to_use.len())
547 /// .unwrap();
548 ///
549 /// assert_eq!(container.iter().count(), 1);
550 /// assert_eq!(&*container.iter().next().unwrap().payload, &payload_to_use[..]);
551 /// ```
pub fn create_entry(
553 &mut self,
554 type_: ZbiType,
555 extra: u32,
556 flags: ZbiFlags,
557 payload_length: usize,
558 ) -> ZbiResult<()> {
559 // We don't support CRC computation (yet?)
560 if flags.contains(ZbiFlags::CRC32) {
561 return Err(ZbiError::Crc32NotSupported);
562 }
563
564 let length = self.get_payload_length_usize();
565 let (item, _) =
566 ZbiItem::new(&mut self.buffer[length..], type_, extra, flags, payload_length)?;
567 let used = length
568 .checked_add(core::mem::size_of::<ZbiHeader>())
569 .ok_or(ZbiError::LengthOverflow)?
570 .checked_add(item.payload.len())
571 .ok_or(ZbiError::LengthOverflow)?;
572 self.set_payload_length_usize(used)?;
573 self.align_tail()?;
574 Ok(())
575 }
576
577 /// Extends a ZBI container with another container's payload.
578 ///
579 /// # Arguments
580 /// * `other` - The container to copy the payload from.
581 ///
582 /// # Returns
583 /// * `Ok(())` - On success.
/// * Err([`ZbiError::TooBig`]) - if the destination container's buffer is too small.
585 ///
586 /// # Example
587 /// ```
588 /// # use zbi::{ZbiContainer, ZbiType, ZbiFlags, align_buffer};
589 /// #
590 /// # let mut buffer = [0; 200];
591 /// # let mut buffer = align_buffer(&mut buffer[..]).unwrap();
592 /// let mut container_0 = ZbiContainer::new(&mut buffer[..]).unwrap();
593 /// container_0
594 /// .create_entry_with_payload(ZbiType::DebugData, 0, ZbiFlags::default(), &[0, 1])
595 /// .unwrap();
596 ///
597 /// # let mut buffer = [0; 200];
598 /// # let mut buffer = align_buffer(&mut buffer[..]).unwrap();
599 /// let mut container_1 = ZbiContainer::new(&mut buffer[..]).unwrap();
600 /// container_1
601 /// .create_entry_with_payload(ZbiType::KernelX64, 0, ZbiFlags::default(), &[0, 1, 3, 4])
602 /// .unwrap();
603 ///
604 /// container_0.extend(&container_1).unwrap();
605 ///
606 /// assert_eq!(container_0.iter().count(), 2);
607 /// # let cont0_element_1 = &container_0
608 /// # .iter()
609 /// # .enumerate()
610 /// # .filter_map(|(i, e)| if i == 1 { Some(e) } else { None })
611 /// # .collect::<Vec<_>>()[0];
612 /// # let cont1_element_0 = &container_1.iter().next().unwrap();
613 /// # assert_eq!(cont0_element_1, cont1_element_0);
614 /// ```
pub fn extend(&mut self, other: &ZbiContainer<impl ByteSlice + PartialEq>) -> ZbiResult<()> {
616 let new_length = self
617 .get_payload_length_usize()
618 .checked_add(other.get_payload_length_usize())
619 .ok_or(ZbiError::LengthOverflow)?;
620 if self.buffer.len() < new_length {
621 return Err(ZbiError::TooBig);
622 }
623
624 for b in other.iter() {
625 let start = self.get_payload_length_usize();
626 let end = start + core::mem::size_of::<ZbiHeader>();
627 self.buffer[start..end].clone_from_slice(b.header.bytes());
628 let start = end;
629 let end = start + b.payload.len();
630 self.buffer[start..end].clone_from_slice(&b.payload);
631 self.set_payload_length_usize(end)?;
632 self.align_tail()?;
633 }
634 Ok(())
635 }
636 }
637
638 impl<B: ByteSlice + PartialEq + DerefMut> ZbiContainer<B> {
/// Returns a mutable iterator over ZBI items. The first element is the first ZBI item after
/// the container header; the container header itself is not exposed by the iterator.
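///
/// A minimal sketch: payloads can be modified in place through the mutable iterator:
///
/// ```
/// use zbi::{ZbiContainer, ZbiFlags, ZbiType, align_buffer};
///
/// let mut buffer = [0; 100];
/// let buffer = align_buffer(&mut buffer[..]).unwrap();
/// let mut container = ZbiContainer::new(buffer).unwrap();
/// container.create_entry_with_payload(ZbiType::DebugData, 0, ZbiFlags::default(), &[0, 0]).unwrap();
///
/// for mut item in container.iter_mut() {
///     item.payload[0] = 1;
/// }
/// assert_eq!(&*container.iter().next().unwrap().payload, &[1, 0]);
/// ```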
pub fn iter_mut(
642 &mut self,
643 ) -> ZbiContainerIterator<impl ByteSliceMut + Debug + Default + PartialEq + '_> {
644 let length = self.get_payload_length_usize();
645 ZbiContainerIterator { state: Ok(()), buffer: &mut self.buffer[..length] }
646 }
647 }
648
649 /// Container iterator
// State is required to check that elements are valid during parsing.
// Parsing can fail, and we need to distinguish whether the iterator returned `None` because
// there are no more elements left or because there was an error.
// If the container object already exists, the state should never contain an error, since the
// container was already verified.
655 pub struct ZbiContainerIterator<B> {
656 state: ZbiResult<()>,
657 buffer: B,
658 }
659
660 impl<B: ByteSlice + PartialEq + Default + Debug> Iterator for ZbiContainerIterator<B> {
661 type Item = ZbiItem<B>;
662
fn next(&mut self) -> Option<Self::Item> {
664 // Align buffer before parsing
665 match align_buffer(take(&mut self.buffer)) {
666 Ok(v) => self.buffer = v,
667 Err(_) => {
668 self.state = Err(ZbiError::Truncated);
669 return None;
670 }
671 };
672
673 if self.buffer.is_empty() {
674 return None;
675 }
676
677 match ZbiItem::<B>::parse(take(&mut self.buffer)) {
678 Ok((item, mut tail)) => {
679 // Remove item that was just parsed from the buffer for next
680 // iteration before returning it.
681 core::mem::swap(&mut tail, &mut self.buffer);
682 Some(item)
683 }
684 Err(e) => {
685 // If there was an error during item parsing,
686 // make sure to set state to error, before signalling end of iteration.
687 self.state = Err(e);
688 None
689 }
690 }
691 }
692 }
693
694 #[repr(u32)]
695 #[derive(AsBytes, Clone, Copy, Debug, Eq, PartialEq)]
696 /// All possible [`ZbiHeader`]`.type` values.
697 pub enum ZbiType {
698 /// Each ZBI starts with a container header.
699 /// * `length`: Total size of the image after this header. This includes all item headers,
700 /// payloads, and padding. It does not include the container header itself.
701 /// Must be a multiple of [`ZBI_ALIGNMENT_USIZE`].
702 /// * `extra`: Must be `ZBI_CONTAINER_MAGIC`.
703 /// * `flags`: Must be [`ZbiFlags::VERSION`] and no other flags.
704 Container = ZBI_TYPE_CONTAINER,
705
706 /// x86-64 kernel. See [`ZbiKernel`] for a payload description.
707 //
708 // 'KRNL'
709 KernelX64 = ZBI_TYPE_KERNEL_X64,
710
711 /// ARM64 kernel. See [`ZbiKernel`] for a payload description.
712 //
713 // KRN8
714 KernelArm64 = ZBI_TYPE_KERNEL_ARM64,
715
716 /// RISC-V kernel. See [`ZbiKernel`] for a payload description.
717 //
718 // 'KRNV'
719 KernelRiscv64 = ZBI_TYPE_KERNEL_RISCV64,
720
721 /// A discarded item that should just be ignored. This is used for an
722 /// item that was already processed and should be ignored by whatever
723 /// stage is now looking at the ZBI. An earlier stage already "consumed"
724 /// this information, but avoided copying data around to remove it from
725 /// the ZBI item stream.
726 //
727 // 'SKIP'
728 Discard = ZBI_TYPE_DISCARD,
729
730 /// A virtual disk image. This is meant to be treated as if it were a
731 /// storage device. The payload (after decompression) is the contents of
732 /// the storage device, in whatever format that might be.
733 //
734 // 'RDSK'
735 StorageRamdisk = ZBI_TYPE_STORAGE_RAMDISK,
736
737 /// The /boot filesystem in BOOTFS format, specified in
738 /// <lib/zbi-format/internal/bootfs.h>. This represents an internal
739 /// contract between Zircon userboot (//docs/userboot.md), which handles
740 /// the contents of this filesystem, and platform tooling, which prepares
741 /// them.
742 //
743 // 'BFSB'
744 StorageBootFs = ZBI_TYPE_STORAGE_BOOTFS,
745
746 /// Storage used by the kernel (such as a compressed image containing the
747 /// actual kernel). The meaning and format of the data is specific to the
748 /// kernel, though it always uses the standard (private) storage
749 /// compression protocol. Each particular `ZbiType::Kernel{ARCH}` item image and its
750 /// `StorageKernel` item image are intimately tied and one cannot work
751 /// without the exact correct corresponding other.
752 //
753 // 'KSTR'
754 StorageKernel = ZBI_TYPE_STORAGE_KERNEL,
755
756 /// Device-specific factory data, stored in BOOTFS format.
757 //
758 // TODO(fxbug.dev/34597): This should not use the "STORAGE" infix.
759 //
760 // 'BFSF'
761 StorageBootFsFactory = ZBI_TYPE_STORAGE_BOOTFS_FACTORY,
762
763 /// A kernel command line fragment, a UTF-8 string that need not be
764 /// NULL-terminated. The kernel's own option parsing accepts only printable
/// ASCII and treats all other characters as equivalent to whitespace. Multiple
766 /// `ZbiType::CmdLine` items can appear. They are treated as if concatenated with
767 /// ' ' between each item, in the order they appear: first items in the bootable
768 /// ZBI containing the kernel; then items in the ZBI synthesized by the boot
769 /// loader. The kernel interprets the [whole command line](../../../../docs/kernel_cmdline.md).
770 //
771 // 'CMDL'
772 CmdLine = ZBI_TYPE_CMDLINE,
773
774 /// The crash log from the previous boot, a UTF-8 string.
775 //
776 // 'BOOM'
777 CrashLog = ZBI_TYPE_CRASHLOG,
778
779 /// Physical memory region that will persist across warm boots. See `zbi_nvram_t`
780 /// for payload description.
781 //
782 // 'NVLL'
783 Nvram = ZBI_TYPE_NVRAM,
784
785 /// Platform ID Information.
786 //
787 // 'PLID'
788 PlatformId = ZBI_TYPE_PLATFORM_ID,
789
790 /// Board-specific information.
791 //
792 // mBSI
793 DrvBoardInfo = ZBI_TYPE_DRV_BOARD_INFO,
794
795 /// CPU configuration. See `zbi_topology_node_t` for a description of the payload.
796 CpuTopology = ZBI_TYPE_CPU_TOPOLOGY,
797
798 /// Device memory configuration. See `zbi_mem_range_t` for a description of the payload.
799 //
800 // 'MEMC'
801 MemConfig = ZBI_TYPE_MEM_CONFIG,
802
803 /// Kernel driver configuration. The `ZbiHeader.extra` field gives a
804 /// ZBI_KERNEL_DRIVER_* type that determines the payload format.
805 /// See <lib/zbi-format/driver-config.h> for details.
806 //
807 // 'KDRV'
808 KernelDriver = ZBI_TYPE_KERNEL_DRIVER,
809
/// ACPI Root Table Pointer, a `u64` physical address.
811 //
812 // 'RSDP'
813 AcpiRsdp = ZBI_TYPE_ACPI_RSDP,
814
/// SMBIOS entry point, a [u64] physical address.
816 //
817 // 'SMBI'
818 Smbios = ZBI_TYPE_SMBIOS,
819
820 /// EFI system table, a [u64] physical address.
821 //
822 // 'EFIS'
823 EfiSystemTable = ZBI_TYPE_EFI_SYSTEM_TABLE,
824
825 /// EFI memory attributes table. An example of this format can be found in UEFI 2.10
826 /// section 4.6.4, but the consumer of this item is responsible for interpreting whatever
827 /// the bootloader supplies (in particular the "version" field may differ as the format
828 /// evolves).
829 //
830 // 'EMAT'
831 EfiMemoryAttributesTable = ZBI_TYPE_EFI_MEMORY_ATTRIBUTES_TABLE,
832
833 /// Framebuffer parameters, a `zbi_swfb_t` entry.
834 //
835 // 'SWFB'
836 FrameBuffer = ZBI_TYPE_FRAMEBUFFER,
837
838 /// The image arguments, data is a trivial text format of one "key=value" per line
839 /// with leading whitespace stripped and "#" comment lines and blank lines ignored.
840 /// It is processed by bootsvc and parsed args are shared to others via Arguments service.
841 /// TODO: the format can be streamlined after the /config/additional_boot_args compat support is
842 /// removed.
843 //
844 // 'IARG'
845 ImageArgs = ZBI_TYPE_IMAGE_ARGS,
846
847 /// A copy of the boot version stored within the sysconfig partition
848 //
849 // 'BVRS'
850 BootVersion = ZBI_TYPE_BOOT_VERSION,
851
852 /// MAC address for Ethernet, Wifi, Bluetooth, etc. `ZbiHeader.extra`
853 /// is a board-specific index to specify which device the MAC address
854 /// applies to. `ZbiHeader.length` gives the size in bytes, which
855 /// varies depending on the type of address appropriate for the device.
856 //
857 // mMAC
858 DrvMacAddress = ZBI_TYPE_DRV_MAC_ADDRESS,
859
860 /// A partition map for a storage device, a `zbi_partition_map_t` header
861 /// followed by one or more `zbi_partition_t` entries. `ZbiHeader.extra`
862 /// is a board-specific index to specify which device this applies to.
863 //
864 // mPRT
865 DrvPartitionMap = ZBI_TYPE_DRV_PARTITION_MAP,
866
867 /// Private information for the board driver.
868 //
869 // mBOR
870 DrvBoardPrivate = ZBI_TYPE_DRV_BOARD_PRIVATE,
871
872 /// Information about reboot
873 // 'HWRB'
874 HwRebootReason = ZBI_TYPE_HW_REBOOT_REASON,
875
876 /// The serial number, an unterminated ASCII string of printable non-whitespace
877 /// characters with length `ZbiHeader.length`.
878 //
879 // 'SRLN'
880 SerialNumber = ZBI_TYPE_SERIAL_NUMBER,
881
882 /// This type specifies a binary file passed in by the bootloader.
883 /// The first byte specifies the length of the filename without a NUL terminator.
884 /// The filename starts on the second byte.
885 /// The file contents are located immediately after the filename.
886 /// ```none
887 /// Layout: | name_len | name | payload
888 /// ^(1 byte) ^(name_len bytes) ^(length of file)
889 /// ```
890 //
891 // 'BTFL'
892 BootloaderFile = ZBI_TYPE_BOOTLOADER_FILE,
893
894 /// The devicetree blob from the legacy boot loader, if any. This is used only
895 /// for diagnostic and development purposes. Zircon kernel and driver
896 /// configuration is entirely driven by specific ZBI items from the boot
897 /// loader. The boot shims for legacy boot loaders pass the raw devicetree
898 /// along for development purposes, but extract information from it to populate
899 /// specific ZBI items such as [`ZbiType::KernelDriver`] et al.
900 DeviceTree = ZBI_TYPE_DEVICETREE,
901
902 /// An arbitrary number of random bytes attested to have high entropy. Any
903 /// number of items of any size can be provided, but no data should be provided
904 /// that is not true entropy of cryptographic quality. This is used to seed
905 /// secure cryptographic pseudo-random number generators.
906 //
907 // 'RAND'
908 SecureEntropy = ZBI_TYPE_SECURE_ENTROPY,
909
910 /// This provides a data dump and associated logging from a boot loader,
911 /// shim, or earlier incarnation that wants its data percolated up by the
912 /// booting Zircon kernel. See `zbi_debugdata_t` for a description of the
913 /// payload.
914 //
915 // 'DBGD'
916 DebugData = ZBI_TYPE_DEBUGDATA,
917 }
918
919 impl ZbiType {
920 /// Checks if [`ZbiType`] is a Kernel type. (E.g. [`ZbiType::KernelX64`])
921 /// ```
922 /// # use zbi::ZbiType;
923 /// assert!(ZbiType::KernelX64.is_kernel());
924 /// ```
pub fn is_kernel(&self) -> bool {
926 ((*self as u32) & ZBI_TYPE_KERNEL_MASK) == ZBI_TYPE_KERNEL_PREFIX
927 }
928
929 /// Checks if [`ZbiType`] is a Driver Metadata type. (E.g. [`ZbiType::DrvBoardInfo`])
930 /// ```
931 /// # use zbi::ZbiType;
932 /// assert!(ZbiType::DrvBoardInfo.is_driver_metadata());
933 /// ```
pub fn is_driver_metadata(&self) -> bool {
935 ((*self as u32) & ZBI_TYPE_DRIVER_METADATA_MASK) == ZBI_TYPE_DRIVER_METADATA_PREFIX
936 }
937 }
938
939 impl From<ZbiType> for u32 {
fn from(val: ZbiType) -> Self {
941 val as u32
942 }
943 }
944
945 impl TryFrom<u32> for ZbiType {
946 type Error = ZbiError;
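/// A minimal sketch: raw `u32` values round-trip for known types, while unknown values yield
/// [`ZbiError::BadType`].
///
/// ```
/// # use core::convert::TryFrom;
/// use zbi::{ZbiError, ZbiType};
///
/// assert_eq!(ZbiType::try_from(u32::from(ZbiType::KernelX64)), Ok(ZbiType::KernelX64));
/// assert_eq!(ZbiType::try_from(0u32), Err(ZbiError::BadType));
/// ```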
fn try_from(val: u32) -> Result<Self, Self::Error> {
948 match val {
949 ZBI_TYPE_KERNEL_X64 => Ok(Self::KernelX64),
950 ZBI_TYPE_KERNEL_ARM64 => Ok(Self::KernelArm64),
951 ZBI_TYPE_KERNEL_RISCV64 => Ok(Self::KernelRiscv64),
952 ZBI_TYPE_CONTAINER => Ok(Self::Container),
953 ZBI_TYPE_DISCARD => Ok(Self::Discard),
954 ZBI_TYPE_STORAGE_RAMDISK => Ok(Self::StorageRamdisk),
955 ZBI_TYPE_STORAGE_BOOTFS => Ok(Self::StorageBootFs),
956 ZBI_TYPE_STORAGE_KERNEL => Ok(Self::StorageKernel),
957 ZBI_TYPE_STORAGE_BOOTFS_FACTORY => Ok(Self::StorageBootFsFactory),
958 ZBI_TYPE_CMDLINE => Ok(Self::CmdLine),
959 ZBI_TYPE_CRASHLOG => Ok(Self::CrashLog),
960 ZBI_TYPE_NVRAM => Ok(Self::Nvram),
961 ZBI_TYPE_PLATFORM_ID => Ok(Self::PlatformId),
962 ZBI_TYPE_DRV_BOARD_INFO => Ok(Self::DrvBoardInfo),
963 ZBI_TYPE_CPU_TOPOLOGY => Ok(Self::CpuTopology),
964 ZBI_TYPE_MEM_CONFIG => Ok(Self::MemConfig),
965 ZBI_TYPE_KERNEL_DRIVER => Ok(Self::KernelDriver),
966 ZBI_TYPE_ACPI_RSDP => Ok(Self::AcpiRsdp),
967 ZBI_TYPE_SMBIOS => Ok(Self::Smbios),
968 ZBI_TYPE_EFI_SYSTEM_TABLE => Ok(Self::EfiSystemTable),
969 ZBI_TYPE_EFI_MEMORY_ATTRIBUTES_TABLE => Ok(Self::EfiMemoryAttributesTable),
970 ZBI_TYPE_FRAMEBUFFER => Ok(Self::FrameBuffer),
971 ZBI_TYPE_IMAGE_ARGS => Ok(Self::ImageArgs),
972 ZBI_TYPE_BOOT_VERSION => Ok(Self::BootVersion),
973 ZBI_TYPE_DRV_MAC_ADDRESS => Ok(Self::DrvMacAddress),
974 ZBI_TYPE_DRV_PARTITION_MAP => Ok(Self::DrvPartitionMap),
975 ZBI_TYPE_DRV_BOARD_PRIVATE => Ok(Self::DrvBoardPrivate),
976 ZBI_TYPE_HW_REBOOT_REASON => Ok(Self::HwRebootReason),
977 ZBI_TYPE_SERIAL_NUMBER => Ok(Self::SerialNumber),
978 ZBI_TYPE_BOOTLOADER_FILE => Ok(Self::BootloaderFile),
979 ZBI_TYPE_DEVICETREE => Ok(Self::DeviceTree),
980 ZBI_TYPE_SECURE_ENTROPY => Ok(Self::SecureEntropy),
981 ZBI_TYPE_DEBUGDATA => Ok(Self::DebugData),
982 _ => Err(ZbiError::BadType),
983 }
984 }
985 }
986
987 bitflags! {
988 /// Flags associated with an item.
989 ///
990 /// A valid flags value must always include [`ZbiFlags::VERSION`].
991 /// Values should also contain [`ZbiFlags::CRC32`] for any item
992 /// where it's feasible to compute the [`ZbiFlags::CRC32`] at build time.
993 /// Other flags are specific to each type.
994 ///
995 /// Matches C-reference `zbi_flags_t` which is `uint32_t`.
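///
/// A minimal sketch of composing flags (the default already includes `VERSION`):
///
/// ```
/// use zbi::ZbiFlags;
///
/// let flags = ZbiFlags::default() | ZbiFlags::CRC32;
/// assert!(flags.contains(ZbiFlags::VERSION));
/// assert!(flags.contains(ZbiFlags::CRC32));
/// ```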
996 pub struct ZbiFlags: u32 {
997 /// This flag is always required.
998 const VERSION = ZBI_FLAGS_VERSION;
999 /// ZBI items with the `CRC32` flag must have a valid `crc32`.
1000 /// Otherwise their `crc32` field must contain `ZBI_ITEM_NO_CRC32`
1001 const CRC32 = ZBI_FLAGS_CRC32;
1002 }
1003 }
1004
/// A valid flags value must always include [`ZbiFlags::VERSION`].
1006 impl Default for ZbiFlags {
fn default() -> ZbiFlags {
1008 ZbiFlags::VERSION
1009 }
1010 }
1011
1012 /// Rust type generated from C-reference structure `zbi_header_t`.
1013 ///
/// It must correspond to the following definition:
1015 /// ```c++
1016 /// typedef struct {
1017 /// // ZBI_TYPE_* constant.
1018 /// zbi_type_t type;
1019 ///
1020 /// // Size of the payload immediately following this header. This
1021 /// // does not include the header itself nor any alignment padding
1022 /// // after the payload.
1023 /// uint32_t length;
1024 ///
1025 /// // Type-specific extra data. Each type specifies the use of this
1026 /// // field. When not explicitly specified, it should be zero.
1027 /// uint32_t extra;
1028 ///
1029 /// // Flags for this item.
1030 /// zbi_flags_t flags;
1031 ///
1032 /// // For future expansion. Set to 0.
1033 /// uint32_t reserved0;
1034 /// uint32_t reserved1;
1035 ///
1036 /// // Must be ZBI_ITEM_MAGIC.
1037 /// uint32_t magic;
1038 ///
1039 /// // Must be the CRC32 of payload if ZBI_FLAGS_CRC32 is set,
1040 /// // otherwise must be ZBI_ITEM_NO_CRC32.
1041 /// uint32_t crc32;
1042 /// } zbi_header_t;
1043 /// ```
1044 pub type ZbiHeader = zbi_header_t;
1045
1046 impl ZbiHeader {
1047 /// Helper function to get `ZbiHeader.flags: u32` as `ZbiFlags`.
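///
/// A minimal sketch (relies on `ZbiHeader` implementing `Default`, as the crate's own tests do):
///
/// ```
/// use zbi::{ZbiFlags, ZbiHeader};
///
/// let mut header = ZbiHeader::default();
/// header.set_flags(&ZbiFlags::default());
/// assert!(header.get_flags().contains(ZbiFlags::VERSION));
/// ```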
pub fn get_flags(&self) -> ZbiFlags {
1049 ZbiFlags::from_bits_truncate(self.flags)
1050 }
1051 /// Helper function to set `ZbiHeader.flags: u32` from `ZbiFlags`.
pub fn set_flags(&mut self, flags: &ZbiFlags) {
1053 self.flags = flags.bits();
1054 }
1055 }
1056
1057 /// The kernel image.
1058 ///
1059 /// In a bootable ZBI this item must always be first,
1060 /// immediately after the [`ZbiType::Container`] header. The contiguous memory
1061 /// image of the kernel is formed from the [`ZbiType::Container`] header, the
1062 /// `ZbiType::Kernel{ARCH}` header, and the payload.
1063 ///
1064 /// The boot loader loads the whole image starting with the container header
1065 /// through to the end of the kernel item's payload into contiguous physical
1066 /// memory. It then constructs a partial ZBI elsewhere in memory, which has
1067 /// a [`ZbiType::Container`] header of its own followed by all the other items
1068 /// that were in the booted ZBI plus other items synthesized by the boot
1069 /// loader to describe the machine. This partial ZBI must be placed at an
1070 /// address (where the container header is found) that is aligned to the
1071 /// machine's page size. The precise protocol for transferring control to
1072 /// the kernel's entry point varies by machine.
1073 ///
1074 /// On all machines, the kernel requires some amount of scratch memory to be
1075 /// available immediately after the kernel image at boot. It needs this
1076 /// space for early setup work before it has a chance to read any memory-map
1077 /// information from the boot loader. The `reserve_memory_size` field tells
1078 /// the boot loader how much space after the kernel's load image it must
1079 /// leave available for the kernel's use. The boot loader must place its
1080 /// constructed ZBI or other reserved areas at least this many bytes after
1081 /// the kernel image.
1082 ///
1083 /// # x86-64
1084 ///
1085 /// The kernel assumes it was loaded at a fixed physical address of
1086 /// 0x100000 (1MB). `ZbiKernel.entry` is the absolute physical address
1087 /// of the PC location where the kernel will start.
1088 /// TODO(fxbug.dev/24762): Perhaps this will change??
1089 /// The processor is in 64-bit mode with direct virtual to physical
1090 /// mapping covering the physical memory where the kernel and
1091 /// bootloader-constructed ZBI were loaded.
1092 /// The %rsi register holds the physical address of the
1093 /// bootloader-constructed ZBI.
1094 /// All other registers are unspecified.
1095 ///
1096 /// # ARM64
1097 ///
1098 /// `ZbiKernel.entry` is an offset from the beginning of the image
1099 /// (i.e., the [`ZbiType::Container`] header before the [`ZbiType::KernelArm64`]
1100 /// header) to the PC location in the image where the kernel will
1101 /// start. The processor is in physical address mode at EL1 or
1102 /// above. The kernel image and the bootloader-constructed ZBI each
1103 /// can be loaded anywhere in physical memory. The x0 register
1104 /// holds the physical address of the bootloader-constructed ZBI.
1105 /// All other registers are unspecified.
1106 ///
1107 /// # RISCV64
1108 ///
1109 /// `ZbiKernel.entry` is an offset from the beginning of the image (i.e.,
1110 /// the [`ZbiType::Container`] header before the [`ZbiType::KernelRiscv64`] header)
1111 /// to the PC location in the image where the kernel will start. The
1112 /// processor is in S mode, satp is zero, sstatus.SIE is zero. The kernel
1113 /// image and the bootloader-constructed ZBI each can be loaded anywhere in
1114 /// physical memory, aligned to 4KiB. The a0 register holds the HART ID,
1115 /// and the a1 register holds the 4KiB-aligned physical address of the
1116 /// bootloader-constructed ZBI. All other registers are unspecified.
1117 ///
1118 /// # C-reference type
1119 /// ```c
1120 /// typedef struct {
1121 /// // Entry-point address. The interpretation of this differs by machine.
1122 /// uint64_t entry;
1123 ///
1124 /// // Minimum amount (in bytes) of scratch memory that the kernel requires
1125 /// // immediately after its load image.
1126 /// uint64_t reserve_memory_size;
1127 /// } zbi_kernel_t;
1128 /// ```
1129 pub type ZbiKernel = zbi_kernel_t;
1130
1131 #[derive(Debug, PartialEq)]
/// Error values that can be returned by functions in this library.
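///
/// Errors implement [`Display`](core::fmt::Display), e.g. for logging in `no_std` environments:
///
/// ```
/// use zbi::ZbiError;
///
/// assert_eq!(format!("{}", ZbiError::BadMagic), "Bad magic");
/// ```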
1133 pub enum ZbiError {
1134 /// Generic error
1135 Error,
1136 /// Bad type
1137 BadType,
1138 /// Bad magic
1139 BadMagic,
1140 /// Bad version
1141 BadVersion,
1142 /// Bad CRC
1143 BadCrc,
1144 /// Bad Alignment
1145 BadAlignment,
/// Truncated data
1147 Truncated,
1148 /// Too big
1149 TooBig,
1150 /// Incomplete Kernel
1151 IncompleteKernel,
1152 /// Bad ZBI length for this platform
1153 PlatformBadLength,
1154 /// CRC32 is not supported yet
1155 Crc32NotSupported,
1156 /// Length type overflow
1157 LengthOverflow,
1158 }
1159
// Unfortunately `thiserror` is not available in the `no_std` world, so a manual `Display`
// implementation is required.
1162 impl Display for ZbiError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
1164 let str = match self {
1165 ZbiError::Error => "Generic error",
1166 ZbiError::BadType => "Bad type",
1167 ZbiError::BadMagic => "Bad magic",
1168 ZbiError::BadVersion => "Bad version",
1169 ZbiError::BadCrc => "Bad CRC",
1170 ZbiError::BadAlignment => "Bad Alignment",
ZbiError::Truncated => "Truncated data",
1172 ZbiError::TooBig => "Too big",
1173 ZbiError::IncompleteKernel => "Incomplete Kernel",
1174 ZbiError::PlatformBadLength => "Bad ZBI length for this platform",
1175 ZbiError::Crc32NotSupported => "CRC32 is not supported yet",
1176 ZbiError::LengthOverflow => "Length type overflow",
1177 };
1178 write!(f, "{str}")
1179 }
1180 }
1181
// Returns the offset of the first buffer element that is aligned to `ZBI_ALIGNMENT`.
fn get_align_buffer_offset(buffer: impl ByteSlice) -> ZbiResult<usize> {
1184 let addr = buffer.as_ptr() as usize;
1185 match addr % ZBI_ALIGNMENT_USIZE {
1186 0 => Ok(0),
1187 rem => {
1188 let tail_offset = ZBI_ALIGNMENT_USIZE - rem;
1189 if tail_offset > buffer.len() {
1190 return Err(ZbiError::TooBig);
1191 }
1192 Ok(tail_offset)
1193 }
1194 }
1195 }
1196
// Checks if the buffer is aligned to `ZBI_ALIGNMENT_USIZE`.
fn is_zbi_aligned(buffer: &impl ByteSlice) -> ZbiResult<()> {
1199 match (buffer.as_ptr() as usize) % ZBI_ALIGNMENT_USIZE {
1200 0 => Ok(()),
1201 _ => Err(ZbiError::BadAlignment),
1202 }
1203 }
1204
1205 #[cfg(test)]
1206 mod tests {
1207 use super::*;
1208
1209 #[derive(Debug, PartialEq, Default)]
1210 struct TestZbiBuilder<'a> {
1211 buffer: &'a mut [u8],
1212 tail_offset: usize,
1213 }
1214 impl<'a> TestZbiBuilder<'a> {
pub fn new(buffer: &'a mut [u8]) -> TestZbiBuilder<'a> {
1216 TestZbiBuilder { buffer, tail_offset: 0 }
1217 }
pub fn add<T: AsBytes>(mut self, t: T) -> Self {
1219 t.write_to_prefix(&mut self.buffer[self.tail_offset..]).unwrap();
1220 self.tail_offset += size_of::<T>();
1221 self
1222 }
pub fn add_slice(mut self, buf: &'a [u8]) -> Self {
1224 self.buffer[self.tail_offset..self.tail_offset + buf.len()].copy_from_slice(buf);
1225 self.tail_offset += buf.len();
1226 self
1227 }
pub fn get_header_default() -> ZbiHeader {
1229 ZbiHeader {
1230 type_: ZbiType::KernelX64 as u32,
1231 length: 0,
1232 extra: ZBI_ITEM_MAGIC,
1233 flags: ZbiFlags::default().bits(),
1234 magic: ZBI_ITEM_MAGIC,
1235 crc32: ZBI_ITEM_NO_CRC32,
1236 ..Default::default()
1237 }
1238 }
pub fn item_default(self, payload: &'a [u8]) -> Self {
1240 self.item(
1241 ZbiHeader {
1242 length: payload.len().try_into().unwrap(),
1243 ..Self::get_header_default()
1244 },
1245 payload,
1246 )
1247 }
pub fn item(self, header: ZbiHeader, payload: &'a [u8]) -> Self {
1249 self.add(header).add_slice(&payload[..payload.len()])
1250 }
pub fn container_hdr(self, payload_len: usize) -> Self {
1252 self.item(
1253 ZbiHeader {
1254 type_: ZBI_TYPE_CONTAINER,
1255 length: payload_len.try_into().unwrap(),
1256 extra: ZBI_CONTAINER_MAGIC,
1257 flags: ZbiFlags::default().bits(),
1258 magic: ZBI_ITEM_MAGIC,
1259 crc32: ZBI_ITEM_NO_CRC32,
1260 ..Default::default()
1261 },
1262 &[],
1263 )
1264 }
pub fn padding(mut self, val: u8, bytes: usize) -> Self {
1266 self.buffer[self.tail_offset..self.tail_offset + bytes].fill(val);
1267 self.tail_offset += bytes;
1268 self
1269 }
pub fn align(mut self) -> Self {
1271 let rem = self.tail_offset % ZBI_ALIGNMENT_USIZE;
1272 if rem != 0 {
1273 self.tail_offset += ZBI_ALIGNMENT_USIZE - rem;
1274 }
1275 self
1276 }
// Assumes the first item in the buffer is the container header/item.
pub fn update_container_length(self) -> Self {
1279 let payload_length = self.tail_offset - size_of::<ZbiHeader>();
1280 let item = ZbiHeader {
1281 type_: ZBI_TYPE_CONTAINER,
1282 length: payload_length.try_into().unwrap(),
1283 extra: ZBI_CONTAINER_MAGIC,
1284 flags: ZbiFlags::default().bits(),
1285 magic: ZBI_ITEM_MAGIC,
1286 crc32: ZBI_ITEM_NO_CRC32,
1287 ..Default::default()
1288 };
1289 item.write_to_prefix(&mut self.buffer[..]).unwrap();
1290 self
1291 }
pub fn build(self) -> &'a mut [u8] {
1293 &mut self.buffer[..self.tail_offset]
1294 }
1295 }
1296
1297 const ZBI_HEADER_SIZE: usize = core::mem::size_of::<ZbiHeader>();
1298 const ALIGNED_8_SIZE: usize = ZBI_HEADER_SIZE * 20;
1299 #[repr(align(8))]
1300 struct ZbiAligned([u8; ALIGNED_8_SIZE]);
1301 impl Default for ZbiAligned {
fn default() -> Self {
1303 ZbiAligned(core::array::from_fn::<_, ALIGNED_8_SIZE, _>(|_| 0u8))
1304 }
1305 }
1306
1307 #[test]
fn zbi_test_align_overflow() {
1309 assert!(usize::MAX > ZBI_ALIGNMENT.try_into().unwrap());
1310 assert_eq!(u32::try_from(ZBI_ALIGNMENT_USIZE).unwrap(), ZBI_ALIGNMENT);
1311 }
1312
1313 #[test]
fn zbi_test_item_new() {
1315 let mut buffer = ZbiAligned::default();
1316 let expect = get_test_zbi_headers(1)[0];
1317
1318 let (item, _) = ZbiItem::new(
1319 &mut buffer.0[..],
1320 expect.type_.try_into().unwrap(),
1321 expect.extra,
1322 expect.get_flags(),
1323 expect.length.try_into().unwrap(),
1324 )
1325 .unwrap();
1326
1327 assert_eq!(*item.header, expect);
1328 assert_eq!(item.payload.len(), expect.length.try_into().unwrap());
1329
1330 let u32_array =
1331 Ref::<&[u8], [u32]>::new_slice_from_prefix(&buffer.0[..ZBI_HEADER_SIZE], 8).unwrap().0;
1332 assert_eq!(u32_array[0], expect.type_);
1333 assert_eq!(u32_array[1], expect.length);
1334 assert_eq!(u32_array[2], expect.extra);
1335 assert_eq!(u32_array[3], expect.flags);
1336 // u32_array[4..5] - reserved
1337 assert_eq!(u32_array[6], expect.magic);
1338 assert_eq!(u32_array[7], expect.crc32);
1339 }
1340
1341 #[test]
fn zbi_test_item_new_too_small() {
1343 let mut buffer = ZbiAligned::default();
1344
1345 assert_eq!(
1346 ZbiItem::new(
1347 &mut buffer.0[..ZBI_HEADER_SIZE - 1],
1348 ZbiType::Container,
1349 0,
1350 ZbiFlags::default(),
1351 0
1352 ),
1353 Err(ZbiError::TooBig)
1354 );
1355 }
1356
1357 #[test]
fn zbi_test_item_new_not_aligned() {
1359 let mut buffer = ZbiAligned::default();
1360 for offset in [1, 2, 4] {
1361 assert_eq!(
1362 ZbiItem::new(
1363 &mut buffer.0[offset..ZBI_HEADER_SIZE + offset],
1364 ZbiType::Container,
1365 0,
1366 ZbiFlags::default(),
1367 0
1368 ),
1369 Err(ZbiError::BadAlignment)
1370 );
1371 }
1372 }
1373
1374 #[test]
fn zbi_test_item_parse() {
1376 let mut buffer = ZbiAligned::default();
1377 let buffer = TestZbiBuilder::new(&mut buffer.0[..]).container_hdr(0).build();
1378 let buffer_hdr_extra_expected =
1379 Ref::<&[u8], [u32]>::new_slice_from_prefix(&buffer[8..12], 1).unwrap().0[0];
1380
1381 let (zbi_item, _tail) = ZbiItem::parse(buffer).unwrap();
1382
1383 assert_eq!(zbi_item.header.extra, buffer_hdr_extra_expected);
1384 }
1385
1386 #[test]
fn zbi_test_item_edit() {
1388 let mut buffer = ZbiAligned::default();
1389 let buffer_build = TestZbiBuilder::new(&mut buffer.0[..]).container_hdr(0).build();
1390 let buffer_hdr_type =
1391 Ref::<&[u8], [u32]>::new_slice_from_prefix(&buffer_build[0..4], 1).unwrap().0[0];
1392 assert_eq!(buffer_hdr_type, ZBI_TYPE_CONTAINER);
1393
1394 let (mut zbi_item, _tail) = ZbiItem::parse(&mut buffer_build[..]).unwrap();
1395 zbi_item.header.type_ = ZBI_TYPE_KERNEL_X64;
1396 let buffer_hdr_type =
1397 Ref::<&[u8], [u32]>::new_slice_from_prefix(&buffer_build[0..4], 1).unwrap().0[0];
1398 assert_eq!(buffer_hdr_type, ZBI_TYPE_KERNEL_X64);
1399 }
1400
1401 #[test]
fn zbi_test_container_new() {
1403 let mut buffer = ZbiAligned::default();
1404 let _container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1405 let expect_hdr = ZbiHeader {
1406 type_: ZBI_TYPE_CONTAINER,
1407 length: 0,
1408 extra: ZBI_CONTAINER_MAGIC,
1409 flags: ZbiFlags::default().bits(),
1410 magic: ZBI_ITEM_MAGIC,
1411 crc32: ZBI_ITEM_NO_CRC32,
1412 ..Default::default()
1413 };
1414
1415 let (item, _) = ZbiItem::parse(&buffer.0[..]).unwrap();
1416 assert_eq!(*item.header, expect_hdr);
1417 assert_eq!(item.payload.len(), 0);
1418 }
1419
1420 #[test]
fn zbi_test_container_new_too_small() {
1422 let mut buffer = ZbiAligned::default();
1423 assert_eq!(ZbiContainer::new(&mut buffer.0[..ZBI_HEADER_SIZE - 1]), Err(ZbiError::TooBig));
1424 }
1425
1426 #[test]
fn zbi_test_container_new_unaligned() {
1428 let mut buffer = ZbiAligned::default();
1429 for offset in [1, 2, 3, 4, 5, 6, 7] {
1430 assert_eq!(
1431 ZbiContainer::new(&mut buffer.0[offset..ZBI_HEADER_SIZE + offset]),
1432 Err(ZbiError::BadAlignment)
1433 );
1434 }
1435 }
1436
1437 #[test]
fn zbi_test_container_parse_empty() {
1439 let mut buffer = ZbiAligned::default();
1440 let _container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1441 let expect_hdr = ZbiHeader {
1442 type_: ZBI_TYPE_CONTAINER,
1443 length: 0,
1444 extra: ZBI_CONTAINER_MAGIC,
1445 flags: ZbiFlags::default().bits(),
1446 magic: ZBI_ITEM_MAGIC,
1447 crc32: ZBI_ITEM_NO_CRC32,
1448 ..Default::default()
1449 };
1450
1451 let ZbiContainer { header, buffer: _, payload_length } =
1452 ZbiContainer::parse(&buffer.0[..]).unwrap();
1453 assert_eq!(*header, expect_hdr);
1454 assert_eq!(payload_length, 0);
1455 }
1456
1457 #[test]
fn zbi_test_container_parse_bad_type() {
1459 let mut buffer = ZbiAligned::default();
1460 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1461 .item(
1462 ZbiHeader {
1463 type_: 0,
1464 length: 0,
1465 extra: ZBI_CONTAINER_MAGIC,
1466 flags: ZbiFlags::default().bits(),
1467 magic: ZBI_ITEM_MAGIC,
1468 crc32: ZBI_ITEM_NO_CRC32,
1469 ..Default::default()
1470 },
1471 &[],
1472 )
1473 .build();
1474
1475 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadType))
1476 }
1477
1478 #[test]
fn zbi_test_container_parse_bad_magic() {
1480 let mut buffer = ZbiAligned::default();
1481 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1482 .item(
1483 ZbiHeader {
1484 type_: ZBI_TYPE_CONTAINER,
1485 length: 0,
1486 extra: ZBI_CONTAINER_MAGIC,
1487 flags: ZbiFlags::default().bits(),
1488 magic: 0,
1489 crc32: ZBI_ITEM_NO_CRC32,
1490 ..Default::default()
1491 },
1492 &[],
1493 )
1494 .build();
1495
1496 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadMagic))
1497 }
1498
1499 #[test]
fn zbi_test_container_parse_bad_version() {
1501 let mut buffer = ZbiAligned::default();
1502 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1503 .item(
1504 ZbiHeader {
1505 type_: ZBI_TYPE_CONTAINER,
1506 length: 0,
1507 extra: ZBI_CONTAINER_MAGIC,
1508 flags: (ZbiFlags::default() & !ZbiFlags::VERSION).bits(),
1509 magic: ZBI_ITEM_MAGIC,
1510 crc32: ZBI_ITEM_NO_CRC32,
1511 ..Default::default()
1512 },
1513 &[],
1514 )
1515 .build();
1516
1517 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadVersion))
1518 }
1519
1520 #[test]
1521 fn zbi_test_container_parse_bad_crc32() {
1522 let mut buffer = ZbiAligned::default();
1523 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1524 .item(
1525 ZbiHeader {
1526 type_: ZBI_TYPE_CONTAINER,
1527 length: 0,
1528 extra: ZBI_CONTAINER_MAGIC,
1529 flags: (ZbiFlags::default() & !ZbiFlags::CRC32).bits(),
1530 magic: ZBI_ITEM_MAGIC,
1531 crc32: 0,
1532 ..Default::default()
1533 },
1534 &[],
1535 )
1536 .build();
1537
1538 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadCrc))
1539 }
1540
1541 #[test]
1542 fn zbi_test_container_parse_entries_bad_magic() {
1543 let mut buffer = ZbiAligned::default();
1544 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1545 .item(
1546 ZbiHeader {
1547 type_: ZBI_TYPE_CONTAINER,
1548 length: 0,
1549 extra: ZBI_CONTAINER_MAGIC,
1550 flags: (ZbiFlags::default() & !ZbiFlags::CRC32).bits(),
1551 magic: ZBI_ITEM_MAGIC,
1552 crc32: 0,
1553 ..Default::default()
1554 },
1555 &[],
1556 )
1557 .build();
1558
1559 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadCrc))
1560 }
1561
1562 #[test]
1563 fn zbi_test_container_parse() {
1564 let expected_payloads: [&[u8]; 9] = [
1565 &[1],
1566 &[1, 2],
1567 &[1, 2, 3],
1568 &[1, 2, 3, 4],
1569 &[1, 2, 3, 4, 5],
1570 &[1, 2, 3, 4, 5, 6],
1571 &[1, 2, 3, 4, 5, 6, 7],
1572 &[1, 2, 3, 4, 5, 6, 7, 8],
1573 &[1, 2, 3, 4, 5, 6, 7, 8, 9],
1574 ];
1575 let expected_items = expected_payloads.map(|x| {
1576 (
1577 ZbiHeader {
1578 length: x.len().try_into().unwrap(),
1579 ..TestZbiBuilder::get_header_default()
1580 },
1581 x,
1582 )
1583 });
1584 let mut buffer = ZbiAligned::default();
1585 let mut builder = TestZbiBuilder::new(&mut buffer.0[..]).container_hdr(0);
1586 for payload in expected_payloads {
1587 builder = builder.align().item_default(payload).align();
1588 }
1589 let buffer = builder.update_container_length().build();
1590
1591 let zbi_container = ZbiContainer::parse(&*buffer).unwrap();
1592
1593 let mut it = zbi_container.iter();
1594 for (expected_hdr, expected_payload) in expected_items.iter() {
1595 let Some(item) = it.next() else { panic!("expecting iterator with value") };
1596 assert_eq!(item.header.into_ref(), expected_hdr);
1597 assert_eq!(&item.payload[..], *expected_payload);
1598 }
1599 assert!(it.next().is_none());
1600 }
1601
1602 #[test]
1603 fn zbi_test_container_parse_unaligned() {
1604 let buffer = ZbiAligned::default();
1605 for offset in [1, 2, 3, 4, 5, 6, 7] {
1606 assert_eq!(ZbiContainer::parse(&buffer.0[offset..]), Err(ZbiError::BadAlignment));
1607 }
1608 }
1609
1610 #[test]
1611 fn zbi_test_container_parse_without_last_padding_fail_truncated() {
1612 let mut buffer = ZbiAligned::default();
1613 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1614 .container_hdr(0)
1615 .align()
1616 .item_default(&[1])
1617 .align()
1618 .item_default(&[1, 2])
1619 .update_container_length()
1620 .build();
1621
1622 assert_eq!(ZbiContainer::parse(&*buffer), Err(ZbiError::Truncated));
1623 }
1624
1625 #[test]
1626 fn zbi_test_container_parse_error_payload_truncated() {
1627 let mut buffer = ZbiAligned::default();
1628 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1629 .container_hdr(0)
1630 .add_slice(&[1])
1631 .update_container_length()
1632 .build();
1633
1634 assert_eq!(ZbiContainer::parse(&buffer[..buffer.len() - 1]), Err(ZbiError::Truncated));
1635 }
1636
1637 #[test]
1638 fn zbi_test_container_parse_error_truncated() {
1639 let mut buffer = ZbiAligned::default();
1640 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1641 .container_hdr(0)
1642 .padding(0, 1)
1643 .update_container_length()
1644 .build();
1645
1646 assert_eq!(ZbiContainer::parse(&buffer[..buffer.len() - 1]), Err(ZbiError::Truncated));
1647 }
1648
1649 #[test]
1650 fn zbi_test_container_parse_bad_first_entry_marked() {
1651 let mut buffer = get_test_creference_buffer();
1652 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
1653
1654 container
1655 .iter_mut()
1656 .filter(|e| {
1657 [ZbiType::CmdLine as u32, ZbiType::StorageRamdisk as u32].contains(&e.header.type_)
1658 })
1659 .for_each(|mut e| e.header.magic = 0);
1660
1661 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadMagic));
1662 }
1663
1664 #[test]
1665 fn zbi_test_container_parse_bad_entry_magic() {
1666 let mut buffer = get_test_creference_buffer();
1667 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
1668
1669 container
1670 .iter_mut()
1671 .filter(|e| ZbiType::CmdLine as u32 == e.header.type_)
1672 .for_each(|mut e| e.header.magic = 0);
1673
1674 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadMagic));
1675 }
1676
1677 #[test]
1678 fn zbi_test_container_parse_bad_entry_version() {
1679 let mut buffer = get_test_creference_buffer();
1680 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
1681
1682 container
1683 .iter_mut()
1684 .filter(|e| ZbiType::CmdLine as u32 == e.header.type_)
1685 .for_each(|mut e| e.header.flags &= (!ZbiFlags::VERSION).bits());
1686
1687 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadVersion));
1688 }
1689
1690 #[test]
1691 fn zbi_test_container_parse_bad_entry_crc() {
1692 let mut buffer = get_test_creference_buffer();
1693 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
1694
1695 container.iter_mut().filter(|e| ZbiType::CmdLine as u32 == e.header.type_).for_each(
1696 |mut e| {
1697 e.header.flags &= (!ZbiFlags::CRC32).bits();
1698 e.header.crc32 = 0;
1699 },
1700 );
1701
1702 assert_eq!(ZbiContainer::parse(&buffer.0[..]), Err(ZbiError::BadCrc));
1703 }
1704
1705 #[test]
1706 fn zbi_test_container_new_entry() {
1707 let mut buffer = ZbiAligned::default();
1708 let new_entries = get_test_entries_all();
1709
1710 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
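// Write each payload directly into the space returned by `get_next_payload()` before creating
// the matching entry header; this exercises filling the payload in place instead of copying it
// in via `create_entry_with_payload`.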
1711 for (e, payload) in &new_entries {
1712 container.get_next_payload().unwrap()[..payload.len()].copy_from_slice(payload);
1713 container
1714 .create_entry(e.type_.try_into().unwrap(), e.extra, e.get_flags(), payload.len())
1715 .unwrap();
1716 }
1717
1718 let container = ZbiContainer::parse(&buffer.0[..]).unwrap();
1719 check_container_made_of(&container, &new_entries);
1720 }
1721
1722 #[test]
1723 fn zbi_test_container_new_entry_crc32_not_supported() {
1724 let mut buffer = ZbiAligned::default();
1725 let (new_entry, payload) = get_test_entry_nonempty_payload();
1726 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1727 assert_eq!(
1728 container.create_entry_with_payload(
1729 new_entry.type_.try_into().unwrap(),
1730 new_entry.extra,
1731 ZbiFlags::default() | ZbiFlags::CRC32,
1732 payload,
1733 ),
1734 Err(ZbiError::Crc32NotSupported)
1735 );
1736 }
1737
1738 #[test]
1739 fn zbi_test_container_new_entry_no_space_left() {
1740 let mut buffer = ZbiAligned::default();
1741 let new_entry = get_test_entry_empty_payload().0;
1742
1743 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1744
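// Fill the container with zero-length entries; each one consumes exactly ZBI_HEADER_SIZE bytes.
// Assuming ALIGNED_8_SIZE is the full test buffer size, the loop below leaves no room for
// another header, so the subsequent `create_entry` call must fail.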
1745 for _ in 1..(ALIGNED_8_SIZE / ZBI_HEADER_SIZE) {
1746 container
1747 .create_entry(
1748 new_entry.type_.try_into().unwrap(),
1749 new_entry.extra,
1750 new_entry.get_flags(),
1751 new_entry.length.try_into().unwrap(),
1752 )
1753 .unwrap();
1754 }
1755
1756 // Now there is not enough space, so creating another entry should fail
1757 assert_eq!(
1758 container.create_entry(
1759 new_entry.type_.try_into().unwrap(),
1760 new_entry.extra,
1761 new_entry.get_flags(),
1762 new_entry.length.try_into().unwrap(),
1763 ),
1764 Err(ZbiError::TooBig)
1765 );
1766 }
1767
1768 #[test]
1769 fn zbi_test_container_new_entry_no_space_for_header() {
1770 let mut buffer = ZbiAligned::default();
1771 let new_entry = get_test_entry_empty_payload().0;
1772
1773 let buf_len = 2 * core::mem::size_of::<ZbiHeader>() - 1;
1774 let mut container = ZbiContainer::new(&mut buffer.0[..buf_len]).unwrap();
1775
1776 // Now there is not enough space for the item header, so it should fail
1777 assert_eq!(
1778 container.create_entry(
1779 new_entry.type_.try_into().unwrap(),
1780 new_entry.extra,
1781 new_entry.get_flags(),
1782 0,
1783 ),
1784 Err(ZbiError::TooBig)
1785 );
1786 }
1787
1788 #[test]
1789 fn zbi_test_container_new_entry_no_space_for_payload() {
1790 let mut buffer = ZbiAligned::default();
1791 let (new_entry, payload) = get_test_entry_nonempty_payload();
1792
1793 let buf_len = 2 * core::mem::size_of::<ZbiHeader>() + payload.len() - 1;
1794 let mut container = ZbiContainer::new(&mut buffer.0[..buf_len]).unwrap();
1795
1796 // Now there is not enough space for the payload, so it should fail
1797 assert_eq!(
1798 container.create_entry(
1799 new_entry.type_.try_into().unwrap(),
1800 new_entry.extra,
1801 new_entry.get_flags(),
1802 new_entry.length.try_into().unwrap(),
1803 ),
1804 Err(ZbiError::TooBig)
1805 );
1806 }
1807
1808 #[test]
1809 fn zbi_test_container_new_entry_with_payload_just_enough_to_fit_no_align() {
1810 let mut buffer = ZbiAligned::default();
1811 let (new_entry, _payload) = get_test_entry_empty_payload();
1812 let payload = [0; ZBI_ALIGNMENT_USIZE];
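// The payload length is exactly ZBI_ALIGNMENT_USIZE, so the alignment term below is zero and
// the buffer holds exactly the container header, one item header, and the payload.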
1813 let buf_len = 2 * core::mem::size_of::<ZbiHeader>()
1814 + payload.len()
1815 + (/* alignment padding */ ZBI_ALIGNMENT_USIZE - payload.len());
1816 let mut container = ZbiContainer::new(&mut buffer.0[..buf_len]).unwrap();
1817 assert_eq!(
1818 container.create_entry(
1819 new_entry.type_.try_into().unwrap(),
1820 new_entry.extra,
1821 new_entry.get_flags(),
1822 payload.len(),
1823 ),
1824 Ok(())
1825 );
1826 }
1827 #[test]
1828 fn zbi_test_container_new_entry_with_payload_just_enough_to_fit_with_alignment() {
1829 let mut buffer = ZbiAligned::default();
1830 let (new_entry, payload) = get_test_entry_nonempty_payload();
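// The test payload is shorter than one alignment unit, so the `ZBI_ALIGNMENT_USIZE - payload.len()`
// term below adds exactly the padding needed to round the payload up to the next 8-byte boundary.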
1831 let buf_len = 2 * core::mem::size_of::<ZbiHeader>()
1832 + payload.len()
1833 + (ZBI_ALIGNMENT_USIZE - payload.len() /* alignment padding */);
1834 let mut container = ZbiContainer::new(&mut buffer.0[..buf_len]).unwrap();
1835 assert_eq!(
1836 container.create_entry(
1837 new_entry.type_.try_into().unwrap(),
1838 new_entry.extra,
1839 new_entry.get_flags(),
1840 new_entry.length.try_into().unwrap(),
1841 ),
1842 Ok(())
1843 );
1844 }
1845
1846 #[test]
1847 fn zbi_test_container_new_entry_payload_too_big() {
1848 let mut buffer = ZbiAligned::default();
1849 let (new_entry, _payload) = get_test_entry_nonempty_payload();
1850 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1851 assert_eq!(
1852 container.create_entry(
1853 new_entry.type_.try_into().unwrap(),
1854 new_entry.extra,
1855 new_entry.get_flags(),
1856 usize::MAX,
1857 ),
1858 Err(ZbiError::TooBig)
1859 );
1860 }
1861
1862 #[test]
1863 fn zbi_test_container_new_entry_no_space_left_unaligned() {
1864 let mut buffer = ZbiAligned::default();
1865 let new_entry = get_test_entry_empty_payload().0;
1866
1867 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1868
1869 for _ in 1..(ALIGNED_8_SIZE / ZBI_HEADER_SIZE) {
1870 container
1871 .create_entry(
1872 new_entry.type_.try_into().unwrap(),
1873 new_entry.extra,
1874 new_entry.get_flags(),
1875 new_entry.length.try_into().unwrap(),
1876 )
1877 .unwrap();
1878 }
1879
1880 // Now there is not enough space, so creating another entry should fail
1881 assert_eq!(
1882 container.create_entry(
1883 new_entry.type_.try_into().unwrap(),
1884 new_entry.extra,
1885 new_entry.get_flags(),
1886 new_entry.length.try_into().unwrap(),
1887 ),
1888 Err(ZbiError::TooBig)
1889 );
1890 }
1891
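// The extend tests below rely on `extend` appending every entry of the source container to the
// destination container while leaving the source unchanged.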
1892 #[test]
1893 fn zbi_test_container_extend_new() {
1894 let mut buffer = ZbiAligned::default();
1895 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1896 .container_hdr(0)
1897 .align()
1898 .item_default(&[1])
1899 .align()
1900 .update_container_length()
1901 .build();
1902 let container_0 = ZbiContainer::parse(buffer).unwrap();
1903 let mut buffer = ZbiAligned::default();
1904 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1905 .container_hdr(0)
1906 .align()
1907 .item_default(&[1, 2])
1908 .align()
1909 .update_container_length()
1910 .build();
1911 let container_1 = ZbiContainer::parse(buffer).unwrap();
1912
1913 let mut buffer = ZbiAligned::default();
1914 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
1915 container.extend(&container_0).unwrap();
1916 container.extend(&container_1).unwrap();
1917
1918 let container_check = ZbiContainer::parse(&buffer.0[..]).unwrap();
1919 assert_eq!(container_check.iter().count(), 2);
1920 assert_eq!(container_0.iter().count(), 1);
1921 assert_eq!(container_1.iter().count(), 1);
1922 let mut it = container_check.iter();
1923 assert_eq!(it.next().unwrap(), container_0.iter().next().unwrap());
1924 assert_eq!(it.next().unwrap(), container_1.iter().next().unwrap());
1925 assert!(it.next().is_none());
1926 }
1927
1928 #[test]
1929 fn zbi_test_container_extend_with_empty() {
1930 let mut buffer = ZbiAligned::default();
1931 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1932 .container_hdr(0)
1933 .align()
1934 .item_default(&[1])
1935 .align()
1936 .update_container_length()
1937 .build();
1938 let mut container_0 = ZbiContainer::parse(&mut buffer[..]).unwrap();
1939 let mut buffer = ZbiAligned::default();
1940 let buffer = TestZbiBuilder::new(&mut buffer.0[..]).container_hdr(0).build();
1941 let container_1 = ZbiContainer::parse(&mut buffer[..]).unwrap();
1942
1943 assert_eq!(container_0.iter().count(), 1);
1944 container_0.extend(&container_1).unwrap();
1945 assert_eq!(container_0.iter().count(), 1);
1946 }
1947
1948 #[test]
1949 fn zbi_test_container_extend_full() {
1950 let mut buffer = ZbiAligned::default();
1951 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1952 .container_hdr(0)
1953 .align()
1954 .update_container_length()
1955 .build();
1956 let mut container_full = ZbiContainer::parse(&mut buffer[..]).unwrap();
1957 let mut buffer = ZbiAligned::default();
1958 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1959 .container_hdr(0)
1960 .align()
1961 .item_default(&[1, 2])
1962 .align()
1963 .update_container_length()
1964 .build();
1965 let container = ZbiContainer::parse(buffer).unwrap();
1966
1967 assert_eq!(container_full.extend(&container), Err(ZbiError::TooBig));
1968 }
1969
1970 #[test]
1971 fn zbi_test_container_extend_1_byte_short() {
1972 let mut buffer = ZbiAligned::default();
1973 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1974 .container_hdr(0)
1975 .align()
1976 .update_container_length()
1977 .build();
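// Reparse with a buffer that is one byte short of container header + item header + the 2-byte
// payload padded to ZBI_ALIGNMENT_USIZE, so extending with the one-item container below must fail.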
1978 let mut container_small =
1979 ZbiContainer::parse(&mut buffer.0[..ZBI_HEADER_SIZE * 2 + ZBI_ALIGNMENT_USIZE - 1])
1980 .unwrap();
1981 let mut buffer = ZbiAligned::default();
1982 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
1983 .container_hdr(0)
1984 .align()
1985 .item_default(&[1, 2])
1986 .align()
1987 .update_container_length()
1988 .build();
1989 let container = ZbiContainer::parse(buffer).unwrap();
1990
1991 assert_eq!(container_small.extend(&container), Err(ZbiError::TooBig));
1992 }
1993
1994 #[test]
1995 fn zbi_test_container_extend_use_all_buffer() {
1996 let mut buffer = ZbiAligned::default();
1997 let _ = TestZbiBuilder::new(&mut buffer.0[..])
1998 .container_hdr(0)
1999 .align()
2000 .update_container_length()
2001 .build();
2002 let mut container_full = ZbiContainer::parse(
2003 &mut buffer.0[..ZBI_HEADER_SIZE + ZBI_HEADER_SIZE + ZBI_ALIGNMENT_USIZE],
2004 )
2005 .unwrap();
2006 let mut buffer = ZbiAligned::default();
2007 let buffer = TestZbiBuilder::new(&mut buffer.0[..])
2008 .container_hdr(0)
2009 .align()
2010 .item_default(&[1, 2])
2011 .align()
2012 .update_container_length()
2013 .build();
2014 let container = ZbiContainer::parse(buffer).unwrap();
2015
2016 assert!(container_full.extend(&container).is_ok());
2017 }
2018
2019 #[test]
2020 fn zbi_test_container_new_entry_with_payload() {
2021 let mut buffer = ZbiAligned::default();
2022 let new_entries = get_test_entries_all();
2023
2024 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2025 for (e, payload) in &new_entries {
2026 container
2027 .create_entry_with_payload(
2028 e.type_.try_into().unwrap(),
2029 e.extra,
2030 e.get_flags(),
2031 payload,
2032 )
2033 .unwrap();
2034 }
2035
2036 let container = ZbiContainer::parse(&buffer.0[..]).unwrap();
2037 check_container_made_of(&container, &new_entries);
2038 }
2039
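// Verifies that `container` holds exactly `expected_items`, in order, and that the recorded
// container payload length matches their combined (aligned) size.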
2040 fn check_container_made_of<B: ByteSlice + PartialEq>(
2041 container: &ZbiContainer<B>,
2042 expected_items: &[(ZbiHeader, &[u8])],
2043 ) {
2044 // Check the container payload length
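// Each expected item contributes ZBI_HEADER_SIZE plus its payload rounded up to ZBI_ALIGNMENT_USIZE;
// e.g. a 5-byte payload adds ZBI_HEADER_SIZE + 8 bytes (5 payload bytes plus 3 bytes of padding).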
2045 assert_eq!(
2046 container.get_payload_length_usize(),
2047 expected_items.len() * ZBI_HEADER_SIZE // add header len
2048 + expected_items // add payloads
2049 .iter()
2050 .map(|(_, payload)| -> usize {
2051 payload.len() +
2052 match payload.len() % ZBI_ALIGNMENT_USIZE {
2053 0 => 0,
2054 rem => ZBI_ALIGNMENT_USIZE - rem,
2055 }
2056 })
2057 .sum::<usize>()
2058 );
2059
2060 // Check if container elements match provided items
2061 let mut it = expected_items.iter();
2062 for b in container.iter() {
2063 let (header, payload) = it.next().unwrap();
2064 assert_eq!(*b.header, *header);
2065 assert_eq!(b.payload.len(), payload.len());
2066 assert!(b.payload.iter().zip(payload.iter()).all(|(a, b)| a == b))
2067 }
2068 }
2069
2070 #[test]
2071 fn zbi_test_container_get_next_payload() {
2072 let mut buffer = ZbiAligned::default();
2073 let new_entries = get_test_entries_all();
2074
2075 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2076
2077 for (e, payload) in &new_entries {
2078 let next_payload: &mut [u8] = container.get_next_payload().unwrap();
2079 next_payload[..payload.len()].copy_from_slice(payload);
2080 container
2081 .create_entry(e.type_.try_into().unwrap(), e.extra, e.get_flags(), payload.len())
2082 .unwrap();
2083 }
2084
2085 let container = ZbiContainer::parse(&buffer.0[..]).unwrap();
2086 check_container_made_of(&container, &new_entries);
2087 }
2088
2089 #[test]
2090 fn zbi_test_container_get_next_payload_length() {
2091 let mut buffer = ZbiAligned::default();
2092 // Expected payload length is the buffer length minus the container header and one item header
2093 let expected_payload_len = buffer.0.len() - 2 * core::mem::size_of::<ZbiHeader>();
2094
2095 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2096 let next_payload: &mut [u8] = container.get_next_payload().unwrap();
2097
2098 assert_eq!(next_payload.len(), expected_payload_len);
2099 }
2100
2101 #[test]
2102 fn zbi_test_container_get_next_payload_only_header_can_fit() {
2103 let mut buffer = ZbiAligned::default();
2104 // Buffer length that fits only the container header and one item header.
2105 let len = 2 * core::mem::size_of::<ZbiHeader>();
2106
2107 let mut container = ZbiContainer::new(&mut buffer.0[..len]).unwrap();
2108 let next_payload: &mut [u8] = container.get_next_payload().unwrap();
2109
2110 assert_eq!(next_payload.len(), 0);
2111 }
2112
2113 #[test]
2114 fn zbi_test_container_get_next_payload_header_cant_fit() {
2115 let mut buffer = ZbiAligned::default();
2116 // Buffer length that fits the container header but not a full item header.
2117 let len = 2 * core::mem::size_of::<ZbiHeader>() - 1;
2118
2119 let mut container = ZbiContainer::new(&mut buffer.0[..len]).unwrap();
2120 assert_eq!(container.get_next_payload(), Err(ZbiError::TooBig));
2121 }
2122
2123 #[test]
2124 fn zbi_test_container_get_next_payload_length_overflow() {
2125 let mut buffer = ZbiAligned::default();
2126 // Buffer length that fits the container header but not a full item header.
2127 let len = 2 * core::mem::size_of::<ZbiHeader>() - 1;
2128
2129 let mut container = ZbiContainer::new(&mut buffer.0[..len]).unwrap();
2130 container.payload_length = usize::MAX; // Pretend that the length is far too big to cause
2131 // an overflow in the following calls
2132 assert_eq!(container.get_next_payload(), Err(ZbiError::LengthOverflow));
2133 }
2134
2135 /* The binary blob used by the container parsing tests was generated from the C implementation
2136 * by running the following test:
2137 * --- a/src/firmware/lib/zbi/test/zbi.cc
2138 * +++ b/src/firmware/lib/zbi/test/zbi.cc
2139 * @@ -926,3 +926,21 @@ TEST(ZbiTests, ZbiTestNoOverflow) {
2140 *
2141 * ASSERT_NE(zbi_extend(dst_buffer, kUsableBufferSize, src_buffer), ZBI_RESULT_OK);
2142 * }
2143 * +
2144 * +TEST(ZbiTests, ZbiTestGenDataForRustTest) {
2145 * + const size_t kExtraBytes = 10;
2146 * + uint8_t* buffer = get_test_zbi_extra(kExtraBytes);
2147 * + // Based on `get_test_zbi_extra()` implementation this is buffer size
2148 * + const size_t kBufferSize = sizeof(test_zbi_t) + kExtraBytes;
2149 * +
2150 * + printf("buffer length = %zu\n", kBufferSize);
2151 * + printf("----BEGIN----\n");
2152 * + for (size_t i = 0; i < kBufferSize; i++) {
2153 * + if (i % 16 == 0) {
2154 * + printf("\n");
2155 * + }
2156 * + printf("%02x", buffer[i]);
2157 * + }
2158 * + printf("\n");
2159 * + printf("-----END-----\n");
2160 * +}
2161 */
2162 #[test]
2163 fn zbi_test_container_parse_c_reference() {
2164 let ref_buffer = get_test_creference_buffer_vec();
2165 let expected_container_hdr = ZbiHeader {
2166 type_: ZBI_TYPE_CONTAINER,
2167 extra: ZBI_CONTAINER_MAGIC,
2168 length: 184,
2169 magic: ZBI_ITEM_MAGIC,
2170 crc32: ZBI_ITEM_NO_CRC32,
2171 flags: ZbiFlags::default().bits(),
2172 ..Default::default()
2173 };
2174 // The reference C implementation test uses C strings for payloads, which is why each string
2175 // needs a trailing '\0'.
2176 let expected_entries = get_test_entries_creference();
2177
2178 let mut buffer = ZbiAligned::default();
2179 buffer.0[..ref_buffer.len()].clone_from_slice(&ref_buffer);
2180
2181 let container = ZbiContainer::parse(&buffer.0[..ref_buffer.len()]).unwrap();
2182 assert_eq!(*container.header, expected_container_hdr);
2183 check_container_made_of(&container, &expected_entries);
2184 }
2185
2186 #[test]
2187 fn zbi_test_container_new_entry_iterate() {
2188 let mut buffer = ZbiAligned::default();
2189 let new_entry = get_test_entry_nonempty_payload();
2190
2191 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2192 let (e, payload) = new_entry;
2193 container
2194 .create_entry_with_payload(e.type_.try_into().unwrap(), e.extra, e.get_flags(), payload)
2195 .unwrap();
2196
2197 assert_eq!(container.iter().count(), 1);
2198 let mut it = container.iter();
2199 let item = it.next().unwrap();
2200 assert_eq!(*item.header, e);
2201 assert_eq!(&item.payload[..], payload);
2202 assert!(it.next().is_none());
2203 }
2204
2205 #[test]
2206 fn zbi_test_container_new_entry_mut_iterate() {
2207 let mut buffer = ZbiAligned::default();
2208 let new_entry = get_test_entry_nonempty_payload();
2209
2210 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2211 let (e, payload) = new_entry;
2212 container
2213 .create_entry_with_payload(e.type_.try_into().unwrap(), e.extra, e.get_flags(), payload)
2214 .unwrap();
2215
2216 {
2217 let mut item = container.iter_mut().next().unwrap();
2218 assert_ne!(item.header.type_, ZbiType::DebugData.into());
2219 item.header.type_ = ZbiType::DebugData.into();
2220 }
2221 {
2222 let item = container.iter().next().unwrap();
2223 assert_eq!(item.header.type_, ZbiType::DebugData.into());
2224 }
2225 }
2226
2227 #[test]
2228 fn zbi_test_container_parse_new_entry_mut_iterate() {
2229 let mut buffer = ZbiAligned::default();
2230 let _ = TestZbiBuilder::new(&mut buffer.0[..])
2231 .container_hdr(0)
2232 .align()
2233 .item_default(&[1, 2])
2234 .align()
2235 .update_container_length()
2236 .build();
2237 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
2238 let new_entry = get_test_entry_nonempty_payload();
2239
2240 let (e, payload) = new_entry;
2241 container
2242 .create_entry_with_payload(e.type_.try_into().unwrap(), e.extra, e.get_flags(), payload)
2243 .unwrap();
2244
2245 assert_eq!(container.iter().count(), 2);
2246 for mut item in container.iter_mut() {
2247 assert_ne!(item.header.type_, ZbiType::DebugData.into());
2248 item.header.type_ = ZbiType::DebugData.into();
2249 }
2250
2251 for item in container.iter() {
2252 assert_eq!(item.header.type_, ZbiType::DebugData.into());
2253 }
2254 }
2255
2256 #[test]
2257 fn zbi_test_container_iterate_empty() {
2258 let mut buffer = ZbiAligned::default();
2259 let _ = TestZbiBuilder::new(&mut buffer.0[..]).container_hdr(0).build();
2260
2261 assert_eq!(ZbiContainer::parse(&buffer.0[..]).unwrap().iter().count(), 0);
2262 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
2263 assert_eq!(container.iter().count(), 0);
2264 assert_eq!(container.iter_mut().count(), 0);
2265 }
2266
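// Helper that compares a `ByteSlice` with a plain `&[u8]` element by element.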
2267 fn byteslice_cmp(byteslice: impl ByteSlice, slice: &[u8]) -> bool {
2268 byteslice.len() == slice.len() && byteslice.iter().zip(slice.iter()).all(|(a, b)| a == b)
2269 }
2270
2271 #[test]
2272 fn zbi_test_container_iterate_ref() {
2273 let mut buffer = get_test_creference_buffer();
2274 let container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
2275
2276 assert_eq!(container.iter().count(), 4);
2277 assert!(container.iter().zip(get_test_entries_creference().iter()).all(
2278 |(it, (entry, payload))| { *it.header == *entry && byteslice_cmp(it.payload, payload) }
2279 ));
2280 }
2281
2282 #[test]
2283 fn zbi_test_container_iterate_modify() {
2284 let mut buffer = ZbiAligned::default();
2285 let _ = TestZbiBuilder::new(&mut buffer.0[..])
2286 .container_hdr(0)
2287 .align()
2288 .item_default(b"A")
2289 .align()
2290 .item_default(b"BB")
2291 .align()
2292 .item_default(b"CCC")
2293 .align()
2294 .update_container_length()
2295 .build();
2296 let mut container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
2297
2298 container.iter_mut().for_each(|mut item| item.payload[0] = b'D');
2299
2300 assert!(container.iter().all(|b| b.payload[0] == b'D'));
2301 }
2302
2303 #[test]
2304 fn zbi_test_bad_type() {
2305 assert_eq!(ZbiType::try_from(0), Err(ZbiError::BadType));
2306 }
2307
2308 fn get_all_zbi_type_values() -> Vec<ZbiType> {
2309 // The strum and enum-iterator crates are not available at the moment, so the values are
2310 // hard-coded here
2311 vec![
2312 ZbiType::KernelX64,
2313 ZbiType::KernelArm64,
2314 ZbiType::KernelRiscv64,
2315 ZbiType::Container,
2316 ZbiType::Discard,
2317 ZbiType::StorageRamdisk,
2318 ZbiType::StorageBootFs,
2319 ZbiType::StorageKernel,
2320 ZbiType::StorageBootFsFactory,
2321 ZbiType::CmdLine,
2322 ZbiType::CrashLog,
2323 ZbiType::Nvram,
2324 ZbiType::PlatformId,
2325 ZbiType::DrvBoardInfo,
2326 ZbiType::CpuTopology,
2327 ZbiType::MemConfig,
2328 ZbiType::KernelDriver,
2329 ZbiType::AcpiRsdp,
2330 ZbiType::Smbios,
2331 ZbiType::EfiSystemTable,
2332 ZbiType::EfiMemoryAttributesTable,
2333 ZbiType::FrameBuffer,
2334 ZbiType::ImageArgs,
2335 ZbiType::BootVersion,
2336 ZbiType::DrvMacAddress,
2337 ZbiType::DrvPartitionMap,
2338 ZbiType::DrvBoardPrivate,
2339 ZbiType::HwRebootReason,
2340 ZbiType::SerialNumber,
2341 ZbiType::BootloaderFile,
2342 ZbiType::DeviceTree,
2343 ZbiType::SecureEntropy,
2344 ZbiType::DebugData,
2345 ]
2346 }
2347
2348 fn get_kernel_zbi_types() -> Vec<ZbiType> {
2349 vec![ZbiType::KernelRiscv64, ZbiType::KernelX64, ZbiType::KernelArm64]
2350 }
2351 fn get_metadata_zbi_types() -> Vec<ZbiType> {
2352 vec![
2353 ZbiType::DrvBoardInfo,
2354 ZbiType::DrvMacAddress,
2355 ZbiType::DrvPartitionMap,
2356 ZbiType::DrvBoardPrivate,
2357 ]
2358 }
2359
2360 #[test]
2361 fn zbi_test_type_is_kernel() {
2362 assert!(get_kernel_zbi_types().iter().all(|t| t.is_kernel()))
2363 }
2364
2365 #[test]
2366 fn zbi_test_type_is_not_kernel() {
2367 assert!(get_all_zbi_type_values()
2368 .iter()
2369 .filter(|v| !get_kernel_zbi_types().contains(v))
2370 .all(|v| !v.is_kernel()));
2371 }
2372
2373 #[test]
2374 fn zbi_test_type_is_driver_metadata() {
2375 assert!(get_metadata_zbi_types().iter().all(|t| t.is_driver_metadata()));
2376 }
2377
2378 #[test]
2379 fn zbi_test_type_is_not_driver_metadata() {
2380 assert!(get_all_zbi_type_values()
2381 .iter()
2382 .filter(|v| !get_metadata_zbi_types().contains(v))
2383 .all(|v| !v.is_driver_metadata()));
2384 }
2385
2386 #[test]
2387 fn zbi_test_default_type_has_version() {
2388 assert!(ZbiFlags::default().contains(ZbiFlags::VERSION));
2389 }
2390
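// A container is considered bootable when its first item is a kernel item for the target
// architecture (ZBI_ARCH_KERNEL_TYPE); the tests below cover the passing and failing cases.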
2391 #[test]
2392 fn zbi_test_is_bootable() {
2393 let mut buffer = ZbiAligned::default();
2394 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2395
2396 container
2397 .create_entry_with_payload(ZBI_ARCH_KERNEL_TYPE, 0, ZbiFlags::default(), &[])
2398 .unwrap();
2399
2400 assert!(container.is_bootable().is_ok());
2401 }
2402
2403 #[cfg(target_arch = "x86_64")]
2404 #[test]
2405 fn zbi_test_is_bootable_reference() {
2406 let ref_buffer = get_test_creference_buffer_vec();
2407 let mut buffer = ZbiAligned::default();
2408 buffer.0[..ref_buffer.len()].clone_from_slice(&ref_buffer);
2409 let container = ZbiContainer::parse(&buffer.0[..]).unwrap();
2410 assert!(container.is_bootable().is_ok());
2411 }
2412
2413 #[test]
2414 fn zbi_test_is_bootable_empty_container() {
2415 let mut buffer = ZbiAligned::default();
2416 let container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2417 assert_eq!(container.is_bootable(), Err(ZbiError::Truncated));
2418 }
2419
2420 #[test]
2421 fn zbi_test_is_bootable_wrong_arch() {
2422 let mut buffer = ZbiAligned::default();
2423 let _ = TestZbiBuilder::new(&mut buffer.0[..])
2424 .container_hdr(0)
2425 .align()
2426 .item(ZbiHeader { type_: 0, ..TestZbiBuilder::get_header_default() }, &[])
2427 .align()
2428 .update_container_length()
2429 .build();
2430 let container = ZbiContainer::parse(&mut buffer.0[..]).unwrap();
2431 assert_eq!(container.is_bootable(), Err(ZbiError::IncompleteKernel));
2432 }
2433
2434 #[test]
2435 fn zbi_test_is_bootable_not_first_item_fail() {
2436 let mut buffer = ZbiAligned::default();
2437 let mut container = ZbiContainer::new(&mut buffer.0[..]).unwrap();
2438
2439 container
2440 .create_entry_with_payload(ZbiType::DebugData, 0, ZbiFlags::default(), &[])
2441 .unwrap();
2442 container
2443 .create_entry_with_payload(ZBI_ARCH_KERNEL_TYPE, 0, ZbiFlags::default(), &[])
2444 .unwrap();
2445
2446 assert_eq!(container.is_bootable(), Err(ZbiError::IncompleteKernel));
2447 }
2448
2449 #[test]
2450 fn zbi_test_header_alignment() {
2451 assert_eq!(core::mem::size_of::<ZbiHeader>() % ZBI_ALIGNMENT_USIZE, 0);
2452 }
2453
2454 fn get_test_payloads_all() -> Vec<&'static [u8]> {
2455 vec![
2456 &[],
2457 &[1],
2458 &[1, 2],
2459 &[1, 2, 3, 4, 5],
2460 // These 4 elements are used for C reference binary testing
2461 b"4567\0",
2462 b"0123\0",
2463 b"0123456789\0",
2464 b"abcdefghijklmnopqrs\0",
2465 ]
2466 }
2467
2468 fn get_test_zbi_headers_all() -> Vec<ZbiHeader> {
2469 let test_payloads = get_test_payloads_all();
2470 vec![
2471 ZbiHeader {
2472 type_: ZBI_TYPE_KERNEL_RISCV64,
2473 length: test_payloads[0].len().try_into().unwrap(),
2474 extra: 0,
2475 flags: ZbiFlags::default().bits(),
2476 magic: ZBI_ITEM_MAGIC,
2477 crc32: ZBI_ITEM_NO_CRC32,
2478 ..Default::default()
2479 },
2480 ZbiHeader {
2481 type_: ZBI_TYPE_KERNEL_ARM64,
2482 length: test_payloads[1].len().try_into().unwrap(),
2483 extra: 0,
2484 flags: ZbiFlags::default().bits(),
2485 magic: ZBI_ITEM_MAGIC,
2486 crc32: ZBI_ITEM_NO_CRC32,
2487 ..Default::default()
2488 },
2489 ZbiHeader {
2490 type_: ZBI_TYPE_KERNEL_RISCV64,
2491 length: test_payloads[2].len().try_into().unwrap(),
2492 extra: 0,
2493 flags: ZbiFlags::default().bits(),
2494 magic: ZBI_ITEM_MAGIC,
2495 crc32: ZBI_ITEM_NO_CRC32,
2496 ..Default::default()
2497 },
2498 ZbiHeader {
2499 type_: ZBI_TYPE_KERNEL_X64,
2500 length: test_payloads[3].len().try_into().unwrap(),
2501 extra: 0,
2502 flags: ZbiFlags::default().bits(),
2503 magic: ZBI_ITEM_MAGIC,
2504 crc32: ZBI_ITEM_NO_CRC32,
2505 ..Default::default()
2506 },
2507 ZbiHeader {
2508 type_: ZBI_TYPE_KERNEL_X64,
2509 length: test_payloads[4].len().try_into().unwrap(),
2510 extra: 0,
2511 flags: ZbiFlags::default().bits(),
2512 magic: ZBI_ITEM_MAGIC,
2513 crc32: ZBI_ITEM_NO_CRC32,
2514 ..Default::default()
2515 },
2516 ZbiHeader {
2517 type_: ZBI_TYPE_CMDLINE,
2518 length: test_payloads[5].len().try_into().unwrap(),
2519 extra: 0,
2520 flags: ZbiFlags::default().bits(),
2521 magic: ZBI_ITEM_MAGIC,
2522 crc32: ZBI_ITEM_NO_CRC32,
2523 ..Default::default()
2524 },
2525 ZbiHeader {
2526 type_: ZBI_TYPE_STORAGE_RAMDISK,
2527 length: test_payloads[6].len().try_into().unwrap(),
2528 extra: 0,
2529 flags: ZbiFlags::default().bits(),
2530 magic: ZBI_ITEM_MAGIC,
2531 crc32: ZBI_ITEM_NO_CRC32,
2532 ..Default::default()
2533 },
2534 ZbiHeader {
2535 type_: ZBI_TYPE_STORAGE_BOOTFS,
2536 length: test_payloads[7].len().try_into().unwrap(),
2537 extra: 0,
2538 flags: ZbiFlags::default().bits(),
2539 magic: ZBI_ITEM_MAGIC,
2540 crc32: ZBI_ITEM_NO_CRC32,
2541 ..Default::default()
2542 },
2543 ]
2544 }
2545
2546 fn get_test_zbi_headers(num: usize) -> Vec<ZbiHeader> {
2547 get_test_zbi_headers_all()[..num].to_vec()
2548 }
2549
2550 fn get_test_entries_all() -> Vec<(ZbiHeader, &'static [u8])> {
2551 let headers = get_test_zbi_headers_all();
2552 let payloads = get_test_payloads_all();
2553 assert_eq!(headers.len(), payloads.len());
2554 headers.iter().cloned().zip(payloads.iter().cloned()).collect()
2555 }
2556
2557 fn get_test_entries(num: usize) -> Vec<(ZbiHeader, &'static [u8])> {
2558 get_test_entries_all()[..num].to_vec()
2559 }
2560
2561 fn get_test_entry_empty_payload() -> (ZbiHeader, &'static [u8]) {
2562 get_test_entries(1)[0]
2563 }
2564
2565 fn get_test_entry_nonempty_payload() -> (ZbiHeader, &'static [u8]) {
2566 get_test_entries(2)[1]
2567 }
2568
2569 fn get_test_entries_creference() -> Vec<(ZbiHeader, &'static [u8])> {
2570 let entries = get_test_entries_all();
2571 entries[entries.len() - 4..].to_vec()
2572 }
2573
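// Rebuilds the C reference layout in an aligned buffer: a container header followed by the four
// C reference entries (each aligned), with 10 bytes of 0xab trailing padding to match the dump.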
2574 fn get_test_creference_buffer() -> ZbiAligned {
2575 let entries = get_test_entries_creference();
2576 let mut buffer = ZbiAligned::default();
2577 let mut builder = TestZbiBuilder::new(&mut buffer.0[..]).container_hdr(0);
2578 for entry in entries {
2579 builder = builder.item(entry.0, entry.1).align();
2580 }
2581 let _ = builder.update_container_length().padding(0xab_u8, 10).build();
2582 buffer
2583 }
2584
2585 fn get_test_creference_buffer_vec() -> Vec<u8> {
2586 hex::decode(
2587 "424f4f54b8000000e6f78c8600000100\
2588 0000000000000000291778b5d6e8874a\
2589 4b524e4c050000000000000000000100\
2590 0000000000000000291778b5d6e8874a\
2591 3435363700000000434d444c05000000\
2592 00000000000001000000000000000000\
2593 291778b5d6e8874a3031323300000000\
2594 5244534b0b0000000000000000000100\
2595 0000000000000000291778b5d6e8874a\
2596 30313233343536373839000000000000\
2597 42465342140000000000000000000100\
2598 0000000000000000291778b5d6e8874a\
2599 6162636465666768696a6b6c6d6e6f70\
2600 7172730000000000abababababababab\
2601 abab",
2602 )
2603 .unwrap()
2604 }
2605
2606 #[test]
2607 fn test_creference_buffer_generation() {
2608 let ref_buffer = get_test_creference_buffer_vec();
2609 let buffer = get_test_creference_buffer();
2610 assert_eq!(&ref_buffer[..ref_buffer.len()], &buffer.0[..ref_buffer.len()]);
2611 }
2612
2613 #[test]
2614 fn zbi_test_zbi_error() {
2615 let e = ZbiError::Error;
2616 println!("{e}");
2617 println!("{e:?}");
2618 println!("{e:#?}");
2619 }
2620
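// `align_buffer` advances the slice start to the next ZBI_ALIGNMENT_USIZE boundary, so starting
// one byte into an already aligned buffer drops a full alignment unit from the usable length.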
2621 #[test]
2622 fn zbi_test_container_align_buffer() {
2623 let buffer = ZbiAligned::default();
2624 let original_len = buffer.0.len();
2625 let buffer = align_buffer(&buffer.0[1..]).unwrap();
2626 assert_eq!(buffer.as_ptr() as usize % ZBI_ALIGNMENT_USIZE, 0);
2627 assert_eq!(buffer.len(), original_len - ZBI_ALIGNMENT_USIZE);
2628 }
2629
2630 #[test]
2631 fn zbi_test_container_align_buffer_empty() {
2632 let buffer = ZbiAligned::default();
2633 let buffer = align_buffer(&buffer.0[..0]).unwrap();
2634 assert_eq!(buffer.as_ptr() as usize % ZBI_ALIGNMENT_USIZE, 0);
2635 assert_eq!(buffer.len(), 0);
2636 }
2637
2638 #[test]
2639 fn zbi_test_container_align_buffer_too_short() {
2640 let buffer = ZbiAligned::default();
2641 assert_eq!(align_buffer(&buffer.0[1..ZBI_ALIGNMENT_USIZE - 1]), Err(ZbiError::TooBig));
2642 }
2643
2644 #[test]
2645 fn zbi_test_container_align_buffer_just_enough() {
2646 let buffer = ZbiAligned::default();
2647 let buffer = align_buffer(&buffer.0[1..ZBI_ALIGNMENT_USIZE]).unwrap();
2648 assert_eq!(buffer.as_ptr() as usize % ZBI_ALIGNMENT_USIZE, 0);
2649 assert_eq!(buffer.len(), 0);
2650 }
2651 }
2652