// Copyright 2023, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! APIs for reading/writing with non-block-aligned ranges and unaligned buffers.
//!
//! Most block devices require reading/writing in units of blocks and require that the
//! input/output buffer satisfies certain alignment constraints (e.g. for DMA). This library
//! provides APIs that build on top of them and relax these constraints. The library supports
//! reading/writing raw block content as well as parsing/reading/writing GPT partitions.
//!
//! # Examples
//!
//! ```rust
//! use gbl_storage::{
//!     AsBlockDevice, BlockIo, BlockDevice, required_scratch_size, BlockInfo, BlockIoError,
//! };
//!
//! /// Mocks a block device using a buffer.
//! pub struct RamBlockIo {
//!     storage: std::vec::Vec<u8>,
//! }
//!
//! impl BlockIo for RamBlockIo {
//!     fn info(&mut self) -> BlockInfo {
//!         BlockInfo {
//!             block_size: 512,
//!             num_blocks: self.storage.len() as u64 / 512,
//!             alignment: 64,
//!         }
//!     }
//!
//!     fn read_blocks(&mut self, blk_offset: u64, out: &mut [u8]) -> Result<(), BlockIoError> {
//!         let start = blk_offset * self.block_size();
//!         let end = start + out.len() as u64;
//!         out.clone_from_slice(&self.storage[start as usize..end as usize]);
//!         Ok(())
//!     }
//!
//!     fn write_blocks(&mut self, blk_offset: u64, data: &mut [u8]) -> Result<(), BlockIoError> {
//!         let start = blk_offset * self.block_size();
//!         let end = start + data.len() as u64;
//!         self.storage[start as usize..end as usize].clone_from_slice(data);
//!         Ok(())
//!     }
//! }
//!
//! const MAX_GPT_ENTRIES: u64 = 128;
//!
//! let mut ram_block_io = RamBlockIo { storage: vec![0u8; 64 * 1024] };
//! // Prepare a scratch buffer, with the size calculated by `required_scratch_size()`.
//! let mut scratch =
//!     vec![0u8; required_scratch_size(&mut ram_block_io, MAX_GPT_ENTRIES).unwrap()];
//! // Create a `BlockDevice`.
//! let mut ram_block_dev =
//!     BlockDevice::new(&mut ram_block_io, &mut scratch[..], MAX_GPT_ENTRIES);
//!
//! // Read/write with an arbitrary range and buffer without worrying about alignment.
//! let mut out = vec![0u8; 1234];
//! ram_block_dev.read(4321, &mut out[..]).unwrap();
//! let mut data = vec![0u8; 5678];
//! // The input is mutable, which enables a more efficient write path.
//! ram_block_dev.write(8765, data.as_mut_slice()).unwrap();
//!
//! // Sync GPT.
//! let _ = ram_block_dev.sync_gpt();
//! // Access GPT entries.
//! let _ = ram_block_dev.find_partition("some-partition");
//! // Read/write GPT partitions with arbitrary offset, size and buffer.
//! let _ = ram_block_dev.read_gpt_partition("partition", 4321, &mut out[..]);
//! let _ = ram_block_dev.write_gpt_partition("partition", 8765, data.as_mut_slice());
//!
//! // Alternatively, you can define a custom type that internally owns and binds the
//! // implementation of `BlockIo` and the scratch buffer together, and then implement the
//! // `AsBlockDevice` trait. This gives cleaner management of resources.
//! pub struct OwnedBlockDevice {
//!     io: RamBlockIo,
//!     scratch: std::vec::Vec<u8>,
//! }
//!
//! impl AsBlockDevice for OwnedBlockDevice {
//!     fn with(&mut self, f: &mut dyn FnMut(&mut dyn BlockIo, &mut [u8], u64)) {
//!         f(&mut self.io, &mut self.scratch[..], MAX_GPT_ENTRIES)
//!     }
//! }
//!
//! // `owned_block_dev` has the same APIs as `BlockDevice`.
//! let mut owned_block_dev = OwnedBlockDevice { io: ram_block_io, scratch };
//! ```

#![cfg_attr(not(test), no_std)]

use core::cmp::min;

// Selective export of submodule types.
mod gpt;
use gpt::check_gpt_rw_params;
use gpt::Gpt;
pub use gpt::{GptEntry, GptHeader, GPT_MAGIC, GPT_NAME_LEN_U16};

use safemath::SafeNum;

mod multi_blocks;
pub use multi_blocks::AsMultiBlockDevices;

mod non_blocking;
pub use non_blocking::{BlockDeviceEx, IoStatus, NonBlockingBlockIo, Transaction};

/// The type of Result used in this library.
pub type Result<T> = core::result::Result<T, StorageError>;

/// Error code for this library.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum StorageError {
    ArithmeticOverflow(safemath::Error),
    BlockDeviceNotFound,
    BlockIoError(BlockIoError),
    BlockIoNotProvided,
    FailedGettingBlockDevices(Option<&'static str>),
    IoAborted,
    InvalidInput,
    NoValidGpt,
    NotExist,
    NotReady,
    OutOfRange,
    PartitionNotUnique,
    ScratchTooSmall,
}

impl From<safemath::Error> for StorageError {
    fn from(err: safemath::Error) -> Self {
        Self::ArithmeticOverflow(err)
    }
}

impl From<core::num::TryFromIntError> for StorageError {
    fn from(_: core::num::TryFromIntError) -> Self {
        Self::OutOfRange
    }
}

impl From<BlockIoError> for StorageError {
    fn from(val: BlockIoError) -> Self {
        Self::BlockIoError(val)
    }
}

impl core::fmt::Display for StorageError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "{:?}", self)
    }
}

/// `BlockInfo` contains information for a block device.
pub struct BlockInfo {
    /// Native block size of the block device.
    pub block_size: u64,
    /// Total number of blocks of the block device.
    pub num_blocks: u64,
    /// The alignment requirement for IO buffers. For example, many block device drivers use DMA
    /// for data transfer, which typically requires that the buffer address for DMA be aligned to
    /// 16/32/64 bytes etc. If the block device has no alignment requirement, it can return 1.
    pub alignment: u64,
}

impl BlockInfo {
    /// Computes the total size in bytes of the block device.
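    ///
    /// For example (a hypothetical illustration): with `block_size: 512` and
    /// `num_blocks: 1024`, `total_size()` returns `Ok(512 * 1024)`, i.e. 524288 bytes.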
    pub fn total_size(&self) -> Result<u64> {
        Ok((SafeNum::from(self.block_size) * self.num_blocks).try_into()?)
    }
}

/// `BlockIoError` represents the error codes returned by implementations of the `BlockIo` and
/// `NonBlockingBlockIo` interfaces.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BlockIoError {
    MediaBusy,
    Others(Option<&'static str>),
}

/// `BlockIo` contains methods for reading/writing blocks of data to a block device with aligned
/// input/output buffers.
pub trait BlockIo {
    /// Gets the `BlockInfo` for this block device.
    fn info(&mut self) -> BlockInfo;

    /// Returns the block size of the block device.
    fn block_size(&mut self) -> u64 {
        self.info().block_size
    }

    /// Returns the total number of blocks of the block device.
    fn num_blocks(&mut self) -> u64 {
        self.info().num_blocks
    }

    /// Returns the alignment requirement for buffers passed to the `write_blocks()` and
    /// `read_blocks()` methods. For example, many block device drivers use DMA for data transfer,
    /// which typically requires that the buffer address for DMA be aligned to 16/32/64 bytes etc.
    /// If the block device has no alignment requirement, it can return 1.
    fn alignment(&mut self) -> u64 {
        self.info().alignment
    }

    /// Reads blocks of data from the block device.
    ///
    /// # Args
    ///
    /// * `blk_offset`: Offset in number of blocks.
    ///
    /// * `out`: Buffer to store the read data. Callers of this method ensure that it is
    ///   aligned according to `alignment()` and that `out.len()` is a multiple of
    ///   `block_size()`.
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if exactly `out.len()` bytes are read, `Err` otherwise.
    fn read_blocks(
        &mut self,
        blk_offset: u64,
        out: &mut [u8],
    ) -> core::result::Result<(), BlockIoError>;

    /// Writes blocks of data to the block device.
    ///
    /// # Args
    ///
    /// * `blk_offset`: Offset in number of blocks.
    ///
    /// * `data`: Data to write. Callers of this method ensure that it is aligned according to
    ///   `alignment()` and that `data.len()` is a multiple of `block_size()`.
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if exactly `data.len()` bytes are written, `Err` otherwise.
    fn write_blocks(
        &mut self,
        blk_offset: u64,
        data: &mut [u8],
    ) -> core::result::Result<(), BlockIoError>;
}

/// `Partition` contains information about a GPT partition.
#[derive(Debug, Copy, Clone)]
pub struct Partition {
    entry: GptEntry,
    block_size: u64,
}

impl Partition {
    /// Creates a new instance.
    fn new(entry: GptEntry, block_size: u64) -> Self {
        Self { entry, block_size }
    }

    /// Returns the partition size in bytes.
    pub fn size(&self) -> Result<u64> {
        (SafeNum::from(self.entry.blocks()?) * self.block_size)
            .try_into()
            .map_err(|e: safemath::Error| e.into())
    }

    /// Returns the block size of this partition.
    pub fn block_size(&self) -> u64 {
        self.block_size
    }

    /// Returns the partition entry structure in the GPT header.
    pub fn gpt_entry(&self) -> &GptEntry {
        &self.entry
    }
}

/// `PartitionIterator` is returned by `AsBlockDevice::partition_iter()` and can be used to
/// iterate over all GPT partition entries.
pub struct PartitionIterator<'a> {
    dev: &'a mut dyn AsBlockDevice,
    idx: usize,
}

impl Iterator for PartitionIterator<'_> {
    type Item = Partition;

    fn next(&mut self) -> Option<Self::Item> {
        let res = with_partitioned_scratch(
            self.dev,
            |io, _, gpt_buffer, _| -> Result<Option<Partition>> {
                Ok(Gpt::from_existing(gpt_buffer)?
                    .entries()?
                    .get(self.idx)
                    .map(|v| Partition::new(*v, io.block_size())))
            },
        )
        .ok()?
        .ok()??;
        self.idx += 1;
        Some(res)
    }
}

/// `AsBlockDevice` provides APIs for reading raw block content and GPT partitions with
/// arbitrary offset, size and input/output buffer.
pub trait AsBlockDevice {
    /// Runs the provided closure `f` with the following parameters:
    ///
    ///   1. An implementation of block IO `&mut dyn BlockIo`.
    ///   2. A scratch buffer `&mut [u8]`.
    ///   3. A `u64` specifying the maximum allowed number of GPT entries.
    ///
    /// * The scratch buffer is internally used for two purposes: 1. to handle read/write with
    ///   an offset/size that is not a multiple of the block size or an input/output buffer that
    ///   is not aligned, and 2. to load and sync GPT headers.
    ///
    /// * The necessary size for the scratch buffer depends on `BlockIo::alignment()`,
    ///   `BlockIo::block_size()` and the maximum allowed number of GPT entries. It can be
    ///   computed using the helper API `required_scratch_size()`. If the maximum allowed number
    ///   of GPT entries is 0, GPT is considered unavailable and no buffer will be reserved for
    ///   GPT headers. If, additionally, `BlockIo` has no alignment requirement, i.e. both
    ///   alignment and block size are 1, the total required scratch size is 0.
    ///
    /// * GPT headers will be cached in the scratch buffer after `Self::sync_gpt()` returns
    ///   success. Subsequent calls to `Self::read_gpt_partition()` and
    ///   `Self::write_gpt_partition()` will look up partition entries from the cached GPT
    ///   header. Thus callers should make sure to always return the same scratch buffer and
    ///   avoid modifying its content.
    ///
    /// * A smaller value for the maximum allowed number of GPT entries gives a smaller required
    ///   scratch buffer size. However, if the `entries_count` field in the GPT header is
    ///   greater than this value, GPT parsing will fail. Note that most tools and OSes fix
    ///   `entries_count` to the maximum value 128 regardless of the actual number of partition
    ///   entries used. Thus, unless you have full control of GPT generation in your entire
    ///   system, where you can always ensure a smaller bound on it, it is recommended to always
    ///   return 128.
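    ///
    /// # Example
    ///
    /// A minimal sketch, assuming a type that owns a `BlockIo` implementation in `self.io` and
    /// a scratch `Vec<u8>` in `self.scratch` (mirroring the `OwnedBlockDevice` example in the
    /// crate-level docs):
    ///
    /// ```ignore
    /// fn with(&mut self, f: &mut dyn FnMut(&mut dyn BlockIo, &mut [u8], u64)) {
    ///     // Always hand out the same scratch buffer and allow up to 128 GPT entries.
    ///     f(&mut self.io, &mut self.scratch[..], 128)
    /// }
    /// ```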
    fn with(&mut self, f: &mut dyn FnMut(&mut dyn BlockIo, &mut [u8], u64));

    /// Returns the block size of the underlying `BlockIo`.
    fn block_size(&mut self) -> Result<u64> {
        with_partitioned_scratch(self, |io, _, _, _| io.block_size())
    }

    /// Returns the number of blocks of the underlying `BlockIo`.
    fn num_blocks(&mut self) -> Result<u64> {
        with_partitioned_scratch(self, |io, _, _, _| io.num_blocks())
    }

    /// Returns the total size in number of bytes.
    fn total_size(&mut self) -> Result<u64> {
        Ok((SafeNum::from(self.block_size()?) * self.num_blocks()?).try_into()?)
    }

    /// Reads data from the block device.
    ///
    /// # Args
    ///
    /// * `offset`: Offset in number of bytes.
    ///
    /// * `out`: Buffer to store the read data.
    ///
    /// * Returns success when exactly `out.len()` bytes are read.
    fn read(&mut self, offset: u64, out: &mut [u8]) -> Result<()> {
        with_partitioned_scratch(self, |io, alignment, _, _| read(io, offset, out, alignment))?
    }

    /// Writes data to the device.
    ///
    /// # Args
    ///
    /// * `offset`: Offset in number of bytes.
    ///
    /// * `data`: Data to write.
    ///
    /// * The API enables an optimization that temporarily changes the `data` layout internally
    ///   and reduces the number of calls to `BlockIo::write_blocks()` down to O(1) regardless
    ///   of the input's alignment. This is the recommended usage.
    ///
    /// * Returns success when exactly `data.len()` bytes are written.
    fn write(&mut self, offset: u64, data: &mut [u8]) -> Result<()> {
        with_partitioned_scratch(self, |io, alignment_scratch, _, _| {
            write_bytes_mut(io, offset, data, alignment_scratch)
        })?
    }

    /// Parses and syncs GPT from a block device.
    ///
    /// The API validates and restores the primary/secondary GPT header.
    ///
    /// # Returns
    ///
    /// Returns success if GPT is loaded/restored successfully.
    fn sync_gpt(&mut self) -> Result<()> {
        with_partitioned_scratch(self, |io, alignment_scratch, gpt_buffer, max_entries| {
            gpt::gpt_sync(
                io,
                &mut Gpt::new_from_buffer(max_entries, gpt_buffer)?,
                alignment_scratch,
            )
        })?
    }

    /// Returns an iterator over GPT partition entries.
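    ///
    /// A hypothetical usage sketch, assuming `dev` implements `AsBlockDevice` and `sync_gpt()`
    /// has already succeeded:
    ///
    /// ```ignore
    /// for partition in dev.partition_iter() {
    ///     // Inspect e.g. partition.size() or partition.gpt_entry().
    /// }
    /// ```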
    fn partition_iter(&mut self) -> PartitionIterator
    where
        Self: Sized,
    {
        PartitionIterator { dev: self, idx: 0 }
    }

    /// Returns the `Partition` for a partition.
    ///
    /// # Args
    ///
    /// * `part`: Name of the partition.
    fn find_partition(&mut self, part: &str) -> Result<Partition> {
        with_partitioned_scratch(self, |io, _, gpt_buffer, _| {
            Ok(Partition::new(
                *Gpt::from_existing(gpt_buffer)?.find_partition(part)?,
                io.block_size(),
            ))
        })?
    }

    /// Reads a GPT partition on a block device.
    ///
    /// # Args
    ///
    /// * `part_name`: Name of the partition.
    ///
    /// * `offset`: Offset in number of bytes into the partition.
    ///
    /// * `out`: Buffer to store the read data.
    ///
    /// # Returns
    ///
    /// Returns success when exactly `out.len()` bytes are read successfully.
    fn read_gpt_partition(&mut self, part_name: &str, offset: u64, out: &mut [u8]) -> Result<()> {
        let offset = with_partitioned_scratch(self, |_, _, gpt_buffer, _| {
            check_gpt_rw_params(gpt_buffer, part_name, offset, out.len())
        })??;
        self.read(offset, out)
    }

    /// Writes a GPT partition on a block device. This is an optimization for mutable buffers.
    /// See `AsBlockDevice::write` for details on alignment requirements for optimized
    /// performance.
    ///
    /// # Args
    ///
    /// * `part_name`: Name of the partition.
    ///
    /// * `offset`: Offset in number of bytes into the partition.
    ///
    /// * `data`: Data to write. See the `data` passed to `BlockIo::write_blocks()` for details.
    ///
    /// # Returns
    ///
    /// Returns success when exactly `data.len()` bytes are written successfully.
    fn write_gpt_partition(&mut self, part_name: &str, offset: u64, data: &mut [u8]) -> Result<()> {
        let offset = with_partitioned_scratch(self, |_, _, gpt_buffer, _| {
            check_gpt_rw_params(gpt_buffer, part_name, offset, data.len())
        })??;
        self.write(offset, data)
    }
}

impl<T: ?Sized + AsBlockDevice> AsBlockDevice for &mut T {
    fn with(&mut self, f: &mut dyn FnMut(&mut dyn BlockIo, &mut [u8], u64)) {
        (*self).with(f)
    }
}

/// `BlockDevice` borrows a `BlockIo` and a scratch buffer, and implements `AsBlockDevice`.
pub struct BlockDevice<'a, 'b> {
    io: &'a mut dyn BlockIo,
    scratch: &'b mut [u8],
    max_gpt_entries: u64,
}

impl<'a, 'b> BlockDevice<'a, 'b> {
    pub fn new(io: &'a mut dyn BlockIo, scratch: &'b mut [u8], max_gpt_entries: u64) -> Self {
        Self { io, scratch, max_gpt_entries }
    }
}

impl AsBlockDevice for BlockDevice<'_, '_> {
    fn with(&mut self, f: &mut dyn FnMut(&mut dyn BlockIo, &mut [u8], u64)) {
        f(self.io, self.scratch, self.max_gpt_entries)
    }
}

/// Calculates the required scratch buffer size for a `BlockIo` and a maximum number of GPT
/// entries.
pub fn required_scratch_size(
    io: &mut (impl BlockIo + ?Sized),
    max_gpt_entries: u64,
) -> Result<usize> {
    let alignment_size: SafeNum = alignment_scratch_size(io)?.into();
    let gpt_buffer_size = match max_gpt_entries {
        0 => 0,
        v => Gpt::required_buffer_size(v)?,
    };
    (alignment_size + gpt_buffer_size).try_into().map_err(|e: safemath::Error| e.into())
}

/// A helper that wraps `AsBlockDevice::with` and additionally partitions the scratch buffer into
/// alignment scratch and GPT buffers.
pub(crate) fn with_partitioned_scratch<F, R>(
    dev: &mut (impl AsBlockDevice + ?Sized),
    mut f: F,
) -> Result<R>
where
    F: FnMut(&mut dyn BlockIo, &mut [u8], &mut [u8], u64) -> R,
{
    let mut res: Result<R> = Err(StorageError::BlockIoNotProvided);
    dev.with(&mut |io, scratch, max_entries| {
        res = (|| {
            if scratch.len() < required_scratch_size(io, max_entries)? {
                return Err(StorageError::ScratchTooSmall);
            }
            let (alignment, gpt) = scratch.split_at_mut(alignment_scratch_size(io)?);
            Ok(f(io, alignment, gpt, max_entries))
        })();
    });
    res
}

/// Checks if `value` is aligned to (a multiple of) `alignment`.
/// It can fail if the remainder calculation fails the overflow check.
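/// For example, `is_aligned(10.into(), 5.into())` returns `Ok(true)` and
/// `is_aligned(10.into(), 4.into())` returns `Ok(false)`.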
pub fn is_aligned(value: SafeNum, alignment: SafeNum) -> Result<bool> {
    Ok(u64::try_from(value % alignment)? == 0)
}

/// Checks if the `buffer` address is aligned to `alignment`.
/// It can fail if the remainder calculation fails the overflow check.
pub fn is_buffer_aligned(buffer: &[u8], alignment: u64) -> Result<bool> {
    is_aligned((buffer.as_ptr() as usize).into(), alignment.into())
}

/// Checks the read/write range and calculates the offset in number of blocks.
fn check_range(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    buffer: &[u8],
) -> Result<SafeNum> {
    let offset: SafeNum = offset.into();
    let block_size: SafeNum = blk_io.block_size().into();
    debug_assert!(is_aligned(offset, block_size)?);
    debug_assert!(is_aligned(buffer.len().into(), block_size)?);
    debug_assert!(is_buffer_aligned(buffer, blk_io.alignment())?);
    let blk_offset = offset / block_size;
    let blk_count = SafeNum::from(buffer.len()) / block_size;
    match u64::try_from(blk_offset + blk_count)? <= blk_io.num_blocks() {
        true => Ok(blk_offset),
        false => Err(StorageError::OutOfRange),
    }
}

/// Reads with a block-aligned offset, aligned length and an aligned buffer.
fn read_aligned_all(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    out: &mut [u8],
) -> Result<()> {
    let blk_offset = check_range(blk_io, offset, out).map(u64::try_from)??;
    Ok(blk_io.read_blocks(blk_offset, out)?)
}

/// Reads with a block-aligned offset and an aligned buffer. The size doesn't need to be
/// block-aligned.
///   |~~~~~~~~~read~~~~~~~~~|
///   |---------|---------|---------|
fn read_aligned_offset_and_buffer(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    out: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    let block_size = SafeNum::from(blk_io.block_size());
    debug_assert!(is_aligned(offset.into(), block_size)?);
    debug_assert!(is_buffer_aligned(out, blk_io.alignment())?);

    let aligned_read: usize = SafeNum::from(out.len()).round_down(block_size).try_into()?;

    if aligned_read > 0 {
        read_aligned_all(blk_io, offset, &mut out[..aligned_read])?;
    }
    let unaligned = &mut out[aligned_read..];
    if unaligned.is_empty() {
        return Ok(());
    }
    // Read the unaligned part.
    let block_scratch = &mut scratch[..block_size.try_into()?];
    let aligned_offset = SafeNum::from(offset) + aligned_read;
    read_aligned_all(blk_io, aligned_offset.try_into()?, block_scratch)?;
    unaligned.clone_from_slice(&block_scratch[..unaligned.len()]);
    Ok(())
}

/// Reads with an aligned buffer. The offset and size don't need to be block-aligned.
/// Case 1:
///     |~~~~~~read~~~~~~~|
///   |------------|------------|
/// Case 2:
///     |~~~read~~~|
///   |---------------|--------------|
fn read_aligned_buffer(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    out: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    debug_assert!(is_buffer_aligned(out, blk_io.alignment())?);

    if is_aligned(offset.into(), blk_io.block_size().into())? {
        return read_aligned_offset_and_buffer(blk_io, offset, out, scratch);
    }
    let offset = SafeNum::from(offset);
    let aligned_start: u64 =
        min(offset.round_up(blk_io.block_size()).try_into()?, (offset + out.len()).try_into()?);

    let aligned_relative_offset: usize = (SafeNum::from(aligned_start) - offset).try_into()?;
    if aligned_relative_offset < out.len() {
        if is_buffer_aligned(&out[aligned_relative_offset..], blk_io.alignment())? {
            // If the new output address is aligned, read directly.
            read_aligned_offset_and_buffer(
                blk_io,
                aligned_start,
                &mut out[aligned_relative_offset..],
                scratch,
            )?;
        } else {
            // Otherwise read into the beginning of `out` (assumed aligned) and memmove to the
            // correct position.
            let read_len: usize =
                (SafeNum::from(out.len()) - aligned_relative_offset).try_into()?;
            read_aligned_offset_and_buffer(blk_io, aligned_start, &mut out[..read_len], scratch)?;
            out.copy_within(..read_len, aligned_relative_offset);
        }
    }

    // Now read the unaligned part.
    let block_scratch = &mut scratch[..SafeNum::from(blk_io.block_size()).try_into()?];
    let round_down_offset = offset.round_down(blk_io.block_size());
    read_aligned_all(blk_io, round_down_offset.try_into()?, block_scratch)?;
    let offset_relative = offset - round_down_offset;
    let unaligned = &mut out[..aligned_relative_offset];
    unaligned.clone_from_slice(
        &block_scratch
            [offset_relative.try_into()?..(offset_relative + unaligned.len()).try_into()?],
    );
    Ok(())
}

/// Calculates the necessary scratch buffer size for handling block and buffer misalignment.
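/// The size makes room for up to `alignment() - 1` bytes of staging space for a misaligned
/// user buffer, roughly the same amount again so that the scratch buffer itself can be
/// re-aligned, plus one block for read-modify-write of partial blocks (see `split_scratch()`).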
pub fn alignment_scratch_size(blk_io: &mut (impl BlockIo + ?Sized)) -> Result<usize> {
    let block_alignment = match blk_io.block_size() {
        1 => 0,
        v => v,
    };
    ((SafeNum::from(blk_io.alignment()) - 1) * 2 + block_alignment)
        .try_into()
        .map_err(|e: safemath::Error| e.into())
}

/// Gets a subslice of the given slice with an address aligned according to `alignment`.
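/// For example, if `buffer` starts at address 0x1001 and `alignment` is 16, the returned
/// subslice skips the first 15 bytes and starts at address 0x1010.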
fn aligned_subslice(buffer: &mut [u8], alignment: u64) -> Result<&mut [u8]> {
    let addr = SafeNum::from(buffer.as_ptr() as usize);
    Ok(&mut buffer[(addr.round_up(alignment) - addr).try_into()?..])
}

// Partitions a scratch buffer into two aligned parts: `[u8; alignment() - 1]` and
// `[u8; block_size()]`, for handling buffer and block misalignment respectively.
fn split_scratch<'a>(
    blk_io: &mut (impl BlockIo + ?Sized),
    scratch: &'a mut [u8],
) -> Result<(&'a mut [u8], &'a mut [u8])> {
    let (buffer_alignment, block_alignment) = aligned_subslice(scratch, blk_io.alignment())?
        .split_at_mut((SafeNum::from(blk_io.alignment()) - 1).try_into()?);
    let block_alignment = aligned_subslice(block_alignment, blk_io.alignment())?;
    let block_alignment_scratch_size = match blk_io.block_size() {
        1 => SafeNum::ZERO,
        v => v.into(),
    };
    Ok((buffer_alignment, &mut block_alignment[..block_alignment_scratch_size.try_into()?]))
}

/// Reads with no alignment requirement.
fn read(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    out: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    let (buffer_alignment_scratch, block_alignment_scratch) = split_scratch(blk_io, scratch)?;

    if is_buffer_aligned(out, blk_io.alignment())? {
        return read_aligned_buffer(blk_io, offset, out, block_alignment_scratch);
    }

    // Buffer misalignment:
    // Case 1:
    //     |~~~~~~~~~~~~buffer~~~~~~~~~~~~|
    //   |----------------------|---------------------|
    //        blk_io.alignment()
    //
    // Case 2:
    //     |~~~~~~buffer~~~~~|
    //   |----------------------|---------------------|
    //        blk_io.alignment()

    let out_addr_value = SafeNum::from(out.as_ptr() as usize);
    let unaligned_read: usize =
        min((out_addr_value.round_up(blk_io.alignment()) - out_addr_value).try_into()?, out.len());

    // Read the unaligned part.
    let unaligned_out = &mut buffer_alignment_scratch[..unaligned_read];
    read_aligned_buffer(blk_io, offset, unaligned_out, block_alignment_scratch)?;
    out[..unaligned_read].clone_from_slice(unaligned_out);

    if unaligned_read == out.len() {
        return Ok(());
    }
    // Read the aligned part.
    read_aligned_buffer(
        blk_io,
        (SafeNum::from(offset) + unaligned_read).try_into()?,
        &mut out[unaligned_read..],
        block_alignment_scratch,
    )
}

/// Writes with a block-aligned offset, aligned length and an aligned buffer.
fn write_aligned_all(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    data: &mut [u8],
) -> Result<()> {
    let blk_offset = check_range(blk_io, offset, data)?;
    Ok(blk_io.write_blocks(blk_offset.try_into()?, data)?)
}

/// Writes with a block-aligned offset and an aligned buffer. `data.len()` doesn't need to be
/// block-aligned.
///   |~~~~~~~~~size~~~~~~~~~|
///   |---------|---------|---------|
fn write_aligned_offset_and_buffer(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    data: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    debug_assert!(is_aligned(offset.into(), blk_io.block_size().into())?);
    debug_assert!(is_buffer_aligned(data, blk_io.alignment())?);

    let aligned_write: usize =
        SafeNum::from(data.len()).round_down(blk_io.block_size()).try_into()?;
    if aligned_write > 0 {
        write_aligned_all(blk_io, offset, &mut data[..aligned_write])?;
    }
    let unaligned = &data[aligned_write..];
    if unaligned.is_empty() {
        return Ok(());
    }

    // Perform a read-modify-write for the unaligned part.
    let unaligned_start: u64 = (SafeNum::from(offset) + aligned_write).try_into()?;
    let block_scratch = &mut scratch[..SafeNum::from(blk_io.block_size()).try_into()?];
    read_aligned_all(blk_io, unaligned_start, block_scratch)?;
    block_scratch[..unaligned.len()].clone_from_slice(unaligned);
    write_aligned_all(blk_io, unaligned_start, block_scratch)
}

/// Swaps the positions of the sub-segments `[0..pos]` and `[pos..]`.
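///
/// This works with three in-place reversals and needs no extra memory. For example, with
/// `slice = [1, 2, 3, 4, 5]` and `pos = 2`: reversing the two halves gives `[2, 1, 5, 4, 3]`,
/// and reversing the whole slice then yields `[3, 4, 5, 1, 2]`.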
fn swap_slice(slice: &mut [u8], pos: usize) {
    let (left, right) = slice.split_at_mut(pos);
    left.reverse();
    right.reverse();
    slice.reverse();
}

/// Writes with an aligned buffer. The offset and size don't need to be block-aligned.
/// Case 1:
///     |~~~~~~write~~~~~~~|
///   |------------|------------|
/// Case 2:
///     |~~~write~~~|
///   |---------------|--------------|
fn write_aligned_buffer(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    data: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    debug_assert!(is_buffer_aligned(data, blk_io.alignment())?);

    let offset = SafeNum::from(offset);
    if is_aligned(offset, blk_io.block_size().into())? {
        return write_aligned_offset_and_buffer(blk_io, offset.try_into()?, data, scratch);
    }

    let aligned_start: u64 =
        min(offset.round_up(blk_io.block_size()).try_into()?, (offset + data.len()).try_into()?);
    let aligned_relative_offset: usize = (SafeNum::from(aligned_start) - offset).try_into()?;
    if aligned_relative_offset < data.len() {
        if is_buffer_aligned(&data[aligned_relative_offset..], blk_io.alignment())? {
            // If the new address is aligned, write directly.
            write_aligned_offset_and_buffer(
                blk_io,
                aligned_start,
                &mut data[aligned_relative_offset..],
                scratch,
            )?;
        } else {
            let write_len: usize =
                (SafeNum::from(data.len()) - aligned_relative_offset).try_into()?;
            // Swap the offset-aligned part to the beginning of the buffer (assumed aligned).
            swap_slice(data, aligned_relative_offset);
            let res = write_aligned_offset_and_buffer(
                blk_io,
                aligned_start,
                &mut data[..write_len],
                scratch,
            );
            // Swap the two parts back before checking the result.
            swap_slice(data, write_len);
            res?;
        }
    }

    // Perform a read-modify-write for the unaligned part.
    let block_scratch = &mut scratch[..SafeNum::from(blk_io.block_size()).try_into()?];
    let round_down_offset: u64 = offset.round_down(blk_io.block_size()).try_into()?;
    read_aligned_all(blk_io, round_down_offset, block_scratch)?;
    let offset_relative = offset - round_down_offset;
    block_scratch
        [offset_relative.try_into()?..(offset_relative + aligned_relative_offset).try_into()?]
        .clone_from_slice(&data[..aligned_relative_offset]);
    write_aligned_all(blk_io, round_down_offset, block_scratch)
}

/// Writes with no alignment requirement, taking a mutable input slice. Internal optimizations
/// temporarily modify the `data` layout to minimize the number of calls to
/// `blk_io.read_blocks()`/`blk_io.write_blocks()` (down to O(1)).
fn write_bytes_mut(
    blk_io: &mut (impl BlockIo + ?Sized),
    offset: u64,
    data: &mut [u8],
    scratch: &mut [u8],
) -> Result<()> {
    let (buffer_alignment_scratch, block_alignment_scratch) = split_scratch(blk_io, scratch)?;
    if is_buffer_aligned(data, blk_io.alignment())? {
        return write_aligned_buffer(blk_io, offset, data, block_alignment_scratch);
    }

    // Buffer misalignment:
    // Case 1:
    //     |~~~~~~~~~~~~buffer~~~~~~~~~~~~|
    //   |----------------------|---------------------|
    //        blk_io.alignment()
    //
    // Case 2:
    //     |~~~~~~buffer~~~~~|
    //   |----------------------|---------------------|
    //        blk_io.alignment()

    // Write the unaligned part.
    let data_addr_value = SafeNum::from(data.as_ptr() as usize);
    let unaligned_write: usize = min(
        (data_addr_value.round_up(blk_io.alignment()) - data_addr_value).try_into()?,
        data.len(),
    );
    let unaligned_data = &mut buffer_alignment_scratch[..unaligned_write];
    unaligned_data.clone_from_slice(&data[..unaligned_write]);
    write_aligned_buffer(blk_io, offset, unaligned_data, block_alignment_scratch)?;
    if unaligned_write == data.len() {
        return Ok(());
    }

    // Write the aligned part.
    write_aligned_buffer(
        blk_io,
        (SafeNum::from(offset) + unaligned_write).try_into()?,
        &mut data[unaligned_write..],
        block_alignment_scratch,
    )
}

#[cfg(test)]
mod test {
    use core::mem::size_of;
    use gbl_storage_testlib::{
        required_scratch_size, AsBlockDevice, TestBlockDevice, TestBlockDeviceBuilder,
    };
    use safemath::SafeNum;

    #[derive(Debug)]
    struct TestCase {
        rw_offset: u64,
        rw_size: u64,
        misalignment: u64,
        alignment: u64,
        block_size: u64,
        storage_size: u64,
    }

    impl TestCase {
        fn new(
            rw_offset: u64,
            rw_size: u64,
            misalignment: u64,
            alignment: u64,
            block_size: u64,
            storage_size: u64,
        ) -> Self {
            Self { rw_offset, rw_size, misalignment, alignment, block_size, storage_size }
        }
    }

    // Helper object for allocating an aligned buffer.
    struct AlignedBuffer {
        buffer: Vec<u8>,
        alignment: u64,
        size: u64,
    }

    impl AlignedBuffer {
        pub fn new(alignment: u64, size: u64) -> Self {
            let aligned_size = (SafeNum::from(size) + alignment).try_into().unwrap();
            let buffer = vec![0u8; aligned_size];
            Self { buffer, alignment, size }
        }

        pub fn get(&mut self) -> &mut [u8] {
            let addr = SafeNum::from(self.buffer.as_ptr() as usize);
            let aligned_start = addr.round_up(self.alignment) - addr;
            &mut self.buffer
                [aligned_start.try_into().unwrap()..(aligned_start + self.size).try_into().unwrap()]
        }
    }

    /// Upper bound on the number of `BlockIo::read_blocks()/write_blocks()` calls made by
    /// `AsBlockDevice::read()` and `AsBlockDevice::write()` with a mutable input buffer.
    ///
    /// * `fn read_aligned_all()`: At most 1 call to `BlockIo::read_blocks()`.
    /// * `fn read_aligned_offset_and_buffer()`: At most 2 calls to `read_aligned_all()`.
    /// * `fn read_aligned_buffer()`: At most 1 call to `read_aligned_offset_and_buffer()` plus 1
    ///   call to `BlockIo::read_blocks()`.
    /// * `fn read()`: At most 2 calls to `read_aligned_buffer()`.
    ///
    /// The analysis is similar for `fn write()`.
    const READ_WRITE_BLOCKS_UPPER_BOUND: usize = 6;

    fn read_test_helper(case: &TestCase) {
        let data = (0..case.storage_size).map(|v| v as u8).collect::<Vec<_>>();
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(case.alignment)
            .set_block_size(case.block_size)
            .set_data(&data)
            .build();
        // Make an aligned buffer. A misaligned version is created by taking a subslice that
        // starts at an unaligned offset. Because of this we need to allocate
        // `case.misalignment` more bytes to accommodate it.
        let mut aligned_buf = AlignedBuffer::new(case.alignment, case.rw_size + case.misalignment);
        let misalignment = SafeNum::from(case.misalignment);
        let out = &mut aligned_buf.get()
            [misalignment.try_into().unwrap()..(misalignment + case.rw_size).try_into().unwrap()];
        blk.read(case.rw_offset, out).unwrap();
        let rw_offset = SafeNum::from(case.rw_offset);
        assert_eq!(
            out.to_vec(),
            blk.io.storage
                [rw_offset.try_into().unwrap()..(rw_offset + case.rw_size).try_into().unwrap()]
                .to_vec(),
            "Failed. Test case {:?}",
            case,
        );

        assert!(blk.io.num_reads <= READ_WRITE_BLOCKS_UPPER_BOUND);
    }

    fn write_test_helper(case: &TestCase, write_func: fn(&mut TestBlockDevice, u64, &mut [u8])) {
        let data = (0..case.storage_size).map(|v| v as u8).collect::<Vec<_>>();
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(case.alignment)
            .set_block_size(case.block_size)
            .set_data(&data)
            .build();
        // Write a reversed version of the current data.
        let rw_offset = SafeNum::from(case.rw_offset);
        let mut expected = blk.io.storage
            [rw_offset.try_into().unwrap()..(rw_offset + case.rw_size).try_into().unwrap()]
            .to_vec();
        expected.reverse();
        // Make an aligned buffer. A misaligned version is created by taking a subslice that
        // starts at an unaligned offset. Because of this we need to allocate
        // `case.misalignment` more bytes to accommodate it.
        let misalignment = SafeNum::from(case.misalignment);
        let mut aligned_buf = AlignedBuffer::new(case.alignment, case.rw_size + case.misalignment);
        let data = &mut aligned_buf.get()
            [misalignment.try_into().unwrap()..(misalignment + case.rw_size).try_into().unwrap()];
        data.clone_from_slice(&expected);
        write_func(&mut blk, case.rw_offset, data);
        let rw_offset = SafeNum::from(case.rw_offset);
        assert_eq!(
            expected,
            blk.io.storage
                [rw_offset.try_into().unwrap()..(rw_offset + case.rw_size).try_into().unwrap()]
                .to_vec(),
            "Failed. Test case {:?}",
            case,
        );
        // Check that the input is not modified.
        assert_eq!(expected, data, "Input is modified. Test case {:?}", case,);
    }

    macro_rules! read_write_test {
        ($name:ident, $x0:expr, $x1:expr, $x2:expr, $x3:expr, $x4:expr, $x5:expr) => {
            mod $name {
                use super::*;

                #[test]
                fn read_test() {
                    read_test_helper(&TestCase::new($x0, $x1, $x2, $x3, $x4, $x5));
                }

                #[test]
                fn read_scaled_test() {
                    // Scale all parameters by two and test again.
                    let (x0, x1, x2, x3, x4, x5) =
                        (2 * $x0, 2 * $x1, 2 * $x2, 2 * $x3, 2 * $x4, 2 * $x5);
                    read_test_helper(&TestCase::new(x0, x1, x2, x3, x4, x5));
                }

                // The input bytes slice is a mutable reference.
                #[test]
                fn write_mut_test() {
                    let func = |blk: &mut TestBlockDevice, offset: u64, data: &mut [u8]| {
                        blk.write(offset, data).unwrap();
                        assert!(blk.io.num_reads <= READ_WRITE_BLOCKS_UPPER_BOUND);
                        assert!(blk.io.num_writes <= READ_WRITE_BLOCKS_UPPER_BOUND);
                    };
                    write_test_helper(&TestCase::new($x0, $x1, $x2, $x3, $x4, $x5), func);
                }

                #[test]
                fn write_mut_scaled_test() {
                    // Scale all parameters by two and test again.
                    let (x0, x1, x2, x3, x4, x5) =
                        (2 * $x0, 2 * $x1, 2 * $x2, 2 * $x3, 2 * $x4, 2 * $x5);
                    let func = |blk: &mut TestBlockDevice, offset: u64, data: &mut [u8]| {
                        blk.write(offset, data).unwrap();
                        assert!(blk.io.num_reads <= READ_WRITE_BLOCKS_UPPER_BOUND);
                        assert!(blk.io.num_writes <= READ_WRITE_BLOCKS_UPPER_BOUND);
                    };
                    write_test_helper(&TestCase::new(x0, x1, x2, x3, x4, x5), func);
                }
            }
        };
    }

    const BLOCK_SIZE: u64 = 512;
    const ALIGNMENT: u64 = 64;
    const STORAGE: u64 = BLOCK_SIZE * 32;

    // Test cases for different scenarios of read/write windows w.r.t. the buffer/block alignment
    // boundary.
    // offset
    //  |~~~~~~~~~~~~~size~~~~~~~~~~~~|
    //  |---------|---------|---------|
    read_write_test! {aligned_all, 0, STORAGE, 0, ALIGNMENT, BLOCK_SIZE, STORAGE}

    // offset
    //  |~~~~~~~~~size~~~~~~~~~|
    //  |---------|---------|---------|
    read_write_test! {
        aligned_offset_unaligned_size, 0, STORAGE - 1, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }
    // offset
    //  |~~size~~|
    //  |---------|---------|---------|
    read_write_test! {
        aligned_offset_intra_block, 0, BLOCK_SIZE - 1, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }
    // offset
    //   |~~~~~~~~~~~size~~~~~~~~~~|
    //  |---------|---------|---------|
    read_write_test! {
        unaligned_offset_aligned_end, 1, STORAGE - 1, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }
    // offset
    //   |~~~~~~~~~size~~~~~~~~|
    //  |---------|---------|---------|
    read_write_test! {unaligned_offset_len, 1, STORAGE - 2, 0, ALIGNMENT, BLOCK_SIZE, STORAGE}
    // offset
    //   |~~~size~~~|
    //  |---------|---------|---------|
    read_write_test! {
        unaligned_offset_len_partial_cross_block, 1, BLOCK_SIZE, 0, ALIGNMENT, BLOCK_SIZE, STORAGE
    }
    // offset
    //   |~size~|
    //  |---------|---------|---------|
    read_write_test! {
        unaligned_offset_len_partial_intra_block,
        1,
        BLOCK_SIZE - 2,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }

    // The same set of test cases but with an additional block added to `rw_offset`.
    read_write_test! {
        aligned_all_extra_offset,
        BLOCK_SIZE,
        STORAGE,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        aligned_offset_unaligned_size_extra_offset,
        BLOCK_SIZE,
        STORAGE - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        aligned_offset_intra_block_extra_offset,
        BLOCK_SIZE,
        BLOCK_SIZE - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_aligned_end_extra_offset,
        BLOCK_SIZE + 1,
        STORAGE - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_len_extra_offset,
        BLOCK_SIZE + 1,
        STORAGE - 2,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_len_partial_cross_block_extra_offset,
        BLOCK_SIZE + 1,
        BLOCK_SIZE,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }
    read_write_test! {
        unaligned_offset_len_partial_intra_block_extra_offset,
        BLOCK_SIZE + 1,
        BLOCK_SIZE - 2,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE + BLOCK_SIZE
    }

    // The same set of test cases but with an unaligned output buffer (`misalignment` != 0).
    read_write_test! {
        aligned_all_unaligned_buffer,
        0,
        STORAGE,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        aligned_offset_unaligned_size_unaligned_buffer,
        0,
        STORAGE - 1,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        aligned_offset_intra_block_unaligned_buffer,
        0,
        BLOCK_SIZE - 1,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_aligned_end_unaligned_buffer,
        1,
        STORAGE - 1,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_len_unaligned_buffer,
        1,
        STORAGE - 2,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_len_partial_cross_block_unaligned_buffer,
        1,
        BLOCK_SIZE,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        unaligned_offset_len_partial_intra_block_unaligned_buffer,
        1,
        BLOCK_SIZE - 2,
        1,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }

    // Special cases where `rw_offset` is not block-aligned but buffer-aligned. This can
    // trigger some internal optimization code paths.
    read_write_test! {
        buffer_aligned_offset_and_len,
        ALIGNMENT,
        STORAGE - ALIGNMENT,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        buffer_aligned_offset,
        ALIGNMENT,
        STORAGE - ALIGNMENT - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        buffer_aligned_offset_aligned_end,
        ALIGNMENT,
        BLOCK_SIZE,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }
    read_write_test! {
        buffer_aligned_offset_intra_block,
        ALIGNMENT,
        BLOCK_SIZE - ALIGNMENT - 1,
        0,
        ALIGNMENT,
        BLOCK_SIZE,
        STORAGE
    }

    #[test]
    fn test_no_alignment_require_zero_size_scratch() {
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(1)
            .set_block_size(1)
            .set_max_gpt_entries(0)
            .set_size(1)
            .build();
        assert_eq!(required_scratch_size(&mut blk.io, 0).unwrap(), 0);
    }

    #[test]
    fn test_scratch_too_small() {
        let storage_size = (TestBlockDeviceBuilder::DEFAULT_BLOCK_SIZE * 3) as usize;
        let scratch_size =
            TestBlockDeviceBuilder::new().set_size(storage_size).build().scratch.len() - 1;
        let mut blk = TestBlockDeviceBuilder::new()
            .set_size(storage_size)
            .set_scratch_size(scratch_size)
            .build();
        let block_size = TestBlockDeviceBuilder::DEFAULT_BLOCK_SIZE;
        assert!(blk.read(0, &mut vec![0u8; block_size.try_into().unwrap()]).is_err());
    }

    #[test]
    fn test_read_overflow() {
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(1)
            .set_block_size(1)
            .set_max_gpt_entries(0)
            .set_size(512)
            .build();
        assert!(blk.read(512, &mut vec![0u8; 1]).is_err());
        assert!(blk.read(0, &mut vec![0u8; 513]).is_err());
    }

    #[test]
    fn test_read_arithmetic_overflow() {
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(1)
            .set_block_size(1)
            .set_max_gpt_entries(0)
            .set_size(512)
            .build();
        assert!(blk.read(u64::MAX, &mut vec![0u8; 1]).is_err());
    }

    #[test]
    fn test_write_overflow() {
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(1)
            .set_block_size(1)
            .set_max_gpt_entries(0)
            .set_size(512)
            .build();
        assert!(blk.write(512, vec![0u8; 1].as_mut_slice()).is_err());
        assert!(blk.write(0, vec![0u8; 513].as_mut_slice()).is_err());
    }

    #[test]
    fn test_write_arithmetic_overflow() {
        let mut blk = TestBlockDeviceBuilder::new()
            .set_alignment(1)
            .set_block_size(1)
            .set_max_gpt_entries(0)
            .set_size(512)
            .build();
        assert!(blk.write(u64::MAX, vec![0u8; 1].as_mut_slice()).is_err());
    }

    #[test]
    fn test_u64_not_narrower_than_usize() {
        // If this ever fails we need to adjust all code for >64-bit pointers and sizes.
        assert!(size_of::<u64>() >= size_of::<*const u8>());
        assert!(size_of::<u64>() >= size_of::<usize>());
    }
}