1 /*
2  * Copyright (C) 2021 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
//! A module for writing to a file from a trusted world to untrusted storage.
//!
//! Architectural Model:
//!  * Trusted world: the writer, which holds a signing secret and has some memory, but NO
//!    persistent storage.
//!  * Untrusted world: persistent storage, assumed to be untrusted.
//!  * An IPC mechanism between the trusted and untrusted worlds.
23 //!
24 //! Use cases:
25 //!  * In the trusted world, we want to generate a large file, sign it, and share the signature for
26 //!    a third party to verify the file.
//!  * In the trusted world, we want to read a previously signed file back, with a signature
//!    check, without having to touch the whole file.
29 //!
30 //! Requirements:
//!  * Communication between the trusted and untrusted worlds is not cheap, and files can be
//!    large.
//!  * Writes to a file may not be sequential, and neither are reads.
33 //!
//! Considering the above, a technique similar to fs-verity is used. fs-verity computes the file
//! hash with a Merkle tree over the file content. A file update at any location propagates the
//! hash update from the leaf to the root node. Unlike fs-verity, which assumes static files, we
//! need to support writes, so the file (and thus the tree) must be able to update. (A sketch of
//! the leaf-hash scheme appears at the end of this comment.)
39 //!
//! For the trusted world to generate a large file with random writes and hash it, the writer
//! needs to hold some private information and update the Merkle tree during a file write (or
//! even when the Merkle tree needs to be stashed to the untrusted storage).
43 //!
//! A write to a file must update the root hash. In order for the root hash to update, a tree
//! walk from the write location to the root node is necessary. Importantly, when (part of) the
//! Merkle tree needs to be read from the untrusted storage (e.g. when it is not yet verified in
//! the cache), the original path must be verified against the trusted signature before the update
//! can happen.
48 //!
//! Denial-of-service is a known weakness: the untrusted storage can simply remove the file, and
//! there is nothing we can do about it in this architecture.
//!
//! A rollback attack is another possibility, but it can be addressed with a rollback counter
//! where feasible.
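//!
//! As a minimal sketch of the leaf-hash scheme above (assuming the `sha2` crate as a stand-in
//! for this crate's internal `Sha256Hasher`; `leaf_hash` is a hypothetical helper, not part of
//! this module), each 4096-byte chunk is 0-padded to the chunk size and hashed with SHA-256:
//!
//! ```ignore
//! use sha2::{Digest, Sha256};
//!
//! const CHUNK_SIZE: usize = 4096;
//!
//! fn leaf_hash(chunk: &[u8]) -> [u8; 32] {
//!     assert!(chunk.len() <= CHUNK_SIZE);
//!     let mut hasher = Sha256::new();
//!     hasher.update(chunk);
//!     // 0-pad a partial chunk to CHUNK_SIZE, like fs-verity does.
//!     hasher.update(&vec![0u8; CHUNK_SIZE - chunk.len()]);
//!     hasher.finalize().into()
//! }
//! ```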
54 
55 use std::io;
56 use std::sync::{Arc, RwLock};
57 
58 use super::builder::MerkleLeaves;
59 use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
60 use crate::crypto::{CryptoError, Sha256Hash, Sha256Hasher};
61 use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
62 
63 // Implement the conversion from `CryptoError` to `io::Error` just to avoid manual error type
64 // mapping below.
65 impl From<CryptoError> for io::Error {
66     fn from(error: CryptoError) -> Self {
67         io::Error::new(io::ErrorKind::Other, error)
68     }
69 }
70 
71 fn debug_assert_usize_is_u64() {
    // Since we don't need to support 32-bit CPUs, make an assert to make conversions between
    // u64 and usize easy below. Otherwise, we would need to check e.g.
    // `divide_roundup(offset + buf.len() as u64, CHUNK_SIZE) <= usize::MAX` or handle `TryInto`
    // errors.
75     debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
76 }
77 
78 /// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
79 /// not be stored in a trusted environment. Only new, empty files are currently supported.
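///
/// A usage sketch (hypothetical; `backing_file` is assumed to be any `ReadByChunk + RandomWrite`
/// implementation, such as the `InMemoryEditor` in the tests below):
///
/// ```ignore
/// let file = VerifiedFileEditor::new(backing_file);
/// file.write_at(&[1u8; 4096], 0)?;
/// let digest = file.calculate_fsverity_digest()?;
/// ```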
80 pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
81     file: F,
82     merkle_tree: Arc<RwLock<MerkleLeaves>>,
83 }
84 
85 impl<F: ReadByChunk + RandomWrite> VerifiedFileEditor<F> {
86     /// Wraps a supposedly new file for integrity protection.
87     pub fn new(file: F) -> Self {
88         Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
89     }
90 
91     /// Calculates the fs-verity digest of the current file.
92     #[allow(dead_code)]
93     pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
94         let merkle_tree = self.merkle_tree.read().unwrap();
95         merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
96     }
97 
98     fn new_hash_for_incomplete_write(
99         &self,
100         source: &[u8],
101         offset_from_alignment: usize,
102         output_chunk_index: usize,
103         merkle_tree: &mut MerkleLeaves,
104     ) -> io::Result<Sha256Hash> {
        // The buffer is initialized to 0 purposely. To calculate the block hash, the data is
        // 0-padded to the block size. When a chunk read returns less than a full chunk, the
        // initial value conveniently serves the padding purpose.
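        //
        // For example (illustrative numbers): writing 3 bytes at `offset_from_alignment == 1`
        // into an otherwise-empty chunk hashes one zero byte, the 3 new bytes, and then 4092
        // zeros.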
108         let mut orig_data = [0u8; CHUNK_SIZE as usize];
109 
110         // If previous data exists, read back and verify against the known hash (since the
111         // storage / remote server is not trusted).
112         if merkle_tree.is_index_valid(output_chunk_index) {
113             self.read_chunk(output_chunk_index as u64, &mut orig_data)?;
114 
115             // Verify original content
116             let hash = Sha256Hasher::new()?.update(&orig_data)?.finalize()?;
117             if !merkle_tree.is_consistent(output_chunk_index, &hash) {
118                 return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
119             }
120         }
121 
122         Ok(Sha256Hasher::new()?
123             .update(&orig_data[..offset_from_alignment])?
124             .update(source)?
125             .update(&orig_data[offset_from_alignment + source.len()..])?
126             .finalize()?)
127     }
128 
129     fn new_chunk_hash(
130         &self,
131         source: &[u8],
132         offset_from_alignment: usize,
133         current_size: usize,
134         output_chunk_index: usize,
135         merkle_tree: &mut MerkleLeaves,
136     ) -> io::Result<Sha256Hash> {
137         if current_size as u64 == CHUNK_SIZE {
138             // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
139             // write location.
140             Ok(Sha256Hasher::new()?.update(source)?.finalize()?)
141         } else {
142             // Case 2: For an incomplete write, calculate the hash based on previous data (if
143             // any).
144             self.new_hash_for_incomplete_write(
145                 source,
146                 offset_from_alignment,
147                 output_chunk_index,
148                 merkle_tree,
149             )
150         }
151     }
152 
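    /// Returns the file size, as tracked by the Merkle tree.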
153     pub fn size(&self) -> u64 {
154         self.merkle_tree.read().unwrap().file_size()
155     }
156 }
157 
158 impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
159     fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
160         debug_assert_usize_is_u64();
161 
162         // The write range may not be well-aligned with the chunk boundary. There are various cases
163         // to deal with:
164         //  1. A write of a full 4K chunk.
165         //  2. A write of an incomplete chunk, possibly beyond the original EOF.
166         //
167         // Note that a write beyond EOF can create a hole. But we don't need to handle it here
168         // because holes are zeros, and leaves in MerkleLeaves are hashes of 4096-zeros by
169         // default.
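        //
        // For example (assuming `ChunkedSizeIter` splits a write range at chunk boundaries), a
        // 5-byte write at offset 4094 is expected to yield (offset 4094, size 2) in chunk 0,
        // followed by (offset 4096, size 3) in chunk 1.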
170 
171         // Now iterate on the input data, considering the alignment at the destination.
172         for (output_offset, current_size) in
173             ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)
174         {
            // Lock the tree for the whole write for now. There may be room for improvement to
            // increase throughput.
177             let mut merkle_tree = self.merkle_tree.write().unwrap();
178 
179             let offset_in_buf = (output_offset - offset) as usize;
            let source = &buf[offset_in_buf..offset_in_buf + current_size];
181             let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;
182             let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;
183 
184             let new_hash = match self.new_chunk_hash(
185                 source,
186                 offset_from_alignment,
187                 current_size,
188                 output_chunk_index,
189                 &mut merkle_tree,
190             ) {
191                 Ok(hash) => hash,
192                 Err(e) => {
                    // Return early when any error happens before the write. Even if the hash is
                    // not consistent for the current chunk, we can still consider the earlier
                    // writes successful. Note that nothing persistent has been done in this
                    // iteration.
196                     let written = output_offset - offset;
197                     if written > 0 {
198                         return Ok(written as usize);
199                     }
200                     return Err(e);
201                 }
202             };
203 
            // A failed, partial write here will make the backing file inconsistent with the (old)
            // hash. Nothing can be done within this writer, but at least it still maintains the
            // (original) integrity of the file. To match what write(2) describes for an error
            // case (though it's about direct I/O), "Partial data may be written ... should be
            // considered inconsistent", an error below is propagated.
            self.file.write_all_at(source, output_offset)?;
210 
            // Update the hash only after the write succeeds. Note that this only attempts to keep
            // the tree consistent with what has been written, regardless of the actual state
            // beyond the writer.
214             let size_at_least = offset.saturating_add(buf.len() as u64);
215             merkle_tree.update_hash(output_chunk_index, &new_hash, size_at_least);
216         }
217         Ok(buf.len())
218     }
219 
220     fn resize(&self, size: u64) -> io::Result<()> {
221         debug_assert_usize_is_u64();
222 
223         let mut merkle_tree = self.merkle_tree.write().unwrap();
        // When truncating the file, we may need to recalculate the hash of the (new) last chunk.
        // Since the content is provided by the untrusted backend, we need to read the data back
        // first, verify it, then overwrite the truncated portion with 0-padding for hashing. As an
        // optimization, we only need to read the data back if the new size isn't a multiple of
        // CHUNK_SIZE (in which case the hash is already correct).
229         //
230         // The same thing does not need to happen when the size is growing. Since the new extended
231         // data is always 0, we can just resize the `MerkleLeaves`, where a new hash is always
232         // calculated from 4096 zeros.
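        //
        // For example (illustrative numbers): truncating an 8192-byte file to 6000 bytes keeps
        // 6000 % 4096 == 1904 bytes of chunk 1; that chunk is read back, and its new hash is
        // computed over those 1904 bytes followed by 4096 - 1904 == 2192 zeros.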
        if size < merkle_tree.file_size() && size % CHUNK_SIZE > 0 {
            // The condition above guarantees a non-empty tail in the (new) last chunk.
            let new_tail_size = (size % CHUNK_SIZE) as usize;
            let chunk_index = size / CHUNK_SIZE;
            let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
            let s = self.read_chunk(chunk_index, &mut buf)?;
            debug_assert!(new_tail_size <= s);

            let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
            let new_hash = Sha256Hasher::new()?
                .update(&buf[..new_tail_size])?
                .update(&zeros)?
                .finalize()?;
            merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
        }
249 
250         self.file.resize(size)?;
251         merkle_tree.resize(size as usize);
252 
253         Ok(())
254     }
255 }
256 
257 impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
258     fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
259         self.file.read_chunk(chunk_index, buf)
260     }
261 }
262 
263 #[cfg(test)]
264 mod tests {
265     // Test data below can be generated by:
266     //  $ perl -e 'print "\x{00}" x 6000' > foo
267     //  $ perl -e 'print "\x{01}" x 5000' >> foo
268     //  $ fsverity digest foo
269     use super::*;
270     use anyhow::Result;
271     use std::cell::RefCell;
272     use std::convert::TryInto;
273 
274     struct InMemoryEditor {
275         data: RefCell<Vec<u8>>,
276         fail_read: bool,
277     }
278 
279     impl InMemoryEditor {
280         pub fn new() -> InMemoryEditor {
281             InMemoryEditor { data: RefCell::new(Vec::new()), fail_read: false }
282         }
283     }
284 
285     impl RandomWrite for InMemoryEditor {
286         fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
287             let begin: usize =
288                 offset.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
289             let end = begin + buf.len();
290             if end > self.data.borrow().len() {
291                 self.data.borrow_mut().resize(end, 0);
292             }
            self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(buf);
294             Ok(buf.len())
295         }
296 
297         fn resize(&self, size: u64) -> io::Result<()> {
298             let size: usize =
299                 size.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
300             self.data.borrow_mut().resize(size, 0);
301             Ok(())
302         }
303     }
304 
305     impl ReadByChunk for InMemoryEditor {
306         fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
307             if self.fail_read {
308                 return Err(io::Error::new(io::ErrorKind::Other, "test!"));
309             }
310 
            let borrowed = self.data.borrow();
            let chunk = borrowed
                .chunks(CHUNK_SIZE as usize)
                .nth(chunk_index as usize)
                .ok_or_else(|| {
                    io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("read_chunk out of bound: index {}", chunk_index),
                    )
                })?;
            buf[..chunk.len()].copy_from_slice(chunk);
322             Ok(chunk.len())
323         }
324     }
325 
326     #[test]
327     fn test_writer() -> Result<()> {
328         let writer = InMemoryEditor::new();
329         let buf = [1; 4096];
330         assert_eq!(writer.data.borrow().len(), 0);
331 
332         assert_eq!(writer.write_at(&buf, 16384)?, 4096);
333         assert_eq!(writer.data.borrow()[16384..16384 + 4096], buf);
334 
335         assert_eq!(writer.write_at(&buf, 2048)?, 4096);
336         assert_eq!(writer.data.borrow()[2048..2048 + 4096], buf);
337 
338         assert_eq!(writer.data.borrow().len(), 16384 + 4096);
339         Ok(())
340     }
341 
342     #[test]
343     fn test_verified_writer_no_write() -> Result<()> {
344         // Verify fs-verity hash without any write.
345         let file = VerifiedFileEditor::new(InMemoryEditor::new());
346         assert_eq!(
347             file.calculate_fsverity_digest()?,
348             to_u8_vec("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")
349                 .as_slice()
350         );
351         Ok(())
352     }
353 
354     #[test]
355     fn test_verified_writer_from_zero() -> Result<()> {
356         // Verify a write of a full chunk.
357         let file = VerifiedFileEditor::new(InMemoryEditor::new());
358         assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
359         assert_eq!(
360             file.calculate_fsverity_digest()?,
361             to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
362                 .as_slice()
363         );
364 
        // Verify a write across multiple chunks.
366         let file = VerifiedFileEditor::new(InMemoryEditor::new());
367         assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
368         assert_eq!(
369             file.calculate_fsverity_digest()?,
370             to_u8_vec("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")
371                 .as_slice()
372         );
373 
        // Verify another write across multiple chunks.
375         let file = VerifiedFileEditor::new(InMemoryEditor::new());
376         assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
377         assert_eq!(
378             file.calculate_fsverity_digest()?,
379             to_u8_vec("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")
380                 .as_slice()
381         );
382         Ok(())
383     }
384 
385     #[test]
386     fn test_verified_writer_unaligned() -> Result<()> {
        // Verify a small, unaligned write beyond EOF.
388         let file = VerifiedFileEditor::new(InMemoryEditor::new());
389         assert_eq!(file.write_at(&[1; 5], 3)?, 5);
390         assert_eq!(
391             file.calculate_fsverity_digest()?,
392             to_u8_vec("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")
393                 .as_slice()
394         );
395 
        // Verify a bigger, unaligned write beyond EOF.
397         let file = VerifiedFileEditor::new(InMemoryEditor::new());
398         assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
399         assert_eq!(
400             file.calculate_fsverity_digest()?,
401             to_u8_vec("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")
402                 .as_slice()
403         );
404         Ok(())
405     }
406 
407     #[test]
408     fn test_verified_writer_with_hole() -> Result<()> {
409         // Verify an aligned write beyond EOF with holes.
410         let file = VerifiedFileEditor::new(InMemoryEditor::new());
411         assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
412         assert_eq!(
413             file.calculate_fsverity_digest()?,
414             to_u8_vec("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")
415                 .as_slice()
416         );
417 
418         // Verify an unaligned write beyond EOF with holes.
419         let file = VerifiedFileEditor::new(InMemoryEditor::new());
420         assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
421         assert_eq!(
422             file.calculate_fsverity_digest()?,
423             to_u8_vec("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")
424                 .as_slice()
425         );
426 
427         // Just another example with a small write.
428         let file = VerifiedFileEditor::new(InMemoryEditor::new());
429         assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
430         assert_eq!(
431             file.calculate_fsverity_digest()?,
432             to_u8_vec("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")
433                 .as_slice()
434         );
435         Ok(())
436     }
437 
438     #[test]
439     fn test_verified_writer_various_writes() -> Result<()> {
440         let file = VerifiedFileEditor::new(InMemoryEditor::new());
441         assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
442         assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
443         assert_eq!(
444             file.calculate_fsverity_digest()?,
445             to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
446                 .as_slice()
447         );
448         assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
449         assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
450         assert_eq!(
451             file.calculate_fsverity_digest()?,
452             to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
453                 .as_slice()
454         );
455         assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
456         assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
457         assert_eq!(
458             file.calculate_fsverity_digest()?,
459             to_u8_vec("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")
460                 .as_slice()
461         );
462         assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
463         assert_eq!(
464             file.calculate_fsverity_digest()?,
465             to_u8_vec("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")
466                 .as_slice()
467         );
468         assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
469         assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
470         assert_eq!(
471             file.calculate_fsverity_digest()?,
472             to_u8_vec("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")
473                 .as_slice()
474         );
475         Ok(())
476     }
477 
478     #[test]
479     fn test_verified_writer_inconsistent_read() -> Result<()> {
480         let file = VerifiedFileEditor::new(InMemoryEditor::new());
481         assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
482 
483         // Replace the expected hash of the first/0-th chunk. An incomplete write will fail when it
484         // detects the inconsistent read.
485         {
486             let mut merkle_tree = file.merkle_tree.write().unwrap();
487             let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
488             merkle_tree.update_hash(0, &overriding_hash, 8192);
489         }
490         assert!(file.write_at(&[1; 1], 2048).is_err());
491 
        // A write of a full chunk can still succeed, and it also fixes the inconsistency.
493         assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
494 
        // Replace the expected hash of the second/1-th chunk. A write range starting from the
        // previous chunk can still succeed for the consistent part, but returns early (still
        // successfully) at the inconsistent chunk. A resumed write will then fail, since no bytes
        // can be written due to the same inconsistency.
498         {
499             let mut merkle_tree = file.merkle_tree.write().unwrap();
500             let overriding_hash = [42; Sha256Hasher::HASH_SIZE];
501             merkle_tree.update_hash(1, &overriding_hash, 8192);
502         }
503         assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
504         assert!(file.write_at(&[10; 8000 - 4096], 4096).is_err());
505         Ok(())
506     }
507 
508     #[test]
509     fn test_verified_writer_failed_read_back() -> Result<()> {
510         let mut writer = InMemoryEditor::new();
511         writer.fail_read = true;
512         let file = VerifiedFileEditor::new(writer);
513         assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
514 
        // When a read back is needed, a read failure causes the write to fail.
516         assert!(file.write_at(&[1; 1], 2048).is_err());
517         Ok(())
518     }
519 
520     #[test]
521     fn test_resize_to_same_size() -> Result<()> {
522         let file = VerifiedFileEditor::new(InMemoryEditor::new());
523         assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
524 
525         assert!(file.resize(2048).is_ok());
526         assert_eq!(file.size(), 2048);
527 
528         assert_eq!(
529             file.calculate_fsverity_digest()?,
530             to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
531                 .as_slice()
532         );
533         Ok(())
534     }
535 
536     #[test]
537     fn test_resize_to_grow() -> Result<()> {
538         let file = VerifiedFileEditor::new(InMemoryEditor::new());
539         assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
540 
541         // Resize should grow with 0s.
542         assert!(file.resize(4096).is_ok());
543         assert_eq!(file.size(), 4096);
544 
545         assert_eq!(
546             file.calculate_fsverity_digest()?,
547             to_u8_vec("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")
548                 .as_slice()
549         );
550         Ok(())
551     }
552 
553     #[test]
554     fn test_resize_to_shrink() -> Result<()> {
555         let file = VerifiedFileEditor::new(InMemoryEditor::new());
556         assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
557 
558         // Truncate.
559         file.resize(2048)?;
560         assert_eq!(file.size(), 2048);
561 
562         assert_eq!(
563             file.calculate_fsverity_digest()?,
564             to_u8_vec("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")
565                 .as_slice()
566         );
567         Ok(())
568     }
569 
570     #[test]
571     fn test_resize_to_shrink_with_read_failure() -> Result<()> {
572         let mut writer = InMemoryEditor::new();
573         writer.fail_read = true;
574         let file = VerifiedFileEditor::new(writer);
575         assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
576 
        // A truncate needs a read back. If the read fails, the resize should fail.
578         assert!(file.resize(2048).is_err());
579         Ok(())
580     }
581 
582     #[test]
    fn test_resize_to_shrink_to_chunk_boundary() -> Result<()> {
584         let mut writer = InMemoryEditor::new();
585         writer.fail_read = true;
586         let file = VerifiedFileEditor::new(writer);
587         assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);
588 
        // Truncate to a chunk boundary. A read error doesn't matter since we won't need to
        // recalculate the leaf hash.
591         file.resize(4096)?;
592         assert_eq!(file.size(), 4096);
593 
594         assert_eq!(
595             file.calculate_fsverity_digest()?,
596             to_u8_vec("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")
597                 .as_slice()
598         );
599         Ok(())
600     }
601 
602     fn to_u8_vec(hex_str: &str) -> Vec<u8> {
603         assert!(hex_str.len() % 2 == 0);
604         (0..hex_str.len())
605             .step_by(2)
606             .map(|i| u8::from_str_radix(&hex_str[i..i + 2], 16).unwrap())
607             .collect()
608     }
609 }
610