/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//! A module for writing to a file from a trusted world to untrusted storage.
//!
//! Architectural Model:
//!  * Trusted world: the writer, which holds a signing secret and some memory, but NO persistent
//!    storage.
//!  * Untrusted world: persistent storage, assumed untrusted.
//!  * IPC mechanism between the trusted and untrusted worlds
//!
//! Use cases:
//!  * In the trusted world, we want to generate a large file, sign it, and share the signature for
//!    a third party to verify the file.
//!  * In the trusted world, we want to read a previously signed file back with signature check
//!    without having to touch the whole file.
//!
//! Requirements:
//!  * Communication between the trusted and untrusted worlds is not cheap, and files can be large.
//!  * A file write pattern may not be sequential, and neither may reads.
//!
//! Considering the above, a technique similar to fs-verity is used. fs-verity computes the hash
//! of the file content with a Merkle tree. A file update at any location will propagate the hash
//! update from the leaf to the root node. Unlike fs-verity, which assumes static files, we need
//! to allow the file (and thus the tree) to update in order to support write operations.
//!
//! For the trusted world to generate a large file with random writes and hash it, the writer
//! needs to hold some private information and update the Merkle tree during a file write (or even
//! when the Merkle tree needs to be stashed to the untrusted storage).
//!
//! A write to a file must update the root hash. In order for the root hash to update, a tree
//! walk from the write location up to the root node is necessary. Importantly, when (part of) the
//! Merkle tree needs to be read from the untrusted storage (e.g. it is not yet verified in the
//! cache), the original path must be verified against the trusted signature before the update can
//! happen.
//!
//! Denial-of-service is a known weakness if the untrusted storage decides to simply remove the
//! file. But there is nothing we can do in this architecture.
//!
//! Rollback attack is another possible attack, but can be addressed with a rollback counter when
//! possible.
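//!
//! At a high level, usage looks like the sketch below (`backing_file` stands for any
//! `ReadByChunk + RandomWrite` implementation; the name is illustrative, not a type provided by
//! this module):
//!
//! ```ignore
//! let file = VerifiedFileEditor::new(backing_file);
//! // Writing updates both the backing file and the in-memory Merkle tree.
//! file.write_at(&data, offset)?;
//! // The fs-verity digest reflects the file content written so far.
//! let digest = file.calculate_fsverity_digest()?;
//! ```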

use std::io;
use std::sync::{Arc, RwLock};

use super::builder::MerkleLeaves;
use super::common::{Sha256Hash, SHA256_HASH_SIZE};
use crate::common::{ChunkedSizeIter, CHUNK_SIZE};
use crate::file::{ChunkBuffer, RandomWrite, ReadByChunk};
use openssl::sha::{sha256, Sha256};

fn debug_assert_usize_is_u64() {
    // Since we don't need to support 32-bit CPUs, make an assert to make conversions between
    // u64 and usize easy below. Otherwise, we would need to check that expressions like
    // `offset + buf.len()` fit in `usize`, or handle `TryInto` errors.
    debug_assert!(usize::MAX as u64 == u64::MAX, "Only 64-bit arch is supported");
}

/// VerifiedFileEditor provides an integrity layer to an underlying read-writable file, which may
/// not be stored in a trusted environment. Only new, empty files are currently supported.
pub struct VerifiedFileEditor<F: ReadByChunk + RandomWrite> {
    file: F,
    merkle_tree: Arc<RwLock<MerkleLeaves>>,
}

impl<F: ReadByChunk + RandomWrite> VerifiedFileEditor<F> {
    /// Wraps a supposedly new file for integrity protection.
    pub fn new(file: F) -> Self {
        Self { file, merkle_tree: Arc::new(RwLock::new(MerkleLeaves::new())) }
    }

    /// Returns the fs-verity digest size in bytes.
    pub fn get_fsverity_digest_size(&self) -> usize {
        SHA256_HASH_SIZE
    }

    /// Calculates the fs-verity digest of the current file.
    pub fn calculate_fsverity_digest(&self) -> io::Result<Sha256Hash> {
        let merkle_tree = self.merkle_tree.read().unwrap();
        merkle_tree.calculate_fsverity_digest().map_err(|e| io::Error::new(io::ErrorKind::Other, e))
    }

    fn read_backing_chunk_unverified(
        &self,
        chunk_index: u64,
        buf: &mut ChunkBuffer,
    ) -> io::Result<usize> {
        self.file.read_chunk(chunk_index, buf)
    }

    fn read_backing_chunk_verified(
        &self,
        chunk_index: u64,
        buf: &mut ChunkBuffer,
        merkle_tree_locked: &MerkleLeaves,
    ) -> io::Result<usize> {
        debug_assert_usize_is_u64();

        if merkle_tree_locked.is_index_valid(chunk_index as usize) {
            let size = self.read_backing_chunk_unverified(chunk_index, buf)?;

            // Ensure the returned buffer matches the known hash.
            let hash = sha256(buf);
            if !merkle_tree_locked.is_consistent(chunk_index as usize, &hash) {
                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
            }
            Ok(size)
        } else {
            Ok(0)
        }
    }

    fn new_hash_for_incomplete_write(
        &self,
        source: &[u8],
        offset_from_alignment: usize,
        output_chunk_index: usize,
        merkle_tree: &mut MerkleLeaves,
    ) -> io::Result<Sha256Hash> {
        // The buffer is initialized to 0 purposely. To calculate the block hash, the data is
        // 0-padded to the block size. When a chunk read is less than a chunk, the initial value
        // conveniently serves the padding purpose.
        let mut orig_data = [0u8; CHUNK_SIZE as usize];

        // If previous data exists, read back and verify against the known hash (since the
        // storage / remote server is not trusted).
        if merkle_tree.is_index_valid(output_chunk_index) {
            self.read_backing_chunk_unverified(output_chunk_index as u64, &mut orig_data)?;

            // Verify original content
            let hash = sha256(&orig_data);
            if !merkle_tree.is_consistent(output_chunk_index, &hash) {
                return Err(io::Error::new(io::ErrorKind::InvalidData, "Inconsistent hash"));
            }
        }

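        // For example (illustrative numbers): overwriting 5 bytes at byte 3 of the chunk hashes
        // orig_data[..3], then the 5 new bytes, then orig_data[8..], i.e. the chunk content as it
        // will exist after the write, 0-padded to the chunk size.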
        let mut ctx = Sha256::new();
        ctx.update(&orig_data[..offset_from_alignment]);
        ctx.update(source);
        ctx.update(&orig_data[offset_from_alignment + source.len()..]);
        Ok(ctx.finish())
    }

    fn new_chunk_hash(
        &self,
        source: &[u8],
        offset_from_alignment: usize,
        current_size: usize,
        output_chunk_index: usize,
        merkle_tree: &mut MerkleLeaves,
    ) -> io::Result<Sha256Hash> {
        if current_size as u64 == CHUNK_SIZE {
            // Case 1: If the chunk is a complete one, just calculate the hash, regardless of
            // write location.
            Ok(sha256(source))
        } else {
            // Case 2: For an incomplete write, calculate the hash based on previous data (if
            // any).
            self.new_hash_for_incomplete_write(
                source,
                offset_from_alignment,
                output_chunk_index,
                merkle_tree,
            )
        }
    }

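    /// Returns the current file size, as tracked by the Merkle tree.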
    pub fn size(&self) -> u64 {
        self.merkle_tree.read().unwrap().file_size()
    }
}

impl<F: ReadByChunk + RandomWrite> RandomWrite for VerifiedFileEditor<F> {
    fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
        debug_assert_usize_is_u64();

        // The write range may not be well-aligned with the chunk boundary. There are various cases
        // to deal with:
        //  1. A write of a full 4K chunk.
        //  2. A write of an incomplete chunk, possibly beyond the original EOF.
        //
        // Note that a write beyond EOF can create a hole. But we don't need to handle it here
        // because holes are zeros, and leaves in MerkleLeaves are hashes of 4096-zeros by
        // default.

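        // For example (illustrative numbers): the loop below splits a 5000-byte write at offset
        // 2048 into (output_offset, current_size) pairs of (2048, 2048) and (4096, 2952),
        // aligned to destination chunk boundaries.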
        // Now iterate on the input data, considering the alignment at the destination.
        for (output_offset, current_size) in
            ChunkedSizeIter::new(buf.len(), offset, CHUNK_SIZE as usize)
        {
            // Lock the tree for the whole write for now. There may be room to improve to increase
            // throughput.
            let mut merkle_tree = self.merkle_tree.write().unwrap();

            let offset_in_buf = (output_offset - offset) as usize;
            let source = &buf[offset_in_buf..offset_in_buf + current_size];
            let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;
            let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;

            let new_hash = match self.new_chunk_hash(
                source,
                offset_from_alignment,
                current_size,
                output_chunk_index,
                &mut merkle_tree,
            ) {
                Ok(hash) => hash,
                Err(e) => {
                    // Return early when any error happens before the write. Even if the hash is
                    // not consistent for the current chunk, we can still consider the earlier
                    // writes successful. Note that nothing persistent has been done in this
                    // iteration.
                    let written = output_offset - offset;
                    if written > 0 {
                        return Ok(written as usize);
                    }
                    return Err(e);
                }
            };

            // A failed, partial write here will make the backing file inconsistent with the (old)
            // hash. Nothing can be done within this writer, but at least it still maintains the
            // (original) integrity of the file. To match what write(2) describes for an error
            // case (though it's about direct I/O), "Partial data may be written ... should be
            // considered inconsistent", an error below is propagated.
            self.file.write_all_at(source, output_offset)?;

            // Update the hash only after the write succeeds. Note that this only attempts to keep
            // the tree consistent with what has been written, regardless of the actual state
            // beyond the writer.
            let size_at_least = offset.saturating_add(buf.len() as u64);
            merkle_tree.update_hash(output_chunk_index, &new_hash, size_at_least);
        }
        Ok(buf.len())
    }

    fn resize(&self, size: u64) -> io::Result<()> {
        debug_assert_usize_is_u64();

        let mut merkle_tree = self.merkle_tree.write().unwrap();
        // When truncating the file, we may need to recalculate the hash of the (new) last chunk.
        // Since the content is provided by the untrusted backend, we need to read the data back
        // first, verify it, then override the truncated portion with 0-padding for hashing. As an
        // optimization, we only need to read the data back if the new size isn't a multiple of
        // CHUNK_SIZE (since the hash is already correct).
        //
        // The same thing does not need to happen when the size is growing. Since the new extended
        // data is always 0, we can just resize the `MerkleLeaves`, where a new hash is always
        // calculated from 4096 zeros.
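        //
        // For example (illustrative numbers): truncating from 8192 to 6000 bytes gives
        // chunk_index 1 and new_tail_size 1904; chunk 1 is read back and verified, then re-hashed
        // as its first 1904 bytes followed by 2192 zeros.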
        if size < merkle_tree.file_size() && size % CHUNK_SIZE > 0 {
            let new_tail_size = (size % CHUNK_SIZE) as usize;
            let chunk_index = size / CHUNK_SIZE;
            if new_tail_size > 0 {
                let mut buf: ChunkBuffer = [0; CHUNK_SIZE as usize];
                let s = self.read_backing_chunk_verified(chunk_index, &mut buf, &merkle_tree)?;
                debug_assert!(new_tail_size <= s);

                let zeros = vec![0; CHUNK_SIZE as usize - new_tail_size];
                let mut ctx = Sha256::new();
                ctx.update(&buf[..new_tail_size]);
                ctx.update(&zeros);
                let new_hash = ctx.finish();
                merkle_tree.update_hash(chunk_index as usize, &new_hash, size);
            }
        }

        self.file.resize(size)?;
        merkle_tree.resize(size as usize);

        Ok(())
    }
}

impl<F: ReadByChunk + RandomWrite> ReadByChunk for VerifiedFileEditor<F> {
    fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
        let merkle_tree = self.merkle_tree.read().unwrap();
        self.read_backing_chunk_verified(chunk_index, buf, &merkle_tree)
    }
}

#[cfg(test)]
mod tests {
    // Test data below can be generated by:
    //  $ perl -e 'print "\x{00}" x 6000' > foo
    //  $ perl -e 'print "\x{01}" x 5000' >> foo
    //  $ fsverity digest foo
    use super::*;
    use anyhow::Result;
    use std::cell::RefCell;
    use std::convert::TryInto;

    struct InMemoryEditor {
        data: RefCell<Vec<u8>>,
        fail_read: bool,
    }

    impl InMemoryEditor {
        pub fn new() -> InMemoryEditor {
            InMemoryEditor { data: RefCell::new(Vec::new()), fail_read: false }
        }
    }

    impl RandomWrite for InMemoryEditor {
        fn write_at(&self, buf: &[u8], offset: u64) -> io::Result<usize> {
            let begin: usize =
                offset.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            let end = begin + buf.len();
            if end > self.data.borrow().len() {
                self.data.borrow_mut().resize(end, 0);
            }
            self.data.borrow_mut().as_mut_slice()[begin..end].copy_from_slice(buf);
            Ok(buf.len())
        }

        fn resize(&self, size: u64) -> io::Result<()> {
            let size: usize =
                size.try_into().map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
            self.data.borrow_mut().resize(size, 0);
            Ok(())
        }
    }

    impl ReadByChunk for InMemoryEditor {
        fn read_chunk(&self, chunk_index: u64, buf: &mut ChunkBuffer) -> io::Result<usize> {
            if self.fail_read {
                return Err(io::Error::new(io::ErrorKind::Other, "test!"));
            }

            let borrowed = self.data.borrow();
            let chunk = &borrowed
                .chunks(CHUNK_SIZE as usize)
                .nth(chunk_index as usize)
                .ok_or_else(|| {
                    io::Error::new(
                        io::ErrorKind::InvalidInput,
                        format!("read_chunk out of bound: index {}", chunk_index),
                    )
                })?;
            buf[..chunk.len()].copy_from_slice(chunk);
            Ok(chunk.len())
        }
    }

    #[test]
    fn test_writer() -> Result<()> {
        let writer = InMemoryEditor::new();
        let buf = [1; 4096];
        assert_eq!(writer.data.borrow().len(), 0);

        assert_eq!(writer.write_at(&buf, 16384)?, 4096);
        assert_eq!(writer.data.borrow()[16384..16384 + 4096], buf);

        assert_eq!(writer.write_at(&buf, 2048)?, 4096);
        assert_eq!(writer.data.borrow()[2048..2048 + 4096], buf);

        assert_eq!(writer.data.borrow().len(), 16384 + 4096);
        Ok(())
    }

    #[test]
    fn test_verified_writer_no_write() -> Result<()> {
        // Verify fs-verity hash without any write.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("3d248ca542a24fc62d1c43b916eae5016878e2533c88238480b26128a1f1af95")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_from_zero() -> Result<()> {
        // Verify a write of a full chunk.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")?
                .as_slice()
        );

        // Verify a write across multiple chunks.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4097], 0)?, 4097);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("2901b849fda2d91e3929524561c4a47e77bb64734319759507b2029f18b9cc52")?
                .as_slice()
        );

        // Verify another write across multiple chunks.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 10000], 0)?, 10000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("7545409b556071554d18973a29b96409588c7cda4edd00d5586b27a11e1a523b")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_unaligned() -> Result<()> {
        // Verify small, unaligned write beyond EOF.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5], 3)?, 5);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("a23fc5130d3d7b3323fc4b4a5e79d5d3e9ddf3a3f5872639e867713512c6702f")?
                .as_slice()
        );

        // Verify bigger, unaligned write beyond EOF.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 6000], 4000)?, 6000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("d16d4c1c186d757e646f76208b21254f50d7f07ea07b1505ff48b2a6f603f989")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_with_hole() -> Result<()> {
        // Verify an aligned write beyond EOF with holes.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("4df2aefd8c2a9101d1d8770dca3ede418232eabce766bb8e020395eae2e97103")?
                .as_slice()
        );

        // Verify an unaligned write beyond EOF with holes.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5000], 6000)?, 5000);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("47d5da26f6934484e260630a69eb2eebb21b48f69bc8fbf8486d1694b7dba94f")?
                .as_slice()
        );

        // Just another example with a small write.
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 5], 16381)?, 5);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("8bd118821fb4aff26bb4b51d485cc481a093c68131b7f4f112e9546198449752")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_various_writes() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 4096 + 2048)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")?
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 2048], 2048)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 4096)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")?
                .as_slice()
        );
        assert_eq!(file.write_at(&[0; 2048], 2048)?, 2048);
        assert_eq!(file.write_at(&[0; 2048], 4096)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("4c433d8640c888b629dc673d318cbb8d93b1eebcc784d9353e07f09f0dcfe707")?
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 4096], 2048)?, 4096);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("2a476d58eb80394052a3a783111e1458ac3ecf68a7878183fed86ca0ff47ec0d")?
                .as_slice()
        );
        assert_eq!(file.write_at(&[1; 2048], 8192)?, 2048);
        assert_eq!(file.write_at(&[1; 2048], 8192 + 2048)?, 2048);
        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("23cbac08371e6ee838ebcc7ae6512b939d2226e802337be7b383c3e046047d24")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_verified_writer_inconsistent_read() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // Replace the expected hash of the first/0-th chunk. An incomplete write will fail when it
        // detects the inconsistent read.
        {
            let mut merkle_tree = file.merkle_tree.write().unwrap();
            let overriding_hash = [42; SHA256_HASH_SIZE];
            merkle_tree.update_hash(0, &overriding_hash, 8192);
        }
        assert!(file.write_at(&[1; 1], 2048).is_err());

        // A write of a full chunk can still succeed, and also fixes the inconsistency.
        assert_eq!(file.write_at(&[1; 4096], 4096)?, 4096);

        // Replace the expected hash of the second/1-th chunk. A write range starting in the
        // previous chunk still succeeds partially: it returns early at the inconsistent read, but
        // reports the bytes written so far as a successful (short) write. A resumed write will
        // then fail since no bytes can be written due to the same inconsistency.
        {
            let mut merkle_tree = file.merkle_tree.write().unwrap();
            let overriding_hash = [42; SHA256_HASH_SIZE];
            merkle_tree.update_hash(1, &overriding_hash, 8192);
        }
        assert_eq!(file.write_at(&[10; 8000], 0)?, 4096);
        assert!(file.write_at(&[10; 8000 - 4096], 4096).is_err());
        Ok(())
    }

    #[test]
    fn test_verified_writer_failed_read_back() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // When a read back is needed, a read failure causes the write to fail.
        assert!(file.write_at(&[1; 1], 2048).is_err());
        Ok(())
    }

    #[test]
    fn test_resize_to_same_size() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);

        assert!(file.resize(2048).is_ok());
        assert_eq!(file.size(), 2048);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_grow() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 2048], 0)?, 2048);

        // Resize should grow with 0s.
        assert!(file.resize(4096).is_ok());
        assert_eq!(file.size(), 4096);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("9e0e2745c21e4e74065240936d2047340d96a466680c3c9d177b82433e7a0bb1")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);

        // Truncate.
        file.resize(2048)?;
        assert_eq!(file.size(), 2048);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("fef1b4f19bb7a2cd944d7cdee44d1accb12726389ca5b0f61ac0f548ae40876f")?
                .as_slice()
        );
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink_with_read_failure() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 4096], 0)?, 4096);

        // A truncate needs a read back. If the read fails, the resize should fail.
        assert!(file.resize(2048).is_err());
        Ok(())
    }

    #[test]
    fn test_resize_to_shrink_to_chunk_boundary() -> Result<()> {
        let mut writer = InMemoryEditor::new();
        writer.fail_read = true;
        let file = VerifiedFileEditor::new(writer);
        assert_eq!(file.write_at(&[1; 8192], 0)?, 8192);

        // Truncate to a chunk boundary. A read error doesn't matter since we won't need to
        // recalculate the leaf hash.
        file.resize(4096)?;
        assert_eq!(file.size(), 4096);

        assert_eq!(
            file.calculate_fsverity_digest()?,
            hex::decode("cd0875ca59c7d37e962c5e8f5acd3770750ac80225e2df652ce5672fd34500af")?
                .as_slice()
        );
        Ok(())
    }
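
    // A hedged sketch of how `size()` tracks writes: `write_at` passes `offset + buf.len()` as a
    // lower bound of the file size to `update_hash`. (Assumption: `MerkleLeaves::update_hash`
    // only ever grows the tracked size to that bound; the exact policy lives in the builder
    // module.)
    #[test]
    fn test_size_tracks_writes() -> Result<()> {
        let file = VerifiedFileEditor::new(InMemoryEditor::new());
        assert_eq!(file.size(), 0);

        // An unaligned write beyond EOF extends the size to offset + length.
        assert_eq!(file.write_at(&[1; 5], 3)?, 5);
        assert_eq!(file.size(), 8);

        // A write within the current size should not shrink it.
        assert_eq!(file.write_at(&[1; 2], 0)?, 2);
        assert_eq!(file.size(), 8);
        Ok(())
    }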
}