/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ziparchive/zip_writer.h"

#include <sys/param.h>
#include <sys/stat.h>
#include <zlib.h>
#include <cstdio>
#define DEF_MEM_LEVEL 8  // normally in zutil.h?

#include <memory>
#include <vector>

#include "android-base/logging.h"

#include "entry_name_utils-inl.h"
#include "zip_archive_common.h"
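
// A minimal usage sketch of the writer implemented here, for orientation (error handling
// omitted; the output name "out.zip" and the entry contents are purely illustrative):
//
//   FILE* fp = fopen("out.zip", "wb");
//   ZipWriter writer(fp);
//   writer.StartEntry("hello.txt", ZipWriter::kCompress);
//   writer.WriteBytes("hello", 5);
//   writer.FinishEntry();
//   writer.Finish();
//   fclose(fp);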

// Local replacement for powerof2() from <sys/param.h>: equivalent to ((x - 1) & x) == 0, but
// computes x - 1 with __builtin_add_overflow so the subtraction is well-defined even when it
// would wrap (e.g. x == 0), in which case x is reported as a power of two.
#undef powerof2
#define powerof2(x)                                               \
  ({                                                              \
    __typeof__(x) _x = (x);                                       \
    __typeof__(x) _x2;                                            \
    __builtin_add_overflow(_x, -1, &_x2) ? 1 : ((_x2 & _x) == 0); \
  })

/* Zip compression methods we support */
enum {
  kCompressStored = 0,    // no compression
  kCompressDeflated = 8,  // standard deflate
};

// Size of the output buffer used for compression.
static const size_t kBufSize = 32768u;

// No error, operation completed successfully.
static const int32_t kNoError = 0;

// The ZipWriter is in a bad state.
static const int32_t kInvalidState = -1;

// There was an IO error while writing to disk.
static const int32_t kIoError = -2;

// The zip entry name was invalid.
static const int32_t kInvalidEntryName = -3;

// An error occurred in zlib.
static const int32_t kZlibError = -4;

// StartAlignedEntry was called with the kAlign32 flag set.
static const int32_t kInvalidAlign32Flag = -5;

// The alignment parameter is not a power of 2.
static const int32_t kInvalidAlignment = -6;

// Indexed by -error_code; codes beyond the end of the table return nullptr.
static const char* sErrorCodes[] = {
    "No error", "Invalid state", "IO error", "Invalid entry name", "Zlib error",
};

const char* ZipWriter::ErrorCodeString(int32_t error_code) {
  if (error_code < 0 && (-error_code) < static_cast<int32_t>(arraysize(sErrorCodes))) {
    return sErrorCodes[-error_code];
  }
  return nullptr;
}

static void DeleteZStream(z_stream* stream) {
  deflateEnd(stream);
  delete stream;
}

ZipWriter::ZipWriter(FILE* f)
    : file_(f),
      seekable_(false),
      current_offset_(0),
      state_(State::kWritingZip),
      z_stream_(nullptr, DeleteZStream),
      buffer_(kBufSize) {
  // Check if the file is seekable (regular file). If fstat fails, that's fine, subsequent calls
  // will fail as well.
  struct stat file_stats;
  if (fstat(fileno(f), &file_stats) == 0) {
    seekable_ = S_ISREG(file_stats.st_mode);
  }
}

ZipWriter::ZipWriter(ZipWriter&& writer) noexcept
    : file_(writer.file_),
      seekable_(writer.seekable_),
      current_offset_(writer.current_offset_),
      state_(writer.state_),
      files_(std::move(writer.files_)),
      z_stream_(std::move(writer.z_stream_)),
      buffer_(std::move(writer.buffer_)) {
  writer.file_ = nullptr;
  writer.state_ = State::kError;
}

ZipWriter& ZipWriter::operator=(ZipWriter&& writer) noexcept {
  file_ = writer.file_;
  seekable_ = writer.seekable_;
  current_offset_ = writer.current_offset_;
  state_ = writer.state_;
  files_ = std::move(writer.files_);
  z_stream_ = std::move(writer.z_stream_);
  buffer_ = std::move(writer.buffer_);
  writer.file_ = nullptr;
  writer.state_ = State::kError;
  return *this;
}

int32_t ZipWriter::HandleError(int32_t error_code) {
  state_ = State::kError;
  z_stream_.reset();
  return error_code;
}

int32_t ZipWriter::StartEntry(std::string_view path, size_t flags) {
  uint32_t alignment = 0;
  if (flags & kAlign32) {
    flags &= ~kAlign32;
    alignment = 4;
  }
  return StartAlignedEntryWithTime(path, flags, time_t(), alignment);
}

int32_t ZipWriter::StartAlignedEntry(std::string_view path, size_t flags, uint32_t alignment) {
  return StartAlignedEntryWithTime(path, flags, time_t(), alignment);
}

int32_t ZipWriter::StartEntryWithTime(std::string_view path, size_t flags, time_t time) {
  uint32_t alignment = 0;
  if (flags & kAlign32) {
    flags &= ~kAlign32;
    alignment = 4;
  }
  return StartAlignedEntryWithTime(path, flags, time, alignment);
}

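// Converts a Unix time_t into the 2-second-resolution MS-DOS date and time fields used by zip
// headers.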
static void ExtractTimeAndDate(time_t when, uint16_t* out_time, uint16_t* out_date) {
  /* round up to an even number of seconds */
  when = static_cast<time_t>((static_cast<unsigned long>(when) + 1) & (~1));

  struct tm* ptm;
#if !defined(_WIN32)
  struct tm tm_result;
  ptm = localtime_r(&when, &tm_result);
#else
  ptm = localtime(&when);
#endif

  // DOS dates cannot represent years before 1980 (tm_year == 80).
  int year = ptm->tm_year;
  if (year < 80) {
    year = 80;
  }

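  // MS-DOS date: bits 0-4 day of month, bits 5-8 month (1-12), bits 9-15 years since 1980.
  // MS-DOS time: bits 0-4 seconds/2, bits 5-10 minutes, bits 11-15 hours.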
  *out_date = static_cast<uint16_t>((year - 80) << 9 | (ptm->tm_mon + 1) << 5 | ptm->tm_mday);
  *out_time = static_cast<uint16_t>(ptm->tm_hour << 11 | ptm->tm_min << 5 | ptm->tm_sec >> 1);
}

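// Fills an on-disk LocalFileHeader from the in-memory FileEntry.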
static void CopyFromFileEntry(const ZipWriter::FileEntry& src, bool use_data_descriptor,
                              LocalFileHeader* dst) {
  dst->lfh_signature = LocalFileHeader::kSignature;
  if (use_data_descriptor) {
    // Set this flag to denote that a DataDescriptor struct will appear after the data,
    // containing the crc and size fields.
    dst->gpb_flags |= kGPBDDFlagMask;

    // The size and crc fields must be 0.
    dst->compressed_size = 0u;
    dst->uncompressed_size = 0u;
    dst->crc32 = 0u;
  } else {
    dst->compressed_size = src.compressed_size;
    dst->uncompressed_size = src.uncompressed_size;
    dst->crc32 = src.crc32;
  }
  dst->compression_method = src.compression_method;
  dst->last_mod_time = src.last_mod_time;
  dst->last_mod_date = src.last_mod_date;
  DCHECK_LE(src.path.size(), std::numeric_limits<uint16_t>::max());
  dst->file_name_length = static_cast<uint16_t>(src.path.size());
  dst->extra_field_length = src.padding_length;
}

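// Starts a new entry: validates the name and alignment, writes the local file header, the entry
// name, and any alignment padding, then switches the writer to the kWritingEntry state.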
int32_t ZipWriter::StartAlignedEntryWithTime(std::string_view path, size_t flags, time_t time,
                                             uint32_t alignment) {
  if (state_ != State::kWritingZip) {
    return kInvalidState;
  }

  // Can only have 65535 entries because the end of central directory record stores the count
  // in a uint16_t.
  if (files_.size() == std::numeric_limits<uint16_t>::max()) {
    return HandleError(kIoError);
  }

  if (flags & kAlign32) {
    return kInvalidAlign32Flag;
  }

  if (powerof2(alignment) == 0) {
    return kInvalidAlignment;
  }
  if (alignment > std::numeric_limits<uint16_t>::max()) {
    return kInvalidAlignment;
  }

  FileEntry file_entry = {};
  file_entry.local_file_header_offset = current_offset_;
  file_entry.path = path;
  // No support for larger than 4GB files.
  if (file_entry.local_file_header_offset > std::numeric_limits<uint32_t>::max()) {
    return HandleError(kIoError);
  }

  if (!IsValidEntryName(reinterpret_cast<const uint8_t*>(file_entry.path.data()),
                        file_entry.path.size())) {
    return kInvalidEntryName;
  }

  if (flags & ZipWriter::kCompress) {
    file_entry.compression_method = kCompressDeflated;

    int32_t result = PrepareDeflate();
    if (result != kNoError) {
      return result;
    }
  } else {
    file_entry.compression_method = kCompressStored;
  }

  ExtractTimeAndDate(time, &file_entry.last_mod_time, &file_entry.last_mod_date);

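  // The entry's data will start immediately after the local file header and the entry name; any
  // alignment padding computed below is carried in the header's extra field.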
  off_t offset = current_offset_ + sizeof(LocalFileHeader) + file_entry.path.size();
  // Prepare a pre-zeroed memory page in case we need to pad aligned data.
  static constexpr auto kPageSize = 4096;
  static constexpr char kSmallZeroPadding[kPageSize] = {};
  // Use this buffer if our preallocated one is too small.
  std::vector<char> zero_padding_big;
  const char* zero_padding = nullptr;

  if (alignment != 0 && (offset & (alignment - 1))) {
    // Pad the extra field so the data will be aligned.
    uint16_t padding = static_cast<uint16_t>(alignment - (offset % alignment));
    file_entry.padding_length = padding;
    offset += padding;
    if (padding <= std::size(kSmallZeroPadding)) {
      zero_padding = kSmallZeroPadding;
    } else {
      zero_padding_big.resize(padding, 0);
      zero_padding = zero_padding_big.data();
    }
  }

  LocalFileHeader header = {};
  // Always start expecting a data descriptor. When the data has finished being written,
  // if it is possible to seek back, the GPB flag will be reset and the sizes written in place.
  CopyFromFileEntry(file_entry, true /*use_data_descriptor*/, &header);

  if (fwrite(&header, sizeof(header), 1, file_) != 1) {
    return HandleError(kIoError);
  }

  if (fwrite(path.data(), 1, path.size(), file_) != path.size()) {
    return HandleError(kIoError);
  }

  if (file_entry.padding_length != 0 && fwrite(zero_padding, 1, file_entry.padding_length,
                                               file_) != file_entry.padding_length) {
    return HandleError(kIoError);
  }

  current_file_entry_ = std::move(file_entry);
  current_offset_ = offset;
  state_ = State::kWritingEntry;
  return kNoError;
}

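// Drops the most recently finished entry by seeking the output back to that entry's local file
// header; only meaningful between entries and on seekable output.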
int32_t ZipWriter::DiscardLastEntry() {
  if (state_ != State::kWritingZip || files_.empty()) {
    return kInvalidState;
  }

  FileEntry& last_entry = files_.back();
  current_offset_ = last_entry.local_file_header_offset;
  if (fseeko(file_, current_offset_, SEEK_SET) != 0) {
    return HandleError(kIoError);
  }
  files_.pop_back();
  return kNoError;
}

int32_t ZipWriter::GetLastEntry(FileEntry* out_entry) {
  CHECK(out_entry != nullptr);

  if (files_.empty()) {
    return kInvalidState;
  }
  *out_entry = files_.back();
  return kNoError;
}

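// Sets up z_stream_ for a raw deflate stream (negative window bits, so no zlib header or
// trailer), which is the form the zip format stores.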
int32_t ZipWriter::PrepareDeflate() {
  CHECK(state_ == State::kWritingZip);

  // Initialize the z_stream for compression.
  z_stream_ = std::unique_ptr<z_stream, void (*)(z_stream*)>(new z_stream(), DeleteZStream);

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
  int zerr = deflateInit2(z_stream_.get(), Z_BEST_COMPRESSION, Z_DEFLATED, -MAX_WBITS,
                          DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY);
#pragma GCC diagnostic pop

  if (zerr != Z_OK) {
    if (zerr == Z_VERSION_ERROR) {
      LOG(ERROR) << "Installed zlib is not compatible with linked version (" << ZLIB_VERSION << ")";
      return HandleError(kZlibError);
    } else {
      LOG(ERROR) << "deflateInit2 failed (zerr=" << zerr << ")";
      return HandleError(kZlibError);
    }
  }

  z_stream_->next_out = buffer_.data();
  DCHECK_EQ(buffer_.size(), kBufSize);
  z_stream_->avail_out = static_cast<uint32_t>(buffer_.size());
  return kNoError;
}

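// Appends bytes to the entry being written, storing or deflating them as configured, and
// updates the entry's CRC-32 and uncompressed size.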
int32_t ZipWriter::WriteBytes(const void* data, size_t len) {
  if (state_ != State::kWritingEntry) {
    return HandleError(kInvalidState);
  }
  // The uncompressed size must still fit in the 32-bit size field (no zip64 support).
  if (len + static_cast<uint64_t>(current_file_entry_.uncompressed_size) >
      std::numeric_limits<uint32_t>::max()) {
    return HandleError(kIoError);
  }
  uint32_t len32 = static_cast<uint32_t>(len);

  int32_t result = kNoError;
  if (current_file_entry_.compression_method & kCompressDeflated) {
    result = CompressBytes(&current_file_entry_, data, len32);
  } else {
    result = StoreBytes(&current_file_entry_, data, len32);
  }

  if (result != kNoError) {
    return result;
  }

  current_file_entry_.crc32 = static_cast<uint32_t>(
      crc32(current_file_entry_.crc32, reinterpret_cast<const Bytef*>(data), len32));
  current_file_entry_.uncompressed_size += len32;
  return kNoError;
}

int32_t ZipWriter::StoreBytes(FileEntry* file, const void* data, uint32_t len) {
  CHECK(state_ == State::kWritingEntry);

  if (fwrite(data, 1, len, file_) != len) {
    return HandleError(kIoError);
  }
  file->compressed_size += len;
  current_offset_ += len;
  return kNoError;
}

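// Feeds data through deflate(), writing the output buffer to disk and resetting it each time it
// fills up.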
int32_t ZipWriter::CompressBytes(FileEntry* file, const void* data, uint32_t len) {
  CHECK(state_ == State::kWritingEntry);
  CHECK(z_stream_);
  CHECK(z_stream_->next_out != nullptr);
  CHECK(z_stream_->avail_out != 0);

  // Prepare the input.
  z_stream_->next_in = reinterpret_cast<const uint8_t*>(data);
  z_stream_->avail_in = len;

  while (z_stream_->avail_in > 0) {
    // We have more data to compress.
    int zerr = deflate(z_stream_.get(), Z_NO_FLUSH);
    if (zerr != Z_OK) {
      return HandleError(kZlibError);
    }

    if (z_stream_->avail_out == 0) {
      // The output is full, let's write it to disk.
      size_t write_bytes = z_stream_->next_out - buffer_.data();
      if (fwrite(buffer_.data(), 1, write_bytes, file_) != write_bytes) {
        return HandleError(kIoError);
      }
      file->compressed_size += write_bytes;
      current_offset_ += write_bytes;

      // Reset the output buffer for the next input.
      z_stream_->next_out = buffer_.data();
      DCHECK_EQ(buffer_.size(), kBufSize);
      z_stream_->avail_out = static_cast<uint32_t>(buffer_.size());
    }
  }
  return kNoError;
}

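// Finishes the deflate stream with Z_FINISH, writes any remaining compressed output, and
// releases the z_stream.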
int32_t ZipWriter::FlushCompressedBytes(FileEntry* file) {
  CHECK(state_ == State::kWritingEntry);
  CHECK(z_stream_);
  CHECK(z_stream_->next_out != nullptr);
  CHECK(z_stream_->avail_out != 0);

  // Keep deflating while there isn't enough space in the buffer to
  // complete the compression.
  int zerr;
  while ((zerr = deflate(z_stream_.get(), Z_FINISH)) == Z_OK) {
    CHECK(z_stream_->avail_out == 0);
    size_t write_bytes = z_stream_->next_out - buffer_.data();
    if (fwrite(buffer_.data(), 1, write_bytes, file_) != write_bytes) {
      return HandleError(kIoError);
    }
    file->compressed_size += write_bytes;
    current_offset_ += write_bytes;

    z_stream_->next_out = buffer_.data();
    DCHECK_EQ(buffer_.size(), kBufSize);
    z_stream_->avail_out = static_cast<uint32_t>(buffer_.size());
  }
  if (zerr != Z_STREAM_END) {
    return HandleError(kZlibError);
  }

  size_t write_bytes = z_stream_->next_out - buffer_.data();
  if (write_bytes != 0) {
    if (fwrite(buffer_.data(), 1, write_bytes, file_) != write_bytes) {
      return HandleError(kIoError);
    }
    file->compressed_size += write_bytes;
    current_offset_ += write_bytes;
  }
  z_stream_.reset();
  return kNoError;
}

bool ZipWriter::ShouldUseDataDescriptor() const {
  // Only use a trailing "data descriptor" if the output isn't seekable.
  return !seekable_;
}

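// Completes the current entry: flushes any compressed data, then either appends a DataDescriptor
// (non-seekable output) or seeks back and rewrites the local file header with the final sizes
// and CRC-32.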
int32_t ZipWriter::FinishEntry() {
  if (state_ != State::kWritingEntry) {
    return kInvalidState;
  }

  if (current_file_entry_.compression_method & kCompressDeflated) {
    int32_t result = FlushCompressedBytes(&current_file_entry_);
    if (result != kNoError) {
      return result;
    }
  }

  if (ShouldUseDataDescriptor()) {
    // Some versions of ZIP don't allow STORED data to have a trailing DataDescriptor.
    // If this file is not seekable, or if the data is compressed, write a DataDescriptor.
    // We don't support the zip64 format yet, so write both the uncompressed size and the
    // compressed size as uint32_t.
    std::vector<uint32_t> dataDescriptor = {
        DataDescriptor::kOptSignature, current_file_entry_.crc32,
        current_file_entry_.compressed_size, current_file_entry_.uncompressed_size};
    if (fwrite(dataDescriptor.data(), dataDescriptor.size() * sizeof(uint32_t), 1, file_) != 1) {
      return HandleError(kIoError);
    }

    current_offset_ += sizeof(uint32_t) * dataDescriptor.size();
  } else {
    // Seek back to the header and rewrite to include the size.
    if (fseeko(file_, current_file_entry_.local_file_header_offset, SEEK_SET) != 0) {
      return HandleError(kIoError);
    }

    LocalFileHeader header = {};
    CopyFromFileEntry(current_file_entry_, false /*use_data_descriptor*/, &header);

    if (fwrite(&header, sizeof(header), 1, file_) != 1) {
      return HandleError(kIoError);
    }

    if (fseeko(file_, current_offset_, SEEK_SET) != 0) {
      return HandleError(kIoError);
    }
  }

  files_.emplace_back(std::move(current_file_entry_));
  state_ = State::kWritingZip;
  return kNoError;
}

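// Writes a central directory record for every entry followed by the end-of-central-directory
// record, then truncates the file to the final offset and flushes it.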
int32_t ZipWriter::Finish() {
  if (state_ != State::kWritingZip) {
    return kInvalidState;
  }

  off_t startOfCdr = current_offset_;
  for (FileEntry& file : files_) {
    CentralDirectoryRecord cdr = {};
    cdr.record_signature = CentralDirectoryRecord::kSignature;
    if (ShouldUseDataDescriptor()) {
      cdr.gpb_flags |= kGPBDDFlagMask;
    }
    cdr.compression_method = file.compression_method;
    cdr.last_mod_time = file.last_mod_time;
    cdr.last_mod_date = file.last_mod_date;
    cdr.crc32 = file.crc32;
    cdr.compressed_size = file.compressed_size;
    cdr.uncompressed_size = file.uncompressed_size;
    // Checked in IsValidEntryName.
    DCHECK_LE(file.path.size(), std::numeric_limits<uint16_t>::max());
    cdr.file_name_length = static_cast<uint16_t>(file.path.size());
    // Checked in StartAlignedEntryWithTime.
    DCHECK_LE(file.local_file_header_offset, std::numeric_limits<uint32_t>::max());
    cdr.local_file_header_offset = static_cast<uint32_t>(file.local_file_header_offset);
    if (fwrite(&cdr, sizeof(cdr), 1, file_) != 1) {
      return HandleError(kIoError);
    }

    if (fwrite(file.path.data(), 1, file.path.size(), file_) != file.path.size()) {
      return HandleError(kIoError);
    }

    current_offset_ += sizeof(cdr) + file.path.size();
  }

  EocdRecord er = {};
  er.eocd_signature = EocdRecord::kSignature;
  er.disk_num = 0;
  er.cd_start_disk = 0;
  // Checked when adding entries.
  DCHECK_LE(files_.size(), std::numeric_limits<uint16_t>::max());
  er.num_records_on_disk = static_cast<uint16_t>(files_.size());
  er.num_records = static_cast<uint16_t>(files_.size());
  if (current_offset_ > std::numeric_limits<uint32_t>::max()) {
    return HandleError(kIoError);
  }
  er.cd_size = static_cast<uint32_t>(current_offset_ - startOfCdr);
  er.cd_start_offset = static_cast<uint32_t>(startOfCdr);

  if (fwrite(&er, sizeof(er), 1, file_) != 1) {
    return HandleError(kIoError);
  }

  current_offset_ += sizeof(er);

  // Since we can BackUp() and potentially finish writing at an offset less than the one we had
  // already written at, we must truncate the file.

  if (ftruncate(fileno(file_), current_offset_) != 0) {
    return HandleError(kIoError);
  }

  if (fflush(file_) != 0) {
    return HandleError(kIoError);
  }

  state_ = State::kDone;
  return kNoError;
}