1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_IMAGE_WRITER_H_
18 #define ART_COMPILER_IMAGE_WRITER_H_
19 
20 #include <stdint.h>
21 #include "base/memory_tool.h"
22 
23 #include <cstddef>
24 #include <memory>
25 #include <set>
26 #include <stack>
27 #include <string>
28 #include <ostream>
29 
30 #include "art_method.h"
31 #include "base/bit_utils.h"
32 #include "base/dchecked_vector.h"
33 #include "base/enums.h"
34 #include "base/length_prefixed_array.h"
35 #include "base/macros.h"
36 #include "driver/compiler_driver.h"
37 #include "gc/space/space.h"
38 #include "image.h"
39 #include "lock_word.h"
40 #include "mem_map.h"
41 #include "mirror/dex_cache.h"
42 #include "obj_ptr.h"
43 #include "oat_file.h"
44 #include "os.h"
45 #include "safe_map.h"
46 #include "utils.h"
47 
48 namespace art {
49 namespace gc {
50 namespace space {
51 class ImageSpace;
52 }  // namespace space
53 }  // namespace gc
54 
55 namespace mirror {
56 class ClassLoader;
57 }  // namespace mirror
58 
59 class ClassLoaderVisitor;
60 class ClassTable;
61 class ImtConflictTable;
62 
// Sentinel file-descriptor value meaning "no fd supplied"; when passed to
// ImageWriter::Write(), files are opened by name instead (see Write() below).
static constexpr int kInvalidFd = -1;
64 
// Write a Space built during compilation for use during execution.
// The writer lays out heap objects and native data (ArtFields, ArtMethods,
// IMTs, dex cache arrays) into one or more target images and serializes
// them to disk via Write().
class ImageWriter FINAL {
 public:
  ImageWriter(const CompilerDriver& compiler_driver,
              uintptr_t image_begin,
              bool compile_pic,
              bool compile_app_image,
              ImageHeader::StorageMode image_storage_mode,
              const std::vector<const char*>& oat_filenames,
              const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map);

  // Lays out the image(s) in the target address space. Must succeed before Write().
  bool PrepareImageAddressSpace();

  // Returns true once every image has been assigned its image roots address,
  // i.e. once layouting has completed for all images.
  bool IsImageAddressSpaceReady() const {
    DCHECK(!image_infos_.empty());
    for (const ImageInfo& image_info : image_infos_) {
      if (image_info.image_roots_address_ == 0u) {
        return false;
      }
    }
    return true;
  }

  // Returns the single class loader for an app image, or null for a boot image
  // (boot images have no associated class loader).
  ObjPtr<mirror::ClassLoader> GetClassLoader() {
    CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u);
    return compile_app_image_ ? *class_loaders_.begin() : nullptr;
  }

  // Translates a heap object pointer into its address in the target image.
  // Null pointers and objects already in the boot image are returned unchanged.
  template <typename T>
  T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
    if (object == nullptr || IsInBootImage(object)) {
      return object;
    } else {
      size_t oat_index = GetOatIndex(object);
      const ImageInfo& image_info = GetImageInfo(oat_index);
      return reinterpret_cast<T*>(image_info.image_begin_ + GetImageOffset(object));
    }
  }

  ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the target-image address of a dex cache array element, given its
  // owning dex file and the element's offset within that file's arrays.
  template <typename PtrType>
  PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
      const REQUIRES_SHARED(Locks::mutator_lock_) {
    auto oat_it = dex_file_oat_index_map_.find(dex_file);
    DCHECK(oat_it != dex_file_oat_index_map_.end());
    const ImageInfo& image_info = GetImageInfo(oat_it->second);
    auto it = image_info.dex_cache_array_starts_.find(dex_file);
    DCHECK(it != image_info.dex_cache_array_starts_.end());
    return reinterpret_cast<PtrType>(
        image_info.image_begin_ + image_info.bin_slot_offsets_[kBinDexCacheArray] +
            it->second + offset);
  }

  // Offset of the given oat file from the start of the oat files (valid after
  // the previous oat file's layout has been updated, see oat_offset_).
  size_t GetOatFileOffset(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_offset_;
  }

  // Begin address of the loaded ELF file containing the oat file (valid after
  // UpdateOatFileLayout()).
  const uint8_t* GetOatFileBegin(size_t oat_index) const {
    return GetImageInfo(oat_index).oat_file_begin_;
  }

  // If image_fd is not kInvalidFd, then we use that for the image file. Otherwise we open
  // the names in image_filenames.
  // If oat_fd is not kInvalidFd, then we use that for the oat file. Otherwise we open
  // the names in oat_filenames.
  bool Write(int image_fd,
             const std::vector<const char*>& image_filenames,
             const std::vector<const char*>& oat_filenames)
      REQUIRES(!Locks::mutator_lock_);

  // Begin address of the oat data for the given oat file, as an integer.
  uintptr_t GetOatDataBegin(size_t oat_index) {
    return reinterpret_cast<uintptr_t>(GetImageInfo(oat_index).oat_data_begin_);
  }

  // Get the index of the oat file containing the dex file.
  //
  // This "oat_index" is used to retrieve information about the memory layout
  // of the oat file and its associated image file, needed for link-time patching
  // of references to the image or across oat files.
  size_t GetOatIndexForDexFile(const DexFile* dex_file) const;

  // Get the index of the oat file containing the dex file served by the dex cache.
  size_t GetOatIndexForDexCache(ObjPtr<mirror::DexCache> dex_cache) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Update the oat layout for the given oat file.
  // This will make the oat_offset for the next oat file valid.
  void UpdateOatFileLayout(size_t oat_index,
                           size_t oat_loaded_size,
                           size_t oat_data_offset,
                           size_t oat_data_size);
  // Update information about the oat header, i.e. checksum and trampoline offsets.
  void UpdateOatFileHeader(size_t oat_index, const OatHeader& oat_header);

 private:
  // Work list of <object, oat index> pairs processed while assigning bin slots.
  using WorkStack = std::stack<std::pair<mirror::Object*, size_t>>;

  // Allocate the memory maps backing each image.
  bool AllocMemory();

  // Mark the objects defined in this space in the given live bitmap.
  void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);

  // Classify different kinds of bins that objects end up getting packed into during image writing.
  // Ordered from dirtiest to cleanest (until ArtMethods).
  enum Bin {
    kBinMiscDirty,                // Dex caches, object locks, etc...
    kBinClassVerified,            // Class verified, but initializers haven't been run
    // Unknown mix of clean/dirty:
    kBinRegular,
    kBinClassInitialized,         // Class initializers have been run
    // All classes get their own bins since their fields often get dirtied.
    kBinClassInitializedFinalStatics,  // Class initializers have been run, no non-final statics
    // Likely-clean:
    kBinString,                        // [String] Almost always immutable (except for obj header).
    // Add more bins here if we add more segregation code.
    // Non mirror fields must be below.
    // ArtFields should be always clean.
    kBinArtField,
    // If the class is initialized, then the ArtMethods are probably clean.
    kBinArtMethodClean,
    // ArtMethods may be dirty if the class has native methods or a declaring class that isn't
    // initialized.
    kBinArtMethodDirty,
    // IMT (clean)
    kBinImTable,
    // Conflict tables (clean).
    kBinIMTConflictTable,
    // Runtime methods (always clean, do not have a length prefix array).
    kBinRuntimeMethod,
    // Dex cache arrays have a special slot for PC-relative addressing. Since they are
    // huge, and as such their dirtiness is not important for the clean/dirty separation,
    // we arbitrarily keep them at the end of the native data.
    kBinDexCacheArray,            // Arrays belonging to dex cache.
    kBinSize,
    // Number of bins which are for mirror objects.
    kBinMirrorCount = kBinArtField,
  };
  friend std::ostream& operator<<(std::ostream& stream, const Bin& bin);

  // Kinds of native (non-mirror) data relocated into the image.
  enum NativeObjectRelocationType {
    kNativeObjectRelocationTypeArtField,
    kNativeObjectRelocationTypeArtFieldArray,
    kNativeObjectRelocationTypeArtMethodClean,
    kNativeObjectRelocationTypeArtMethodArrayClean,
    kNativeObjectRelocationTypeArtMethodDirty,
    kNativeObjectRelocationTypeArtMethodArrayDirty,
    kNativeObjectRelocationTypeRuntimeMethod,
    kNativeObjectRelocationTypeIMTable,
    kNativeObjectRelocationTypeIMTConflictTable,
    kNativeObjectRelocationTypeDexCacheArray,
  };
  friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type);

  // Well-known oat trampoline/bridge entry points whose offsets are cached
  // per image (see oat_address_offsets_ and GetOatAddress()).
  enum OatAddress {
    kOatAddressInterpreterToInterpreterBridge,
    kOatAddressInterpreterToCompiledCodeBridge,
    kOatAddressJNIDlsymLookup,
    kOatAddressQuickGenericJNITrampoline,
    kOatAddressQuickIMTConflictTrampoline,
    kOatAddressQuickResolutionTrampoline,
    kOatAddressQuickToInterpreterBridge,
    // Number of elements in the enum.
    kOatAddressCount,
  };
  friend std::ostream& operator<<(std::ostream& stream, const OatAddress& oat_address);

  static constexpr size_t kBinBits = MinimumBitsToStore<uint32_t>(kBinMirrorCount - 1);
  // uint32 = typeof(lockword_)
  // Subtract read barrier bits since we want these to remain 0, or else it may result in DCHECK
  // failures due to invalid read barrier bits during object field reads.
  static const size_t kBinShift = BitSizeOf<uint32_t>() - kBinBits - LockWord::kGCStateSize;
  // 111000.....0
  static const size_t kBinMask = ((static_cast<size_t>(1) << kBinBits) - 1) << kBinShift;

  // We use the lock word to store the bin # and bin index of the object in the image.
  //
  // The struct size must be exactly sizeof(LockWord), currently 32-bits, since this will end up
  // stored in the lock word bit-for-bit when object forwarding addresses are being calculated.
  struct BinSlot {
    explicit BinSlot(uint32_t lockword);
    BinSlot(Bin bin, uint32_t index);

    // The bin an object belongs to, i.e. regular, class/verified, class/initialized, etc.
    Bin GetBin() const;
    // The offset in bytes from the beginning of the bin. Aligned to object size.
    uint32_t GetIndex() const;
    // Pack into a single uint32_t, for storing into a lock word.
    uint32_t Uint32Value() const { return lockword_; }
    // Comparison operator for map support
    bool operator<(const BinSlot& other) const  { return lockword_ < other.lockword_; }

  private:
    // Must be the same size as LockWord, any larger and we would truncate the data.
    const uint32_t lockword_;
  };

  // Per-image bookkeeping: the host-side memory map, target layout addresses,
  // bin slot accounting, and the serialized intern/class tables.
  struct ImageInfo {
    ImageInfo();
    ImageInfo(ImageInfo&&) = default;

    // Create the image sections into the out sections variable, returns the size of the image
    // excluding the bitmap.
    size_t CreateImageSections(ImageSection* out_sections) const;

    std::unique_ptr<MemMap> image_;  // Memory mapped for generating the image.

    // Target begin of this image. Notes: It is not valid to write here, this is the address
    // of the target image, not necessarily where image_ is mapped. The address is only valid
    // after layouting (otherwise null).
    uint8_t* image_begin_ = nullptr;

    // Offset to the free space in image_, initially size of image header.
    size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment);
    uint32_t image_roots_address_ = 0;  // The image roots address in the image.
    size_t image_offset_ = 0;  // Offset of this image from the start of the first image.

    // Image size is the *address space* covered by this image. As the live bitmap is aligned
    // to the page size, the live bitmap will cover more address space than necessary. But live
    // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size.
    // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be
    // page-aligned).
    size_t image_size_ = 0;

    // Oat data.
    // Offset of the oat file for this image from start of oat files. This is
    // valid when the previous oat file has been written.
    size_t oat_offset_ = 0;
    // Layout of the loaded ELF file containing the oat file, valid after UpdateOatFileLayout().
    const uint8_t* oat_file_begin_ = nullptr;
    size_t oat_loaded_size_ = 0;
    const uint8_t* oat_data_begin_ = nullptr;
    size_t oat_size_ = 0;  // Size of the corresponding oat data.
    // The oat header checksum, valid after UpdateOatFileHeader().
    uint32_t oat_checksum_ = 0u;

    // Image bitmap which lets us know where the objects inside of the image reside.
    std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_;

    // The start offsets of the dex cache arrays.
    SafeMap<const DexFile*, size_t> dex_cache_array_starts_;

    // Offset from oat_data_begin_ to the stubs.
    uint32_t oat_address_offsets_[kOatAddressCount] = {};

    // Bin slot tracking for dirty object packing.
    size_t bin_slot_sizes_[kBinSize] = {};  // Number of bytes in a bin.
    size_t bin_slot_offsets_[kBinSize] = {};  // Number of bytes in previous bins.
    size_t bin_slot_count_[kBinSize] = {};  // Number of objects in a bin.

    // Cached size of the intern table for when we allocate memory.
    size_t intern_table_bytes_ = 0;

    // Number of image class table bytes.
    size_t class_table_bytes_ = 0;

    // Number of object fixup bytes.
    size_t object_fixup_bytes_ = 0;

    // Number of pointer fixup bytes.
    size_t pointer_fixup_bytes_ = 0;

    // Intern table associated with this image for serialization.
    std::unique_ptr<InternTable> intern_table_;

    // Class table associated with this image for serialization.
    std::unique_ptr<ClassTable> class_table_;
  };

  // We use the lock word to store the offset of the object in the image.
  void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageOffset(mirror::Object* object, size_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageOffsetAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
  void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Bin slot assignment for objects being packed into the image.
  void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
  void AssignImageBinSlot(mirror::Object* object, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* TryAssignBinSlot(WorkStack& work_stack, mirror::Object* obj, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsImageBinSlotAssigned(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  void AddDexCacheArrayRelocation(void* array, size_t offset, ObjPtr<mirror::DexCache> dex_cache)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);

  // Adapter for callback-style heap visitors; forwards to GetImageAddress().
  static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
  }

  // Address of the object's copy inside the writer's own (host-mapped) image
  // buffer, as opposed to its eventual target address.
  mirror::Object* GetLocalAddress(mirror::Object* object) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    size_t offset = GetImageOffset(object);
    size_t oat_index = GetOatIndex(object);
    const ImageInfo& image_info = GetImageInfo(oat_index);
    uint8_t* dst = image_info.image_->Begin() + offset;
    return reinterpret_cast<mirror::Object*>(dst);
  }

  // Returns the address in the boot image if we are compiling the app image.
  const uint8_t* GetOatAddress(OatAddress type) const;

  const uint8_t* GetOatAddressForOffset(uint32_t offset, const ImageInfo& image_info) const {
    // With Quick, code is within the OatFile, as they are all in one
    // .o ELF object. But interpret it as signed.
    DCHECK_LE(static_cast<int32_t>(offset), static_cast<int32_t>(image_info.oat_size_));
    DCHECK(image_info.oat_data_begin_ != nullptr);
    // An offset of 0 means "no code"; return null rather than oat_data_begin_.
    return offset == 0u ? nullptr : image_info.oat_data_begin_ + static_cast<int32_t>(offset);
  }

  // Returns true if the class was in the original requested image classes list.
  bool KeepClass(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Debug aid that lists the requested image classes.
  void DumpImageClasses();

  // Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
  void ComputeLazyFieldsForImageClasses()
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all class loaders.
  void VisitClassLoaders(ClassLoaderVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from various roots.
  void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);

  // Remove unwanted classes from the DexCache roots and preload deterministic DexCache contents.
  void PruneAndPreloadDexCache(ObjPtr<mirror::DexCache> dex_cache,
                               ObjPtr<mirror::ClassLoader> class_loader)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::classlinker_classes_lock_);

  // Verify unwanted classes removed.
  void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
  static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Lays out where the image objects will be at runtime.
  void CalculateNewObjectOffsets()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessWorkStack(WorkStack* work_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CreateHeader(size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CalculateObjectBinSlots(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UnbinObjectsIntoOffset(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void EnsureBinSlotAssignedCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void DeflateMonitorCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Creates the contiguous image in memory and adjusts pointers.
  void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
  static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
  void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupClass(mirror::Class* orig, mirror::Class* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupObject(mirror::Object* orig, mirror::Object* copy)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FixupPointerArray(mirror::Object* dst,
                         mirror::PointerArray* arr,
                         mirror::Class* klass,
                         Bin array_type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get quick code for non-resolution/imt_conflict/abstract method.
  const uint8_t* GetQuickCode(ArtMethod* method,
                              const ImageInfo& image_info,
                              bool* quick_is_interpreted)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
  size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;

  // Return true if a method is likely to be dirtied at runtime.
  bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Assign the offset for an ArtMethod.
  void AssignMethodOffset(ArtMethod* method,
                          NativeObjectRelocationType type,
                          size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if imt was newly inserted.
  bool TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);

  // Assign the offset for an IMT conflict table. Does nothing if the table already has a native
  // relocation.
  void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if klass is loaded by the boot class loader but not in the boot image.
  bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if klass depends on a boot class loader non image class. We want to prune these
  // classes since we do not want any boot class loader classes in the image. This means that
  // we also cannot have any classes which refer to these boot class loader non image classes.
  // PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
  // driver.
  bool PruneAppImageClass(ObjPtr<mirror::Class> klass)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // early_exit is true if we had a cyclic dependency anywhere down the chain.
  bool PruneAppImageClassInternal(ObjPtr<mirror::Class> klass,
                                  bool* early_exit,
                                  std::unordered_set<mirror::Class*>* visited)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // True when compiling more than one image (multi-image boot image compilation).
  bool IsMultiImage() const {
    return image_infos_.size() > 1;
  }

  static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);

  uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the object will be when the image is loaded at runtime.
  template <typename T>
  T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Location of where the temporary copy of the object currently is.
  template <typename T>
  T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if obj is inside of the boot image space. This may only return true if we are
  // compiling an app image.
  bool IsInBootImage(const void* obj) const;

  // Return true if ptr is within the boot oat file.
  bool IsInBootOatFile(const void* ptr) const;

  // Get the index of the oat file associated with the object.
  size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);

  // The oat index for shared data in multi-image and all data in single-image compilation.
  size_t GetDefaultOatIndex() const {
    return 0u;
  }

  ImageInfo& GetImageInfo(size_t oat_index) {
    return image_infos_[oat_index];
  }

  const ImageInfo& GetImageInfo(size_t oat_index) const {
    return image_infos_[oat_index];
  }

  // Find an already strong interned string in the other images or in the boot image. Used to
  // remove duplicates in the multi image and app image case.
  mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);

  // Return true if there already exists a native allocation for an object.
  bool NativeRelocationAssigned(void* ptr) const;

  void CopyReference(mirror::HeapReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyReference(mirror::CompressedReference<mirror::Object>* dest, ObjPtr<mirror::Object> src)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyAndFixupPointer(void** target, void* value);

  const CompilerDriver& compiler_driver_;

  // Beginning target image address for the first image.
  uint8_t* global_image_begin_;

  // Offset from image_begin_ to where the first object is in image_.
  size_t image_objects_offset_begin_;

  // Pointer arrays that need to be updated. Since these are only some int and long arrays, we need
  // to keep track. These include vtable arrays, iftable arrays, and dex caches.
  std::unordered_map<mirror::PointerArray*, Bin> pointer_arrays_;

  // Saved hash codes. We use these to restore lockwords which were temporarily used to have
  // forwarding addresses as well as copying over hash codes.
  std::unordered_map<mirror::Object*, uint32_t> saved_hashcode_map_;

  // Oat index map for objects.
  std::unordered_map<mirror::Object*, uint32_t> oat_index_map_;

  // Boolean flags.
  const bool compile_pic_;
  const bool compile_app_image_;

  // Size of pointers on the target architecture.
  PointerSize target_ptr_size_;

  // Image data indexed by the oat file index.
  dchecked_vector<ImageInfo> image_infos_;

  // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to
  // have one entry per art field for convenience. ArtFields are placed right after the end of the
  // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields.
  struct NativeObjectRelocation {
    size_t oat_index;
    uintptr_t offset;
    NativeObjectRelocationType type;

    // True for relocations that refer to an ArtMethod (clean, dirty, or runtime).
    bool IsArtMethodRelocation() const {
      return type == kNativeObjectRelocationTypeArtMethodClean ||
          type == kNativeObjectRelocationTypeArtMethodDirty ||
          type == kNativeObjectRelocationTypeRuntimeMethod;
    }
  };
  std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_;

  // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image.
  ArtMethod* image_methods_[ImageHeader::kImageMethodsCount];

  // Counters for measurements, used for logging only.
  uint64_t dirty_methods_;
  uint64_t clean_methods_;

  // Prune class memoization table to speed up ContainsBootClassLoaderNonImageClass.
  std::unordered_map<mirror::Class*, bool> prune_class_memo_;

  // Class loaders with a class table to write out. There should only be one class loader because
  // dex2oat loads the dex files to be compiled into a single class loader. For the boot image,
  // null is a valid entry.
  std::unordered_set<mirror::ClassLoader*> class_loaders_;

  // Which mode the image is stored as, see image.h
  const ImageHeader::StorageMode image_storage_mode_;

  // The file names of oat files.
  const std::vector<const char*>& oat_filenames_;

  // Map of dex files to the indexes of oat files that they were compiled into.
  const std::unordered_map<const DexFile*, size_t>& dex_file_oat_index_map_;

  class ComputeLazyFieldsForClassesVisitor;
  class FixupClassVisitor;
  class FixupRootVisitor;
  class FixupVisitor;
  class GetRootsVisitor;
  class ImageAddressVisitorForDexCacheArray;
  class NativeLocationVisitor;
  class PruneClassesVisitor;
  class PruneClassLoaderClassesVisitor;
  class RegisterBootClassPathClassesVisitor;
  class VisitReferencesVisitor;

  DISALLOW_COPY_AND_ASSIGN(ImageWriter);
};
636 
637 }  // namespace art
638 
639 #endif  // ART_COMPILER_IMAGE_WRITER_H_
640