/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_MEM_MAP_H_
#define ART_LIBARTBASE_BASE_MEM_MAP_H_

#include <stddef.h>
#include <sys/types.h>

#include <map>
#include <mutex>
#include <string>

#include "android-base/thread_annotations.h"
#include "macros.h"

namespace art {

#if defined(__LP64__) && !defined(__Fuchsia__) && \
    (defined(__aarch64__) || defined(__mips__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#if defined(__LP64__) && !defined(__Fuchsia__) && !defined(__x86_64__)
#error "Unrecognized 64-bit architecture."
#endif
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
// We cannot ever perform MemMap::ReplaceWith on non-Linux hosts since the mremap syscall is
// not present.
#define HAVE_MREMAP_SYSCALL false
#endif

// Used to keep track of mmap segments.
//
// On 64-bit systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access
// MemMap. Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;

  // Creates an invalid mapping.
  MemMap() {}

  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
  static MemMap Invalid() {
    return MemMap();
  }

  MemMap(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_);
  MemMap& operator=(MemMap&& other) noexcept REQUIRES(!MemMap::mem_maps_lock_) {
    Reset();
    swap(other);
    return *this;
  }

  // Releases the memory mapping.
  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);

  // Swap two MemMaps.
  void swap(MemMap& other);

  void Reset() {
    if (IsValid()) {
      DoReset();
    }
  }

  bool IsValid() const {
    return base_size_ != 0u;
  }

  // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
  // relinquishes ownership of the source mmap.
  //
  // For the call to be successful:
  //   * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
  //     [source->Begin(), source->End()].
  //   * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
  //     with them).
  //   * kCanReplaceMapping must be true.
  //   * Neither source nor dest may use manual redzones.
  //   * Both source and dest must have the same offset from the nearest page boundary.
  //   * mremap must succeed when called on the mappings.
  //
  // If this call succeeds it will return true and:
  //   * Invalidate *source.
  //   * The protection of this will remain the same.
  //   * The size of this will be the size of the source.
  //   * The data in this will be the data from source.
  //
  // If this call fails it will return false and make no changes to *source or this. The ownership
  // of the source mmap is returned to the caller.
  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
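
  // Illustrative sketch of using ReplaceWith (the names `dest` and `source` are hypothetical and
  // not part of this API):
  //
  //   std::string error;
  //   if (dest.ReplaceWith(&source, &error)) {
  //     // `source` is now invalid; `dest` holds its data and keeps its original protection.
  //   } else {
  //     // Nothing changed; the caller still owns `source` and `error` describes the failure.
  //   }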

  // Set a debug-friendly name for a map. It will be prefixed with "dalvik-".
  static void SetDebugName(void* map_ptr, const char* name, size_t size);

  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
  static MemMap MapAnonymous(const char* name,
                             uint8_t* addr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             /*inout*/MemMap* reservation,
                             /*out*/std::string* error_msg,
                             bool use_debug_name = true);
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        /*reservation=*/ nullptr,
                        error_msg);
  }
  static MemMap MapAnonymous(const char* name,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             MemMap* reservation,
                             /*out*/std::string* error_msg) {
    return MapAnonymous(name,
                        /*addr=*/ (reservation != nullptr) ? reservation->Begin() : nullptr,
                        byte_count,
                        prot,
                        low_4gb,
                        /*reuse=*/ false,
                        reservation,
                        error_msg);
  }
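
  // Illustrative sketch of the common MapAnonymous case (the name and size are hypothetical;
  // the PROT_* flags come from <sys/mman.h>):
  //
  //   std::string error_msg;
  //   MemMap map = MemMap::MapAnonymous("example-buffer",
  //                                     /*byte_count=*/ 4096,
  //                                     PROT_READ | PROT_WRITE,
  //                                     /*low_4gb=*/ false,
  //                                     &error_msg);
  //   if (!map.IsValid()) {
  //     // Allocation failed; error_msg explains why.
  //   }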

  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
  static MemMap MapDummy(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets.  The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
  static MemMap MapFile(size_t byte_count,
                        int prot,
                        int flags,
                        int fd,
                        off_t start,
                        bool low_4gb,
                        const char* filename,
                        std::string* error_msg) {
    return MapFileAtAddress(nullptr,
                            byte_count,
                            prot,
                            flags,
                            fd,
                            start,
                            /*low_4gb=*/ low_4gb,
                            filename,
                            /*reuse=*/ false,
                            /*reservation=*/ nullptr,
                            error_msg);
  }
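
  // Illustrative sketch of mapping a file read-only (`file_size`, `fd` and the path are
  // hypothetical; PROT_* and MAP_* come from <sys/mman.h>):
  //
  //   std::string error_msg;
  //   MemMap image = MemMap::MapFile(file_size,
  //                                  PROT_READ,
  //                                  MAP_PRIVATE,
  //                                  fd,
  //                                  /*start=*/ 0,
  //                                  /*low_4gb=*/ false,
  //                                  "/path/to/file",
  //                                  &error_msg);
  //   if (!image.IsValid()) {
  //     // Mapping failed; error_msg explains why.
  //   }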

  // Map part of a file, taking care of non-page aligned offsets.  The "start" offset is absolute,
  // not relative. This version allows requesting a specific address for the base of the mapping.
  //
  // `reuse` allows re-mapping an address range from an existing mapping which retains the
  // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
  // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
  //
  // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
  // This helps improve performance of the fail case since reading and printing /proc/maps takes
  // several milliseconds in the worst case.
  //
  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
  static MemMap MapFileAtAddress(uint8_t* addr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 const char* filename,
                                 bool reuse,
                                 /*inout*/MemMap* reservation,
                                 /*out*/std::string* error_msg);

  const std::string& GetName() const {
    return name_;
  }

  bool Sync();

  bool Protect(int prot);

  void MadviseDontNeedAndZero();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    std::string* error_msg,
                    bool use_debug_name = true);

  // Unmap the pages of a file at end and remap them to create another memory map.
  MemMap RemapAtEnd(uint8_t* new_end,
                    const char* tail_name,
                    int tail_prot,
                    int tail_flags,
                    int fd,
                    off_t offset,
                    std::string* error_msg,
                    bool use_debug_name = true);
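
  // Illustrative sketch of splitting a mapping with the anonymous RemapAtEnd overload (the split
  // offset and tail name are hypothetical; `new_end` is expected to be page-aligned and inside
  // the mapping):
  //
  //   std::string error_msg;
  //   MemMap tail = map.RemapAtEnd(map.Begin() + split_offset,
  //                                "example-tail",
  //                                PROT_READ | PROT_WRITE,
  //                                &error_msg);
  //   // On success `map` ends at the split point and `tail` owns the remaining pages.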

  // Take ownership of pages at the beginning of the mapping. The mapping must be an
  // anonymous reservation mapping, owning entire pages. The `byte_count` must not
  // exceed the size of this reservation.
  //
  // Returns a mapping owning `byte_count` bytes rounded up to entire pages
  // with size set to the passed `byte_count`.
  MemMap TakeReservedMemory(size_t byte_count);
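
  // Illustrative sketch of carving a chunk out of a reservation (the reservation name and sizes
  // are hypothetical):
  //
  //   std::string error_msg;
  //   MemMap reservation = MemMap::MapAnonymous("example-reservation",
  //                                             /*byte_count=*/ 2 * 1024 * 1024,
  //                                             PROT_NONE,
  //                                             /*low_4gb=*/ false,
  //                                             &error_msg);
  //   MemMap chunk = reservation.TakeReservedMemory(65536);
  //   // `chunk` now owns the first 64 KiB; `reservation` retains ownership of the remainder.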

  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Init and Shutdown are NOT thread safe.
  // Both may be called multiple times and MemMap objects may be created any
  // time after the first call to Init and before the first call to Shutdown.
  static void Init() REQUIRES(!MemMap::mem_maps_lock_);
  static void Shutdown() REQUIRES(!MemMap::mem_maps_lock_);
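
  // Illustrative ordering sketch (not prescriptive): Init() is expected before the first MemMap
  // is created, and Shutdown() after the last one has been destroyed.
  //
  //   MemMap::Init();
  //   // ... create and use MemMap objects ...
  //   MemMap::Shutdown();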

  // If the map is PROT_READ, try to read each page of the map to check it is in fact readable (not
  // faulting). This is used to diagnose a bug b/19894268 where mprotect doesn't seem to be working
  // intermittently.
  void TryReadable();

  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
  void AlignBy(size_t size);

  // For annotation reasons.
  static std::mutex* GetMemMapsLock() RETURN_CAPABILITY(mem_maps_lock_) {
    return nullptr;
  }

 private:
  MemMap(const std::string& name,
         uint8_t* begin,
         size_t size,
         void* base_begin,
         size_t base_size,
         int prot,
         bool reuse,
         size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);

  void DoReset();
  void Invalidate();
  void SwapMembers(MemMap& other);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool HasMemMap(MemMap& map)
      REQUIRES(MemMap::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      REQUIRES(MemMap::mem_maps_lock_);
  static bool ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Internal version of mmap that supports low 4gb emulation.
  static void* MapInternal(void* addr,
                           size_t length,
                           int prot,
                           int flags,
                           int fd,
                           off_t offset,
                           bool low_4gb)
      REQUIRES(!MemMap::mem_maps_lock_);
  static void* MapInternalArtLow4GBAllocator(size_t length,
                                             int prot,
                                             int flags,
                                             int fd,
                                             off_t offset)
      REQUIRES(!MemMap::mem_maps_lock_);

  // Release memory owned by a reservation mapping.
  void ReleaseReservedMemory(size_t byte_count);

  // Member function to access real_munmap.
  static bool CheckMapRequest(uint8_t* expected_ptr,
                              void* actual_ptr,
                              size_t byte_count,
                              std::string* error_msg);

  static bool CheckReservation(uint8_t* expected_ptr,
                               size_t byte_count,
                               const char* name,
                               const MemMap& reservation,
                               /*out*/std::string* error_msg);

  std::string name_;
  uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
  size_t size_ = 0u;            // Length of data.

  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
  size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (i.e. Zygote).
  int prot_ = 0;                // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  bool reuse_ = false;

  // When already_unmapped_ is true the destructor will not call munmap.
  bool already_unmapped_ = false;

  size_t redzone_size_ = 0u;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.

  static void* TryMemMapLow4GB(void* ptr,
                               size_t page_aligned_byte_count,
                               int prot,
                               int flags,
                               int fd,
                               off_t offset);
#endif

  static void TargetMMapInit();
  static void* TargetMMap(void* start, size_t len, int prot, int flags, int fd, off_t fd_off);
  static int TargetMUnmap(void* start, size_t len);

  static std::mutex* mem_maps_lock_;

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

inline void swap(MemMap& lhs, MemMap& rhs) {
  lhs.swap(rhs);
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);

// Zero and release pages if possible, no requirements on alignments.
void ZeroAndReleasePages(void* address, size_t length);

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_MEM_MAP_H_