/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <inttypes.h>
#include <stdlib.h>
#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#include <memory>
#include <sstream>

#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"
#include "backtrace/BacktraceMap.h"
#include "cutils/ashmem.h"

#include "base/allocator.h"
#include "base/memory_tool.h"
#include "globals.h"
#include "utils.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

using android::base::StringPrintf;
using android::base::unique_fd;

using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;
// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;

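// Pretty-print a range of BacktraceMap entries in /proc/self/maps style, one mapping per line.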
static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
  for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(it->start),
                       static_cast<uint32_t>(it->end),
                       (it->flags & PROT_READ) ? 'r' : '-',
                       (it->flags & PROT_WRITE) ? 'w' : '-',
                       (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::mutex* MemMap::mem_maps_lock_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in the 32-bit address range for 64-bit architectures that do not support
// MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate a random starting position.
// To avoid interfering with the image position, take the image's address and only place
// allocations below it. Current formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

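// Pick the initial randomized base address for the low-4GB linear scan.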
static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map, consulting either
// the gMaps registry or the /proc/self/maps entries.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    for (auto& pair : *gMaps) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }

  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)  // start of new within old
        && (end > it->start && end <= it->end)) {  // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    if ((begin >= it->start && begin < it->end)      // start of new within old
        || (end > it->start && end < it->end)        // end of new within old
        || (begin <= it->start && end > it->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
                                it->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest validates a non-MAP_FAILED mmap result against the expected address,
// calling munmap and reporting the reason in error_msg if validation fails.
//
// If expected_ptr is null, nothing is checked beyond the fact that actual_ptr is not
// MAP_FAILED. However, if expected_ptr is non-null, we check that actual_ptr == expected_ptr,
// and if not, report in error_msg the conflicting mapping if one was found, or a generic
// error otherwise.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try to generate a full error
    // message with the overlapping mapping. There's no guarantee
    // that there will be an overlap though, since
    // - The kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   used, even if there is no overlap, and
    // - There might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.
    std::string error_detail;
    CheckNonOverlapping(expected, limit, &error_detail);
    std::ostringstream os;
    os << StringPrintf("Failed to mmap at expected address, mapped at "
                       "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                       actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

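// A minimal usage sketch for MapAnonymous below (illustrative only; assumes the header supplies
// a default for use_ashmem, and the region name and size are made up):
//
//   std::string error_msg;
//   MemMap* map = MemMap::MapAnonymous("example-region",
//                                      /* expected_ptr */ nullptr,
//                                      16 * KB,
//                                      PROT_READ | PROT_WRITE,
//                                      /* low_4gb */ false,
//                                      /* reuse */ false,
//                                      &error_msg);
//   CHECK(map != nullptr) << error_msg;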
MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  use_ashmem = use_ashmem && !kIsTargetLinux;
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not running on Android (i.e. on the host, or assuming a Linux target) ashmem is
      // faked using files in /tmp. Ensure that such files won't fail due to ulimit restrictions;
      // if they would, fall back to a regular anonymous mmap.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  unique_fd fd;

  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));

    if (fd.get() == -1) {
      // We failed to create the ashmem region. Print a warning, but continue
      // anyway by creating a true anonymous mmap with an fd of -1. It is
      // better to use an unlabelled anonymous map than to fail to create a
      // map at all.
      PLOG(WARNING) << "ashmem_create_region failed for '" << name << "'";
    } else {
      // We succeeded in creating the ashmem region. Use the created ashmem
      // region as backing for the mmap.
      flags &= ~MAP_ANONYMOUS;
    }
  }

  // Save errno immediately after the mmap so we can report it in the error message.
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                    "See process maps in the log.",
                                expected_ptr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}

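// Create a placeholder MemMap that merely describes [addr, addr + byte_count) without mapping
// anything; reuse == true, so the destructor will not munmap the range.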
MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);
    DCHECK(error_msg != nullptr);
    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, i.e. non-null) to be page-aligned to the file
  // but not necessarily to virtual memory. mmap will page-align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t *real_start = actual + page_offset;
    const uint8_t *real_end = actual + page_offset + byte_count;
    const uint8_t *mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    int result = munmap(base_begin_, base_size_);
    if (result == -1) {
      PLOG(FATAL) << "munmap failed";
    }
  }

  // Remove it from gMaps.
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  bool found = false;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      gMaps->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to gMaps.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

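// Shrink this map so that it ends at new_end and return a new MemMap covering the tail,
// [new_end, old base end), remapped with tail_prot. Sketch of a split (illustrative names;
// assumes the header supplies a default for use_ashmem):
//
//   std::string error_msg;
//   MemMap* tail = map->RemapAtEnd(map->Begin() + split_offset, "example-tail",
//                                  PROT_READ, &error_msg);
//   CHECK(tail != nullptr) << error_msg;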
MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  use_ashmem = use_ashmem && !kIsTargetLinux;
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  unique_fd fd;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
    flags = MAP_PRIVATE | MAP_FIXED;
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't allocate memory between the munmap and the mmap calls; otherwise libc (or something
  // else) might take this memory region. Note this isn't perfect, as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin,
                                                    tail_base_size,
                                                    tail_prot,
                                                    flags,
                                                    fd.get(),
                                                    0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

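// Release the map's physical pages back to the kernel. On kernels where MADV_DONTNEED does not
// guarantee zeroed pages on the next touch, zero them explicitly first.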
void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  bool result;
  if (redzone_size_ != 0) {
    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
    // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
    // noaccess protection from the msync range. b/27552451.
    uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
    MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
    result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
    MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
  } else {
    result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
  }
  return result;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

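// Return true if the registered maps from begin_map through end_map form a contiguous chain
// with no unmapped gaps in between.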
bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *gMaps;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

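// Return true if 'map' is registered in gMaps under its base address. The caller must hold
// mem_maps_lock_.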
bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

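// Of all registered maps starting exactly at 'address', return the one with the largest base
// size, or nullptr if there is none.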
MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(address), end = gMaps->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  if (mem_maps_lock_ != nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    return;
  }
  mem_maps_lock_ = new std::mutex();
  // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DCHECK(gMaps == nullptr);
  gMaps = new Maps;
}

void MemMap::Shutdown() {
  if (mem_maps_lock_ == nullptr) {
    // If MemMap::Shutdown is called more than once, there is no effect.
    return;
  }
  {
    // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    delete gMaps;
    gMaps = nullptr;
  }
  delete mem_maps_lock_;
  mem_maps_lock_ = nullptr;
}

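// Shrink the mapping to new_size bytes by unmapping the tail. Growing is not supported, and
// only maps with base_size_ == size_ are handled.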
void MemMap::SetSize(size_t new_size) {
  if (new_size == base_size_) {
    return;
  }
  CHECK_ALIGNED(new_size, kPageSize);
  CHECK_EQ(base_size_, size_) << "Unsupported";
  CHECK_LE(new_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_size),
      base_size_ - new_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
                  base_size_ - new_size), 0) << new_size << " " << base_size_;
  base_size_ = new_size;
  size_ = new_size;
}

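// Perform the actual mmap. On 64-bit targets, low_4gb is honored either by linearly scanning
// the low 4GB for a free region (USE_ART_LOW_4G_ALLOCATOR) or by passing MAP_32BIT.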
void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  if (low_4gb) {
    DCHECK_EQ(flags & MAP_FIXED, 0);
  }
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  if (low_4gb && addr == nullptr) {
    bool first_run = true;

    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      // Use gMaps as an optimization to skip over large maps.
      // Find the first map which is address > ptr.
      auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
      if (it != gMaps->begin()) {
        auto before_it = it;
        --before_it;
        // Start at the end of the map before the upper bound.
        ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
        CHECK_ALIGNED(ptr, kPageSize);
      }
      while (it != gMaps->end()) {
        // How much space do we have until the next map?
        size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
        // If the space may be sufficient, break out of the loop.
        if (delta >= length) {
          break;
        }
        // Otherwise, skip to the end of the map.
        ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
        CHECK_ALIGNED(ptr, kPageSize);
        ++it;
      }

      // Try to see if we get lucky with this address since none of the ART maps overlap.
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
        return actual;
      }

      if (4U * GB - ptr < length) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try again from the bottom.
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free.
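      // msync() fails with ENOMEM when the page is not mapped, so a successful call means the
      // page is already taken.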
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // Update early, as we break out once we find and map a region.

      if (safe) {
        actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
        if (actual != MAP_FAILED) {
          return actual;
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      errno = ENOMEM;
    }
  } else {
    actual = mmap(addr, length, prot, flags, fd, offset);
  }

#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

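// Touch the first byte of every page to verify the mapping is readable; this faults if the
// protection was not set as expected.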
void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away
  // the reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

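// Zero [address, address + length): madvise whole pages back to the kernel where possible and
// memset the unaligned head and tail.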
void ZeroAndReleasePages(void* address, size_t length) {
  if (length == 0) {
    return;
  }
  uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
  uint8_t* const mem_end = mem_begin + length;
  uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
  uint8_t* const page_end = AlignDown(mem_end, kPageSize);
  if (!kMadviseZeroes || page_begin >= page_end) {
    // No possible area to madvise.
    std::fill(mem_begin, mem_end, 0);
  } else {
    // Spans one or more pages.
    DCHECK_LE(mem_begin, page_begin);
    DCHECK_LE(page_begin, page_end);
    DCHECK_LE(page_end, mem_end);
    std::fill(mem_begin, page_begin, 0);
    CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
    std::fill(page_end, mem_end, 0);
  }
}

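// Trim the mapping so that both its base address and size are aligned to 'size', unmapping the
// unaligned head and tail and re-registering the map under its new base address.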
void MemMap::AlignBy(size_t size) {
  CHECK_EQ(begin_, base_begin_) << "Unsupported";
  CHECK_EQ(size_, base_size_) << "Unsupported";
  CHECK_GT(size, static_cast<size_t>(kPageSize));
  CHECK_ALIGNED(size, kPageSize);
  if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
      IsAlignedParam(base_size_, size)) {
    // Already aligned.
    return;
  }
  uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
  uint8_t* base_end = base_begin + base_size_;
  uint8_t* aligned_base_begin = AlignUp(base_begin, size);
  uint8_t* aligned_base_end = AlignDown(base_end, size);
  CHECK_LE(base_begin, aligned_base_begin);
  CHECK_LE(aligned_base_end, base_end);
  size_t aligned_base_size = aligned_base_end - aligned_base_begin;
  CHECK_LT(aligned_base_begin, aligned_base_end)
      << "base_begin = " << reinterpret_cast<void*>(base_begin)
      << " base_end = " << reinterpret_cast<void*>(base_end);
  CHECK_GE(aligned_base_size, size);
  // Unmap the unaligned parts.
  if (base_begin < aligned_base_begin) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
    CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
        << "base_begin=" << reinterpret_cast<void*>(base_begin)
        << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
  }
  if (aligned_base_end < base_end) {
    MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
    CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
        << "base_end=" << reinterpret_cast<void*>(base_end)
        << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
  }
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  base_begin_ = aligned_base_begin;
  base_size_ = aligned_base_size;
  begin_ = aligned_base_begin;
  size_ = aligned_base_size;
  DCHECK(gMaps != nullptr);
  if (base_begin < aligned_base_begin) {
    auto it = gMaps->find(base_begin);
    CHECK(it != gMaps->end()) << "MemMap not found";
    gMaps->erase(it);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

}  // namespace art