/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <inttypes.h>
#include <stdlib.h>
#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
#ifndef ANDROID_OS
#include <sys/resource.h>
#endif

#include <map>
#include <memory>
#include <sstream>

#include "android-base/stringprintf.h"
#include "android-base/unique_fd.h"
#include "backtrace/BacktraceMap.h"
#include "cutils/ashmem.h"

#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/file_utils.h"
#include "base/globals.h"
#include "base/logging.h"  // For VLOG_IS_ON.
#include "base/memory_tool.h"
#include "base/utils.h"

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace art {

using android::base::StringPrintf;
using android::base::unique_fd;

template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
using AllocationTrackingMultiMap =
    std::multimap<Key, T, Compare, TrackingAllocator<std::pair<const Key, T>, kTag>>;

using Maps = AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps>;

// All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;

static std::ostream& operator<<(
    std::ostream& os,
    std::pair<BacktraceMap::iterator, BacktraceMap::iterator> iters) {
  for (BacktraceMap::iterator it = iters.first; it != iters.second; ++it) {
    const backtrace_map_t* entry = *it;
    os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
                       static_cast<uint32_t>(entry->start),
                       static_cast<uint32_t>(entry->end),
                       (entry->flags & PROT_READ) ? 'r' : '-',
                       (entry->flags & PROT_WRITE) ? 'w' : '-',
                       (entry->flags & PROT_EXEC) ? 'x' : '-', entry->name.c_str());
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
    void* base = it->first;
    MemMap* map = it->second;
    CHECK_EQ(base, map->BaseBegin());
    os << *map << std::endl;
  }
  return os;
}

std::mutex* MemMap::mem_maps_lock_ = nullptr;

#if USE_ART_LOW_4G_ALLOCATOR
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// arc4random as an entropy source is exposed in Bionic, but not in glibc. When we
// do not have Bionic, simply start with LOW_MEM_START.
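//
// As an illustrative worked example (the concrete values are assumptions, not tied to
// any particular build): with ART_BASE_ADDRESS == 0x70000000, kPageSize == 4096 and
// LOW_MEM_START == 64 * KB, CreateStartPos() below computes mask_ones == 0x3fffffff and
// mask == 0x3ffff000, so a random input of 0x123456789abcdef0 yields
// (0x9abcdef0 & 0x3ffff000) + 0x10000 == 0x1abcd000 + 0x10000 == 0x1abdd000.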

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data.
  return (input & mask) + LOW_MEM_START;
}
#endif

static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint64_t random_data;
  arc4random_buf(&random_data, sizeof(random_data));
  return CreateStartPos(random_data);
#else
  // No arc4random on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif

// Return true if the address range is contained in a single memory map by either reading
// the gMaps variable or the /proc/self/maps entries.
bool MemMap::ContainedWithinExistingMap(uint8_t* ptr, size_t size, std::string* error_msg) {
  uintptr_t begin = reinterpret_cast<uintptr_t>(ptr);
  uintptr_t end = begin + size;

  // There is a suspicion that BacktraceMap::Create is occasionally missing maps. TODO: Investigate
  // further.
  {
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    for (auto& pair : *gMaps) {
      MemMap* const map = pair.second;
      if (begin >= reinterpret_cast<uintptr_t>(map->Begin()) &&
          end <= reinterpret_cast<uintptr_t>(map->End())) {
        return true;
      }
    }
  }

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map == nullptr) {
    if (error_msg != nullptr) {
      *error_msg = StringPrintf("Failed to build process map");
    }
    return false;
  }

  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::iterator it = map->begin(); it != map->end(); ++it) {
    const backtrace_map_t* entry = *it;
    if ((begin >= entry->start && begin < entry->end)     // start of new within old
        && (end > entry->start && end <= entry->end)) {   // end of new within old
      return true;
    }
  }
  if (error_msg != nullptr) {
    PrintFileToLog("/proc/self/maps", LogSeverity::ERROR);
    *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " does not overlap "
                              "any existing map. See process maps in the log.", begin, end);
  }
  return false;
}

// Return true if the address range does not conflict with any /proc/self/maps entry.
static bool CheckNonOverlapping(uintptr_t begin,
                                uintptr_t end,
                                std::string* error_msg) {
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
  if (map.get() == nullptr) {
    *error_msg = StringPrintf("Failed to build process map");
    return false;
  }
  ScopedBacktraceMapIteratorLock lock(map.get());
  for (BacktraceMap::iterator it = map->begin(); it != map->end(); ++it) {
    const backtrace_map_t* entry = *it;
    if ((begin >= entry->start && begin < entry->end)      // start of new within old
        || (end > entry->start && end < entry->end)        // end of new within old
        || (begin <= entry->start && end > entry->end)) {  // start/end of new includes all of old
      std::ostringstream map_info;
      map_info << std::make_pair(it, map->end());
      *error_msg = StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
                                "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n%s",
                                begin, end,
                                static_cast<uintptr_t>(entry->start), static_cast<uintptr_t>(entry->end),
                                entry->name.c_str(),
                                map_info.str().c_str());
      return false;
    }
  }
  return true;
}

// CheckMapRequest to validate a non-MAP_FAILED mmap result based on
// the expected value, calling munmap if validation fails, giving the
// reason in error_msg.
//
// If the expected_ptr is null, nothing is checked beyond the fact
// that the actual_ptr is not MAP_FAILED. However, if expected_ptr is
// non-null, we check that actual_ptr == expected_ptr, and if not,
// report in error_msg what the conflicting mapping was if one is
// found, or a generic error otherwise.
static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte_count,
                            std::string* error_msg) {
  // Handled first by caller for more specific error messages.
  CHECK(actual_ptr != MAP_FAILED);

  if (expected_ptr == nullptr) {
    return true;
  }

  uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
  uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
  uintptr_t limit = expected + byte_count;

  if (expected_ptr == actual_ptr) {
    return true;
  }

  // We asked for an address but didn't get what we wanted, all paths below here should fail.
  int result = munmap(actual_ptr, byte_count);
  if (result == -1) {
    PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
  }

  if (error_msg != nullptr) {
    // We call this here so that we can try to generate a full error
    // message with the overlapping mapping. There's no guarantee that
    // there will be an overlap though, since
    // - the kernel is not *required* to honor expected_ptr unless MAP_FIXED is
    //   used, even if there is no overlap, and
    // - there might have been an overlap at the point of mmap, but the
    //   overlapping region has since been unmapped.
    std::string error_detail;
    CheckNonOverlapping(expected, limit, &error_detail);
    std::ostringstream os;
    os <<  StringPrintf("Failed to mmap at expected address, mapped at "
                        "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR,
                        actual, expected);
    if (!error_detail.empty()) {
      os << " : " << error_detail;
    }
    *error_msg = os.str();
  }
  return false;
}

#if USE_ART_LOW_4G_ALLOCATOR
static inline void* TryMemMapLow4GB(void* ptr,
                                    size_t page_aligned_byte_count,
                                    int prot,
                                    int flags,
                                    int fd,
                                    off_t offset) {
  void* actual = mmap(ptr, page_aligned_byte_count, prot, flags, fd, offset);
  if (actual != MAP_FAILED) {
    // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
    // 4GB. If this is the case, unmap and retry.
    if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count >= 4 * GB) {
      munmap(actual, page_aligned_byte_count);
      actual = MAP_FAILED;
    }
  }
  return actual;
}
#endif

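// Illustrative use only (a sketch, not lifted from a specific caller; the region name and
// size below are hypothetical). A typical anonymous mapping request looks like:
//
//   std::string error_msg;
//   MemMap* map = MemMap::MapAnonymous("some-dalvik-region",
//                                      /* expected_ptr */ nullptr,
//                                      16 * kPageSize,
//                                      PROT_READ | PROT_WRITE,
//                                      /* low_4gb */ false,
//                                      /* reuse */ false,
//                                      &error_msg,
//                                      /* use_ashmem */ true);
//   CHECK(map != nullptr) << error_msg;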
MemMap* MemMap::MapAnonymous(const char* name,
                             uint8_t* expected_ptr,
                             size_t byte_count,
                             int prot,
                             bool low_4gb,
                             bool reuse,
                             std::string* error_msg,
                             bool use_ashmem) {
#ifndef __LP64__
  UNUSED(low_4gb);
#endif
  use_ashmem = use_ashmem && !kIsTargetLinux;
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);

    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
    flags |= MAP_FIXED;
  }

  if (use_ashmem) {
    if (!kIsTargetBuild) {
      // When not on Android (either host or assuming a Linux target) ashmem is faked using
      // files in /tmp. Ensure that such files won't fail due to ulimit restrictions; if they
      // would, fall back to a regular mmap.
      struct rlimit rlimit_fsize;
      CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
      use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
        (page_aligned_byte_count < rlimit_fsize.rlim_cur);
    }
  }

  unique_fd fd;


  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));

    if (fd.get() == -1) {
      // We failed to create the ashmem region. Print a warning, but continue
      // anyway by creating a true anonymous mmap with an fd of -1. It is
      // better to use an unlabelled anonymous map than to fail to create a
      // map at all.
      PLOG(WARNING) << "ashmem_create_region failed for '" << name << "'";
    } else {
      // We succeeded in creating the ashmem region. Use the created ashmem
      // region as backing for the mmap.
      flags &= ~MAP_ANONYMOUS;
    }
  }

  // We need to store and potentially set an error number for pretty printing of errors
  int saved_errno = 0;

  void* actual = MapInternal(expected_ptr,
                             page_aligned_byte_count,
                             prot,
                             flags,
                             fd.get(),
                             0,
                             low_4gb);
  saved_errno = errno;

  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                    "See process maps in the log.",
                                expected_ptr,
                                page_aligned_byte_count,
                                prot,
                                flags,
                                fd.get(),
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot, reuse);
}

MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
  if (byte_count == 0) {
    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
  }
  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}

template<typename A, typename B>
static ptrdiff_t PointerDiff(A* a, B* b) {
  return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
}

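// Illustrative use only (a sketch; 'dst' and 'src' are hypothetical maps). On success the
// source map is deleted and *source_ptr is cleared, so the caller must not touch it again:
//
//   MemMap* src = ...;
//   std::string error;
//   if (!dst->ReplaceWith(&src, &error)) {
//     LOG(WARNING) << "Could not replace mapping: " << error;
//   }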
bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
#if !HAVE_MREMAP_SYSCALL
  UNUSED(source_ptr);
  *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
  return false;
#else  // !HAVE_MREMAP_SYSCALL
  CHECK(source_ptr != nullptr);
  CHECK(*source_ptr != nullptr);
  if (!MemMap::kCanReplaceMapping) {
    *error = "Unable to perform atomic replace due to runtime environment!";
    return false;
  }
  MemMap* source = *source_ptr;
  // Neither mapping may be a reuse mapping.
  if (source->reuse_ || reuse_) {
    *error = "One or both mappings is not a real mmap!";
    return false;
  }
  // TODO Support redzones.
  if (source->redzone_size_ != 0 || redzone_size_ != 0) {
    *error = "source and dest have different redzone sizes";
    return false;
  }
  // Make sure they have the same offset from the actual mmap'd address
  if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
    *error =
        "source starts at a different offset from the mmap. Cannot atomically replace mappings";
    return false;
  }
  // mremap doesn't allow the final [start, end] to overlap with the initial [start, end] (it's like
  // memcpy but the check is explicit and actually done).
  if (source->BaseBegin() > BaseBegin() &&
      reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
      reinterpret_cast<uint8_t*>(source->BaseBegin())) {
    *error = "destination memory pages overlap with source memory pages";
    return false;
  }
  // Change the protection to match the new location.
  int old_prot = source->GetProtect();
  if (!source->Protect(GetProtect())) {
    *error = "Could not change protections for source to those required for dest.";
    return false;
  }

  // Do the mremap.
  void* res = mremap(/*old_address*/source->BaseBegin(),
                     /*old_size*/source->BaseSize(),
                     /*new_size*/source->BaseSize(),
                     /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
                     /*new_address*/BaseBegin());
  if (res == MAP_FAILED) {
    int saved_errno = errno;
    // Wasn't able to move mapping. Change the protection of source back to the original one and
    // return.
    source->Protect(old_prot);
    *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
    return false;
  }
  CHECK(res == BaseBegin());

  // The new base_size is all the pages of the 'source' plus any remaining dest pages. We will unmap
  // them later.
  size_t new_base_size = std::max(source->base_size_, base_size_);

  // Delete the old source, don't unmap it though (set reuse) since it is already gone.
  *source_ptr = nullptr;
  size_t source_size = source->size_;
  source->already_unmapped_ = true;
  delete source;
  source = nullptr;

  size_ = source_size;
  base_size_ = new_base_size;
  // Reduce base_size if needed (this will unmap the extra pages).
  SetSize(source_size);

  return true;
#endif  // !HAVE_MREMAP_SYSCALL
}

MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
                                 size_t byte_count,
                                 int prot,
                                 int flags,
                                 int fd,
                                 off_t start,
                                 bool low_4gb,
                                 bool reuse,
                                 const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));

  // Note that we do not allow MAP_FIXED unless reuse == true, i.e. we
  // expect this mapping to be contained within an existing map.
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected_ptr != nullptr);
    DCHECK(error_msg != nullptr);
    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
        << ((error_msg != nullptr) ? *error_msg : std::string());
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
    // Don't bother checking for an overlapping region here. We'll
    // check this if required after the fact inside CheckMapRequest.
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected_ptr' is modified (if specified, ie non-null) to be page aligned to the file but
  // not necessarily to virtual memory. mmap will page align 'expected' for us.
  uint8_t* page_aligned_expected =
      (expected_ptr == nullptr) ? nullptr : (expected_ptr - page_offset);

  size_t redzone_size = 0;
  if (RUNNING_ON_MEMORY_TOOL && kMemoryToolAddsRedzones && expected_ptr == nullptr) {
    redzone_size = kPageSize;
    page_aligned_byte_count += redzone_size;
  }

  uint8_t* actual = reinterpret_cast<uint8_t*>(MapInternal(page_aligned_expected,
                                                           page_aligned_byte_count,
                                                           prot,
                                                           flags,
                                                           fd,
                                                           page_aligned_offset,
                                                           low_4gb));
  if (actual == MAP_FAILED) {
    if (error_msg != nullptr) {
      auto saved_errno = errno;

      if (kIsDebugBuild || VLOG_IS_ON(oat)) {
        PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
      }

      *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                                ") of file '%s' failed: %s. See process maps in the log.",
                                page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                                static_cast<int64_t>(page_aligned_offset), filename,
                                strerror(saved_errno));
    }
    return nullptr;
  }
  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
    return nullptr;
  }
  if (redzone_size != 0) {
    const uint8_t *real_start = actual + page_offset;
    const uint8_t *real_end = actual + page_offset + byte_count;
    const uint8_t *mapping_end = actual + page_aligned_byte_count;

    MEMORY_TOOL_MAKE_NOACCESS(actual, real_start - actual);
    MEMORY_TOOL_MAKE_NOACCESS(real_end, mapping_end - real_end);
    page_aligned_byte_count -= redzone_size;
  }

  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot, reuse, redzone_size);
}

MemMap::~MemMap() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }

  // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
  // before it is returned to the system.
  if (redzone_size_ != 0) {
    MEMORY_TOOL_MAKE_UNDEFINED(
        reinterpret_cast<char*>(base_begin_) + base_size_ - redzone_size_,
        redzone_size_);
  }

  if (!reuse_) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
    if (!already_unmapped_) {
      int result = munmap(base_begin_, base_size_);
      if (result == -1) {
        PLOG(FATAL) << "munmap failed";
      }
    }
  }

  // Remove it from gMaps.
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  bool found = false;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
       it != end && it->first == base_begin_; ++it) {
    if (it->second == this) {
      found = true;
      gMaps->erase(it);
      break;
    }
  }
  CHECK(found) << "MemMap not found";
}

MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
               size_t base_size, int prot, bool reuse, size_t redzone_size)
    : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
      prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
  if (size_ == 0) {
    CHECK(begin_ == nullptr);
    CHECK(base_begin_ == nullptr);
    CHECK_EQ(base_size_, 0U);
  } else {
    CHECK(begin_ != nullptr);
    CHECK(base_begin_ != nullptr);
    CHECK_NE(base_size_, 0U);

    // Add it to gMaps.
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg, bool use_ashmem) {
  use_ashmem = use_ashmem && !kIsTargetLinux;
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  DCHECK_ALIGNED(begin_, kPageSize);
  DCHECK_ALIGNED(base_begin_, kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
  DCHECK_ALIGNED(new_end, kPageSize);
  uint8_t* old_end = begin_ + size_;
  uint8_t* old_base_end = reinterpret_cast<uint8_t*>(base_begin_) + base_size_;
  uint8_t* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
  }
  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  uint8_t* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK_ALIGNED(tail_base_size, kPageSize);

  unique_fd fd;
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += tail_name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
    flags = MAP_PRIVATE | MAP_FIXED;
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                tail_name, strerror(errno));
      return nullptr;
    }
  }

  MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
                              tail_base_begin, tail_base_size, name_.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads from trying to take this memory region here.
  uint8_t* actual = reinterpret_cast<uint8_t*>(mmap(tail_base_begin,
                                                    tail_base_size,
                                                    tail_prot,
                                                    flags,
                                                    fd.get(),
                                                    0));
  if (actual == MAP_FAILED) {
    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                              "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                              fd.get());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}

void MemMap::MadviseDontNeedAndZero() {
  if (base_begin_ != nullptr || base_size_ != 0) {
    if (!kMadviseZeroes) {
      memset(base_begin_, 0, base_size_);
    }
    int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
    if (result == -1) {
      PLOG(WARNING) << "madvise failed";
    }
  }
}

bool MemMap::Sync() {
  bool result;
  if (redzone_size_ != 0) {
    // To avoid valgrind errors, temporarily lift the lower-end noaccess protection before passing
    // it to msync() as it only accepts page-aligned base address, and exclude the higher-end
    // noaccess protection from the msync range. b/27552451.
    uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
    MEMORY_TOOL_MAKE_DEFINED(base_begin, begin_ - base_begin);
    result = msync(BaseBegin(), End() - base_begin, MS_SYNC) == 0;
    MEMORY_TOOL_MAKE_NOACCESS(base_begin, begin_ - base_begin);
  } else {
    result = msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
  }
  return result;
}

bool MemMap::Protect(int prot) {
  if (base_begin_ == nullptr && base_size_ == 0) {
    prot_ = prot;
    return true;
  }

  if (mprotect(base_begin_, base_size_, prot) == 0) {
    prot_ = prot;
    return true;
  }

  PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
              << prot << ") failed";
  return false;
}

bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  CHECK(begin_map != nullptr);
  CHECK(end_map != nullptr);
  CHECK(HasMemMap(begin_map));
  CHECK(HasMemMap(end_map));
  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
  MemMap* map = begin_map;
  while (map->BaseBegin() != end_map->BaseBegin()) {
    MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
    if (next_map == nullptr) {
      // Found a gap.
      return false;
    }
    map = next_map;
  }
  return true;
}

void MemMap::DumpMaps(std::ostream& os, bool terse) {
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DumpMapsLocked(os, terse);
}

void MemMap::DumpMapsLocked(std::ostream& os, bool terse) {
  const auto& mem_maps = *gMaps;
  if (!terse) {
    os << mem_maps;
    return;
  }

  // Terse output example:
  //   [MemMap: 0x409be000+0x20P~0x11dP+0x20P~0x61cP+0x20P prot=0x3 LinearAlloc]
  //   [MemMap: 0x451d6000+0x6bP(3) prot=0x3 large object space allocation]
  // The details:
  //   "+0x20P" means 0x20 pages taken by a single mapping,
  //   "~0x11dP" means a gap of 0x11d pages,
  //   "+0x6bP(3)" means 3 mappings one after another, together taking 0x6b pages.
  os << "MemMap:" << std::endl;
  for (auto it = mem_maps.begin(), maps_end = mem_maps.end(); it != maps_end;) {
    MemMap* map = it->second;
    void* base = it->first;
    CHECK_EQ(base, map->BaseBegin());
    os << "[MemMap: " << base;
    ++it;
    // Merge consecutive maps with the same protect flags and name.
    constexpr size_t kMaxGaps = 9;
    size_t num_gaps = 0;
    size_t num = 1u;
    size_t size = map->BaseSize();
    CHECK_ALIGNED(size, kPageSize);
    void* end = map->BaseEnd();
    while (it != maps_end &&
        it->second->GetProtect() == map->GetProtect() &&
        it->second->GetName() == map->GetName() &&
        (it->second->BaseBegin() == end || num_gaps < kMaxGaps)) {
      if (it->second->BaseBegin() != end) {
        ++num_gaps;
        os << "+0x" << std::hex << (size / kPageSize) << "P";
        if (num != 1u) {
          os << "(" << std::dec << num << ")";
        }
        size_t gap =
            reinterpret_cast<uintptr_t>(it->second->BaseBegin()) - reinterpret_cast<uintptr_t>(end);
        CHECK_ALIGNED(gap, kPageSize);
        os << "~0x" << std::hex << (gap / kPageSize) << "P";
        num = 0u;
        size = 0u;
      }
      CHECK_ALIGNED(it->second->BaseSize(), kPageSize);
      ++num;
      size += it->second->BaseSize();
      end = it->second->BaseEnd();
      ++it;
    }
    os << "+0x" << std::hex << (size / kPageSize) << "P";
    if (num != 1u) {
      os << "(" << std::dec << num << ")";
    }
    os << " prot=0x" << std::hex << map->GetProtect() << " " << map->GetName() << "]" << std::endl;
  }
}

bool MemMap::HasMemMap(MemMap* map) {
  void* base_begin = map->BaseBegin();
  for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
       it != end && it->first == base_begin; ++it) {
    if (it->second == map) {
      return true;
    }
  }
  return false;
}

MemMap* MemMap::GetLargestMemMapAt(void* address) {
  size_t largest_size = 0;
  MemMap* largest_map = nullptr;
  DCHECK(gMaps != nullptr);
  for (auto it = gMaps->lower_bound(address), end = gMaps->end();
       it != end && it->first == address; ++it) {
    MemMap* map = it->second;
    CHECK(map != nullptr);
    if (largest_size < map->BaseSize()) {
      largest_size = map->BaseSize();
      largest_map = map;
    }
  }
  return largest_map;
}

void MemMap::Init() {
  if (mem_maps_lock_ != nullptr) {
    // dex2oat calls MemMap::Init twice since it's needed before the runtime is created.
    return;
  }
  mem_maps_lock_ = new std::mutex();
  // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  DCHECK(gMaps == nullptr);
  gMaps = new Maps;
}

void MemMap::Shutdown() {
  if (mem_maps_lock_ == nullptr) {
    // If MemMap::Shutdown is called more than once, there is no effect.
    return;
  }
  {
    // Not for thread safety, but for the annotation that gMaps is GUARDED_BY(mem_maps_lock_).
    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
    DCHECK(gMaps != nullptr);
    delete gMaps;
    gMaps = nullptr;
  }
  delete mem_maps_lock_;
  mem_maps_lock_ = nullptr;
}

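// A worked example for SetSize() below (illustrative, assuming kPageSize == 4096 and
// Begin() == BaseBegin()): shrinking a 3-page map with SetSize(0x1800) rounds the new
// base size up to 0x2000, so the final page [BaseBegin() + 0x2000, BaseBegin() + 0x3000)
// is munmap'd and size_ becomes 0x1800.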
void MemMap::SetSize(size_t new_size) {
  CHECK_LE(new_size, size_);
  size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
                                 kPageSize);
  if (new_base_size == base_size_) {
    size_ = new_size;
    return;
  }
  CHECK_LT(new_base_size, base_size_);
  MEMORY_TOOL_MAKE_UNDEFINED(
      reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
                              new_base_size),
      base_size_ - new_base_size);
  CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
                  base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
  base_size_ = new_base_size;
  size_ = new_size;
}

void* MemMap::MapInternalArtLow4GBAllocator(size_t length,
                                            int prot,
                                            int flags,
                                            int fd,
                                            off_t offset) {
#if USE_ART_LOW_4G_ALLOCATOR
  void* actual = MAP_FAILED;

  bool first_run = true;

  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
    // Use gMaps as an optimization to skip over large maps.
    // Find the first map which is address > ptr.
    auto it = gMaps->upper_bound(reinterpret_cast<void*>(ptr));
    if (it != gMaps->begin()) {
      auto before_it = it;
      --before_it;
      // Start at the end of the map before the upper bound.
      ptr = std::max(ptr, reinterpret_cast<uintptr_t>(before_it->second->BaseEnd()));
      CHECK_ALIGNED(ptr, kPageSize);
    }
    while (it != gMaps->end()) {
      // How much space do we have until the next map?
      size_t delta = reinterpret_cast<uintptr_t>(it->first) - ptr;
      // If the space may be sufficient, break out of the loop.
      if (delta >= length) {
        break;
      }
      // Otherwise, skip to the end of the map.
      ptr = reinterpret_cast<uintptr_t>(it->second->BaseEnd());
      CHECK_ALIGNED(ptr, kPageSize);
      ++it;
    }

    // Try to see if we get lucky with this address since none of the ART maps overlap.
    actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
    if (actual != MAP_FAILED) {
      next_mem_pos_ = reinterpret_cast<uintptr_t>(actual) + length;
      return actual;
    }

    if (4U * GB - ptr < length) {
      // Not enough memory until 4GB.
      if (first_run) {
        // Try another time from the bottom;
        ptr = LOW_MEM_START - kPageSize;
        first_run = false;
        continue;
      } else {
        // Second try failed.
        break;
      }
    }

    uintptr_t tail_ptr;

    // Check pages are free.
    bool safe = true;
    for (tail_ptr = ptr; tail_ptr < ptr + length; tail_ptr += kPageSize) {
      if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
        safe = false;
        break;
      } else {
        DCHECK_EQ(errno, ENOMEM);
      }
    }

    next_mem_pos_ = tail_ptr;  // Update early, as we break out once we have found and mapped a region.

    if (safe == true) {
      actual = TryMemMapLow4GB(reinterpret_cast<void*>(ptr), length, prot, flags, fd, offset);
      if (actual != MAP_FAILED) {
        return actual;
      }
    } else {
      // Skip over last page.
      ptr = tail_ptr;
    }
  }

  if (actual == MAP_FAILED) {
    LOG(ERROR) << "Could not find contiguous low-memory space.";
    errno = ENOMEM;
  }
  return actual;
#else
  UNUSED(length, prot, flags, fd, offset);
  LOG(FATAL) << "Unreachable";
  UNREACHABLE();
#endif
}

void* MemMap::MapInternal(void* addr,
                          size_t length,
                          int prot,
                          int flags,
                          int fd,
                          off_t offset,
                          bool low_4gb) {
#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(addr) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      ((reinterpret_cast<uintptr_t>(addr) + length) >> 32) != 0)) {
    LOG(ERROR) << "The requested address space (" << addr << ", "
               << reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) + length)
               << ") cannot fit in low_4gb";
    return MAP_FAILED;
  }
#else
  UNUSED(low_4gb);
#endif
  DCHECK_ALIGNED(length, kPageSize);
  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  void* actual = MAP_FAILED;
#if USE_ART_LOW_4G_ALLOCATOR
  // MAP_32BIT only available on x86_64.
  if (low_4gb && addr == nullptr) {
    // The linear-scan allocator has an issue when executable pages are denied (e.g., by selinux
    // policies in sensitive processes). In that case, the error code will still be ENOMEM. So
    // the allocator will scan all low 4GB twice, and still fail. This is *very* slow.
    //
    // To avoid the issue, always map non-executable first, and mprotect if necessary.
    const int orig_prot = prot;
    const int prot_non_exec = prot & ~PROT_EXEC;
    actual = MapInternalArtLow4GBAllocator(length, prot_non_exec, flags, fd, offset);

    if (actual == MAP_FAILED) {
      return MAP_FAILED;
    }

    // See if we need to remap with the executable bit now.
    if (orig_prot != prot_non_exec) {
      if (mprotect(actual, length, orig_prot) != 0) {
        PLOG(ERROR) << "Could not protect to requested prot: " << orig_prot;
        munmap(actual, length);
        errno = ENOMEM;
        return MAP_FAILED;
      }
    }
    return actual;
  }

  actual = mmap(addr, length, prot, flags, fd, offset);
#else
#if defined(__LP64__)
  if (low_4gb && addr == nullptr) {
    flags |= MAP_32BIT;
  }
#endif
  actual = mmap(addr, length, prot, flags, fd, offset);
#endif
  return actual;
}

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}

void MemMap::TryReadable() {
  if (base_begin_ == nullptr && base_size_ == 0) {
    return;
  }
  CHECK_NE(prot_ & PROT_READ, 0);
  volatile uint8_t* begin = reinterpret_cast<volatile uint8_t*>(base_begin_);
  volatile uint8_t* end = begin + base_size_;
  DCHECK(IsAligned<kPageSize>(begin));
  DCHECK(IsAligned<kPageSize>(end));
  // Read the first byte of each page. Use volatile to prevent the compiler from optimizing away the
  // reads.
  for (volatile uint8_t* ptr = begin; ptr < end; ptr += kPageSize) {
    // This read could fault if protection wasn't set correctly.
    uint8_t value = *ptr;
    UNUSED(value);
  }
}

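// A worked example for ZeroAndReleasePages() below (illustrative, assuming kPageSize == 4096
// and kMadviseZeroes): for address == 0x1008 and length == 2 * kPageSize, the partial head
// [0x1008, 0x2000) and partial tail [0x3000, 0x3008) are zeroed with std::fill, while the
// single whole page [0x2000, 0x3000) is released with madvise(MADV_DONTNEED).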
void ZeroAndReleasePages(void* address, size_t length) {
  if (length == 0) {
    return;
  }
  uint8_t* const mem_begin = reinterpret_cast<uint8_t*>(address);
  uint8_t* const mem_end = mem_begin + length;
  uint8_t* const page_begin = AlignUp(mem_begin, kPageSize);
  uint8_t* const page_end = AlignDown(mem_end, kPageSize);
  if (!kMadviseZeroes || page_begin >= page_end) {
    // No possible area to madvise.
    std::fill(mem_begin, mem_end, 0);
  } else {
    // Spans one or more pages.
    DCHECK_LE(mem_begin, page_begin);
    DCHECK_LE(page_begin, page_end);
    DCHECK_LE(page_end, mem_end);
    std::fill(mem_begin, page_begin, 0);
    CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
    std::fill(page_end, mem_end, 0);
  }
}

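// A worked example for AlignBy() below (illustrative addresses): aligning a map covering
// [0x40001000, 0x40401000) by size == 2 * MB trims it to [0x40200000, 0x40400000),
// munmapping the unaligned head and tail and re-registering the map in gMaps under its
// new base address.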
void MemMap::AlignBy(size_t size) {
  CHECK_EQ(begin_, base_begin_) << "Unsupported";
  CHECK_EQ(size_, base_size_) << "Unsupported";
  CHECK_GT(size, static_cast<size_t>(kPageSize));
  CHECK_ALIGNED(size, kPageSize);
  if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
      IsAlignedParam(base_size_, size)) {
    // Already aligned.
    return;
  }
  uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
  uint8_t* base_end = base_begin + base_size_;
  uint8_t* aligned_base_begin = AlignUp(base_begin, size);
  uint8_t* aligned_base_end = AlignDown(base_end, size);
  CHECK_LE(base_begin, aligned_base_begin);
  CHECK_LE(aligned_base_end, base_end);
  size_t aligned_base_size = aligned_base_end - aligned_base_begin;
  CHECK_LT(aligned_base_begin, aligned_base_end)
      << "base_begin = " << reinterpret_cast<void*>(base_begin)
      << " base_end = " << reinterpret_cast<void*>(base_end);
  CHECK_GE(aligned_base_size, size);
  // Unmap the unaligned parts.
  if (base_begin < aligned_base_begin) {
    MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
    CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
        << "base_begin=" << reinterpret_cast<void*>(base_begin)
        << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
  }
  if (aligned_base_end < base_end) {
    MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
    CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
        << "base_end=" << reinterpret_cast<void*>(base_end)
        << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
  }
  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
  base_begin_ = aligned_base_begin;
  base_size_ = aligned_base_size;
  begin_ = aligned_base_begin;
  size_ = aligned_base_size;
  DCHECK(gMaps != nullptr);
  if (base_begin < aligned_base_begin) {
    auto it = gMaps->find(base_begin);
    CHECK(it != gMaps->end()) << "MemMap not found";
    gMaps->erase(it);
    gMaps->insert(std::make_pair(base_begin_, this));
  }
}

}  // namespace art