/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MEM_MAP_H_
#define ART_RUNTIME_MEM_MAP_H_

#include "base/mutex.h"

#include <string>
#include <map>

#include <stddef.h>
#include <sys/mman.h>  // For the PROT_* and MAP_* constants.
#include <sys/types.h>

#include "base/allocator.h"
#include "globals.h"

namespace art {

#if defined(__LP64__) && (!defined(__x86_64__) || defined(__APPLE__))
#define USE_ART_LOW_4G_ALLOCATOR 1
#else
#define USE_ART_LOW_4G_ALLOCATOR 0
#endif

#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
#else
static constexpr bool kMadviseZeroes = false;
#endif

// Used to keep track of mmap segments.
//
// On 64b systems not supporting MAP_32BIT, the implementation of MemMap will do a linear scan
// for free pages. For security, the start of this scan should be randomized. This requires a
// dynamic initializer.
// For this to work, it is paramount that there are no other static initializers that access
// MemMap. Otherwise, calls might see uninitialized values.
class MemMap {
 public:
  // Request an anonymous region of length 'byte_count' and a requested base address.
  // Use null as the requested base address if you don't care.
  // "reuse" allows re-mapping an address range from an existing mapping.
  //
  // The word "anonymous" in this context means "not backed by a file". The supplied
  // 'ashmem_name' will be used -- on systems that support it -- to give the mapping
  // a name.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
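  //
  // Illustrative sketch only (the region name is arbitrary; kPageSize is assumed
  // to come from globals.h):
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapAnonymous("example region", nullptr, kPageSize,
  //                                      PROT_READ | PROT_WRITE,
  //                                      /* low_4gb */ false, /* reuse */ false,
  //                                      &error_msg);
  //   if (map == nullptr) {
  //     // error_msg describes why the mapping failed.
  //   }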
  static MemMap* MapAnonymous(const char* ashmem_name, uint8_t* addr, size_t byte_count, int prot,
                              bool low_4gb, bool reuse, std::string* error_msg);

  // Create placeholder for a region allocated by direct call to mmap.
  // This is useful when we do not have control over the code calling mmap,
  // but when we still want to keep track of it in the list.
  // The region is not considered to be owned and will not be unmapped.
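  //
  // Illustrative sketch only ('length' is a placeholder; assumes the region was
  // obtained by a direct mmap(2) call):
  //
  //   void* raw = mmap(nullptr, length, PROT_READ | PROT_WRITE,
  //                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  //   MemMap* placeholder =
  //       MemMap::MapDummy("direct mmap", reinterpret_cast<uint8_t*>(raw), length);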
  static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);

  // Map part of a file, taking care of non-page aligned offsets.  The
  // "start" offset is absolute, not relative.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
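  //
  // Illustrative sketch only (the file descriptor, size, and path are placeholders):
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapFile(file_size, PROT_READ, MAP_PRIVATE, fd,
  //                                 /* start */ 0, "/data/example.art", &error_msg);
  //   if (map == nullptr) {
  //     // error_msg describes why the mapping failed.
  //   }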
  static MemMap* MapFile(size_t byte_count, int prot, int flags, int fd, off_t start,
                         const char* filename, std::string* error_msg) {
    return MapFileAtAddress(
        nullptr, byte_count, prot, flags, fd, start, false, filename, error_msg);
  }

  // Map part of a file, taking care of non-page aligned offsets.  The
  // "start" offset is absolute, not relative. This version allows
  // requesting a specific address for the base of the
  // mapping. "reuse" allows us to create a view into an existing
  // mapping where we do not take ownership of the memory.
  //
  // On success, returns a MemMap instance.  On failure, returns null.
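  //
  // Illustrative sketch only (requested address, fd, size, and path are
  // placeholders; pass reuse == true only when the range lies inside an
  // existing reservation):
  //
  //   std::string error_msg;
  //   MemMap* map = MemMap::MapFileAtAddress(requested_begin, file_size,
  //                                          PROT_READ, MAP_PRIVATE, fd,
  //                                          /* start */ 0, /* reuse */ false,
  //                                          "/data/example.art", &error_msg);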
  static MemMap* MapFileAtAddress(uint8_t* addr, size_t byte_count, int prot, int flags, int fd,
                                  off_t start, bool reuse, const char* filename,
                                  std::string* error_msg);

  // Releases the memory mapping.
  ~MemMap() LOCKS_EXCLUDED(Locks::mem_maps_lock_);

  const std::string& GetName() const {
    return name_;
  }

  bool Protect(int prot);

  void MadviseDontNeedAndZero();

  int GetProtect() const {
    return prot_;
  }

  uint8_t* Begin() const {
    return begin_;
  }

  size_t Size() const {
    return size_;
  }

  // Resize the mem-map by unmapping pages at the end. Currently only supports shrinking.
  void SetSize(size_t new_size);

  uint8_t* End() const {
    return Begin() + Size();
  }

  void* BaseBegin() const {
    return base_begin_;
  }

  size_t BaseSize() const {
    return base_size_;
  }

  void* BaseEnd() const {
    return reinterpret_cast<uint8_t*>(BaseBegin()) + BaseSize();
  }

  bool HasAddress(const void* addr) const {
    return Begin() <= addr && addr < End();
  }

  // Unmap the pages at the end and remap them to create another memory map.
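  //
  // Illustrative sketch only (assumes 'map' is an existing page-aligned MemMap
  // larger than one page, and that kPageSize comes from globals.h):
  //
  //   std::string error_msg;
  //   MemMap* tail = map->RemapAtEnd(map->End() - kPageSize, "example tail",
  //                                  PROT_READ, &error_msg);
  //   // On success, 'map' now ends at the new end and 'tail' owns the final page.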
  MemMap* RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
                     std::string* error_msg);

  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
      LOCKS_EXCLUDED(Locks::mem_maps_lock_);
  static void DumpMaps(std::ostream& os, bool terse = false)
      LOCKS_EXCLUDED(Locks::mem_maps_lock_);

  typedef AllocationTrackingMultiMap<void*, MemMap*, kAllocatorTagMaps> Maps;

  static void Init() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
  static void Shutdown() LOCKS_EXCLUDED(Locks::mem_maps_lock_);
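  // Typical lifecycle (sketch of the intended ordering): Init() runs once, early
  // during runtime startup and before any other MemMap use; Shutdown() runs once
  // at teardown.
  //
  //   MemMap::Init();
  //   ...  // Create and use MemMap instances.
  //   MemMap::Shutdown();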

 private:
  MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin, size_t base_size,
         int prot, bool reuse) LOCKS_EXCLUDED(Locks::mem_maps_lock_);

  static void DumpMapsLocked(std::ostream& os, bool terse)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
  static bool HasMemMap(MemMap* map)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);
  static MemMap* GetLargestMemMapAt(void* address)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mem_maps_lock_);

  const std::string name_;
  uint8_t* const begin_;  // Start of data.
  size_t size_;  // Length of data.

  void* const base_begin_;  // Page-aligned base address.
  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (e.g. Zygote).
  int prot_;  // Protection of the map.

  // When reuse_ is true, this is just a view of an existing mapping
  // and we do not take ownership and are not responsible for
  // unmapping.
  const bool reuse_;

#if USE_ART_LOW_4G_ALLOCATOR
  static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
#endif

  // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (e.g. ElfMap::Load()).
  static Maps* maps_ GUARDED_BY(Locks::mem_maps_lock_);

  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
};

std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps);

}  // namespace art

#endif  // ART_RUNTIME_MEM_MAP_H_