1 /*
2  * Copyright (C) 2013 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
18 #define ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
19 
20 #include "space.h"
21 
22 #include <iostream>
23 #include <valgrind.h>
24 #include <memcheck/memcheck.h>
25 
26 namespace art {
27 namespace gc {
28 
29 namespace collector {
30   class MarkSweep;
31 }  // namespace collector
32 
33 namespace space {
34 
35 class ZygoteSpace;
36 
// TODO: Remove define macro
// Invokes `call args` (e.g. CHECK_MEMORY_CALL(mprotect, (base, size, prot), name)) and
// LOG(FATAL)s if the call returns non-zero. The non-zero return code is copied into
// errno first so that PLOG can append the human-readable error string; `# call`
// stringizes the function name for the message, and `what` identifies the subject.
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
46 
47 // A common parent of DlMallocSpace and RosAllocSpace.
48 class MallocSpace : public ContinuousMemMapAllocSpace {
49  public:
50   typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
51 
GetType()52   SpaceType GetType() const {
53     return kSpaceTypeMallocSpace;
54   }
55 
56   // Allocate num_bytes allowing the underlying space to grow.
57   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
58                                           size_t* bytes_allocated, size_t* usable_size) = 0;
59   // Allocate num_bytes without allowing the underlying space to grow.
60   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
61                                 size_t* usable_size) = 0;
62   // Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
63   // amount of the storage space that may be used by obj.
64   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
65   virtual size_t Free(Thread* self, mirror::Object* ptr)
66       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
67   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
68       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
69 
70 #ifndef NDEBUG
CheckMoreCoreForPrecondition()71   virtual void CheckMoreCoreForPrecondition() {}  // to be overridden in the debug build.
72 #else
CheckMoreCoreForPrecondition()73   void CheckMoreCoreForPrecondition() {}  // no-op in the non-debug build.
74 #endif
75 
76   void* MoreCore(intptr_t increment);
77 
78   // Hands unused pages back to the system.
79   virtual size_t Trim() = 0;
80 
81   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
82   // in use, indicated by num_bytes equaling zero.
83   virtual void Walk(WalkCallback callback, void* arg) = 0;
84 
85   // Returns the number of bytes that the space has currently obtained from the system. This is
86   // greater or equal to the amount of live data in the space.
87   virtual size_t GetFootprint() = 0;
88 
89   // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
90   virtual size_t GetFootprintLimit() = 0;
91 
92   // Set the maximum number of bytes that the heap is allowed to obtain from the system via
93   // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
94   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
95   virtual void SetFootprintLimit(size_t limit) = 0;
96 
97   // Removes the fork time growth limit on capacity, allowing the application to allocate up to the
98   // maximum reserved size of the heap.
ClearGrowthLimit()99   void ClearGrowthLimit() {
100     growth_limit_ = NonGrowthLimitCapacity();
101   }
102 
103   // Override capacity so that we only return the possibly limited capacity
Capacity()104   size_t Capacity() const {
105     return growth_limit_;
106   }
107 
108   // The total amount of memory reserved for the alloc space.
NonGrowthLimitCapacity()109   size_t NonGrowthLimitCapacity() const {
110     return GetMemMap()->Size();
111   }
112 
113   void Dump(std::ostream& os) const;
114 
115   void SetGrowthLimit(size_t growth_limit);
116 
117   virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
118                                       byte* begin, byte* end, byte* limit, size_t growth_limit,
119                                       bool can_move_objects) = 0;
120 
121   // Splits ourself into a zygote space and new malloc space which has our unused memory. When true,
122   // the low memory mode argument specifies that the heap wishes the created space to be more
123   // aggressive in releasing unused pages. Invalidates the space its called on.
124   ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode,
125                                  MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS;
126   virtual uint64_t GetBytesAllocated() = 0;
127   virtual uint64_t GetObjectsAllocated() = 0;
128 
129   // Returns the class of a recently freed object.
130   mirror::Class* FindRecentFreedObject(const mirror::Object* obj);
131 
CanMoveObjects()132   bool CanMoveObjects() const OVERRIDE {
133     return can_move_objects_;
134   }
135 
DisableMovingObjects()136   void DisableMovingObjects() {
137     can_move_objects_ = false;
138   }
139 
140  protected:
141   MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
142               byte* limit, size_t growth_limit, bool create_bitmaps, bool can_move_objects,
143               size_t starting_size, size_t initial_size);
144 
145   static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size,
146                               size_t* growth_limit, size_t* capacity, byte* requested_begin);
147 
148   // When true the low memory mode argument specifies that the heap wishes the created allocator to
149   // be more aggressive in releasing unused pages.
150   virtual void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
151                                 size_t maximum_size, bool low_memory_mode) = 0;
152 
153   virtual void RegisterRecentFree(mirror::Object* ptr)
154       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
155       EXCLUSIVE_LOCKS_REQUIRED(lock_);
156 
GetSweepCallback()157   virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
158     return &SweepCallback;
159   }
160 
161   // Recent allocation buffer.
162   static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0;
163   static constexpr size_t kRecentFreeMask = kRecentFreeCount - 1;
164   std::pair<const mirror::Object*, mirror::Class*> recent_freed_objects_[kRecentFreeCount];
165   size_t recent_free_pos_;
166 
167   static size_t bitmap_index_;
168 
169   // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
170   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
171 
172   // The capacity of the alloc space until such time that ClearGrowthLimit is called.
173   // The underlying mem_map_ controls the maximum size we allow the heap to grow to. The growth
174   // limit is a value <= to the mem_map_ capacity used for ergonomic reasons because of the zygote.
175   // Prior to forking the zygote the heap will have a maximally sized mem_map_ but the growth_limit_
176   // will be set to a lower value. The growth_limit_ is used as the capacity of the alloc_space_,
177   // however, capacity normally can't vary. In the case of the growth_limit_ it can be cleared
178   // one time by a call to ClearGrowthLimit.
179   size_t growth_limit_;
180 
181   // True if objects in the space are movable.
182   bool can_move_objects_;
183 
184   // Starting and initial sized, used when you reset the space.
185   const size_t starting_size_;
186   const size_t initial_size_;
187 
188  private:
189   static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
190       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
191 
192   DISALLOW_COPY_AND_ASSIGN(MallocSpace);
193 };
194 
195 }  // namespace space
196 }  // namespace gc
197 }  // namespace art
198 
199 #endif  // ART_RUNTIME_GC_SPACE_MALLOC_SPACE_H_
200