/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
18 #define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
19 
20 #include "malloc_space.h"
21 #include "space.h"
22 
23 namespace art {
24 namespace gc {
25 
26 namespace collector {
27   class MarkSweep;
28 }  // namespace collector
29 
30 namespace space {
31 
32 // An alloc space is a space where objects may be allocated and garbage collected. Not final as may
33 // be overridden by a ValgrindMallocSpace.
34 class DlMallocSpace : public MallocSpace {
35  public:
36   // Create a DlMallocSpace from an existing mem_map.
37   static DlMallocSpace* CreateFromMemMap(MemMap* mem_map, const std::string& name,
38                                          size_t starting_size, size_t initial_size,
39                                          size_t growth_limit, size_t capacity,
40                                          bool can_move_objects);
41 
42   // Create a DlMallocSpace with the requested sizes. The requested
43   // base address is not guaranteed to be granted, if it is required,
44   // the caller should call Begin on the returned space to confirm the
45   // request was granted.
46   static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
47                                size_t capacity, byte* requested_begin, bool can_move_objects);
48 
49   // Virtual to allow ValgrindMallocSpace to intercept.
50   virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
51                                           size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
52   // Virtual to allow ValgrindMallocSpace to intercept.
Alloc(Thread * self,size_t num_bytes,size_t * bytes_allocated,size_t * usable_size)53   virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
54                         size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_) {
55     return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
56   }
57   // Virtual to allow ValgrindMallocSpace to intercept.
AllocationSize(mirror::Object * obj,size_t * usable_size)58   virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
59     return AllocationSizeNonvirtual(obj, usable_size);
60   }
61   // Virtual to allow ValgrindMallocSpace to intercept.
62   virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
63       LOCKS_EXCLUDED(lock_)
64       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
65   // Virtual to allow ValgrindMallocSpace to intercept.
66   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
67       LOCKS_EXCLUDED(lock_)
68       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
69 
70   // DlMallocSpaces don't have thread local state.
RevokeThreadLocalBuffers(art::Thread *)71   void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
72   }
RevokeAllThreadLocalBuffers()73   void RevokeAllThreadLocalBuffers() OVERRIDE {
74   }
75 
76   // Faster non-virtual allocation path.
77   mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
78                                   size_t* usable_size) LOCKS_EXCLUDED(lock_);
79 
80   // Faster non-virtual allocation size path.
81   size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);
82 
83 #ifndef NDEBUG
84   // Override only in the debug build.
85   void CheckMoreCoreForPrecondition();
86 #endif
87 
GetMspace()88   void* GetMspace() const {
89     return mspace_;
90   }
91 
92   size_t Trim() OVERRIDE;
93 
94   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
95   // in use, indicated by num_bytes equaling zero.
96   void Walk(WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
97 
98   // Returns the number of bytes that the space has currently obtained from the system. This is
99   // greater or equal to the amount of live data in the space.
100   size_t GetFootprint() OVERRIDE;
101 
102   // Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
103   size_t GetFootprintLimit() OVERRIDE;
104 
105   // Set the maximum number of bytes that the heap is allowed to obtain from the system via
106   // MoreCore. Note this is used to stop the mspace growing beyond the limit to Capacity. When
107   // allocations fail we GC before increasing the footprint limit and allowing the mspace to grow.
108   void SetFootprintLimit(size_t limit) OVERRIDE;
109 
110   MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
111                               byte* begin, byte* end, byte* limit, size_t growth_limit,
112                               bool can_move_objects);
113 
114   uint64_t GetBytesAllocated() OVERRIDE;
115   uint64_t GetObjectsAllocated() OVERRIDE;
116 
117   virtual void Clear() OVERRIDE;
118 
IsDlMallocSpace()119   bool IsDlMallocSpace() const OVERRIDE {
120     return true;
121   }
122 
AsDlMallocSpace()123   DlMallocSpace* AsDlMallocSpace() OVERRIDE {
124     return this;
125   }
126 
127   void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
128       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
129 
130  protected:
131   DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
132                 byte* limit, size_t growth_limit, bool can_move_objects, size_t starting_size,
133                 size_t initial_size);
134 
135  private:
136   mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
137                                            size_t* usable_size)
138       EXCLUSIVE_LOCKS_REQUIRED(lock_);
139 
CreateAllocator(void * base,size_t morecore_start,size_t initial_size,size_t,bool)140   void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
141                         size_t /*maximum_size*/, bool /*low_memory_mode*/) OVERRIDE {
142     return CreateMspace(base, morecore_start, initial_size);
143   }
144   static void* CreateMspace(void* base, size_t morecore_start, size_t initial_size);
145 
146   // The boundary tag overhead.
147   static const size_t kChunkOverhead = kWordSize;
148 
149   // Underlying malloc space.
150   void* mspace_;
151 
152   friend class collector::MarkSweep;
153 
154   DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
155 };
156 
157 }  // namespace space
158 }  // namespace gc
159 }  // namespace art
160 
161 #endif  // ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_H_
162