/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <memory>

#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-inl.h"

namespace art {
namespace gc {
namespace space {

class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
    // Keep valgrind happy if there are any large objects, such as dex cache arrays, which aren't
    // freed since they are held live by the class linker.
    MutexLock mu(Thread::Current(), lock_);
    for (auto& m : large_objects_) {
      delete m.second.mem_map;
    }
  }

  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE {
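    // Layout of each allocation: [ redzone | object (num_bytes) | redzone ]. The caller only
    // sees the middle portion; both redzones are marked inaccessible so the memory tool reports
    // buffer underflows and overflows on the large object.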
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // The underlying allocation failed; do not try to place redzones around a null object.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        kMemoryToolRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes; // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);

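  // Each large object lives in its own anonymous mapping, so the space is discontinuous. Track
  // the lowest and highest mapped addresses so that Begin()/End() bound every large object.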
  if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
    begin_ = reinterpret_cast<uint8_t*>(obj);
  }
  end_ = std::max(end_, reinterpret_cast<uint8_t*>(obj) + allocation_size);

  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  MemMap* mem_map = it->second.mem_map;
  const size_t map_size = mem_map->BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete mem_map;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map->BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes + whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an allocation. Each allocation
// has an AllocationInfo which contains the size of the free block preceding it. Implemented in
// such a way that we can also find the iterator for any allocation info pointer.
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the allocation size in kAlignment units.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Return true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Change the object to be a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Finds and returns the next allocation info after this one.
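  // Note: there is one AllocationInfo slot per kAlignment unit of the space, so stepping
  // AlignSize() entries forward in the info array yields the info of the following block.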
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the previous free allocation info by using the prev_free_ member to figure out
  // where it is. This is only used for coalescing so we only need to be able to do it if the
  // previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns the size, in kAlignment units, of the free block immediately preceding this
  // allocation, or 0 if the previous block is not free.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before the block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Update the size of the free block prior to the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000; // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000; // If the large object is a zygote object.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote); // Combined flags for masking.
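  // Field layout: alloc_size_ keeps the block size in kAlignment units in its low bits and the
  // free/zygote flags in its top two bits; prev_free_ is likewise expressed in kAlignment units.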
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object, in kAlignment units.
  uint32_t alloc_size_;
};

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

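// free_blocks_ stores, for each free run, the AllocationInfo of the allocation that follows it.
// Ordering primarily by prev_free (the free run's size) lets Alloc() do a best-fit lookup with
// lower_bound, and the final address comparison keeps the ordering strict.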
inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin,
                                     size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
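  // One AllocationInfo bookkeeping slot is reserved for every kAlignment bytes of the space, so
  // an address maps to its info slot via its kAlignment-aligned offset from Begin().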
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(
      MemMap::MapAnonymous("large object free list space allocation info map",
                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                           false, false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
                                               << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true); // Mark as free.
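  // Coalesce in both directions: first merge with the free block preceding this allocation
  // (tracked via prev_free_), then with whatever follows, which is either another free block or
  // the free region at the end of the space.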
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
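  // temp_info is only a search key: free_blocks_ is ordered primarily by prev_free (the size of
  // the free run preceding each stored info), so lower_bound with prev_free == allocation_size
  // lands on the smallest free block that can hold the request.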
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the previous allocation info free space.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block; there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
      cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
    }
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits here, since as an optimization the
  // GC is not going to re-swap the bitmaps.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(range.first),
                                           reinterpret_cast<uintptr_t>(range.second),
                                           SweepCallback,
                                           &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

}  // namespace space
}  // namespace gc
}  // namespace art