/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"

namespace art {
namespace gc {
namespace space {

inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

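// kForEvac selects the source of the allocation: false allocates from the
// mutator's current_region_, true allocates from evac_region_, the to-space
// region that is filled while evacuating objects during collection.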
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size,
                                                    size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      DCHECK(evac_region_ != nullptr);
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated it.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    if (!kForEvac) {
      // Retain sufficient free regions for full evacuation.
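      // Claiming one more region must leave at least half of all regions free,
      // so that an evacuation still has room to copy every live region into.
      // For example, with num_regions_ == 256 the allocation is refused once
      // 128 regions are already non-free.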
      if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
        return nullptr;
      }
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          r->SetNewlyAllocated();
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          current_region_ = r;
          return obj;
        }
      }
    } else {
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          evac_region_ = r;
          return obj;
        }
      }
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
                               bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
                                                  size_t* usable_size,
                                                  size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
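  // Lock-free bump-pointer allocation: advance top_ by num_bytes with a weak
  // CAS loop so that several threads can allocate from this region at once.
  // Returns null once the request no longer fits before end_.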
  Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = atomic_top->LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!atomic_top->CompareExchangeWeakSequentiallyConsistent(old_top, new_top));
  reinterpret_cast<Atomic<uint64_t>*>(&objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
  DCHECK_LE(atomic_top->LoadRelaxed(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return bytes;
}

template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type: " << kRegionType;
    }
  }
  return objects;
}

template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
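  // A large object is visited once through its first region; the tail regions
  // that complete it are skipped. In a normal region, objects sit back to back
  // and are stepped over with GetNextObject(); a null class pointer marks
  // memory past the last initialized object, so the walk of that region stops.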
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      if (obj->GetClass() != nullptr) {
        callback(obj, arg);
      }
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      while (pos < top) {
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
        if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
          callback(obj, arg);
          pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
        } else {
          break;
        }
      }
    }
  }
}

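// Returns the address just past obj, rounded up to kAlignment; objects in a
// region are packed contiguously at kAlignment granularity.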
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
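  // Number of whole regions needed to hold num_bytes, e.g. a request of
  // 2.5 * kRegionSize bytes occupies 3 regions (the DCHECKs below verify
  // that the rounding is tight).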
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
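    // Same policy as in AllocNonvirtual(), except that num_regs regions are
    // claimed at once.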
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
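  // Linear scan: test a window of num_regs consecutive regions starting at
  // `left`; if a non-free region is found inside the window, restart the
  // search just past it.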
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // right points to one region past the last free region.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(time_);
      ++num_non_free_regions_;
      first_reg->SetTop(first_reg->Begin() + num_bytes);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = num_bytes;
      if (usable_size != nullptr) {
        *usable_size = num_regs * kRegionSize;
      }
      *bytes_tl_bulk_allocated = num_bytes;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to the first non-free region. Resume the search at the
      // region after it.
      left = right + 1;
    }
  }
  return nullptr;
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_