/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_

#include <sys/mman.h>  // For the PROT_* and MAP_* constants.

#include <algorithm>
#include <memory>
#include <string>

#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
#include "mem_map.h"
#include "stack_reference.h"

namespace art {
namespace gc {
namespace accounting {

// Internal representation is StackReference<T>, so this only works with mirror::Object or its
// subclasses.
template <typename T>
class AtomicStack {
 public:
  class ObjectComparator {
   public:
    // These two comparators are for std::binary_search.
    bool operator()(const T* a, const StackReference<T>& b) const NO_THREAD_SAFETY_ANALYSIS {
      return a < b.AsMirrorPtr();
    }
    bool operator()(const StackReference<T>& a, const T* b) const NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b;
    }
    // This comparator is for std::sort.
    bool operator()(const StackReference<T>& a, const StackReference<T>& b) const
        NO_THREAD_SAFETY_ANALYSIS {
      return a.AsMirrorPtr() < b.AsMirrorPtr();
    }
  };

  // Capacity is how many elements we can store in the stack.
  static AtomicStack* Create(const std::string& name, size_t growth_limit, size_t capacity) {
    std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, growth_limit, capacity));
    mark_stack->Init();
    return mark_stack.release();
  }
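
  // Illustrative usage sketch (comment only, not part of the API; `obj`, `growth_limit` and
  // `capacity` are hypothetical caller-side values):
  //
  //   AtomicStack<mirror::Object>* stack =
  //       AtomicStack<mirror::Object>::Create("mark stack", growth_limit, capacity);
  //   if (!stack->AtomicPushBack(obj)) {
  //     // Overflowed the growth limit; the caller must handle this, e.g. by draining the stack.
  //   }
  //   while (!stack->IsEmpty()) {
  //     mirror::Object* popped = stack->PopBack();
  //     // ... process popped ...
  //   }
  //   delete stack;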

  ~AtomicStack() {}

  void Reset() {
    DCHECK(mem_map_.get() != nullptr);
    DCHECK(begin_ != nullptr);
    front_index_.StoreRelaxed(0);
    back_index_.StoreRelaxed(0);
    debug_is_sorted_ = true;
    mem_map_->MadviseDontNeedAndZero();
  }

  // Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
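  // (That is, a pop followed by a push can return back_index_ to a previously observed value
  // while the slot contents have changed, so a CAS on the index alone cannot detect the update.)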

  // Returns false if we overflowed the stack.
  bool AtomicPushBackIgnoreGrowthLimit(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, capacity_);
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    return AtomicPushBackInternal(value, growth_limit_);
  }

  // Atomically bump the back index by the given number of
  // slots. Returns false if we overflowed the stack.
  bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
                      StackReference<T>** end_address)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    int32_t new_index;
    do {
      index = back_index_.LoadRelaxed();
      new_index = index + num_slots;
      if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
    *start_address = begin_ + index;
    *end_address = begin_ + new_index;
    if (kIsDebugBuild) {
      // Sanity check that the memory is zero.
      for (int32_t i = index; i < new_index; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr))
            << "i=" << i << " index=" << index << " new_index=" << new_index;
      }
    }
    return true;
  }
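
  // Sketch of how a caller might use AtomicBumpBack to reserve a batch of slots and fill them
  // without further synchronization (comment only; `stack` and `refs` are hypothetical):
  //
  //   StackReference<mirror::Object>* start;
  //   StackReference<mirror::Object>* end;
  //   if (stack->AtomicBumpBack(refs.size(), &start, &end)) {
  //     for (size_t i = 0; start + i != end; ++i) {
  //       (start + i)->Assign(refs[i]);
  //     }
  //   }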

  void AssertAllZero() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      for (size_t i = 0; i < capacity_; ++i) {
        DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
      }
    }
  }

  void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    const int32_t index = back_index_.LoadRelaxed();
    DCHECK_LT(static_cast<size_t>(index), growth_limit_);
    back_index_.StoreRelaxed(index + 1);
    begin_[index].Assign(value);
  }

  T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
    // Decrement the back index non-atomically.
    back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
    return begin_[back_index_.LoadRelaxed()].AsMirrorPtr();
  }

  // Take an item from the front of the stack.
  T* PopFront() {
    int32_t index = front_index_.LoadRelaxed();
    DCHECK_LT(index, back_index_.LoadRelaxed());
    front_index_.StoreRelaxed(index + 1);
    return begin_[index].AsMirrorPtr();
  }
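
  // Note: PushBack, PopBack and PopFront above update the indices non-atomically; they assume
  // the caller has exclusive access to the stack.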

  // Pop a number of elements.
  void PopBackCount(int32_t n) {
    DCHECK_GE(Size(), static_cast<size_t>(n));
    back_index_.FetchAndSubSequentiallyConsistent(n);
  }

  bool IsEmpty() const {
    return Size() == 0;
  }

  bool IsFull() const {
    return Size() == growth_limit_;
  }

  size_t Size() const {
    DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
    return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
  }

  StackReference<T>* Begin() const {
    return begin_ + front_index_.LoadRelaxed();
  }
  StackReference<T>* End() const {
    return begin_ + back_index_.LoadRelaxed();
  }

  size_t Capacity() const {
    return capacity_;
  }

  // Will clear the stack.
  void Resize(size_t new_capacity) {
    capacity_ = new_capacity;
    growth_limit_ = new_capacity;
    Init();
  }

  void Sort() {
    int32_t start_back_index = back_index_.LoadRelaxed();
    int32_t start_front_index = front_index_.LoadRelaxed();
    std::sort(Begin(), End(), ObjectComparator());
    CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
    CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
    if (kIsDebugBuild) {
      debug_is_sorted_ = true;
    }
  }

  bool ContainsSorted(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(debug_is_sorted_);
    return std::binary_search(Begin(), End(), value, ObjectComparator());
  }
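
  // Example of the intended Sort/ContainsSorted pairing (comment only; assumes the caller has
  // exclusive access to the stack while sorting and searching):
  //
  //   stack->Sort();
  //   if (stack->ContainsSorted(obj)) {
  //     // obj is on the stack.
  //   }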

  bool Contains(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
    for (auto cur = Begin(), end = End(); cur != end; ++cur) {
      if (cur->AsMirrorPtr() == value) {
        return true;
      }
    }
    return false;
  }

 private:
  AtomicStack(const std::string& name, size_t growth_limit, size_t capacity)
      : name_(name),
        back_index_(0),
        front_index_(0),
        begin_(nullptr),
        growth_limit_(growth_limit),
        capacity_(capacity),
        debug_is_sorted_(true) {
  }

  // Returns false if we overflowed the stack.
  bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kIsDebugBuild) {
      debug_is_sorted_ = false;
    }
    int32_t index;
    do {
      index = back_index_.LoadRelaxed();
      if (UNLIKELY(static_cast<size_t>(index) >= limit)) {
        // Stack overflow.
        return false;
      }
    } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
    begin_[index].Assign(value);
    return true;
  }

  // Map the backing memory (capacity_ elements) and reset the stack.
  void Init() {
    std::string error_msg;
    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), nullptr, capacity_ * sizeof(begin_[0]),
                                        PROT_READ | PROT_WRITE, false, false, &error_msg));
    CHECK(mem_map_.get() != nullptr) << "couldn't allocate mark stack.\n" << error_msg;
    uint8_t* addr = mem_map_->Begin();
    CHECK(addr != nullptr);
    debug_is_sorted_ = true;
    begin_ = reinterpret_cast<StackReference<T>*>(addr);
    Reset();
  }

  // Name of the mark stack.
  std::string name_;
  // Memory mapping of the atomic stack.
  std::unique_ptr<MemMap> mem_map_;
  // Back index (index after the last element pushed).
  AtomicInteger back_index_;
  // Front index, used for implementing PopFront.
  AtomicInteger front_index_;
  // Base of the atomic stack.
  StackReference<T>* begin_;
  // Current maximum which we can push back to, must be <= capacity_.
  size_t growth_limit_;
  // Maximum number of elements.
  size_t capacity_;
  // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
  bool debug_is_sorted_;

  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};

typedef AtomicStack<mirror::Object> ObjectStack;

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_