/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_
#define ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_

#include <stdint.h>

#include <iosfwd>
#include <memory>
#include <string>

#include "base/logging.h"
#include "base/mutex.h"
#include "gc_root.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "read_barrier_option.h"

namespace art {

class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

class MemMap;
/*
 * Maintain a table of indirect references. Used for local/global JNI
 * references.
 *
 * The table contains object references that are part of the GC root set.
 * When an object is added we return an IndirectRef that is not a valid
 * pointer but can be used to find the original value in O(1) time.
 * Conversions to and from indirect references are performed on upcalls
 * and downcalls, so they need to be very fast.
 *
 * To be efficient for JNI local variable storage, we need to provide
 * operations that allow us to operate on segments of the table, where
 * segments are pushed and popped as if on a stack. For example, deletion
 * of an entry should only succeed if it appears in the current segment,
 * and we want to be able to strip off the current segment quickly when
 * a method returns. Additions to the table must be made in the current
 * segment even if space is available in an earlier area.
 *
 * A new segment is created when we call into native code from interpreted
 * code, or when we handle the JNI PushLocalFrame function.
 *
 * The GC must be able to scan the entire table quickly.
 *
 * In summary, these must be very fast:
 *  - adding or removing a segment
 *  - adding references to a new segment
 *  - converting an indirect reference back to an Object
 * These can be a little slower, but must still be pretty quick:
 *  - adding references to a "mature" segment
 *  - removing individual references
 *  - scanning the entire table straight through
 *
 * If there's more than one segment, we don't guarantee that the table
 * will fill completely before we fail due to lack of space. We do ensure
 * that the current segment will pack tightly, which should satisfy JNI
 * requirements (e.g. EnsureLocalCapacity).
 *
 * To make everything fit nicely in 32-bit integers, the maximum size of
 * the table is capped at 64K.
 *
 * Only SynchronizedGet is synchronized.
 */
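
/*
 * A minimal sketch of the segment discipline described above (illustrative,
 * not part of the original header). Conceptually, each down-call does:
 *
 *   cookie = PushSegment();    // current top becomes the new bottom
 *   ref = Add(cookie, obj);    // lands in the new (current) segment
 *   Remove(cookie, oldRef);    // fails: oldRef is below the segment bottom
 *   PopSegment(cookie);        // strips ref and everything above it
 *
 * PushSegment()/PopSegment() are hypothetical names; the real interface is
 * GetSegmentState()/SetSegmentState() on IndirectReferenceTable below.
 */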

/*
 * Indirect reference definition. This must be interchangeable with JNI's
 * jobject, and it's convenient to let null be null, so we use void*.
 *
 * We need a 16-bit table index and a 2-bit reference type (global, local,
 * weak global). Real object pointers will have zeroes in the low 2 or 3
 * bits (4- or 8-byte alignment), so it's useful to put the ref type
 * in the low bits and reserve zero as an invalid value.
 *
 * The remaining 14 bits can be used to detect stale indirect references.
 * For example, if objects don't move, we can use a hash of the original
 * Object* to make sure the entry hasn't been re-used. (If the Object*
 * we find there doesn't match because of heap movement, we could do a
 * secondary check on the preserved hash value; this implies that creating
 * a global/local ref queries the hash value and forces it to be saved.)
 *
 * A more rigorous approach would be to put a serial number in the extra
 * bits, and keep a copy of the serial number in a parallel table. This is
 * easier when objects can move, but requires 2x the memory and additional
 * memory accesses on add/get. It will catch additional problems, e.g.:
 * create iref1 for obj, delete iref1, create iref2 for same obj, lookup
 * iref1. A pattern based on object bits will miss this.
 */
typedef void* IndirectRef;

/*
 * Indirect reference kind, used as the two low bits of IndirectRef.
 *
 * For convenience these match up with enum jobjectRefType from jni.h.
 */
enum IndirectRefKind {
  kHandleScopeOrInvalid = 0,  // <<stack indirect reference table or invalid reference>>
  kLocal = 1,  // <<local reference>>
  kGlobal = 2,  // <<global reference>>
  kWeakGlobal = 3  // <<weak global reference>>
};
std::ostream& operator<<(std::ostream& os, const IndirectRefKind& rhs);

/*
 * Determine what kind of indirect reference this is.
 */
static inline IndirectRefKind GetIndirectRefKind(IndirectRef iref) {
  return static_cast<IndirectRefKind>(reinterpret_cast<uintptr_t>(iref) & 0x03);
}
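
// Illustrative sketch, not part of the original header: decoding all three
// fields of an IndirectRef according to the layout described above (2 kind
// bits, 16 index bits, serial chunk in the high bits). The shifts and masks
// mirror GetIndirectRefKind() above and ExtractIndex()/ToIndirectRef() in
// IndirectReferenceTable below; "ExampleDecodeIndirectRef" is a hypothetical
// name used only for illustration.
static inline void ExampleDecodeIndirectRef(IndirectRef iref,
                                            IndirectRefKind* kind,
                                            uint32_t* index,
                                            uint32_t* serial) {
  uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
  *kind = static_cast<IndirectRefKind>(uref & 0x03);             // low 2 bits
  *index = static_cast<uint32_t>((uref >> 2) & 0xffff);          // next 16 bits
  *serial = static_cast<uint32_t>(uref >> 20);                   // as written by ToIndirectRef()
}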

/* use as initial value for "cookie", and when table has only one segment */
static const uint32_t IRT_FIRST_SEGMENT = 0;

/*
 * Table definition.
 *
 * For the global reference table, the expected common operations are
 * adding a new entry and removing a recently-added entry (usually the
 * most-recently-added entry). For JNI local references, the common
 * operations are adding a new entry and removing an entire table segment.
 *
 * If "alloc_entries_" is not equal to "max_entries_", the table may expand
 * when entries are added, which means the memory may move. If you want
 * to keep pointers into "table" rather than offsets, you must use a
 * fixed-size table.
 *
 * If we delete entries from the middle of the list, we will be left with
 * "holes". We track the number of holes so that, when adding new elements,
 * we can quickly decide to do a trivial append or go slot-hunting.
 *
 * When the top-most entry is removed, any holes immediately below it are
 * also removed. Thus, deletion of an entry may reduce "topIndex" by more
 * than one.
 *
 * To get the desired behavior for JNI locals, we need to know the bottom
 * and top of the current "segment". The top is managed internally, and
 * the bottom is passed in as a function argument. When we call a native
 * method or push a local frame, the current top index gets pushed on, and
 * serves as the new bottom. When we pop a frame off, the value from the
 * stack becomes the new top index, and the value stored in the previous
 * frame becomes the new bottom.
 *
 * To avoid having to re-scan the table after a pop, we want to push the
 * number of holes in the table onto the stack. Because of our 64K-entry
 * cap, we can combine the two into a single unsigned 32-bit value.
 * Instead of a "bottom" argument we take a "cookie", which includes the
 * bottom index and the count of holes below the bottom.
 *
 * Common alternative implementation: make IndirectRef a pointer to the
 * actual reference slot. Instead of getting a table and doing a lookup,
 * the lookup can be done instantly. Operations like determining the
 * type and deleting the reference are more expensive because the table
 * must be hunted for (i.e. you have to do a pointer comparison to see
 * which table it's in), you can't move the table when expanding it (so
 * realloc() is out), and tricks like serial number checking to detect
 * stale references aren't possible (though we may be able to get similar
 * benefits with other approaches).
 *
 * TODO: consider a "lastDeleteIndex" for quick hole-filling when an
 * add immediately follows a delete; must invalidate after segment pop
 * (which could increase the cost/complexity of method call/return).
 * Might be worth only using it for JNI globals.
 *
 * TODO: may want completely different add/remove algorithms for global
 * and local refs to improve performance. A large circular buffer might
 * reduce the amortized cost of adding global references.
 *
 */
union IRTSegmentState {
  uint32_t all;
  struct {
    uint32_t topIndex:16;  /* index of first unused entry */
    uint32_t numHoles:16;  /* #of holes in entire table */
  } parts;
};
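
// Illustrative sketch, not part of the original header: a "cookie" is just
// the packed IRTSegmentState, so pushing and popping a segment is a single
// 32-bit copy of both fields at once. "ExampleSegmentPushPop" is a
// hypothetical name used only for illustration.
static inline void ExampleSegmentPushPop(IRTSegmentState* state) {
  uint32_t cookie = state->all;  // push: save bottom index and hole count together
  // ... adds/removes now operate on the new segment above "cookie" ...
  state->all = cookie;           // pop: restore both fields in one store
}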

// Try to choose kIRTPrevCount so that sizeof(IrtEntry) is a power of 2.
// Each entry contains several reference slots but only one is active at a
// time; this helps us detect use-after-free errors, since the serial stored
// in a stale indirect ref won't match the entry's current serial.
static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
class IrtEntry {
 public:
  void Add(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Rotate to the next slot, so that a stale IndirectRef built with the
    // previous serial no longer matches this entry.
    ++serial_;
    if (serial_ == kIRTPrevCount) {
      serial_ = 0;
    }
    references_[serial_] = GcRoot<mirror::Object>(obj);
  }
  GcRoot<mirror::Object>* GetReference() {
    DCHECK_LT(serial_, kIRTPrevCount);
    return &references_[serial_];
  }
  uint32_t GetSerial() const {
    return serial_;
  }
  void SetReference(mirror::Object* obj) {
    DCHECK_LT(serial_, kIRTPrevCount);
    references_[serial_] = GcRoot<mirror::Object>(obj);
  }

 private:
  uint32_t serial_;
  GcRoot<mirror::Object> references_[kIRTPrevCount];
};
static_assert(sizeof(IrtEntry) == (1 + kIRTPrevCount) * sizeof(uint32_t),
              "Unexpected sizeof(IrtEntry)");

class IrtIterator {
 public:
  explicit IrtIterator(IrtEntry* table, size_t i, size_t capacity)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : table_(table), i_(i), capacity_(capacity) {
  }

  IrtIterator& operator++() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ++i_;
    return *this;
  }

  GcRoot<mirror::Object>* operator*() {
    // This does not have a read barrier as this is used to visit roots.
    return table_[i_].GetReference();
  }

  bool equals(const IrtIterator& rhs) const {
    return (i_ == rhs.i_ && table_ == rhs.table_);
  }

 private:
  IrtEntry* const table_;
  size_t i_;
  const size_t capacity_;
};

bool inline operator==(const IrtIterator& lhs, const IrtIterator& rhs) {
  return lhs.equals(rhs);
}

bool inline operator!=(const IrtIterator& lhs, const IrtIterator& rhs) {
  return !lhs.equals(rhs);
}
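
// Illustrative sketch, not part of the original header: walking every slot
// the way the GC's root visitor does, using the begin()/end() iterators
// produced by IndirectReferenceTable (declared below). "table" is a
// hypothetical instance:
//
//   for (auto it = table.begin(), end = table.end(); it != end; ++it) {
//     GcRoot<mirror::Object>* root = *it;  // no read barrier: root visiting only
//     // ... visit or update *root ...
//   }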

class IndirectReferenceTable {
 public:
  // WARNING: When using with abort_on_error = false, the object may be in a partially
  //          initialized state. Use IsValid() to check.
  IndirectReferenceTable(size_t initialCount, size_t maxCount, IndirectRefKind kind,
                         bool abort_on_error = true);

  ~IndirectReferenceTable();

  bool IsValid() const;

  /*
   * Add a new entry. "obj" must be a valid non-nullptr object reference.
   *
   * Returns nullptr if the table is full (max entries reached, or alloc
   * failed during expansion).
   */
  IndirectRef Add(uint32_t cookie, mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Given an IndirectRef in the table, return the Object it refers to.
   *
   * Returns kInvalidIndirectRefObject if iref is invalid.
   */
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* Get(IndirectRef iref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Synchronized get which reads a reference, acquiring a lock if necessary.
  template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  mirror::Object* SynchronizedGet(Thread* /*self*/, ReaderWriterMutex* /*mutex*/,
                                  IndirectRef iref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return Get<kReadBarrierOption>(iref);
  }

  /*
   * Update an existing entry.
   *
   * Updates an existing indirect reference to point to a new object.
   */
  void Update(IndirectRef iref, mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Remove an existing entry.
   *
   * If the entry is not between the current top index and the bottom index
   * specified by the cookie, we don't remove anything. This is the behavior
   * required by JNI's DeleteLocalRef function.
   *
   * Returns "false" if nothing was removed.
   */
  bool Remove(uint32_t cookie, IndirectRef iref);

  void AssertEmpty();

  void Dump(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Return the #of entries in the entire table. This includes holes, and
   * so may be larger than the actual number of "live" entries.
   */
  size_t Capacity() const {
    return segment_state_.parts.topIndex;
  }

  // Note IrtIterator does not have a read barrier as it's used to visit roots.
  IrtIterator begin() {
    return IrtIterator(table_, 0, Capacity());
  }

  IrtIterator end() {
    return IrtIterator(table_, Capacity(), Capacity());
  }

  void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetSegmentState() const {
    return segment_state_.all;
  }

  void SetSegmentState(uint32_t new_state) {
    segment_state_.all = new_state;
  }

  static Offset SegmentStateOffset() {
    return Offset(OFFSETOF_MEMBER(IndirectReferenceTable, segment_state_));
  }

  // Release pages past the end of the table that may have previously held references.
  void Trim() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Extract the table index from an indirect reference.
  static uint32_t ExtractIndex(IndirectRef iref) {
    uintptr_t uref = reinterpret_cast<uintptr_t>(iref);
    return (uref >> 2) & 0xffff;
  }

  /*
   * The object pointer itself is subject to relocation in some GC
   * implementations, so we shouldn't really be using it here.
   */
  IndirectRef ToIndirectRef(uint32_t tableIndex) const {
    DCHECK_LT(tableIndex, 65536U);
    uint32_t serialChunk = table_[tableIndex].GetSerial();
    uintptr_t uref = (serialChunk << 20) | (tableIndex << 2) | kind_;
    return reinterpret_cast<IndirectRef>(uref);
  }

  // Abort if check_jni is not enabled.
  static void AbortIfNoCheckJNI();

  /* extra debugging checks */
  bool GetChecked(IndirectRef) const;
  bool CheckEntry(const char*, IndirectRef, int) const;

  /* semi-public - read/write by jni down calls */
  IRTSegmentState segment_state_;

  // Mem map where we store the indirect refs.
  std::unique_ptr<MemMap> table_mem_map_;
  // bottom of the stack. Do not directly access the object references
  // in this as they are roots. Use Get() that has a read barrier.
  IrtEntry* table_;
  /* bit mask, ORed into all irefs */
  const IndirectRefKind kind_;
  /* max #of entries allowed */
  const size_t max_entries_;
};
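
// Illustrative sketch, not part of the original header: the cookie protocol a
// JNI down-call follows, under the segment discipline described at the top of
// this file. "ExampleLocalRefRoundTrip", "table" and "obj" are hypothetical;
// error handling is elided.
static inline void ExampleLocalRefRoundTrip(IndirectReferenceTable* table,
                                            mirror::Object* obj)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  uint32_t cookie = table->GetSegmentState();  // push: current state becomes the cookie
  IndirectRef ref = table->Add(cookie, obj);   // added within the current segment
  mirror::Object* resolved = table->Get(ref);  // O(1) conversion back to the Object
  DCHECK_EQ(resolved, obj);
  table->Remove(cookie, ref);      // succeeds: ref lies above the cookie's bottom
  table->SetSegmentState(cookie);  // pop: strip the segment in one store
}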

}  // namespace art

#endif  // ART_RUNTIME_INDIRECT_REFERENCE_TABLE_H_