/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
18 #define ART_RUNTIME_GC_REFERENCE_QUEUE_H_
19 
20 #include <iosfwd>
21 #include <string>
22 #include <vector>
23 
24 #include "base/atomic.h"
25 #include "base/locks.h"
26 #include "base/macros.h"
27 #include "base/timing_logger.h"
28 #include "jni.h"
29 #include "obj_ptr.h"
30 #include "offsets.h"
31 #include "runtime_globals.h"
32 #include "thread_pool.h"
33 
34 namespace art HIDDEN {
35 
36 class Mutex;
37 
38 namespace mirror {
39 class Reference;
40 }  // namespace mirror
41 
42 class IsMarkedVisitor;
43 class MarkObjectVisitor;
44 
45 namespace gc {
46 
47 namespace collector {
48 class GarbageCollector;
49 }  // namespace collector
50 
51 class Heap;
52 
// Counters returned by ReferenceQueue::EnqueueFinalizerReferences(): how many finalizer
// references were examined (num_refs_) and how many were actually enqueued (num_enqueued_).
struct FinalizerStats {
  // NOTE: counts arrive as size_t but are stored as uint32_t; values are assumed to fit.
  FinalizerStats(size_t num_refs, size_t num_enqueued)
      : num_refs_(num_refs), num_enqueued_(num_enqueued) {}
  const uint32_t num_refs_;      // Total finalizer references processed.
  const uint32_t num_enqueued_;  // Subset that was enqueued.
};
59 
// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
// objects.
64 class ReferenceQueue {
65  public:
66   explicit ReferenceQueue(Mutex* lock);
67 
68   // Enqueue a reference if it is unprocessed. Thread safe to call from multiple
69   // threads since it uses a lock to avoid a race between checking for the references presence and
70   // adding it.
71   void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
72       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
73 
74   // Enqueue a reference. The reference must be unprocessed.
75   // Not thread safe, used when mutators are paused to minimize lock overhead.
76   void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);
77 
78   // Dequeue a reference from the queue and return that dequeued reference.
79   // Call DisableReadBarrierForReference for the reference that's returned from this function.
80   ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
81 
82   // If applicable, disable the read barrier for the reference after its referent is handled (see
83   // ConcurrentCopying::ProcessMarkStackRef.) This must be called for a reference that's dequeued
84   // from pending queue (DequeuePendingReference). 'order' is expected to be
85   // 'release' if called outside 'weak-ref access disabled' critical section.
86   // Otherwise 'relaxed' order will suffice.
87   void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref, std::memory_order order)
88       REQUIRES_SHARED(Locks::mutator_lock_);
89 
90   // Enqueues finalizer references with white referents.  White referents are blackened, moved to
91   // the zombie field, and the referent field is cleared.
92   FinalizerStats EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
93                                   collector::GarbageCollector* collector)
94       REQUIRES_SHARED(Locks::mutator_lock_);
95 
96   // Walks the reference list marking and dequeuing any references subject to the reference
97   // clearing policy.  References with a black referent are removed from the list.  References
98   // with white referents biased toward saving are blackened and also removed from the list.
99   // Returns the number of non-null soft references. May be called concurrently with
100   // AtomicEnqueueIfNotEnqueued().
101   uint32_t ForwardSoftReferences(MarkObjectVisitor* visitor)
102       REQUIRES(!*lock_)
103       REQUIRES_SHARED(Locks::mutator_lock_);
104 
105   // Unlink the reference list clearing references objects with white referents. Cleared references
106   // registered to a reference queue are scheduled for appending by the heap worker thread.
107   void ClearWhiteReferences(ReferenceQueue* cleared_references,
108                             collector::GarbageCollector* collector,
109                             bool report_cleared = false)
110       REQUIRES_SHARED(Locks::mutator_lock_);
111 
112   void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
113   size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);
114 
IsEmpty()115   bool IsEmpty() const {
116     return list_ == nullptr;
117   }
118 
119   // Clear this queue. Only safe after handing off the contents elsewhere for further processing.
Clear()120   void Clear() {
121     list_ = nullptr;
122   }
123 
GetList()124   mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
125     return list_;
126   }
127 
128   // Visits list_, currently only used for the mark compact GC.
129   void UpdateRoots(IsMarkedVisitor* visitor)
130       REQUIRES_SHARED(Locks::mutator_lock_);
131 
132  private:
133   // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
134   // calling AtomicEnqueueIfNotEnqueued.
135   Mutex* const lock_;
136   // The actual reference list. Only a root for the mark compact GC since it
137   // will be null during root marking for other GC types. Not an ObjPtr since it
138   // is accessed from multiple threads.  Points to a singly-linked circular list
139   // using the pendingNext field.
140   mirror::Reference* list_;
141 
142   DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
143 };
144 
145 }  // namespace gc
146 }  // namespace art
147 
148 #endif  // ART_RUNTIME_GC_REFERENCE_QUEUE_H_
149