/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_REFERENCE_QUEUE_H_
#define ART_RUNTIME_GC_REFERENCE_QUEUE_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "base/atomic.h"
#include "base/locks.h"
#include "base/timing_logger.h"
#include "jni.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "runtime_globals.h"
#include "thread_pool.h"

namespace art {

class Mutex;

namespace mirror {
class Reference;
}  // namespace mirror

class IsMarkedVisitor;
class MarkObjectVisitor;

namespace gc {

namespace collector {
class GarbageCollector;
}  // namespace collector

class Heap;

// Used to temporarily store java.lang.ref.Reference(s) during GC and prior to queueing on the
// appropriate java.lang.ref.ReferenceQueue. The linked list is maintained as an unordered,
// circular, and singly-linked list using the pendingNext fields of the java.lang.ref.Reference
// objects.
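// For illustration only: a minimal, standalone sketch of one way an unordered, circular,
// singly-linked list can be maintained through a pendingNext-style field, as described above.
// This is not ART's actual code; Node and pending_next are hypothetical stand-ins for
// mirror::Reference and its pendingNext field, and no locking or read barriers are modeled.
//
//   struct Node {
//     Node* pending_next = nullptr;  // Null means the node is not currently enqueued.
//   };
//
//   // 'list' points at an arbitrary element of the circular list, or is null when empty.
//   void Enqueue(Node** list, Node* ref) {
//     if (*list == nullptr) {
//       ref->pending_next = ref;      // A single element points at itself.
//       *list = ref;
//     } else {
//       // Splice the new element in right after the current head; ordering does not matter.
//       ref->pending_next = (*list)->pending_next;
//       (*list)->pending_next = ref;
//     }
//   }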
class ReferenceQueue {
 public:
  explicit ReferenceQueue(Mutex* lock);

  // Enqueue a reference if it is unprocessed. Thread safe to call from multiple threads since it
  // uses a lock to avoid a race between checking for the reference's presence and adding it.
  void AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);

  // Enqueue a reference. The reference must be unprocessed.
  // Not thread safe, used when mutators are paused to minimize lock overhead.
  void EnqueueReference(ObjPtr<mirror::Reference> ref) REQUIRES_SHARED(Locks::mutator_lock_);

  // Dequeue a reference from the queue and return that dequeued reference.
  // Call DisableReadBarrierForReference for the reference that's returned from this function.
  ObjPtr<mirror::Reference> DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);

  // If applicable, disable the read barrier for the reference after its referent is handled (see
  // ConcurrentCopying::ProcessMarkStackRef.) This must be called for a reference that's dequeued
  // from the pending queue (DequeuePendingReference).
  void DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Enqueues finalizer references with white referents. White referents are blackened, moved to
  // the zombie field, and the referent field is cleared.
  void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                  collector::GarbageCollector* collector)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Walks the reference list marking any references subject to the reference clearing policy.
  // References with a black referent are removed from the list. References with white referents
  // biased toward saving are blackened and also removed from the list.
  void ForwardSoftReferences(MarkObjectVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Unlink the reference list, clearing reference objects with white referents. Cleared references
  // registered to a reference queue are scheduled for appending by the heap worker thread.
  void ClearWhiteReferences(ReferenceQueue* cleared_references,
                            collector::GarbageCollector* collector)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
  size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsEmpty() const {
    return list_ == nullptr;
  }

  void Clear() {
    list_ = nullptr;
  }

  mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
    return list_;
  }

  // Visits list_, currently only used for the mark compact GC.
  void UpdateRoots(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
  // calling AtomicEnqueueIfNotEnqueued.
  Mutex* const lock_;
  // The actual reference list. Only a root for the mark compact GC since it will be null for other
  // GC types. Not an ObjPtr since it is accessed from multiple threads.
  mirror::Reference* list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_REFERENCE_QUEUE_H_
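// For illustration only: the comment on DequeuePendingReference implies a drain pattern in which
// each dequeued reference is followed by a call to DisableReadBarrierForReference. A hedged
// sketch of such a loop is shown below; ProcessQueue is a hypothetical caller, not an ART
// function, and the referent handling step is elided.
//
//   void ProcessQueue(art::gc::ReferenceQueue* queue)
//       REQUIRES_SHARED(art::Locks::mutator_lock_) {
//     while (!queue->IsEmpty()) {
//       art::ObjPtr<art::mirror::Reference> ref = queue->DequeuePendingReference();
//       // ... handle the referent of 'ref' here ...
//       queue->DisableReadBarrierForReference(ref);
//     }
//   }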