/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "reference_queue.h"

#include "accounting/card_table-inl.h"
#include "collector/concurrent_copying.h"
#include "heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/reference-inl.h"

namespace art {
namespace gc {

ReferenceQueue::ReferenceQueue(Mutex* lock) : lock_(lock), list_(nullptr) {
}

void ReferenceQueue::AtomicEnqueueIfNotEnqueued(Thread* self, ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  MutexLock mu(self, *lock_);
  if (ref->IsUnprocessed()) {
    EnqueueReference(ref);
  }
}

void ReferenceQueue::EnqueueReference(ObjPtr<mirror::Reference> ref) {
  DCHECK(ref != nullptr);
  CHECK(ref->IsUnprocessed());
  if (IsEmpty()) {
    // 1 element cyclic queue, i.e.: Reference ref = ..; ref.pendingNext = ref;
    list_ = ref.Ptr();
  } else {
    // The list is owned by the GC, everything that has been inserted must already be at least
    // gray.
    ObjPtr<mirror::Reference> head = list_->GetPendingNext();
    DCHECK(head != nullptr);
    ref->SetPendingNext(head);
  }
  // Add the reference in the middle to preserve the cycle.
  list_->SetPendingNext(ref);
}

ObjPtr<mirror::Reference> ReferenceQueue::DequeuePendingReference() {
  DCHECK(!IsEmpty());
  ObjPtr<mirror::Reference> ref = list_->GetPendingNext();
  DCHECK(ref != nullptr);
  // Note: the following code is thread-safe because it is only called from ProcessReferences which
  // is single threaded.
  if (list_ == ref) {
    list_ = nullptr;
  } else {
    ObjPtr<mirror::Reference> next = ref->GetPendingNext();
    list_->SetPendingNext(next);
  }
  ref->SetPendingNext(nullptr);
  return ref;
}
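
// A minimal sketch (not ART code) of the list discipline implemented by
// EnqueueReference/DequeuePendingReference above: list_ is an anchor into a
// singly-linked cycle through the pendingNext fields. Enqueue splices the new
// node in directly after the anchor and dequeue pops from that same position,
// so both operations are O(1) with a single pointer and no sentinel node.
// The names (Node, pending_next) are illustrative only:
//
//   struct Node { Node* pending_next = nullptr; };  // nullptr == unprocessed.
//
//   void Enqueue(Node*& list, Node* n) {
//     if (list == nullptr) {
//       list = n;                               // Becomes a 1-element cycle below.
//     } else {
//       n->pending_next = list->pending_next;   // n -> old head.
//     }
//     list->pending_next = n;                   // Anchor -> n closes the cycle.
//   }
//
//   Node* Dequeue(Node*& list) {
//     Node* head = list->pending_next;
//     if (head == list) {
//       list = nullptr;                         // Removed the last element.
//     } else {
//       list->pending_next = head->pending_next;
//     }
//     head->pending_next = nullptr;             // Back to the unprocessed state.
//     return head;
//   }
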
// This must be called whenever DequeuePendingReference is called.
void ReferenceQueue::DisableReadBarrierForReference(ObjPtr<mirror::Reference> ref) {
  Heap* heap = Runtime::Current()->GetHeap();
  if (kUseBakerOrBrooksReadBarrier && heap->CurrentCollectorType() == kCollectorTypeCC &&
      heap->ConcurrentCopyingCollector()->IsActive()) {
    // Change the gray ptr we left in ConcurrentCopying::ProcessMarkStackRef() to white.
    // We check IsActive() above because we don't want to do this when the zygote compaction
    // collector (SemiSpace) is running.
    CHECK(ref != nullptr);
    collector::ConcurrentCopying* concurrent_copying = heap->ConcurrentCopyingCollector();
    uint32_t rb_state = ref->GetReadBarrierState();
    if (rb_state == ReadBarrier::GrayState()) {
      ref->AtomicSetReadBarrierState(ReadBarrier::GrayState(), ReadBarrier::WhiteState());
      CHECK_EQ(ref->GetReadBarrierState(), ReadBarrier::WhiteState());
    } else {
      // In ConcurrentCopying::ProcessMarkStackRef() we may leave a white reference in the queue
      // and find it here, which is OK.
      CHECK_EQ(rb_state, ReadBarrier::WhiteState()) << "ref=" << ref << " rb_state=" << rb_state;
      ObjPtr<mirror::Object> referent = ref->GetReferent();
      // The referent could be null if it's cleared by a mutator (Reference.clear()).
      if (referent != nullptr) {
        CHECK(concurrent_copying->IsInToSpace(referent.Ptr()))
            << "ref=" << ref << " rb_state=" << ref->GetReadBarrierState()
            << " referent=" << referent;
      }
    }
  }
}

void ReferenceQueue::Dump(std::ostream& os) const {
  ObjPtr<mirror::Reference> cur = list_;
  os << "Reference starting at list_=" << list_ << "\n";
  if (cur == nullptr) {
    return;
  }
  do {
    ObjPtr<mirror::Reference> pending_next = cur->GetPendingNext();
    os << "Reference= " << cur << " PendingNext=" << pending_next;
    if (cur->IsFinalizerReferenceInstance()) {
      os << " Zombie=" << cur->AsFinalizerReference()->GetZombie();
    }
    os << "\n";
    cur = pending_next;
  } while (cur != list_);
}

size_t ReferenceQueue::GetLength() const {
  size_t count = 0;
  ObjPtr<mirror::Reference> cur = list_;
  if (cur != nullptr) {
    do {
      ++count;
      cur = cur->GetPendingNext();
    } while (cur != list_);
  }
  return count;
}

void ReferenceQueue::ClearWhiteReferences(ReferenceQueue* cleared_references,
                                          collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::Reference> ref = DequeuePendingReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
      // Referent is white, clear it. Use the transactional variant when a transaction is active
      // so that the write can be recorded and rolled back.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->ClearReferent<true>();
      } else {
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref);
  }
}

void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
                                                collector::GarbageCollector* collector) {
  while (!IsEmpty()) {
    ObjPtr<mirror::FinalizerReference> ref = DequeuePendingReference()->AsFinalizerReference();
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    // do_atomic_update is false because this happens during the reference processing phase where
    // Reference.clear() would block.
    if (!collector->IsNullOrMarkedHeapReference(referent_addr, /*do_atomic_update*/false)) {
      ObjPtr<mirror::Object> forward_address = collector->MarkObject(referent_addr->AsMirrorPtr());
      // Move the updated referent to the zombie field.
      if (Runtime::Current()->IsActiveTransaction()) {
        ref->SetZombie<true>(forward_address);
        ref->ClearReferent<true>();
      } else {
        ref->SetZombie<false>(forward_address);
        ref->ClearReferent<false>();
      }
      cleared_references->EnqueueReference(ref);
    }
    // Delay disabling the read barrier until here so that the ClearReferent call above in
    // transaction mode will trigger the read barrier.
    DisableReadBarrierForReference(ref->AsReference());
  }
}

void ReferenceQueue::ForwardSoftReferences(MarkObjectVisitor* visitor) {
  if (UNLIKELY(IsEmpty())) {
    return;
  }
  ObjPtr<mirror::Reference> const head = list_;
  ObjPtr<mirror::Reference> ref = head;
  do {
    mirror::HeapReference<mirror::Object>* referent_addr = ref->GetReferentReferenceAddr();
    if (referent_addr->AsMirrorPtr() != nullptr) {
      // do_atomic_update is false because mutators can't access the referent due to the weak ref
      // access blocking.
      visitor->MarkHeapReference(referent_addr, /*do_atomic_update*/ false);
    }
    ref = ref->GetPendingNext();
  } while (LIKELY(ref != head));
}

void ReferenceQueue::UpdateRoots(IsMarkedVisitor* visitor) {
  if (list_ != nullptr) {
    list_ = down_cast<mirror::Reference*>(visitor->IsMarked(list_));
  }
}

}  // namespace gc
}  // namespace art
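
// A minimal sketch (not ART code) of the gray -> white transition that
// DisableReadBarrierForReference performs through AtomicSetReadBarrierState.
// For illustration it assumes the read-barrier state lives alone in an atomic
// word (in ART it shares the object's lock word, which is why a CAS is used)
// and uses hypothetical kGray/kWhite encodings:
//
//   #include <atomic>
//   #include <cstdint>
//
//   constexpr uint32_t kWhite = 0;  // Hypothetical values; the real encodings
//   constexpr uint32_t kGray = 1;   // are defined by art::ReadBarrier.
//
//   void DisableReadBarrier(std::atomic<uint32_t>& rb_state) {
//     uint32_t expected = kGray;
//     // CAS rather than a plain store so that concurrent updates to other bits
//     // of the shared word are not clobbered. If the reference was already
//     // white (ProcessMarkStackRef may leave white refs in the queue), the
//     // exchange simply fails and nothing changes.
//     rb_state.compare_exchange_strong(expected, kWhite);
//   }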