// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <deque>

#include "src/base/bits.h"
#include "src/base/platform/condition-variable.h"
#include "src/cancelable-task.h"
#include "src/heap/marking.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to the
// first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
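// A conforming callback might look like the following sketch (the function
// name and the liveness check shown here are illustrative only):
//
//   bool IsAliveExample(HeapObject* obj, int* size, int* offset) {
//     *size = obj->Size();  // report the heap size of the object
//     return Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(obj));
//   }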

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

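// Static helpers that map a heap object (or a raw address) to its mark bit in
// the marking bitmap of the owning MemoryChunk.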
class ObjectMarking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  static Marking::ObjectColor Color(HeapObject* obj) {
    return Marking::Color(ObjectMarking::MarkBitFrom(obj));
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
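// A rough usage sketch (the actual call sites live in the collector and the
// incremental marking code):
//
//   marking_deque->Push(object);   // may fail and set the overflowed flag
//   while (!marking_deque->IsEmpty()) {
//     HeapObject* object = marking_deque->Pop();
//     // ... visit the object's fields and push newly discovered objects ...
//   }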
class MarkingDeque {
 public:
  explicit MarkingDeque(Heap* heap)
      : backing_store_(nullptr),
        backing_store_committed_size_(0),
        array_(nullptr),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false),
        uncommit_task_pending_(false),
        heap_(heap) {}

  void SetUp();
  void TearDown();

  // Ensures that the marking deque is committed and will stay committed until
  // StopUsing() is called.
  void StartUsing();
  void StopUsing();
  void Clear();

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  // This task uncommits the marking_deque backing store if
  // marking_deque->in_use_ is false.
  class UncommitTask : public CancelableTask {
   public:
    explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
        : CancelableTask(isolate), marking_deque_(marking_deque) {}

   private:
    // CancelableTask override.
    void RunInternal() override {
      base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
      if (!marking_deque_->in_use_) {
        marking_deque_->Uncommit();
      }
      marking_deque_->uncommit_task_pending_ = false;
    }

    MarkingDeque* marking_deque_;
    DISALLOW_COPY_AND_ASSIGN(UncommitTask);
  };

  static const size_t kMaxSize = 4 * MB;
  static const size_t kMinSize = 256 * KB;

  // Must be called with mutex lock.
  void EnsureCommitted();

  // Must be called with mutex lock.
  void Uncommit();

  // Must be called with mutex lock.
  void StartUncommitTask();

  base::Mutex mutex_;

  base::VirtualMemory* backing_store_;
  size_t backing_store_committed_size_;
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
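  // For example, with mask_ == 7 (eight slots, at most seven usable entries),
  // bottom_ == 0, and top_ == 7, the deque is full because
  // ((7 + 1) & 7) == 0 == bottom_.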
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  // in_use_ == true after taking mutex lock implies that the marking deque is
  // committed and will stay committed at least until in_use_ == false.
  bool in_use_;
  bool uncommit_task_pending_;
  Heap* heap_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
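// Candidates of each kind are kept in a singly linked list headed by
// jsfunction_candidates_head_ or shared_function_info_candidates_head_; the
// next-candidate links are threaded through the candidates themselves via
// the Get/Set/ClearNextCandidate helpers declared below.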
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;

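// Iterates over the mark-bit cells of a single MemoryChunk. A typical loop
// looks roughly like the following sketch:
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address cell_base = it.CurrentCellBase();
//     // ... examine the bits in *cell, which cover objects starting at
//     // cell_base ...
//   }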
class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};

// Grey objects can occur on black pages when black objects transition to
// grey, e.g. when RecordWrites is called on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

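// Iterates the live objects of a page in mark-bit order, filtered by the
// iteration mode. A usage sketch (Next() is assumed here to return nullptr
// once the page is exhausted):
//
//   LiveObjectIterator<kBlackObjects> it(page);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... process the black object ...
//   }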
template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {
  }

  HeapObject* Next();

 private:
  inline Heap* heap() { return chunk_->heap(); }

  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  class Sweeper {
   public:
    class SweeperTask;

    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
    enum ClearOldToNewSlotsMode {
      DO_NOT_CLEAR,
      CLEAR_REGULAR_SLOTS,
      CLEAR_TYPED_SLOTS
    };

    typedef std::deque<Page*> SweepingList;
    typedef List<Page*> SweptList;

    static int RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
                        FreeSpaceTreatmentMode free_space_mode);

    explicit Sweeper(Heap* heap)
        : heap_(heap),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          num_sweeping_tasks_(0) {}

    bool sweeping_in_progress() { return sweeping_in_progress_; }

    void AddPage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    // After calling this function sweeping is considered to be in progress
    // and the main thread can sweep lazily, but the background sweeper tasks
    // are not running yet.
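    // A rough sketch of how these entry points are meant to be sequenced on
    // the main thread (not the exact call sites):
    //
    //   sweeper->StartSweeping();      // sweeping is now in progress
    //   sweeper->StartSweeperTasks();  // background sweeper tasks start
    //   ...                            // the main thread may sweep lazily
    //   sweeper->EnsureCompleted();    // waits until all sweeping is done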
    void StartSweeping();
    void StartSweeperTasks();
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool AreSweeperTasksRunning();
    bool IsSweepingCompleted(AllocationSpace space);
    void SweepOrWaitUntilSweepingCompleted(Page* page);

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;

    static ClearOldToNewSlotsMode GetClearOldToNewSlotsMode(Page* p);

    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    Page* GetSweepingPageSafe(AllocationSpace space);
    void AddSweepingPageSafe(AllocationSpace space, Page* page);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* heap_;
    base::Semaphore pending_sweeper_tasks_semaphore_;
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
  };

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  bool StartCompaction();

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is currently in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  static inline bool IsOnEvacuationCandidate(HeapObject* obj) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  Sweeper& sweeper() { return sweeper_; }

 private:
  template <PageEvacuationMode mode>
  class EvacuateNewSpacePageVisitor;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateRecordOnlyVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;
  class ObjectStatsVisitor;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);

  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void VisitAllObjects(HeapObjectVisitor* visitor);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially.  References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.  This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups,
  //      implicit reference groups, or embedder heap tracing.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack.  This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap.  This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();
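  // The two routines above cooperate roughly as follows (a sketch of the
  // overflow handling, not the exact control flow):
  //
  //   EmptyMarkingDeque();
  //   while (marking_deque()->overflowed()) {
  //     RefillMarkingDeque();
  //     EmptyMarkingDeque();
  //   }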

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and if needed trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far.  This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuateNewSpacePrologue();

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  template <class Visitor>
  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                        IterationMode mode);

  void RecomputeLiveBytes(MemoryChunk* page);

  void ReleaseEvacuationCandidates();


#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;

  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingDeque marking_deque_;

  CodeFlusher* code_flusher_;

  List<Page*> evacuation_candidates_;
  List<Page*> newspace_evacuation_candidates_;

  Sweeper sweeper_;

  friend class Heap;
  friend class StoreBuffer;
};


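// RAII helper that sets collector->evacuation() to true for the lifetime of
// the scope and resets it on destruction, e.g. (sketch):
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... collector->evacuation() is true within this block ...
//   }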
class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};

V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_