1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/mark-compact.h"
6
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h"
11 #include "src/compilation-cache.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/frames-inl.h"
15 #include "src/gdb-jit.h"
16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-tracker.h"
18 #include "src/heap/gc-tracer.h"
19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact-inl.h"
21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting-inl.h"
23 #include "src/heap/objects-visiting.h"
24 #include "src/heap/page-parallel-job.h"
25 #include "src/heap/spaces-inl.h"
26 #include "src/ic/ic.h"
27 #include "src/ic/stub-cache.h"
28 #include "src/tracing/tracing-category-observer.h"
29 #include "src/utils-inl.h"
30 #include "src/v8.h"
31
32 namespace v8 {
33 namespace internal {
34
35
36 const char* Marking::kWhiteBitPattern = "00";
37 const char* Marking::kBlackBitPattern = "11";
38 const char* Marking::kGreyBitPattern = "10";
39 const char* Marking::kImpossibleBitPattern = "01";
40
41 // The following has to hold in order for {ObjectMarking::MarkBitFrom} to not
42 // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
43 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
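
// A minimal illustration (not part of the original source) of why the assert
// above suffices, assuming one mark bit per pointer-sized word: an object of
// the minimum size (two words) starting at word i owns the bit pair
// (bit i, bit i + 1), and the next object can start at word i + 2 at the
// earliest, so the bit pairs of distinct objects never overlap and the "01"
// pattern cannot arise from two adjacent objects.
//
//   words:     | object A (2 words) | object B (3 words) |
//   mark bits: |  b0  b1            |  b2  b3  b4        |
//   patterns:  |  grey = "10"       |  black = "11"      |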
44
45
46 // -------------------------------------------------------------------------
47 // MarkCompactCollector
48
49 MarkCompactCollector::MarkCompactCollector(Heap* heap)
50 : // NOLINT
51 heap_(heap),
52 page_parallel_job_semaphore_(0),
53 #ifdef DEBUG
54 state_(IDLE),
55 #endif
56 marking_parity_(ODD_MARKING_PARITY),
57 was_marked_incrementally_(false),
58 evacuation_(false),
59 compacting_(false),
60 black_allocation_(false),
61 have_code_to_deoptimize_(false),
62 marking_deque_(heap),
63 code_flusher_(nullptr),
64 sweeper_(heap) {
65 }
66
67 #ifdef VERIFY_HEAP
68 class VerifyMarkingVisitor : public ObjectVisitor {
69 public:
70   explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
71
72   void VisitPointers(Object** start, Object** end) override {
73 for (Object** current = start; current < end; current++) {
74 if ((*current)->IsHeapObject()) {
75 HeapObject* object = HeapObject::cast(*current);
76 CHECK(heap_->mark_compact_collector()->IsMarked(object));
77 }
78 }
79 }
80
81   void VisitEmbeddedPointer(RelocInfo* rinfo) override {
82 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
83 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
84 Object* p = rinfo->target_object();
85 VisitPointer(&p);
86 }
87 }
88
89   void VisitCell(RelocInfo* rinfo) override {
90 Code* code = rinfo->host();
91 DCHECK(rinfo->rmode() == RelocInfo::CELL);
92 if (!code->IsWeakObject(rinfo->target_cell())) {
93 ObjectVisitor::VisitCell(rinfo);
94 }
95 }
96
97 private:
98 Heap* heap_;
99 };
100
101
102 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
103 VerifyMarkingVisitor visitor(heap);
104 HeapObject* object;
105 Address next_object_must_be_here_or_later = bottom;
106 for (Address current = bottom; current < top;) {
107 object = HeapObject::FromAddress(current);
108 if (MarkCompactCollector::IsMarked(object)) {
109 CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
110 CHECK(current >= next_object_must_be_here_or_later);
111 object->Iterate(&visitor);
112 next_object_must_be_here_or_later = current + object->Size();
113 // The object is either part of a black area created by black allocation or
114 // a regular black object.
115 Page* page = Page::FromAddress(current);
116 CHECK(
117 page->markbits()->AllBitsSetInRange(
118 page->AddressToMarkbitIndex(current),
119 page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
120 page->markbits()->AllBitsClearInRange(
121 page->AddressToMarkbitIndex(current + kPointerSize * 2),
122 page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
123 current = next_object_must_be_here_or_later;
124 } else {
125 current += kPointerSize;
126 }
127 }
128 }
129
130 static void VerifyMarking(NewSpace* space) {
131 Address end = space->top();
132 // The bottom position is at the start of its page, which allows us to use
133 // page->area_start() as the start of the range on all pages.
134 CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
135
136 NewSpacePageRange range(space->bottom(), end);
137 for (auto it = range.begin(); it != range.end();) {
138 Page* page = *(it++);
139 Address limit = it != range.end() ? page->area_end() : end;
140 CHECK(limit == end || !page->Contains(end));
141 VerifyMarking(space->heap(), page->area_start(), limit);
142 }
143 }
144
145
146 static void VerifyMarking(PagedSpace* space) {
147 for (Page* p : *space) {
148 VerifyMarking(space->heap(), p->area_start(), p->area_end());
149 }
150 }
151
152
153 static void VerifyMarking(Heap* heap) {
154 VerifyMarking(heap->old_space());
155 VerifyMarking(heap->code_space());
156 VerifyMarking(heap->map_space());
157 VerifyMarking(heap->new_space());
158
159 VerifyMarkingVisitor visitor(heap);
160
161 LargeObjectIterator it(heap->lo_space());
162 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
163 if (MarkCompactCollector::IsMarked(obj)) {
164 obj->Iterate(&visitor);
165 }
166 }
167
168 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
169 }
170
171
172 class VerifyEvacuationVisitor : public ObjectVisitor {
173 public:
174   void VisitPointers(Object** start, Object** end) override {
175 for (Object** current = start; current < end; current++) {
176 if ((*current)->IsHeapObject()) {
177 HeapObject* object = HeapObject::cast(*current);
178 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
179 }
180 }
181 }
182 };
183
184
185 static void VerifyEvacuation(Page* page) {
186 VerifyEvacuationVisitor visitor;
187 HeapObjectIterator iterator(page);
188 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
189 heap_object = iterator.Next()) {
190 // We skip free space objects.
191 if (!heap_object->IsFiller()) {
192 heap_object->Iterate(&visitor);
193 }
194 }
195 }
196
197
198 static void VerifyEvacuation(NewSpace* space) {
199 VerifyEvacuationVisitor visitor;
200 NewSpacePageRange range(space->bottom(), space->top());
201 for (auto it = range.begin(); it != range.end();) {
202 Page* page = *(it++);
203 Address current = page->area_start();
204 Address limit = it != range.end() ? page->area_end() : space->top();
205 CHECK(limit == space->top() || !page->Contains(space->top()));
206 while (current < limit) {
207 HeapObject* object = HeapObject::FromAddress(current);
208 object->Iterate(&visitor);
209 current += object->Size();
210 }
211 }
212 }
213
214
215 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
216 if (FLAG_use_allocation_folding && (space == heap->old_space())) {
217 return;
218 }
219 for (Page* p : *space) {
220 if (p->IsEvacuationCandidate()) continue;
221 VerifyEvacuation(p);
222 }
223 }
224
225
226 static void VerifyEvacuation(Heap* heap) {
227 VerifyEvacuation(heap, heap->old_space());
228 VerifyEvacuation(heap, heap->code_space());
229 VerifyEvacuation(heap, heap->map_space());
230 VerifyEvacuation(heap->new_space());
231
232 VerifyEvacuationVisitor visitor;
233 heap->IterateStrongRoots(&visitor, VISIT_ALL);
234 }
235 #endif // VERIFY_HEAP
236
237
238 void MarkCompactCollector::SetUp() {
239 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
240 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
241 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
242 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
243 marking_deque()->SetUp();
244
245 if (FLAG_flush_code) {
246 code_flusher_ = new CodeFlusher(isolate());
247 if (FLAG_trace_code_flushing) {
248 PrintF("[code-flushing is now on]\n");
249 }
250 }
251 }
252
253
254 void MarkCompactCollector::TearDown() {
255 AbortCompaction();
256 marking_deque()->TearDown();
257 delete code_flusher_;
258 }
259
260
261 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
262 DCHECK(!p->NeverEvacuate());
263 p->MarkEvacuationCandidate();
264 evacuation_candidates_.Add(p);
265 }
266
267
268 static void TraceFragmentation(PagedSpace* space) {
269 int number_of_pages = space->CountTotalPages();
270 intptr_t reserved = (number_of_pages * space->AreaSize());
271 intptr_t free = reserved - space->SizeOfObjects();
272 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
273 AllocationSpaceName(space->identity()), number_of_pages,
274 static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
275 }
276
277 bool MarkCompactCollector::StartCompaction() {
278 if (!compacting_) {
279 DCHECK(evacuation_candidates_.length() == 0);
280
281 CollectEvacuationCandidates(heap()->old_space());
282
283 if (FLAG_compact_code_space) {
284 CollectEvacuationCandidates(heap()->code_space());
285 } else if (FLAG_trace_fragmentation) {
286 TraceFragmentation(heap()->code_space());
287 }
288
289 if (FLAG_trace_fragmentation) {
290 TraceFragmentation(heap()->map_space());
291 }
292
293 compacting_ = evacuation_candidates_.length() > 0;
294 }
295
296 return compacting_;
297 }
298
299 void MarkCompactCollector::CollectGarbage() {
300 // Make sure that Prepare() has been called. The individual steps below will
301 // update the state as they proceed.
302 DCHECK(state_ == PREPARE_GC);
303
304 MarkLiveObjects();
305
306 DCHECK(heap_->incremental_marking()->IsStopped());
307
308 ClearNonLiveReferences();
309
310 RecordObjectStats();
311
312 #ifdef VERIFY_HEAP
313 if (FLAG_verify_heap) {
314 VerifyMarking(heap_);
315 }
316 #endif
317
318 StartSweepSpaces();
319
320 EvacuateNewSpaceAndCandidates();
321
322 Finish();
323 }
324
325
326 #ifdef VERIFY_HEAP
327 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
328 for (Page* p : *space) {
329 CHECK(p->markbits()->IsClean());
330 CHECK_EQ(0, p->LiveBytes());
331 }
332 }
333
334
335 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
336 for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
337 CHECK(p->markbits()->IsClean());
338 CHECK_EQ(0, p->LiveBytes());
339 }
340 }
341
342
343 void MarkCompactCollector::VerifyMarkbitsAreClean() {
344 VerifyMarkbitsAreClean(heap_->old_space());
345 VerifyMarkbitsAreClean(heap_->code_space());
346 VerifyMarkbitsAreClean(heap_->map_space());
347 VerifyMarkbitsAreClean(heap_->new_space());
348
349 LargeObjectIterator it(heap_->lo_space());
350 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
351 MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
352 CHECK(Marking::IsWhite(mark_bit));
353 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
354 }
355 }
356
357
358 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
359 HeapObjectIterator code_iterator(heap()->code_space());
360 for (HeapObject* obj = code_iterator.Next(); obj != NULL;
361 obj = code_iterator.Next()) {
362 Code* code = Code::cast(obj);
363 if (!code->is_optimized_code()) continue;
364 if (WillBeDeoptimized(code)) continue;
365 code->VerifyEmbeddedObjectsDependency();
366 }
367 }
368
369
370 void MarkCompactCollector::VerifyOmittedMapChecks() {
371 HeapObjectIterator iterator(heap()->map_space());
372 for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
373 Map* map = Map::cast(obj);
374 map->VerifyOmittedMapChecks();
375 }
376 }
377 #endif // VERIFY_HEAP
378
379
380 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
381 for (Page* p : *space) {
382 p->ClearLiveness();
383 }
384 }
385
386
387 static void ClearMarkbitsInNewSpace(NewSpace* space) {
388 for (Page* page : *space) {
389 page->ClearLiveness();
390 }
391 }
392
393
394 void MarkCompactCollector::ClearMarkbits() {
395 ClearMarkbitsInPagedSpace(heap_->code_space());
396 ClearMarkbitsInPagedSpace(heap_->map_space());
397 ClearMarkbitsInPagedSpace(heap_->old_space());
398 ClearMarkbitsInNewSpace(heap_->new_space());
399
400 LargeObjectIterator it(heap_->lo_space());
401 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
402 Marking::MarkWhite(ObjectMarking::MarkBitFrom(obj));
403 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
404 chunk->ResetProgressBar();
405 chunk->ResetLiveBytes();
406 }
407 }
408
409 class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
410 public:
411   SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
412 AllocationSpace space_to_start)
413 : sweeper_(sweeper),
414 pending_sweeper_tasks_(pending_sweeper_tasks),
415 space_to_start_(space_to_start) {}
416
417   virtual ~SweeperTask() {}
418
419 private:
420 // v8::Task overrides.
421   void Run() override {
422 DCHECK_GE(space_to_start_, FIRST_SPACE);
423 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
424 const int offset = space_to_start_ - FIRST_SPACE;
425 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
426 for (int i = 0; i < num_spaces; i++) {
427 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
428 DCHECK_GE(space_id, FIRST_SPACE);
429 DCHECK_LE(space_id, LAST_PAGED_SPACE);
430 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
431 }
432 pending_sweeper_tasks_->Signal();
433 }
434
435 Sweeper* sweeper_;
436 base::Semaphore* pending_sweeper_tasks_;
437 AllocationSpace space_to_start_;
438
439 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
440 };
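
// Illustrative example (not part of the original source, with hypothetical
// space indices) of the round-robin order used in SweeperTask::Run() above:
// if FIRST_SPACE == 0, LAST_PAGED_SPACE == 3 and space_to_start_ == 2, then
// num_spaces == 4 and the task sweeps the spaces in the order 2, 3, 0, 1.
// Starting concurrent tasks at different spaces keeps them from all contending
// on the same sweeping list first.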
441
442 void MarkCompactCollector::Sweeper::StartSweeping() {
443 sweeping_in_progress_ = true;
444 ForAllSweepingSpaces([this](AllocationSpace space) {
445 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
446 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
447 });
448 }
449
450 void MarkCompactCollector::Sweeper::StartSweeperTasks() {
451 if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
452 ForAllSweepingSpaces([this](AllocationSpace space) {
453 if (space == NEW_SPACE) return;
454 num_sweeping_tasks_.Increment(1);
455 V8::GetCurrentPlatform()->CallOnBackgroundThread(
456 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
457 v8::Platform::kShortRunningTask);
458 });
459 }
460 }
461
462 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
463 Page* page) {
464 if (!page->SweepingDone()) {
465 ParallelSweepPage(page, page->owner()->identity());
466 if (!page->SweepingDone()) {
467 // We were not able to sweep that page, i.e., a concurrent
468 // sweeper thread currently owns this page. Wait for the sweeper
469 // thread to be done with this page.
470 page->WaitUntilSweepingCompleted();
471 }
472 }
473 }
474
475 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
476 if (FLAG_concurrent_sweeping &&
477 !sweeper().IsSweepingCompleted(space->identity())) {
478 sweeper().ParallelSweepSpace(space->identity(), 0);
479 space->RefillFreeList();
480 }
481 }
482
483 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
484 base::LockGuard<base::Mutex> guard(&mutex_);
485 SweptList& list = swept_list_[space->identity()];
486 if (list.length() > 0) {
487 return list.RemoveLast();
488 }
489 return nullptr;
490 }
491
492 void MarkCompactCollector::Sweeper::EnsureCompleted() {
493 if (!sweeping_in_progress_) return;
494
495 // If sweeping is not completed or not running at all, we try to complete it
496 // here.
497 ForAllSweepingSpaces([this](AllocationSpace space) {
498 if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
499 ParallelSweepSpace(space, 0);
500 }
501 });
502
503 if (FLAG_concurrent_sweeping) {
504 while (num_sweeping_tasks_.Value() > 0) {
505 pending_sweeper_tasks_semaphore_.Wait();
506 num_sweeping_tasks_.Increment(-1);
507 }
508 }
509
510 ForAllSweepingSpaces([this](AllocationSpace space) {
511 if (space == NEW_SPACE) {
512 swept_list_[NEW_SPACE].Clear();
513 }
514 DCHECK(sweeping_list_[space].empty());
515 });
516 sweeping_in_progress_ = false;
517 }
518
519 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
520 if (!sweeping_in_progress_) return;
521 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
522 for (Page* p : *heap_->new_space()) {
523 SweepOrWaitUntilSweepingCompleted(p);
524 }
525 }
526 }
527
528 void MarkCompactCollector::EnsureSweepingCompleted() {
529 if (!sweeper().sweeping_in_progress()) return;
530
531 sweeper().EnsureCompleted();
532 heap()->old_space()->RefillFreeList();
533 heap()->code_space()->RefillFreeList();
534 heap()->map_space()->RefillFreeList();
535
536 #ifdef VERIFY_HEAP
537 if (FLAG_verify_heap && !evacuation()) {
538 VerifyEvacuation(heap_);
539 }
540 #endif
541 }
542
543 bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
544 DCHECK(FLAG_concurrent_sweeping);
545 while (pending_sweeper_tasks_semaphore_.WaitFor(
546 base::TimeDelta::FromSeconds(0))) {
547 num_sweeping_tasks_.Increment(-1);
548 }
549 return num_sweeping_tasks_.Value() != 0;
550 }
551
552 bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
553 DCHECK(FLAG_concurrent_sweeping);
554 if (AreSweeperTasksRunning()) return false;
555 base::LockGuard<base::Mutex> guard(&mutex_);
556 return sweeping_list_[space].empty();
557 }
558
559 const char* AllocationSpaceName(AllocationSpace space) {
560 switch (space) {
561 case NEW_SPACE:
562 return "NEW_SPACE";
563 case OLD_SPACE:
564 return "OLD_SPACE";
565 case CODE_SPACE:
566 return "CODE_SPACE";
567 case MAP_SPACE:
568 return "MAP_SPACE";
569 case LO_SPACE:
570 return "LO_SPACE";
571 default:
572 UNREACHABLE();
573 }
574
575 return NULL;
576 }
577
578 void MarkCompactCollector::ComputeEvacuationHeuristics(
579 size_t area_size, int* target_fragmentation_percent,
580 size_t* max_evacuated_bytes) {
581 // For the memory-reducing and optimize-for-memory modes we directly define
582 // both constants.
583 const int kTargetFragmentationPercentForReduceMemory = 20;
584 const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
585 const int kTargetFragmentationPercentForOptimizeMemory = 20;
586 const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
587
588 // For regular mode (which is latency critical) we define less aggressive
589 // defaults to start and switch to a trace-based (using compaction speed)
590 // approach as soon as we have enough samples.
591 const int kTargetFragmentationPercent = 70;
592 const size_t kMaxEvacuatedBytes = 4 * MB;
593 // Time to take for a single area (=payload of page). Used as soon as there
594 // exist enough compaction speed samples.
595 const float kTargetMsPerArea = .5;
596
597 if (heap()->ShouldReduceMemory()) {
598 *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
599 *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
600 } else if (heap()->ShouldOptimizeForMemoryUsage()) {
601 *target_fragmentation_percent =
602 kTargetFragmentationPercentForOptimizeMemory;
603 *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
604 } else {
605 const double estimated_compaction_speed =
606 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
607 if (estimated_compaction_speed != 0) {
608 // Estimate the target fragmentation based on traced compaction speed
609 // and a goal for a single page.
610 const double estimated_ms_per_area =
611 1 + area_size / estimated_compaction_speed;
612 *target_fragmentation_percent = static_cast<int>(
613 100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
614 if (*target_fragmentation_percent <
615 kTargetFragmentationPercentForReduceMemory) {
616 *target_fragmentation_percent =
617 kTargetFragmentationPercentForReduceMemory;
618 }
619 } else {
620 *target_fragmentation_percent = kTargetFragmentationPercent;
621 }
622 *max_evacuated_bytes = kMaxEvacuatedBytes;
623 }
624 }
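
// Worked example (illustrative numbers only) for the trace-based branch above:
// with area_size = 500 KB and an estimated compaction speed of 1000 KB/ms,
//   estimated_ms_per_area        = 1 + 500 / 1000        = 1.5 ms
//   target_fragmentation_percent = 100 - 100 * 0.5 / 1.5 = 66
// i.e. a page needs roughly two thirds of its area free before it is
// considered for evacuation; the value is not clamped because it exceeds
// kTargetFragmentationPercentForReduceMemory (20).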
625
626
627 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
628 DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
629
630 int number_of_pages = space->CountTotalPages();
631 size_t area_size = space->AreaSize();
632
633 // Pairs of (live_bytes_in_page, page).
634 typedef std::pair<size_t, Page*> LiveBytesPagePair;
635 std::vector<LiveBytesPagePair> pages;
636 pages.reserve(number_of_pages);
637
638 DCHECK(!sweeping_in_progress());
639 DCHECK(!FLAG_concurrent_sweeping ||
640 sweeper().IsSweepingCompleted(space->identity()));
641 Page* owner_of_linear_allocation_area =
642 space->top() == space->limit()
643 ? nullptr
644 : Page::FromAllocationAreaAddress(space->top());
645 for (Page* p : *space) {
646 if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
647 // Invariant: Evacuation candidates are just created when marking is
648 // started. This means that sweeping has finished. Furthermore, at the end
649 // of a GC all evacuation candidates are cleared and their slot buffers are
650 // released.
651 CHECK(!p->IsEvacuationCandidate());
652 CHECK_NULL(p->old_to_old_slots());
653 CHECK_NULL(p->typed_old_to_old_slots());
654 CHECK(p->SweepingDone());
655 DCHECK(p->area_size() == area_size);
656 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
657 }
658
659 int candidate_count = 0;
660 size_t total_live_bytes = 0;
661
662 const bool reduce_memory = heap()->ShouldReduceMemory();
663 if (FLAG_manual_evacuation_candidates_selection) {
664 for (size_t i = 0; i < pages.size(); i++) {
665 Page* p = pages[i].second;
666 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
667 candidate_count++;
668 total_live_bytes += pages[i].first;
669 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
670 AddEvacuationCandidate(p);
671 }
672 }
673 } else if (FLAG_stress_compaction) {
674 for (size_t i = 0; i < pages.size(); i++) {
675 Page* p = pages[i].second;
676 if (i % 2 == 0) {
677 candidate_count++;
678 total_live_bytes += pages[i].first;
679 AddEvacuationCandidate(p);
680 }
681 }
682 } else {
683 // The following approach determines the pages that should be evacuated.
684 //
685 // We use two conditions to decide whether a page qualifies as an evacuation
686 // candidate:
687 // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
688 //   between live bytes and the capacity of this page (= area)?
689 // * Evacuation quota: A global quota determining how many bytes should be
690 //   compacted.
691 //
692 // The algorithm sorts all pages by live bytes and then iterates through
693 // them, starting with the page with the most free memory, adding pages to
694 // the set of evacuation candidates as long as both conditions (fragmentation
695 // and quota) hold.
696 size_t max_evacuated_bytes;
697 int target_fragmentation_percent;
698 ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
699 &max_evacuated_bytes);
700
701 const size_t free_bytes_threshold =
702 target_fragmentation_percent * (area_size / 100);
703
704 // Sort pages from the most free to the least free, then select
705 // the first n pages for evacuation such that:
706 // - the total size of evacuated objects does not exceed the specified
707 // limit.
708 // - fragmentation of the (n+1)-th page does not exceed the specified limit.
709 std::sort(pages.begin(), pages.end(),
710 [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
711 return a.first < b.first;
712 });
713 for (size_t i = 0; i < pages.size(); i++) {
714 size_t live_bytes = pages[i].first;
715 DCHECK_GE(area_size, live_bytes);
716 size_t free_bytes = area_size - live_bytes;
717 if (FLAG_always_compact ||
718 ((free_bytes >= free_bytes_threshold) &&
719 ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
720 candidate_count++;
721 total_live_bytes += live_bytes;
722 }
723 if (FLAG_trace_fragmentation_verbose) {
724 PrintIsolate(isolate(),
725 "compaction-selection-page: space=%s free_bytes_page=%zu "
726 "fragmentation_limit_kb=%" PRIuS
727 " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
728 "compaction_limit_kb=%zu\n",
729 AllocationSpaceName(space->identity()), free_bytes / KB,
730 free_bytes_threshold / KB, target_fragmentation_percent,
731 total_live_bytes / KB, max_evacuated_bytes / KB);
732 }
733 }
734 // How many pages we will allocate for the evacuated objects
735 // in the worst case: ceil(total_live_bytes / area_size)
736 int estimated_new_pages =
737 static_cast<int>((total_live_bytes + area_size - 1) / area_size);
738 DCHECK_LE(estimated_new_pages, candidate_count);
739 int estimated_released_pages = candidate_count - estimated_new_pages;
740 // Avoid (compact -> expand) cycles.
741 if ((estimated_released_pages == 0) && !FLAG_always_compact) {
742 candidate_count = 0;
743 }
744 for (int i = 0; i < candidate_count; i++) {
745 AddEvacuationCandidate(pages[i].second);
746 }
747 }
748
749 if (FLAG_trace_fragmentation) {
750 PrintIsolate(isolate(),
751 "compaction-selection: space=%s reduce_memory=%d pages=%d "
752 "total_live_bytes=%zu\n",
753 AllocationSpaceName(space->identity()), reduce_memory,
754 candidate_count, total_live_bytes / KB);
755 }
756 }
757
758
759 void MarkCompactCollector::AbortCompaction() {
760 if (compacting_) {
761 RememberedSet<OLD_TO_OLD>::ClearAll(heap());
762 for (Page* p : evacuation_candidates_) {
763 p->ClearEvacuationCandidate();
764 }
765 compacting_ = false;
766 evacuation_candidates_.Rewind(0);
767 }
768 DCHECK_EQ(0, evacuation_candidates_.length());
769 }
770
771
772 void MarkCompactCollector::Prepare() {
773 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
774
775 #ifdef DEBUG
776 DCHECK(state_ == IDLE);
777 state_ = PREPARE_GC;
778 #endif
779
780 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
781
782 if (sweeping_in_progress()) {
783 // Instead of waiting we could also abort the sweeper threads here.
784 EnsureSweepingCompleted();
785 }
786
787 if (heap()->incremental_marking()->IsSweeping()) {
788 heap()->incremental_marking()->Stop();
789 }
790
791 // If concurrent unmapping tasks are still running, we should wait for
792 // them here.
793 heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
794
795 // Clear marking bits if incremental marking is aborted.
796 if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
797 heap()->incremental_marking()->Stop();
798 heap()->incremental_marking()->AbortBlackAllocation();
799 ClearMarkbits();
800 AbortWeakCollections();
801 AbortWeakCells();
802 AbortTransitionArrays();
803 AbortCompaction();
804 if (heap_->UsingEmbedderHeapTracer()) {
805 heap_->embedder_heap_tracer()->AbortTracing();
806 }
807 marking_deque()->Clear();
808 was_marked_incrementally_ = false;
809 }
810
811 if (!was_marked_incrementally_) {
812 if (heap_->UsingEmbedderHeapTracer()) {
813 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
814 heap_->embedder_heap_tracer()->TracePrologue();
815 }
816 }
817
818 if (heap_->UsingEmbedderHeapTracer()) {
819 heap_->embedder_heap_tracer()->EnterFinalPause();
820 }
821
822 // Don't start compaction if we are in the middle of an incremental
823 // marking cycle. We did not collect any slots.
824 if (!FLAG_never_compact && !was_marked_incrementally_) {
825 StartCompaction();
826 }
827
828 PagedSpaces spaces(heap());
829 for (PagedSpace* space = spaces.next(); space != NULL;
830 space = spaces.next()) {
831 space->PrepareForMarkCompact();
832 }
833 heap()->account_external_memory_concurrently_freed();
834
835 #ifdef VERIFY_HEAP
836 if (!was_marked_incrementally_ && FLAG_verify_heap) {
837 VerifyMarkbitsAreClean();
838 }
839 #endif
840 }
841
842
843 void MarkCompactCollector::Finish() {
844 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
845
846 if (!heap()->delay_sweeper_tasks_for_testing_) {
847 sweeper().StartSweeperTasks();
848 }
849
850 // The hashing of weak_object_to_code_table is no longer valid.
851 heap()->weak_object_to_code_table()->Rehash(
852 heap()->isolate()->factory()->undefined_value());
853
854 // Clear the marking state of live large objects.
855 heap_->lo_space()->ClearMarkingStateOfLiveObjects();
856
857 #ifdef DEBUG
858 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
859 state_ = IDLE;
860 #endif
861 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
862
863 // The stub caches are not traversed during GC; clear them to force
864 // their lazy re-initialization. This must be done after the
865 // GC, because it relies on the new address of certain old space
866 // objects (empty string, illegal builtin).
867 isolate()->load_stub_cache()->Clear();
868 isolate()->store_stub_cache()->Clear();
869
870 if (have_code_to_deoptimize_) {
871 // Some code objects were marked for deoptimization during the GC.
872 Deoptimizer::DeoptimizeMarkedCode(isolate());
873 have_code_to_deoptimize_ = false;
874 }
875
876 heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
877
878 if (marking_parity_ == EVEN_MARKING_PARITY) {
879 marking_parity_ = ODD_MARKING_PARITY;
880 } else {
881 DCHECK(marking_parity_ == ODD_MARKING_PARITY);
882 marking_parity_ = EVEN_MARKING_PARITY;
883 }
884 }
885
886
887 // -------------------------------------------------------------------------
888 // Phase 1: tracing and marking live objects.
889 // before: all objects are in normal state.
890 // after: a live object's mark bits are set to black ('11').
891
892 // Marking all live objects in the heap as part of mark-sweep or mark-compact
893 // collection. Before marking, all objects are in their normal state. After
894 // marking, live objects' map pointers are marked indicating that the object
895 // has been found reachable.
896 //
897 // The marking algorithm is a (mostly) depth-first (because of possible stack
898 // overflow) traversal of the graph of objects reachable from the roots. It
899 // uses an explicit stack of pointers rather than recursion. The young
900 // generation's inactive ('from') space is used as a marking stack. The
901 // objects in the marking stack are the ones that have been reached and marked
902 // but their children have not yet been visited.
903 //
904 // The marking stack can overflow during traversal. In that case, we set an
905 // overflow flag. When the overflow flag is set, we continue marking objects
906 // reachable from the objects on the marking stack, but no longer push them on
907 // the marking stack. Instead, we mark them as both marked and overflowed.
908 // When the stack is in the overflowed state, objects marked as overflowed
909 // have been reached and marked but their children have not been visited yet.
910 // After emptying the marking stack, we clear the overflow flag and traverse
911 // the heap looking for objects marked as overflowed, push them on the stack,
912 // and continue with marking. This process repeats until all reachable
913 // objects have been marked.
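//
// A rough sketch (not part of the original source, using hypothetical helper
// names) of the overflow-tolerant marking loop described above:
//
//   do {
//     while (!marking_deque.IsEmpty()) {
//       HeapObject* object = marking_deque.Pop();
//       VisitBody(object);  // Marks children and pushes them, or marks them
//                           // as overflowed when the deque is full.
//     }
//     // RefillMarkingDequeFromOverflowedObjects() scans the heap for objects
//     // marked as overflowed, pushes them again, clears the overflow flag and
//     // returns whether any such objects were found.
//   } while (RefillMarkingDequeFromOverflowedObjects());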
914
915 void CodeFlusher::ProcessJSFunctionCandidates() {
916 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
917 Object* undefined = isolate_->heap()->undefined_value();
918
919 JSFunction* candidate = jsfunction_candidates_head_;
920 JSFunction* next_candidate;
921 while (candidate != NULL) {
922 next_candidate = GetNextCandidate(candidate);
923 ClearNextCandidate(candidate, undefined);
924
925 SharedFunctionInfo* shared = candidate->shared();
926
927 Code* code = shared->code();
928 MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
929 if (Marking::IsWhite(code_mark)) {
930 if (FLAG_trace_code_flushing && shared->is_compiled()) {
931 PrintF("[code-flushing clears: ");
932 shared->ShortPrint();
933 PrintF(" - age: %d]\n", code->GetAge());
934 }
935 // Always flush the optimized code map if there is one.
936 if (!shared->OptimizedCodeMapIsCleared()) {
937 shared->ClearOptimizedCodeMap();
938 }
939 shared->set_code(lazy_compile);
940 candidate->set_code(lazy_compile);
941 } else {
942 DCHECK(Marking::IsBlack(code_mark));
943 candidate->set_code(code);
944 }
945
946 // We are in the middle of a GC cycle so the write barrier in the code
947 // setter did not record the slot update and we have to do that manually.
948 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
949 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
950 isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
951 candidate, slot, target);
952
953 Object** shared_code_slot =
954 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
955 isolate_->heap()->mark_compact_collector()->RecordSlot(
956 shared, shared_code_slot, *shared_code_slot);
957
958 candidate = next_candidate;
959 }
960
961 jsfunction_candidates_head_ = NULL;
962 }
963
964
965 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
966 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
967
968 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
969 SharedFunctionInfo* next_candidate;
970 while (candidate != NULL) {
971 next_candidate = GetNextCandidate(candidate);
972 ClearNextCandidate(candidate);
973
974 Code* code = candidate->code();
975 MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
976 if (Marking::IsWhite(code_mark)) {
977 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
978 PrintF("[code-flushing clears: ");
979 candidate->ShortPrint();
980 PrintF(" - age: %d]\n", code->GetAge());
981 }
982 // Always flush the optimized code map if there is one.
983 if (!candidate->OptimizedCodeMapIsCleared()) {
984 candidate->ClearOptimizedCodeMap();
985 }
986 candidate->set_code(lazy_compile);
987 }
988
989 Object** code_slot =
990 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
991 isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
992 *code_slot);
993
994 candidate = next_candidate;
995 }
996
997 shared_function_info_candidates_head_ = NULL;
998 }
999
1000
1001 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1002 // Make sure previous flushing decisions are revisited.
1003 isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
1004
1005 if (FLAG_trace_code_flushing) {
1006 PrintF("[code-flushing abandons function-info: ");
1007 shared_info->ShortPrint();
1008 PrintF("]\n");
1009 }
1010
1011 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1012 SharedFunctionInfo* next_candidate;
1013 if (candidate == shared_info) {
1014 next_candidate = GetNextCandidate(shared_info);
1015 shared_function_info_candidates_head_ = next_candidate;
1016 ClearNextCandidate(shared_info);
1017 } else {
1018 while (candidate != NULL) {
1019 next_candidate = GetNextCandidate(candidate);
1020
1021 if (next_candidate == shared_info) {
1022 next_candidate = GetNextCandidate(shared_info);
1023 SetNextCandidate(candidate, next_candidate);
1024 ClearNextCandidate(shared_info);
1025 break;
1026 }
1027
1028 candidate = next_candidate;
1029 }
1030 }
1031 }
1032
1033
1034 void CodeFlusher::EvictCandidate(JSFunction* function) {
1035 DCHECK(!function->next_function_link()->IsUndefined(isolate_));
1036 Object* undefined = isolate_->heap()->undefined_value();
1037
1038 // Make sure previous flushing decisions are revisited.
1039 isolate_->heap()->incremental_marking()->IterateBlackObject(function);
1040 isolate_->heap()->incremental_marking()->IterateBlackObject(
1041 function->shared());
1042
1043 if (FLAG_trace_code_flushing) {
1044 PrintF("[code-flushing abandons closure: ");
1045 function->shared()->ShortPrint();
1046 PrintF("]\n");
1047 }
1048
1049 JSFunction* candidate = jsfunction_candidates_head_;
1050 JSFunction* next_candidate;
1051 if (candidate == function) {
1052 next_candidate = GetNextCandidate(function);
1053 jsfunction_candidates_head_ = next_candidate;
1054 ClearNextCandidate(function, undefined);
1055 } else {
1056 while (candidate != NULL) {
1057 next_candidate = GetNextCandidate(candidate);
1058
1059 if (next_candidate == function) {
1060 next_candidate = GetNextCandidate(function);
1061 SetNextCandidate(candidate, next_candidate);
1062 ClearNextCandidate(function, undefined);
1063 break;
1064 }
1065
1066 candidate = next_candidate;
1067 }
1068 }
1069 }
1070
1071
1072 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1073 Heap* heap = isolate_->heap();
1074
1075 JSFunction** slot = &jsfunction_candidates_head_;
1076 JSFunction* candidate = jsfunction_candidates_head_;
1077 while (candidate != NULL) {
1078 if (heap->InFromSpace(candidate)) {
1079 v->VisitPointer(reinterpret_cast<Object**>(slot));
1080 }
1081 candidate = GetNextCandidate(*slot);
1082 slot = GetNextCandidateSlot(*slot);
1083 }
1084 }
1085
1086
1087 class MarkCompactMarkingVisitor
1088 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1089 public:
1090 static void Initialize();
1091
1092   INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1093 MarkObjectByPointer(heap->mark_compact_collector(), object, p);
1094 }
1095
1096   INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1097 Object** start, Object** end)) {
1098 // Mark all objects pointed to in [start, end).
1099 const int kMinRangeForMarkingRecursion = 64;
1100 if (end - start >= kMinRangeForMarkingRecursion) {
1101 if (VisitUnmarkedObjects(heap, object, start, end)) return;
1102 // We are close to a stack overflow, so just mark the objects.
1103 }
1104 MarkCompactCollector* collector = heap->mark_compact_collector();
1105 for (Object** p = start; p < end; p++) {
1106 MarkObjectByPointer(collector, object, p);
1107 }
1108 }
1109
1110 // Marks the object black and pushes it on the marking stack.
1111   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1112 MarkBit mark = ObjectMarking::MarkBitFrom(object);
1113 heap->mark_compact_collector()->MarkObject(object, mark);
1114 }
1115
1116 // Marks the object black without pushing it on the marking stack.
1117 // Returns true if object needed marking and false otherwise.
1118   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1119 MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
1120 if (Marking::IsWhite(mark_bit)) {
1121 heap->mark_compact_collector()->SetMark(object, mark_bit);
1122 return true;
1123 }
1124 return false;
1125 }
1126
1127 // Mark object pointed to by p.
1128   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1129 HeapObject* object, Object** p)) {
1130 if (!(*p)->IsHeapObject()) return;
1131 HeapObject* target_object = HeapObject::cast(*p);
1132 collector->RecordSlot(object, p, target_object);
1133 MarkBit mark = ObjectMarking::MarkBitFrom(target_object);
1134 collector->MarkObject(target_object, mark);
1135 }
1136
1137
1138 // Visit an unmarked object.
1139   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1140 HeapObject* obj)) {
1141 #ifdef DEBUG
1142 DCHECK(collector->heap()->Contains(obj));
1143 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1144 #endif
1145 Map* map = obj->map();
1146 Heap* heap = obj->GetHeap();
1147 MarkBit mark = ObjectMarking::MarkBitFrom(obj);
1148 heap->mark_compact_collector()->SetMark(obj, mark);
1149 // Mark the map pointer and the body.
1150 MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
1151 heap->mark_compact_collector()->MarkObject(map, map_mark);
1152 IterateBody(map, obj);
1153 }
1154
1155 // Visit all unmarked objects pointed to by [start, end).
1156 // Returns false if the operation fails (lack of stack space).
1157   INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1158 Object** start, Object** end)) {
1159 // Return false if we are close to the stack limit.
1160 StackLimitCheck check(heap->isolate());
1161 if (check.HasOverflowed()) return false;
1162
1163 MarkCompactCollector* collector = heap->mark_compact_collector();
1164 // Visit the unmarked objects.
1165 for (Object** p = start; p < end; p++) {
1166 Object* o = *p;
1167 if (!o->IsHeapObject()) continue;
1168 collector->RecordSlot(object, p, o);
1169 HeapObject* obj = HeapObject::cast(o);
1170 MarkBit mark = ObjectMarking::MarkBitFrom(obj);
1171 if (Marking::IsBlackOrGrey(mark)) continue;
1172 VisitUnmarkedObject(collector, obj);
1173 }
1174 return true;
1175 }
1176
1177 private:
1178 // Code flushing support.
1179
1180 static const int kRegExpCodeThreshold = 5;
1181
1182   static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1183 bool is_one_byte) {
1184 // Make sure that the fixed array is in fact initialized on the RegExp.
1185 // We could potentially trigger a GC when initializing the RegExp.
1186 if (HeapObject::cast(re->data())->map()->instance_type() !=
1187 FIXED_ARRAY_TYPE)
1188 return;
1189
1190 // Make sure this is a RegExp that actually contains code.
1191 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1192
1193 Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1194 if (!code->IsSmi() &&
1195 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1196 // Save a copy that can be reinstated if we need the code again.
1197 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1198
1199 // Saving a copy might create a pointer into a compaction candidate that was
1200 // not observed by the marker. This might happen if the JSRegExp data was
1201 // marked through the compilation cache before the marker reached the
1202 // JSRegExp object.
1203 FixedArray* data = FixedArray::cast(re->data());
1204 if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(data))) {
1205 Object** slot =
1206 data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1207 heap->mark_compact_collector()->RecordSlot(data, slot, code);
1208 }
1209
1210 // Set a number in the 0-255 range to guarantee no smi overflow.
1211 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1212 Smi::FromInt(heap->ms_count() & 0xff));
1213 } else if (code->IsSmi()) {
1214 int value = Smi::cast(code)->value();
1215 // The regexp has not been compiled yet or there was a compilation error.
1216 if (value == JSRegExp::kUninitializedValue ||
1217 value == JSRegExp::kCompilationErrorValue) {
1218 return;
1219 }
1220
1221 // Check if we should flush now.
1222 if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
1223 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1224 Smi::FromInt(JSRegExp::kUninitializedValue));
1225 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1226 Smi::FromInt(JSRegExp::kUninitializedValue));
1227 }
1228 }
1229 }
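
  // Worked example (illustrative) of the aging scheme implemented above: if
  // ms_count is 42 when the compiled code is replaced by an age Smi, the code
  // slot holds 42. The saved code is dropped (both slots reset to
  // kUninitializedValue) once a later GC observes
  // (ms_count - kRegExpCodeThreshold) & 0xff == 42, i.e. after the RegExp has
  // gone unused for kRegExpCodeThreshold (5) mark-sweep GCs.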
1230
1231
1232 // Works by setting the current sweep generation (as a Smi) in the code
1233 // object slot of the RegExp's data array and keeping a copy around that
1234 // can be reinstated if we reuse the RegExp before flushing.
1235 // If we did not use the code for kRegExpCodeThreshold mark-sweep GCs,
1236 // we flush the code.
1237   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1238 Heap* heap = map->GetHeap();
1239 MarkCompactCollector* collector = heap->mark_compact_collector();
1240 if (!collector->is_code_flushing_enabled()) {
1241 JSObjectVisitor::Visit(map, object);
1242 return;
1243 }
1244 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1245 // Flush code or set age on both one byte and two byte code.
1246 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1247 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1248 // Visit the fields of the RegExp, including the updated FixedArray.
1249 JSObjectVisitor::Visit(map, object);
1250 }
1251 };
1252
1253
1254 void MarkCompactMarkingVisitor::Initialize() {
1255 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1256
1257 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1258 }
1259
1260
1261 class CodeMarkingVisitor : public ThreadVisitor {
1262 public:
1263   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1264 : collector_(collector) {}
1265
1266   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1267 collector_->PrepareThreadForCodeFlushing(isolate, top);
1268 }
1269
1270 private:
1271 MarkCompactCollector* collector_;
1272 };
1273
1274
1275 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1276 public:
1277   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1278 : collector_(collector) {}
1279
1280   void VisitPointers(Object** start, Object** end) override {
1281 for (Object** p = start; p < end; p++) VisitPointer(p);
1282 }
1283
1284   void VisitPointer(Object** slot) override {
1285 Object* obj = *slot;
1286 if (obj->IsSharedFunctionInfo()) {
1287 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1288 MarkBit shared_mark = ObjectMarking::MarkBitFrom(shared);
1289 MarkBit code_mark = ObjectMarking::MarkBitFrom(shared->code());
1290 collector_->MarkObject(shared->code(), code_mark);
1291 collector_->MarkObject(shared, shared_mark);
1292 }
1293 }
1294
1295 private:
1296 MarkCompactCollector* collector_;
1297 };
1298
1299
1300 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1301 ThreadLocalTop* top) {
1302 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1303 // Note: for a frame that has a pending lazy deoptimization,
1304 // StackFrame::unchecked_code will return a non-optimized code object for
1305 // the outermost function, while StackFrame::LookupCode will return the
1306 // actual optimized code object.
1307 StackFrame* frame = it.frame();
1308 Code* code = frame->unchecked_code();
1309 MarkBit code_mark = ObjectMarking::MarkBitFrom(code);
1310 MarkObject(code, code_mark);
1311 if (frame->is_optimized()) {
1312 Code* optimized_code = frame->LookupCode();
1313 MarkBit optimized_code_mark = ObjectMarking::MarkBitFrom(optimized_code);
1314 MarkObject(optimized_code, optimized_code_mark);
1315 }
1316 }
1317 }
1318
1319
1320 void MarkCompactCollector::PrepareForCodeFlushing() {
1321 // If code flushing is disabled, there is no need to prepare for it.
1322 if (!is_code_flushing_enabled()) return;
1323
1324 // Make sure we are not referencing the code from the stack.
1325 DCHECK(this == heap()->mark_compact_collector());
1326 PrepareThreadForCodeFlushing(heap()->isolate(),
1327 heap()->isolate()->thread_local_top());
1328
1329 // Iterate the archived stacks in all threads to check if
1330 // the code is referenced.
1331 CodeMarkingVisitor code_marking_visitor(this);
1332 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1333 &code_marking_visitor);
1334
1335 SharedFunctionInfoMarkingVisitor visitor(this);
1336 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1337 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1338
1339 ProcessMarkingDeque();
1340 }
1341
1342
1343 // Visitor class for marking heap roots.
1344 class RootMarkingVisitor : public ObjectVisitor {
1345 public:
1346   explicit RootMarkingVisitor(Heap* heap)
1347 : collector_(heap->mark_compact_collector()) {}
1348
1349   void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
1350
1351   void VisitPointers(Object** start, Object** end) override {
1352 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1353 }
1354
1355 // Skip the weak next code link in a code object, which is visited in
1356 // ProcessTopOptimizedFrame.
1357   void VisitNextCodeLink(Object** p) override {}
1358
1359 private:
1360   void MarkObjectByPointer(Object** p) {
1361 if (!(*p)->IsHeapObject()) return;
1362
1363 HeapObject* object = HeapObject::cast(*p);
1364
1365 MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
1366 if (Marking::IsBlackOrGrey(mark_bit)) return;
1367
1368 Map* map = object->map();
1369 // Mark the object.
1370 collector_->SetMark(object, mark_bit);
1371
1372 // Mark the map pointer and body, and push them on the marking stack.
1373 MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
1374 collector_->MarkObject(map, map_mark);
1375 MarkCompactMarkingVisitor::IterateBody(map, object);
1376
1377 // Mark all the objects reachable from the map and body. May leave
1378 // overflowed objects in the heap.
1379 collector_->EmptyMarkingDeque();
1380 }
1381
1382 MarkCompactCollector* collector_;
1383 };
1384
1385
1386 // Helper class for pruning the string table.
1387 template <bool finalize_external_strings, bool record_slots>
1388 class StringTableCleaner : public ObjectVisitor {
1389 public:
1390   StringTableCleaner(Heap* heap, HeapObject* table)
1391 : heap_(heap), pointers_removed_(0), table_(table) {
1392 DCHECK(!record_slots || table != nullptr);
1393 }
1394
1395   void VisitPointers(Object** start, Object** end) override {
1396 // Visit all HeapObject pointers in [start, end).
1397 MarkCompactCollector* collector = heap_->mark_compact_collector();
1398 for (Object** p = start; p < end; p++) {
1399 Object* o = *p;
1400 if (o->IsHeapObject()) {
1401 if (Marking::IsWhite(ObjectMarking::MarkBitFrom(HeapObject::cast(o)))) {
1402 if (finalize_external_strings) {
1403 DCHECK(o->IsExternalString());
1404 heap_->FinalizeExternalString(String::cast(*p));
1405 } else {
1406 pointers_removed_++;
1407 }
1408 // Set the entry to the_hole_value (as deleted).
1409 *p = heap_->the_hole_value();
1410 } else if (record_slots) {
1411 // StringTable contains only old space strings.
1412 DCHECK(!heap_->InNewSpace(o));
1413 collector->RecordSlot(table_, p, o);
1414 }
1415 }
1416 }
1417 }
1418
1419   int PointersRemoved() {
1420 DCHECK(!finalize_external_strings);
1421 return pointers_removed_;
1422 }
1423
1424 private:
1425 Heap* heap_;
1426 int pointers_removed_;
1427 HeapObject* table_;
1428 };
1429
1430 typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
1431 typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
1432
1433 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1434 // are retained.
1435 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1436 public:
1437   virtual Object* RetainAs(Object* object) {
1438 MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(object));
1439 DCHECK(!Marking::IsGrey(mark_bit));
1440 if (Marking::IsBlack(mark_bit)) {
1441 return object;
1442 } else if (object->IsAllocationSite() &&
1443 !(AllocationSite::cast(object)->IsZombie())) {
1444 // "dead" AllocationSites need to live long enough for a traversal of new
1445 // space. These sites get a one-time reprieve.
1446 AllocationSite* site = AllocationSite::cast(object);
1447 site->MarkZombie();
1448 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1449 return object;
1450 } else {
1451 return NULL;
1452 }
1453 }
1454 };
1455
1456
1457 // Fill the marking stack with overflowed objects returned by the given
1458 // iterator. Stop when the marking stack is filled or the end of the space
1459 // is reached, whichever comes first.
1460 template <class T>
1461 void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
1462 // The caller should ensure that the marking stack is initially not full,
1463 // so that we don't waste effort pointlessly scanning for objects.
1464 DCHECK(!marking_deque()->IsFull());
1465
1466 Map* filler_map = heap()->one_pointer_filler_map();
1467 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1468 MarkBit markbit = ObjectMarking::MarkBitFrom(object);
1469 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1470 Marking::GreyToBlack(markbit);
1471 PushBlack(object);
1472 if (marking_deque()->IsFull()) return;
1473 }
1474 }
1475 }
1476
1477 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1478 DCHECK(!marking_deque()->IsFull());
1479 LiveObjectIterator<kGreyObjects> it(p);
1480 HeapObject* object = NULL;
1481 while ((object = it.Next()) != NULL) {
1482 MarkBit markbit = ObjectMarking::MarkBitFrom(object);
1483 DCHECK(Marking::IsGrey(markbit));
1484 Marking::GreyToBlack(markbit);
1485 PushBlack(object);
1486 if (marking_deque()->IsFull()) return;
1487 }
1488 }
1489
1490 class RecordMigratedSlotVisitor final : public ObjectVisitor {
1491 public:
1492   explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1493 : collector_(collector) {}
1494
1495   inline void VisitPointer(Object** p) final {
1496 RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
1497 }
1498
1499   inline void VisitPointers(Object** start, Object** end) final {
1500 while (start < end) {
1501 RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
1502 ++start;
1503 }
1504 }
1505
1506   inline void VisitCodeEntry(Address code_entry_slot) final {
1507 Address code_entry = Memory::Address_at(code_entry_slot);
1508 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1509 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
1510 nullptr, CODE_ENTRY_SLOT,
1511 code_entry_slot);
1512 }
1513 }
1514
1515   inline void VisitCodeTarget(RelocInfo* rinfo) final {
1516 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
1517 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1518 Code* host = rinfo->host();
1519 // The target is always in old space, so we don't have to record the slot
1520 // in the old-to-new remembered set.
1521 DCHECK(!collector_->heap()->InNewSpace(target));
1522 collector_->RecordRelocSlot(host, rinfo, target);
1523 }
1524
1525   inline void VisitDebugTarget(RelocInfo* rinfo) final {
1526 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1527 rinfo->IsPatchedDebugBreakSlotSequence());
1528 Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
1529 Code* host = rinfo->host();
1530 // The target is always in old space, so we don't have to record the slot in
1531 // the old-to-new remembered set.
1532 DCHECK(!collector_->heap()->InNewSpace(target));
1533 collector_->RecordRelocSlot(host, rinfo, target);
1534 }
1535
1536 inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
1537 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1538 HeapObject* object = HeapObject::cast(rinfo->target_object());
1539 Code* host = rinfo->host();
1540 collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
1541 collector_->RecordRelocSlot(host, rinfo, object);
1542 }
1543
1544 inline void VisitCell(RelocInfo* rinfo) final {
1545 DCHECK(rinfo->rmode() == RelocInfo::CELL);
1546 Cell* cell = rinfo->target_cell();
1547 Code* host = rinfo->host();
1548 // The cell is always in old space, so we don't have to record the slot in
1549 // the old-to-new remembered set.
1550 DCHECK(!collector_->heap()->InNewSpace(cell));
1551 collector_->RecordRelocSlot(host, rinfo, cell);
1552 }
1553
1554 // Entries that will never move.
1555 inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
1556 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
1557 Code* stub = rinfo->code_age_stub();
1558 USE(stub);
1559 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
1560 }
1561
1562 // Entries that are skipped for recording.
1563 inline void VisitExternalReference(RelocInfo* rinfo) final {}
1564 inline void VisitExternalReference(Address* p) final {}
1565 inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
1566 inline void VisitExternalOneByteString(
1567 v8::String::ExternalOneByteStringResource** resource) final {}
1568 inline void VisitExternalTwoByteString(
1569 v8::String::ExternalStringResource** resource) final {}
1570 inline void VisitInternalReference(RelocInfo* rinfo) final {}
1571 inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
1572
1573 private:
1574 inline void RecordMigratedSlot(Object* value, Address slot) {
1575 if (value->IsHeapObject()) {
1576 Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1577 if (p->InNewSpace()) {
1578 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1579 } else if (p->IsEvacuationCandidate()) {
1580 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1581 }
1582 }
1583 }
1584
1585 MarkCompactCollector* collector_;
1586 };
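// Informally: RecordMigratedSlotVisitor rebuilds remembered-set entries for an
// object that has just been copied. Slots that now point into new space land
// in OLD_TO_NEW, slots pointing at evacuation candidates land in OLD_TO_OLD
// (so they can be rewritten once that page is evacuated), and all other slots
// need no entry. Typed slots in code objects go through RecordRelocSlot() /
// InsertTyped() rather than the plain slot set.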
1587
1588 class MarkCompactCollector::HeapObjectVisitor {
1589 public:
1590 virtual ~HeapObjectVisitor() {}
1591 virtual bool Visit(HeapObject* object) = 0;
1592 };
1593
1594 class MarkCompactCollector::EvacuateVisitorBase
1595 : public MarkCompactCollector::HeapObjectVisitor {
1596 protected:
1597 enum MigrationMode { kFast, kProfiled };
1598
1599 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
1600 : heap_(heap),
1601 compaction_spaces_(compaction_spaces),
1602 profiling_(
1603 heap->isolate()->is_profiling() ||
1604 heap->isolate()->logger()->is_logging_code_events() ||
1605 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
1606
1607 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1608 HeapObject** target_object) {
1609 #ifdef VERIFY_HEAP
1610 if (AbortCompactionForTesting(object)) return false;
1611 #endif // VERIFY_HEAP
1612 int size = object->Size();
1613 AllocationAlignment alignment = object->RequiredAlignment();
1614 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1615 if (allocation.To(target_object)) {
1616 MigrateObject(*target_object, object, size, target_space->identity());
1617 return true;
1618 }
1619 return false;
1620 }
1621
1622 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1623 AllocationSpace dest) {
1624 if (profiling_) {
1625 MigrateObject<kProfiled>(dst, src, size, dest);
1626 } else {
1627 MigrateObject<kFast>(dst, src, size, dest);
1628 }
1629 }
1630
1631 template <MigrationMode mode>
1632 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1633 AllocationSpace dest) {
1634 Address dst_addr = dst->address();
1635 Address src_addr = src->address();
1636 DCHECK(heap_->AllowedToBeMigrated(src, dest));
1637 DCHECK(dest != LO_SPACE);
1638 if (dest == OLD_SPACE) {
1639 DCHECK_OBJECT_SIZE(size);
1640 DCHECK(IsAligned(size, kPointerSize));
1641 heap_->CopyBlock(dst_addr, src_addr, size);
1642 if ((mode == kProfiled) && dst->IsBytecodeArray()) {
1643 PROFILE(heap_->isolate(),
1644 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1645 }
1646 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1647 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1648 } else if (dest == CODE_SPACE) {
1649 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
1650 if (mode == kProfiled) {
1651 PROFILE(heap_->isolate(),
1652 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1653 }
1654 heap_->CopyBlock(dst_addr, src_addr, size);
1655 Code::cast(dst)->Relocate(dst_addr - src_addr);
1656 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1657 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1658 } else {
1659 DCHECK_OBJECT_SIZE(size);
1660 DCHECK(dest == NEW_SPACE);
1661 heap_->CopyBlock(dst_addr, src_addr, size);
1662 }
1663 if (mode == kProfiled) {
1664 heap_->OnMoveEvent(dst, src, size);
1665 }
1666 Memory::Address_at(src_addr) = dst_addr;
1667 }
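// Note: MigrateObject() leaves a forwarding pointer behind by overwriting the
// first word of the old copy with the new address
// (Memory::Address_at(src_addr) = dst_addr). Pointer updating later reads this
// word as a MapWord and follows ToForwardingAddress() to repair stale slots.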
1668
1669 #ifdef VERIFY_HEAP
1670 bool AbortCompactionForTesting(HeapObject* object) {
1671 if (FLAG_stress_compaction) {
1672 const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1673 Page::kPageAlignmentMask & ~kPointerAlignmentMask;
1674 if ((reinterpret_cast<uintptr_t>(object->address()) &
1675 Page::kPageAlignmentMask) == mask) {
1676 Page* page = Page::FromAddress(object->address());
1677 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1678 page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1679 } else {
1680 page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1681 return true;
1682 }
1683 }
1684 }
1685 return false;
1686 }
1687 #endif // VERIFY_HEAP
1688
1689 Heap* heap_;
1690 CompactionSpaceCollection* compaction_spaces_;
1691 bool profiling_;
1692 };
1693
1694 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1695 : public MarkCompactCollector::EvacuateVisitorBase {
1696 public:
1697 static const intptr_t kLabSize = 4 * KB;
1698 static const intptr_t kMaxLabObjectSize = 256;
1699
1700 explicit EvacuateNewSpaceVisitor(Heap* heap,
1701 CompactionSpaceCollection* compaction_spaces,
1702 base::HashMap* local_pretenuring_feedback)
1703 : EvacuateVisitorBase(heap, compaction_spaces),
1704 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1705 space_to_allocate_(NEW_SPACE),
1706 promoted_size_(0),
1707 semispace_copied_size_(0),
1708 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1709
1710 inline bool Visit(HeapObject* object) override {
1711 heap_->UpdateAllocationSite<Heap::kCached>(object,
1712 local_pretenuring_feedback_);
1713 int size = object->Size();
1714 HeapObject* target_object = nullptr;
1715 if (heap_->ShouldBePromoted(object->address(), size) &&
1716 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1717 &target_object)) {
1718 promoted_size_ += size;
1719 return true;
1720 }
1721 HeapObject* target = nullptr;
1722 AllocationSpace space = AllocateTargetObject(object, &target);
1723 MigrateObject(HeapObject::cast(target), object, size, space);
1724 semispace_copied_size_ += size;
1725 return true;
1726 }
1727
1728 intptr_t promoted_size() { return promoted_size_; }
1729 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1730
1731 private:
1732 enum NewSpaceAllocationMode {
1733 kNonstickyBailoutOldSpace,
1734 kStickyBailoutOldSpace,
1735 };
1736
1737 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1738 HeapObject** target_object) {
1739 const int size = old_object->Size();
1740 AllocationAlignment alignment = old_object->RequiredAlignment();
1741 AllocationResult allocation;
1742 AllocationSpace space_allocated_in = space_to_allocate_;
1743 if (space_to_allocate_ == NEW_SPACE) {
1744 if (size > kMaxLabObjectSize) {
1745 allocation =
1746 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1747 } else {
1748 allocation = AllocateInLab(size, alignment);
1749 }
1750 }
1751 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1752 allocation = AllocateInOldSpace(size, alignment);
1753 space_allocated_in = OLD_SPACE;
1754 }
1755 bool ok = allocation.To(target_object);
1756 DCHECK(ok);
1757 USE(ok);
1758 return space_allocated_in;
1759 }
1760
1761 inline bool NewLocalAllocationBuffer() {
1762 AllocationResult result =
1763 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1764 LocalAllocationBuffer saved_old_buffer = buffer_;
1765 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1766 if (buffer_.IsValid()) {
1767 buffer_.TryMerge(&saved_old_buffer);
1768 return true;
1769 }
1770 return false;
1771 }
1772
1773 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1774 AllocationAlignment alignment,
1775 NewSpaceAllocationMode mode) {
1776 AllocationResult allocation =
1777 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1778 if (allocation.IsRetry()) {
1779 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1780 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1781 } else {
1782 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1783 alignment);
1784 if (allocation.IsRetry()) {
1785 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1786 }
1787 }
1788 }
1789 return allocation;
1790 }
1791
1792 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1793 AllocationAlignment alignment) {
1794 AllocationResult allocation =
1795 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1796 alignment);
1797 if (allocation.IsRetry()) {
1798 v8::internal::Heap::FatalProcessOutOfMemory(
1799 "MarkCompactCollector: semi-space copy, fallback in old gen", true);
1800 }
1801 return allocation;
1802 }
1803
1804 inline AllocationResult AllocateInLab(int size_in_bytes,
1805 AllocationAlignment alignment) {
1806 AllocationResult allocation;
1807 if (!buffer_.IsValid()) {
1808 if (!NewLocalAllocationBuffer()) {
1809 space_to_allocate_ = OLD_SPACE;
1810 return AllocationResult::Retry(OLD_SPACE);
1811 }
1812 }
1813 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1814 if (allocation.IsRetry()) {
1815 if (!NewLocalAllocationBuffer()) {
1816 space_to_allocate_ = OLD_SPACE;
1817 return AllocationResult::Retry(OLD_SPACE);
1818 } else {
1819 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1820 if (allocation.IsRetry()) {
1821 space_to_allocate_ = OLD_SPACE;
1822 return AllocationResult::Retry(OLD_SPACE);
1823 }
1824 }
1825 }
1826 return allocation;
1827 }
1828
1829 LocalAllocationBuffer buffer_;
1830 AllocationSpace space_to_allocate_;
1831 intptr_t promoted_size_;
1832 intptr_t semispace_copied_size_;
1833 base::HashMap* local_pretenuring_feedback_;
1834 };
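// Rough summary of the allocation strategy above: objects no larger than
// kMaxLabObjectSize are bump-allocated from a private LAB of kLabSize bytes,
// while larger objects use the shared, synchronized new-space allocator
// directly. LAB refill failures flip space_to_allocate_ to OLD_SPACE (the
// "sticky" bailout), so once new space is exhausted subsequent small objects
// are promoted directly; oversized objects fall back to old space per object
// without making the bailout sticky.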
1835
1836 template <PageEvacuationMode mode>
1837 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1838 : public MarkCompactCollector::HeapObjectVisitor {
1839 public:
1840 explicit EvacuateNewSpacePageVisitor(
1841 Heap* heap, base::HashMap* local_pretenuring_feedback)
1842 : heap_(heap),
1843 moved_bytes_(0),
1844 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1845
1846 static void Move(Page* page) {
1847 switch (mode) {
1848 case NEW_TO_NEW:
1849 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1850 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1851 break;
1852 case NEW_TO_OLD: {
1853 page->Unlink();
1854 Page* new_page = Page::ConvertNewToOld(page);
1855 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1856 break;
1857 }
1858 }
1859 }
1860
1861 inline bool Visit(HeapObject* object) {
1862 heap_->UpdateAllocationSite<Heap::kCached>(object,
1863 local_pretenuring_feedback_);
1864 if (mode == NEW_TO_OLD) {
1865 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1866 object->IterateBodyFast(&visitor);
1867 }
1868 return true;
1869 }
1870
1871 intptr_t moved_bytes() { return moved_bytes_; }
1872 void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1873
1874 private:
1875 Heap* heap_;
1876 intptr_t moved_bytes_;
1877 base::HashMap* local_pretenuring_feedback_;
1878 };
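// Informally: this visitor backs "page promotion". Instead of copying objects
// one by one, a sufficiently live new-space page is moved wholesale, either
// within new space (NEW_TO_NEW) or into old space (NEW_TO_OLD via
// Page::ConvertNewToOld). Only the NEW_TO_OLD case revisits object bodies,
// because objects that are now in old space need remembered-set entries for
// their outgoing pointers.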
1879
1880 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1881 : public MarkCompactCollector::EvacuateVisitorBase {
1882 public:
1883 EvacuateOldSpaceVisitor(Heap* heap,
1884 CompactionSpaceCollection* compaction_spaces)
1885 : EvacuateVisitorBase(heap, compaction_spaces) {}
1886
1887 inline bool Visit(HeapObject* object) override {
1888 CompactionSpace* target_space = compaction_spaces_->Get(
1889 Page::FromAddress(object->address())->owner()->identity());
1890 HeapObject* target_object = nullptr;
1891 if (TryEvacuateObject(target_space, object, &target_object)) {
1892 DCHECK(object->map_word().IsForwardingAddress());
1893 return true;
1894 }
1895 return false;
1896 }
1897 };
1898
1899 class MarkCompactCollector::EvacuateRecordOnlyVisitor final
1900 : public MarkCompactCollector::HeapObjectVisitor {
1901 public:
1902 explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1903
1904 inline bool Visit(HeapObject* object) {
1905 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1906 object->IterateBody(&visitor);
1907 return true;
1908 }
1909
1910 private:
1911 Heap* heap_;
1912 };
1913
1914 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1915 for (Page* p : *space) {
1916 DiscoverGreyObjectsOnPage(p);
1917 if (marking_deque()->IsFull()) return;
1918 }
1919 }
1920
1921
1922 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1923 NewSpace* space = heap()->new_space();
1924 for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
1925 DiscoverGreyObjectsOnPage(page);
1926 if (marking_deque()->IsFull()) return;
1927 }
1928 }
1929
1930
1931 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1932 Object* o = *p;
1933 if (!o->IsHeapObject()) return false;
1934 HeapObject* heap_object = HeapObject::cast(o);
1935 MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
1936 return Marking::IsWhite(mark);
1937 }
1938
1939
1940 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1941 Object** p) {
1942 Object* o = *p;
1943 DCHECK(o->IsHeapObject());
1944 HeapObject* heap_object = HeapObject::cast(o);
1945 MarkBit mark = ObjectMarking::MarkBitFrom(heap_object);
1946 return Marking::IsWhite(mark);
1947 }
1948
1949
1950 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1951 StringTable* string_table = heap()->string_table();
1952 // Mark the string table itself.
1953 MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
1954 if (Marking::IsWhite(string_table_mark)) {
1955 // The string table may already have been marked by visiting the handles list.
1956 SetMark(string_table, string_table_mark);
1957 }
1958 // Explicitly mark the prefix.
1959 string_table->IteratePrefix(visitor);
1960 ProcessMarkingDeque();
1961 }
1962
1963
1964 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1965 MarkBit mark_bit = ObjectMarking::MarkBitFrom(site);
1966 SetMark(site, mark_bit);
1967 }
1968
1969
1970 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1971 // Mark the heap roots including global variables, stack variables,
1972 // etc., and all objects reachable from them.
1973 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1974
1975 // Handle the string table specially.
1976 MarkStringTable(visitor);
1977
1978 // There may be overflowed objects in the heap. Visit them now.
1979 while (marking_deque()->overflowed()) {
1980 RefillMarkingDeque();
1981 EmptyMarkingDeque();
1982 }
1983 }
1984
1985
1986 void MarkCompactCollector::MarkImplicitRefGroups(
1987 MarkObjectFunction mark_object) {
1988 List<ImplicitRefGroup*>* ref_groups =
1989 isolate()->global_handles()->implicit_ref_groups();
1990
1991 int last = 0;
1992 for (int i = 0; i < ref_groups->length(); i++) {
1993 ImplicitRefGroup* entry = ref_groups->at(i);
1994 DCHECK(entry != NULL);
1995
1996 if (!IsMarked(*entry->parent)) {
1997 (*ref_groups)[last++] = entry;
1998 continue;
1999 }
2000
2001 Object*** children = entry->children;
2002 // A parent object is marked, so mark all child heap objects.
2003 for (size_t j = 0; j < entry->length; ++j) {
2004 if ((*children[j])->IsHeapObject()) {
2005 mark_object(heap(), HeapObject::cast(*children[j]));
2006 }
2007 }
2008
2009 // Once the entire group has been marked, dispose it because it's
2010 // not needed anymore.
2011 delete entry;
2012 }
2013 ref_groups->Rewind(last);
2014 }
2015
2016
2017 // Mark all objects reachable from the objects on the marking stack.
2018 // Before: the marking stack contains zero or more heap object pointers.
2019 // After: the marking stack is empty, and all objects reachable from the
2020 // marking stack have been marked, or are overflowed in the heap.
2021 void MarkCompactCollector::EmptyMarkingDeque() {
2022 while (!marking_deque()->IsEmpty()) {
2023 HeapObject* object = marking_deque()->Pop();
2024
2025 DCHECK(!object->IsFiller());
2026 DCHECK(object->IsHeapObject());
2027 DCHECK(heap()->Contains(object));
2028 DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));
2029
2030 Map* map = object->map();
2031 MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
2032 MarkObject(map, map_mark);
2033
2034 MarkCompactMarkingVisitor::IterateBody(map, object);
2035 }
2036 }
2037
2038
2039 // Sweep the heap for overflowed objects, clear their overflow bits, and
2040 // push them on the marking stack. Stop early if the marking stack fills
2041 // before sweeping completes. If sweeping completes, there are no remaining
2042 // overflowed objects in the heap, so the overflow flag on the marking stack
2043 // is cleared.
2044 void MarkCompactCollector::RefillMarkingDeque() {
2045 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
2046 DCHECK(marking_deque()->overflowed());
2047
2048 DiscoverGreyObjectsInNewSpace();
2049 if (marking_deque()->IsFull()) return;
2050
2051 DiscoverGreyObjectsInSpace(heap()->old_space());
2052 if (marking_deque()->IsFull()) return;
2053
2054 DiscoverGreyObjectsInSpace(heap()->code_space());
2055 if (marking_deque()->IsFull()) return;
2056
2057 DiscoverGreyObjectsInSpace(heap()->map_space());
2058 if (marking_deque()->IsFull()) return;
2059
2060 LargeObjectIterator lo_it(heap()->lo_space());
2061 DiscoverGreyObjectsWithIterator(&lo_it);
2062 if (marking_deque()->IsFull()) return;
2063
2064 marking_deque()->ClearOverflowed();
2065 }
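// Roughly, the overflow protocol: if a push fails because the deque is full,
// the object stays marked but is downgraded to grey in the bitmap and the
// deque is flagged as overflowed. This function rediscovers those grey objects
// space by space, bailing out early whenever the deque fills up again; the
// overflow flag is only cleared after a complete pass over all spaces.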
2066
2067
2068 // Mark all objects reachable (transitively) from objects on the marking
2069 // stack. Before: the marking stack contains zero or more heap object
2070 // pointers. After: the marking stack is empty and there are no overflowed
2071 // objects in the heap.
2072 void MarkCompactCollector::ProcessMarkingDeque() {
2073 EmptyMarkingDeque();
2074 while (marking_deque()->overflowed()) {
2075 RefillMarkingDeque();
2076 EmptyMarkingDeque();
2077 }
2078 }
2079
2080 // Mark all objects reachable (transitively) from objects on the marking
2081 // stack including references only considered in the atomic marking pause.
2082 void MarkCompactCollector::ProcessEphemeralMarking(
2083 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
2084 DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
2085 bool work_to_do = true;
2086 while (work_to_do) {
2087 if (heap_->UsingEmbedderHeapTracer()) {
2088 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
2089 heap_->RegisterWrappersWithEmbedderHeapTracer();
2090 heap_->embedder_heap_tracer()->AdvanceTracing(
2091 0, EmbedderHeapTracer::AdvanceTracingActions(
2092 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
2093 }
2094 if (!only_process_harmony_weak_collections) {
2095 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
2096 isolate()->global_handles()->IterateObjectGroups(
2097 visitor, &IsUnmarkedHeapObjectWithHeap);
2098 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
2099 }
2100 ProcessWeakCollections();
2101 work_to_do = !marking_deque()->IsEmpty();
2102 ProcessMarkingDeque();
2103 }
2104 }
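// Informally, this is a fixed-point iteration: marking more objects can make
// more weak-collection keys (and wrapper objects traced by the embedder)
// reachable, which in turn makes their values reachable. The loop therefore
// re-runs wrapper tracing, object-group marking and ProcessWeakCollections()
// until a round finds the deque empty, i.e. discovers no new work.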
2105
2106 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2107 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2108 !it.done(); it.Advance()) {
2109 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2110 return;
2111 }
2112 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2113 Code* code = it.frame()->LookupCode();
2114 if (!code->CanDeoptAt(it.frame()->pc())) {
2115 Code::BodyDescriptor::IterateBody(code, visitor);
2116 }
2117 ProcessMarkingDeque();
2118 return;
2119 }
2120 }
2121 }
2122
2123 void MarkingDeque::SetUp() {
2124 backing_store_ = new base::VirtualMemory(kMaxSize);
2125 backing_store_committed_size_ = 0;
2126 if (backing_store_ == nullptr) {
2127 V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
2128 }
2129 }
2130
2131 void MarkingDeque::TearDown() {
2132 delete backing_store_;
2133 }
2134
2135 void MarkingDeque::StartUsing() {
2136 base::LockGuard<base::Mutex> guard(&mutex_);
2137 if (in_use_) {
2138 // This can happen in mark-compact GC if the incremental marker already
2139 // started using the marking deque.
2140 return;
2141 }
2142 in_use_ = true;
2143 EnsureCommitted();
2144 array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
2145 size_t size = FLAG_force_marking_deque_overflows
2146 ? 64 * kPointerSize
2147 : backing_store_committed_size_;
2148 DCHECK(
2149 base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
2150 mask_ = static_cast<int>((size / kPointerSize) - 1);
2151 top_ = bottom_ = 0;
2152 overflowed_ = false;
2153 }
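// Example: the deque is a ring buffer indexed with a bitmask, so its element
// count must be a power of two. With FLAG_force_marking_deque_overflows the
// size is 64 * kPointerSize, i.e. 64 slots and mask_ == 63, and an index
// update such as (top_ + 1) & mask_ wraps around without a division.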
2154
2155 void MarkingDeque::StopUsing() {
2156 base::LockGuard<base::Mutex> guard(&mutex_);
2157 DCHECK(IsEmpty());
2158 DCHECK(!overflowed_);
2159 top_ = bottom_ = mask_ = 0;
2160 in_use_ = false;
2161 if (FLAG_concurrent_sweeping) {
2162 StartUncommitTask();
2163 } else {
2164 Uncommit();
2165 }
2166 }
2167
2168 void MarkingDeque::Clear() {
2169 DCHECK(in_use_);
2170 top_ = bottom_ = 0;
2171 overflowed_ = false;
2172 }
2173
2174 void MarkingDeque::Uncommit() {
2175 DCHECK(!in_use_);
2176 bool success = backing_store_->Uncommit(backing_store_->address(),
2177 backing_store_committed_size_);
2178 backing_store_committed_size_ = 0;
2179 CHECK(success);
2180 }
2181
2182 void MarkingDeque::EnsureCommitted() {
2183 DCHECK(in_use_);
2184 if (backing_store_committed_size_ > 0) return;
2185
2186 for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
2187 if (backing_store_->Commit(backing_store_->address(), size, false)) {
2188 backing_store_committed_size_ = size;
2189 break;
2190 }
2191 }
2192 if (backing_store_committed_size_ == 0) {
2193 V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
2194 }
2195 }
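// Note: committing backs off geometrically; starting at kMaxSize the requested
// size is halved until a Commit() succeeds or kMinSize also fails, in which
// case the process is terminated via V8::FatalProcessOutOfMemory.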
2196
2197 void MarkingDeque::StartUncommitTask() {
2198 if (!uncommit_task_pending_) {
2199 uncommit_task_pending_ = true;
2200 UncommitTask* task = new UncommitTask(heap_->isolate(), this);
2201 V8::GetCurrentPlatform()->CallOnBackgroundThread(
2202 task, v8::Platform::kShortRunningTask);
2203 }
2204 }
2205
2206 class MarkCompactCollector::ObjectStatsVisitor
2207 : public MarkCompactCollector::HeapObjectVisitor {
2208 public:
2209 ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
2210 ObjectStats* dead_stats)
2211 : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) {
2212 DCHECK_NOT_NULL(live_stats);
2213 DCHECK_NOT_NULL(dead_stats);
2214 // Global objects are roots and thus recorded as live.
2215 live_collector_.CollectGlobalStatistics();
2216 }
2217
2218 bool Visit(HeapObject* obj) override {
2219 if (Marking::IsBlack(ObjectMarking::MarkBitFrom(obj))) {
2220 live_collector_.CollectStatistics(obj);
2221 } else {
2222 DCHECK(!Marking::IsGrey(ObjectMarking::MarkBitFrom(obj)));
2223 dead_collector_.CollectStatistics(obj);
2224 }
2225 return true;
2226 }
2227
2228 private:
2229 ObjectStatsCollector live_collector_;
2230 ObjectStatsCollector dead_collector_;
2231 };
2232
2233 void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
2234 SpaceIterator space_it(heap());
2235 HeapObject* obj = nullptr;
2236 while (space_it.has_next()) {
2237 std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
2238 ObjectIterator* obj_it = it.get();
2239 while ((obj = obj_it->Next()) != nullptr) {
2240 visitor->Visit(obj);
2241 }
2242 }
2243 }
2244
2245 void MarkCompactCollector::RecordObjectStats() {
2246 if (V8_UNLIKELY(FLAG_gc_stats)) {
2247 heap()->CreateObjectStats();
2248 ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
2249 heap()->dead_object_stats_);
2250 VisitAllObjects(&visitor);
2251 if (V8_UNLIKELY(FLAG_gc_stats &
2252 v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
2253 std::stringstream live, dead;
2254 heap()->live_object_stats_->Dump(live);
2255 heap()->dead_object_stats_->Dump(dead);
2256 TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
2257 "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
2258 "live", TRACE_STR_COPY(live.str().c_str()), "dead",
2259 TRACE_STR_COPY(dead.str().c_str()));
2260 }
2261 if (FLAG_trace_gc_object_stats) {
2262 heap()->live_object_stats_->PrintJSON("live");
2263 heap()->dead_object_stats_->PrintJSON("dead");
2264 }
2265 heap()->live_object_stats_->CheckpointObjectStats();
2266 heap()->dead_object_stats_->ClearObjectStats();
2267 }
2268 }
2269
2270 void MarkCompactCollector::MarkLiveObjects() {
2271 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
2272 // The recursive GC marker detects when it is nearing stack overflow,
2273 // and switches to a different marking system. JS interrupts interfere
2274 // with the C stack limit check.
2275 PostponeInterruptsScope postpone(isolate());
2276
2277 {
2278 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2279 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2280 if (was_marked_incrementally_) {
2281 incremental_marking->Finalize();
2282 } else {
2283 CHECK(incremental_marking->IsStopped());
2284 }
2285 }
2286
2287 #ifdef DEBUG
2288 DCHECK(state_ == PREPARE_GC);
2289 state_ = MARK_LIVE_OBJECTS;
2290 #endif
2291
2292 marking_deque()->StartUsing();
2293
2294 {
2295 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2296 PrepareForCodeFlushing();
2297 }
2298
2299 RootMarkingVisitor root_visitor(heap());
2300
2301 {
2302 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2303 MarkRoots(&root_visitor);
2304 ProcessTopOptimizedFrame(&root_visitor);
2305 }
2306
2307 {
2308 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2309
2310 // The objects reachable from the roots are marked, yet unreachable
2311 // objects are unmarked. Mark objects reachable due to host
2312 // application specific logic or through Harmony weak maps.
2313 {
2314 TRACE_GC(heap()->tracer(),
2315 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
2316 ProcessEphemeralMarking(&root_visitor, false);
2317 }
2318
2319 // The objects reachable from the roots, weak maps or object groups
2320 // are marked. Objects pointed to only by weak global handles cannot be
2321 // immediately reclaimed. Instead, we have to mark them as pending and mark
2322 // objects reachable from them.
2323 //
2324 // First we identify nonlive weak handles and mark them as pending
2325 // destruction.
2326 {
2327 TRACE_GC(heap()->tracer(),
2328 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2329 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2330 &IsUnmarkedHeapObject);
2331 ProcessMarkingDeque();
2332 }
2333 // Then we mark the objects.
2334
2335 {
2336 TRACE_GC(heap()->tracer(),
2337 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2338 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2339 ProcessMarkingDeque();
2340 }
2341
2342 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2343 // the weak roots we just marked as pending destruction.
2344 //
2345 // We only process harmony collections, as all object groups have been fully
2346 // processed and no weakly reachable node can discover new object groups.
2347 {
2348 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2349 ProcessEphemeralMarking(&root_visitor, true);
2350 if (heap_->UsingEmbedderHeapTracer()) {
2351 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
2352 heap()->embedder_heap_tracer()->TraceEpilogue();
2353 }
2354 }
2355 }
2356 }
2357
2358
2359 void MarkCompactCollector::ClearNonLiveReferences() {
2360 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2361
2362 {
2363 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2364
2365 // Prune the string table removing all strings only pointed to by the
2366 // string table. Cannot use string_table() here because the string
2367 // table is marked.
2368 StringTable* string_table = heap()->string_table();
2369 InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
2370 string_table->IterateElements(&internalized_visitor);
2371 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2372
2373 ExternalStringTableCleaner external_visitor(heap(), nullptr);
2374 heap()->external_string_table_.Iterate(&external_visitor);
2375 heap()->external_string_table_.CleanUp();
2376 }
2377
2378 {
2379 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2380 // Process the weak references.
2381 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2382 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2383 }
2384
2385 {
2386 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
2387
2388 // Remove object groups after marking phase.
2389 heap()->isolate()->global_handles()->RemoveObjectGroups();
2390 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2391 }
2392
2393 // Flush code from collected candidates.
2394 if (is_code_flushing_enabled()) {
2395 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
2396 code_flusher_->ProcessCandidates();
2397 }
2398
2399
2400 DependentCode* dependent_code_list;
2401 Object* non_live_map_list;
2402 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2403
2404 {
2405 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2406 ClearSimpleMapTransitions(non_live_map_list);
2407 ClearFullMapTransitions();
2408 }
2409
2410 MarkDependentCodeForDeoptimization(dependent_code_list);
2411
2412 ClearWeakCollections();
2413 }
2414
2415
2416 void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2417 DependentCode* list_head) {
2418 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2419 Isolate* isolate = this->isolate();
2420 DependentCode* current = list_head;
2421 while (current->length() > 0) {
2422 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2423 isolate, DependentCode::kWeakCodeGroup);
2424 current = current->next_link();
2425 }
2426
2427 {
2428 ArrayList* list = heap_->weak_new_space_object_to_code_list();
2429 int counter = 0;
2430 for (int i = 0; i < list->Length(); i += 2) {
2431 WeakCell* obj = WeakCell::cast(list->Get(i));
2432 WeakCell* dep = WeakCell::cast(list->Get(i + 1));
2433 if (obj->cleared() || dep->cleared()) {
2434 if (!dep->cleared()) {
2435 Code* code = Code::cast(dep->value());
2436 if (!code->marked_for_deoptimization()) {
2437 DependentCode::SetMarkedForDeoptimization(
2438 code, DependentCode::DependencyGroup::kWeakCodeGroup);
2439 code->InvalidateEmbeddedObjects();
2440 have_code_to_deoptimize_ = true;
2441 }
2442 }
2443 } else {
2444 // We record the slot manually because marking is finished at this
2445 // point and the write barrier would bail out.
2446 list->Set(counter, obj, SKIP_WRITE_BARRIER);
2447 RecordSlot(list, list->Slot(counter), obj);
2448 counter++;
2449 list->Set(counter, dep, SKIP_WRITE_BARRIER);
2450 RecordSlot(list, list->Slot(counter), dep);
2451 counter++;
2452 }
2453 }
2454 }
2455
2456 WeakHashTable* table = heap_->weak_object_to_code_table();
2457 uint32_t capacity = table->Capacity();
2458 for (uint32_t i = 0; i < capacity; i++) {
2459 uint32_t key_index = table->EntryToIndex(i);
2460 Object* key = table->get(key_index);
2461 if (!table->IsKey(isolate, key)) continue;
2462 uint32_t value_index = table->EntryToValueIndex(i);
2463 Object* value = table->get(value_index);
2464 DCHECK(key->IsWeakCell());
2465 if (WeakCell::cast(key)->cleared()) {
2466 have_code_to_deoptimize_ |=
2467 DependentCode::cast(value)->MarkCodeForDeoptimization(
2468 isolate, DependentCode::kWeakCodeGroup);
2469 table->set(key_index, heap_->the_hole_value());
2470 table->set(value_index, heap_->the_hole_value());
2471 table->ElementRemoved();
2472 }
2473 }
2474 }
2475
2476
2477 void MarkCompactCollector::ClearSimpleMapTransitions(
2478 Object* non_live_map_list) {
2479 Object* the_hole_value = heap()->the_hole_value();
2480 Object* weak_cell_obj = non_live_map_list;
2481 while (weak_cell_obj != Smi::kZero) {
2482 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2483 Map* map = Map::cast(weak_cell->value());
2484 DCHECK(Marking::IsWhite(ObjectMarking::MarkBitFrom(map)));
2485 Object* potential_parent = map->constructor_or_backpointer();
2486 if (potential_parent->IsMap()) {
2487 Map* parent = Map::cast(potential_parent);
2488 if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent)) &&
2489 parent->raw_transitions() == weak_cell) {
2490 ClearSimpleMapTransition(parent, map);
2491 }
2492 }
2493 weak_cell->clear();
2494 weak_cell_obj = weak_cell->next();
2495 weak_cell->clear_next(the_hole_value);
2496 }
2497 }
2498
2499
2500 void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2501 Map* dead_transition) {
2502 // A previously existing simple transition (stored in a WeakCell) is going
2503 // to be cleared. Clear the useless cell pointer, and take ownership
2504 // of the descriptor array.
2505 map->set_raw_transitions(Smi::kZero);
2506 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2507 DescriptorArray* descriptors = map->instance_descriptors();
2508 if (descriptors == dead_transition->instance_descriptors() &&
2509 number_of_own_descriptors > 0) {
2510 TrimDescriptorArray(map, descriptors);
2511 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2512 map->set_owns_descriptors(true);
2513 }
2514 }
2515
2516
2517 void MarkCompactCollector::ClearFullMapTransitions() {
2518 HeapObject* undefined = heap()->undefined_value();
2519 Object* obj = heap()->encountered_transition_arrays();
2520 while (obj != Smi::kZero) {
2521 TransitionArray* array = TransitionArray::cast(obj);
2522 int num_transitions = array->number_of_entries();
2523 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2524 if (num_transitions > 0) {
2525 Map* map = array->GetTarget(0);
2526 Map* parent = Map::cast(map->constructor_or_backpointer());
2527 bool parent_is_alive =
2528 Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(parent));
2529 DescriptorArray* descriptors =
2530 parent_is_alive ? parent->instance_descriptors() : nullptr;
2531 bool descriptors_owner_died =
2532 CompactTransitionArray(parent, array, descriptors);
2533 if (descriptors_owner_died) {
2534 TrimDescriptorArray(parent, descriptors);
2535 }
2536 }
2537 obj = array->next_link();
2538 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2539 }
2540 heap()->set_encountered_transition_arrays(Smi::kZero);
2541 }
2542
2543
2544 bool MarkCompactCollector::CompactTransitionArray(
2545 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2546 int num_transitions = transitions->number_of_entries();
2547 bool descriptors_owner_died = false;
2548 int transition_index = 0;
2549 // Compact all live transitions to the left.
2550 for (int i = 0; i < num_transitions; ++i) {
2551 Map* target = transitions->GetTarget(i);
2552 DCHECK_EQ(target->constructor_or_backpointer(), map);
2553 if (Marking::IsWhite(ObjectMarking::MarkBitFrom(target))) {
2554 if (descriptors != nullptr &&
2555 target->instance_descriptors() == descriptors) {
2556 descriptors_owner_died = true;
2557 }
2558 } else {
2559 if (i != transition_index) {
2560 Name* key = transitions->GetKey(i);
2561 transitions->SetKey(transition_index, key);
2562 Object** key_slot = transitions->GetKeySlot(transition_index);
2563 RecordSlot(transitions, key_slot, key);
2564 // Target slots do not need to be recorded since maps are not compacted.
2565 transitions->SetTarget(transition_index, transitions->GetTarget(i));
2566 }
2567 transition_index++;
2568 }
2569 }
2570 // If there are no transitions to be cleared, return.
2571 if (transition_index == num_transitions) {
2572 DCHECK(!descriptors_owner_died);
2573 return false;
2574 }
2575 // Note that we never eliminate a transition array, though we might right-trim
2576 // such that number_of_transitions() == 0. If this assumption changes,
2577 // TransitionArray::Insert() will need to deal with the case that a transition
2578 // array disappeared during GC.
2579 int trim = TransitionArray::Capacity(transitions) - transition_index;
2580 if (trim > 0) {
2581 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2582 transitions, trim * TransitionArray::kTransitionSize);
2583 transitions->SetNumberOfTransitions(transition_index);
2584 }
2585 return descriptors_owner_died;
2586 }
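// Worked example (illustrative): with four transitions of which #1 and #3
// point to dead (white) maps, the live entries #0 and #2 are moved to indices
// 0 and 1, number_of_transitions() becomes 2, and the array is right-trimmed
// by (Capacity - 2) * kTransitionSize words. The array object itself is never
// eliminated, even when every transition in it died.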
2587
2588
2589 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2590 DescriptorArray* descriptors) {
2591 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2592 if (number_of_own_descriptors == 0) {
2593 DCHECK(descriptors == heap_->empty_descriptor_array());
2594 return;
2595 }
2596
2597 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2598 int to_trim = number_of_descriptors - number_of_own_descriptors;
2599 if (to_trim > 0) {
2600 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2601 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2602 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2603
2604 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2605 descriptors->Sort();
2606
2607 if (FLAG_unbox_double_fields) {
2608 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2609 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2610 number_of_own_descriptors);
2611 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2612 }
2613 }
2614 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2615 map->set_owns_descriptors(true);
2616 }
2617
2618
2619 void MarkCompactCollector::TrimEnumCache(Map* map,
2620 DescriptorArray* descriptors) {
2621 int live_enum = map->EnumLength();
2622 if (live_enum == kInvalidEnumCacheSentinel) {
2623 live_enum =
2624 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
2625 }
2626 if (live_enum == 0) return descriptors->ClearEnumCache();
2627
2628 FixedArray* enum_cache = descriptors->GetEnumCache();
2629
2630 int to_trim = enum_cache->length() - live_enum;
2631 if (to_trim <= 0) return;
2632 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2633 descriptors->GetEnumCache(), to_trim);
2634
2635 if (!descriptors->HasEnumIndicesCache()) return;
2636 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2637 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2638 to_trim);
2639 }
2640
2641
2642 void MarkCompactCollector::ProcessWeakCollections() {
2643 Object* weak_collection_obj = heap()->encountered_weak_collections();
2644 while (weak_collection_obj != Smi::kZero) {
2645 JSWeakCollection* weak_collection =
2646 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2647 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2648 if (weak_collection->table()->IsHashTable()) {
2649 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2650 for (int i = 0; i < table->Capacity(); i++) {
2651 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2652 Object** key_slot =
2653 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2654 RecordSlot(table, key_slot, *key_slot);
2655 Object** value_slot =
2656 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2657 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
2658 value_slot);
2659 }
2660 }
2661 }
2662 weak_collection_obj = weak_collection->next();
2663 }
2664 }
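// Note on the ephemeron semantics above: a value is marked only if its key is
// already marked, and the key slot is recorded so compaction can update it.
// Values whose keys are still unmarked are either picked up by a later
// ProcessEphemeralMarking() round (if the key becomes reachable) or dropped by
// ClearWeakCollections() afterwards.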
2665
2666
2667 void MarkCompactCollector::ClearWeakCollections() {
2668 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2669 Object* weak_collection_obj = heap()->encountered_weak_collections();
2670 while (weak_collection_obj != Smi::kZero) {
2671 JSWeakCollection* weak_collection =
2672 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2673 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2674 if (weak_collection->table()->IsHashTable()) {
2675 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2676 for (int i = 0; i < table->Capacity(); i++) {
2677 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2678 if (!MarkCompactCollector::IsMarked(key)) {
2679 table->RemoveEntry(i);
2680 }
2681 }
2682 }
2683 weak_collection_obj = weak_collection->next();
2684 weak_collection->set_next(heap()->undefined_value());
2685 }
2686 heap()->set_encountered_weak_collections(Smi::kZero);
2687 }
2688
2689
2690 void MarkCompactCollector::AbortWeakCollections() {
2691 Object* weak_collection_obj = heap()->encountered_weak_collections();
2692 while (weak_collection_obj != Smi::kZero) {
2693 JSWeakCollection* weak_collection =
2694 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2695 weak_collection_obj = weak_collection->next();
2696 weak_collection->set_next(heap()->undefined_value());
2697 }
2698 heap()->set_encountered_weak_collections(Smi::kZero);
2699 }
2700
2701
2702 void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2703 DependentCode** dependent_code_list) {
2704 Heap* heap = this->heap();
2705 TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2706 Object* weak_cell_obj = heap->encountered_weak_cells();
2707 Object* the_hole_value = heap->the_hole_value();
2708 DependentCode* dependent_code_head =
2709 DependentCode::cast(heap->empty_fixed_array());
2710 Object* non_live_map_head = Smi::kZero;
2711 while (weak_cell_obj != Smi::kZero) {
2712 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2713 Object* next_weak_cell = weak_cell->next();
2714 bool clear_value = true;
2715 bool clear_next = true;
2716 // We do not insert cleared weak cells into the list, so the value
2717 // cannot be a Smi here.
2718 HeapObject* value = HeapObject::cast(weak_cell->value());
2719 if (!MarkCompactCollector::IsMarked(value)) {
2720 // Cells for new-space objects embedded in optimized code are wrapped in
2721 // WeakCell and put into Heap::weak_object_to_code_table.
2722 // Such cells do not have any strong references but we want to keep them
2723 // alive as long as the cell value is alive.
2724 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2725 if (value->IsCell()) {
2726 Object* cell_value = Cell::cast(value)->value();
2727 if (cell_value->IsHeapObject() &&
2728 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2729 // Resurrect the cell.
2730 MarkBit mark = ObjectMarking::MarkBitFrom(value);
2731 SetMark(value, mark);
2732 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2733 RecordSlot(value, slot, *slot);
2734 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2735 RecordSlot(weak_cell, slot, *slot);
2736 clear_value = false;
2737 }
2738 }
2739 if (value->IsMap()) {
2740 // The map is non-live.
2741 Map* map = Map::cast(value);
2742 // Add dependent code to the dependent_code_list.
2743 DependentCode* candidate = map->dependent_code();
2744 // We rely on the fact that the weak code group comes first.
2745 STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2746 if (candidate->length() > 0 &&
2747 candidate->group() == DependentCode::kWeakCodeGroup) {
2748 candidate->set_next_link(dependent_code_head);
2749 dependent_code_head = candidate;
2750 }
2751 // Add the weak cell to the non_live_map list.
2752 weak_cell->set_next(non_live_map_head);
2753 non_live_map_head = weak_cell;
2754 clear_value = false;
2755 clear_next = false;
2756 }
2757 } else {
2758 // The value of the weak cell is alive.
2759 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2760 RecordSlot(weak_cell, slot, *slot);
2761 clear_value = false;
2762 }
2763 if (clear_value) {
2764 weak_cell->clear();
2765 }
2766 if (clear_next) {
2767 weak_cell->clear_next(the_hole_value);
2768 }
2769 weak_cell_obj = next_weak_cell;
2770 }
2771 heap->set_encountered_weak_cells(Smi::kZero);
2772 *non_live_map_list = non_live_map_head;
2773 *dependent_code_list = dependent_code_head;
2774 }
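// Rough summary of the WeakCell outcomes above: a cell whose value survived is
// kept and has its value slot recorded; a cell wrapping a dead Map is threaded
// onto the non-live-map list (and the map's weak dependent code onto
// dependent_code_list) for later transition clearing; a cell wrapping a Cell
// whose own value is still live resurrects that Cell; everything else is
// cleared.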
2775
2776
2777 void MarkCompactCollector::AbortWeakCells() {
2778 Object* the_hole_value = heap()->the_hole_value();
2779 Object* weak_cell_obj = heap()->encountered_weak_cells();
2780 while (weak_cell_obj != Smi::kZero) {
2781 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2782 weak_cell_obj = weak_cell->next();
2783 weak_cell->clear_next(the_hole_value);
2784 }
2785 heap()->set_encountered_weak_cells(Smi::kZero);
2786 }
2787
2788
2789 void MarkCompactCollector::AbortTransitionArrays() {
2790 HeapObject* undefined = heap()->undefined_value();
2791 Object* obj = heap()->encountered_transition_arrays();
2792 while (obj != Smi::kZero) {
2793 TransitionArray* array = TransitionArray::cast(obj);
2794 obj = array->next_link();
2795 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2796 }
2797 heap()->set_encountered_transition_arrays(Smi::kZero);
2798 }
2799
2800 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2801 Object* target) {
2802 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2803 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
2804 if (target_page->IsEvacuationCandidate() &&
2805 (rinfo->host() == NULL ||
2806 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2807 RelocInfo::Mode rmode = rinfo->rmode();
2808 Address addr = rinfo->pc();
2809 SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2810 if (rinfo->IsInConstantPool()) {
2811 addr = rinfo->constant_pool_entry_address();
2812 if (RelocInfo::IsCodeTarget(rmode)) {
2813 slot_type = CODE_ENTRY_SLOT;
2814 } else {
2815 DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2816 slot_type = OBJECT_SLOT;
2817 }
2818 }
2819 RememberedSet<OLD_TO_OLD>::InsertTyped(
2820 source_page, reinterpret_cast<Address>(host), slot_type, addr);
2821 }
2822 }
2823
2824 static inline SlotCallbackResult UpdateSlot(Object** slot) {
2825 Object* obj = reinterpret_cast<Object*>(
2826 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
2827
2828 if (obj->IsHeapObject()) {
2829 HeapObject* heap_obj = HeapObject::cast(obj);
2830 MapWord map_word = heap_obj->map_word();
2831 if (map_word.IsForwardingAddress()) {
2832 DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
2833 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2834 Page::FromAddress(heap_obj->address())
2835 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2836 HeapObject* target = map_word.ToForwardingAddress();
2837 base::NoBarrier_CompareAndSwap(
2838 reinterpret_cast<base::AtomicWord*>(slot),
2839 reinterpret_cast<base::AtomicWord>(obj),
2840 reinterpret_cast<base::AtomicWord>(target));
2841 DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
2842 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2843 }
2844 }
2845 return REMOVE_SLOT;
2846 }
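// Note: the compare-and-swap matters because slot updating may run on several
// threads during the pointer-updating phase; if another thread has already
// stored the forwarded value, the CAS simply has no effect and the slot is
// still correct. Returning REMOVE_SLOT lets remembered-set iteration drop the
// entry once it has been processed.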
2847
2848 // Visitor for updating pointers from live objects in old spaces to new space.
2849 // It does not expect to encounter pointers to dead objects.
2850 class PointersUpdatingVisitor : public ObjectVisitor {
2851 public:
2852 void VisitPointer(Object** p) override { UpdateSlot(p); }
2853
2854 void VisitPointers(Object** start, Object** end) override {
2855 for (Object** p = start; p < end; p++) UpdateSlot(p);
2856 }
2857
2858 void VisitCell(RelocInfo* rinfo) override {
2859 UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
2860 }
2861
2862 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
2863 UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
2864 }
2865
2866 void VisitCodeTarget(RelocInfo* rinfo) override {
2867 UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
2868 }
2869
2870 void VisitCodeEntry(Address entry_address) override {
2871 UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
2872 }
2873
2874 void VisitDebugTarget(RelocInfo* rinfo) override {
2875 UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
2876 }
2877 };
2878
2879 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2880 Object** p) {
2881 MapWord map_word = HeapObject::cast(*p)->map_word();
2882
2883 if (map_word.IsForwardingAddress()) {
2884 return String::cast(map_word.ToForwardingAddress());
2885 }
2886
2887 return String::cast(*p);
2888 }
2889
2890 void MarkCompactCollector::EvacuateNewSpacePrologue() {
2891 NewSpace* new_space = heap()->new_space();
2892 // Append the list of new space pages to be processed.
2893 for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
2894 newspace_evacuation_candidates_.Add(p);
2895 }
2896 new_space->Flip();
2897 new_space->ResetAllocationInfo();
2898 }
2899
2900 class MarkCompactCollector::Evacuator : public Malloced {
2901 public:
2902 enum EvacuationMode {
2903 kObjectsNewToOld,
2904 kPageNewToOld,
2905 kObjectsOldToOld,
2906 kPageNewToNew,
2907 };
2908
2909 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
2910 // Note: The order of checks is important in this function.
2911 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
2912 return kPageNewToOld;
2913 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
2914 return kPageNewToNew;
2915 if (chunk->InNewSpace()) return kObjectsNewToOld;
2916 DCHECK(chunk->IsEvacuationCandidate());
2917 return kObjectsOldToOld;
2918 }
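// Rationale (informal): the flag checks must come first because a NEW_TO_NEW
// promoted page is still in new space and would otherwise be classified as
// kObjectsNewToOld, while a NEW_TO_OLD page has already left new space and is
// not an evacuation candidate either.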
2919
2920 // NewSpacePages with more live bytes than this threshold qualify for fast
2921 // evacuation.
2922 static int PageEvacuationThreshold() {
2923 if (FLAG_page_promotion)
2924 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
2925 return Page::kAllocatableMemory + kPointerSize;
2926 }
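// Example: with page promotion enabled and a promotion threshold of, say, 70,
// a new-space page qualifies for wholesale promotion once its live bytes
// exceed 70% of Page::kAllocatableMemory. With the flag disabled the threshold
// is one word larger than the page itself, i.e. it can never be reached.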
2927
2928 explicit Evacuator(MarkCompactCollector* collector)
2929 : collector_(collector),
2930 compaction_spaces_(collector->heap()),
2931 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
2932 new_space_visitor_(collector->heap(), &compaction_spaces_,
2933 &local_pretenuring_feedback_),
2934 new_to_new_page_visitor_(collector->heap(),
2935 &local_pretenuring_feedback_),
2936 new_to_old_page_visitor_(collector->heap(),
2937 &local_pretenuring_feedback_),
2938
2939 old_space_visitor_(collector->heap(), &compaction_spaces_),
2940 duration_(0.0),
2941 bytes_compacted_(0) {}
2942
2943 inline bool EvacuatePage(Page* chunk);
2944
2945 // Merge back locally cached info sequentially. Note that this method needs
2946 // to be called from the main thread.
2947 inline void Finalize();
2948
2949 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
2950
2951 private:
2952 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
2953
2954 inline Heap* heap() { return collector_->heap(); }
2955
2956 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
2957 duration_ += duration;
2958 bytes_compacted_ += bytes_compacted;
2959 }
2960
2961 MarkCompactCollector* collector_;
2962
2963 // Locally cached collector data.
2964 CompactionSpaceCollection compaction_spaces_;
2965 base::HashMap local_pretenuring_feedback_;
2966
2967 // Visitors for the corresponding spaces.
2968 EvacuateNewSpaceVisitor new_space_visitor_;
2969 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
2970 new_to_new_page_visitor_;
2971 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
2972 new_to_old_page_visitor_;
2973 EvacuateOldSpaceVisitor old_space_visitor_;
2974
2975 // Book keeping info.
2976 double duration_;
2977 intptr_t bytes_compacted_;
2978 };
2979
2980 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
2981 bool success = false;
2982 DCHECK(page->SweepingDone());
2983 int saved_live_bytes = page->LiveBytes();
2984 double evacuation_time = 0.0;
2985 Heap* heap = page->heap();
2986 {
2987 AlwaysAllocateScope always_allocate(heap->isolate());
2988 TimedScope timed_scope(&evacuation_time);
2989 switch (ComputeEvacuationMode(page)) {
2990 case kObjectsNewToOld:
2991 success = collector_->VisitLiveObjects(page, &new_space_visitor_,
2992 kClearMarkbits);
2993 DCHECK(success);
2994 ArrayBufferTracker::ProcessBuffers(
2995 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
2996 break;
2997 case kPageNewToOld:
2998 success = collector_->VisitLiveObjects(page, &new_to_old_page_visitor_,
2999 kKeepMarking);
3000 DCHECK(success);
3001 new_to_old_page_visitor_.account_moved_bytes(page->LiveBytes());
3002 // ArrayBufferTracker will be updated during sweeping.
3003 break;
3004 case kPageNewToNew:
3005 success = collector_->VisitLiveObjects(page, &new_to_new_page_visitor_,
3006 kKeepMarking);
3007 DCHECK(success);
3008 new_to_new_page_visitor_.account_moved_bytes(page->LiveBytes());
3009 // ArrayBufferTracker will be updated during sweeping.
3010 break;
3011 case kObjectsOldToOld:
3012 success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3013 kClearMarkbits);
3014 if (!success) {
3015 // Aborted compaction page. We have to record slots here, since we
3016 // might not have recorded them in the first place.
3017 // Note: We mark the page as aborted here to be able to record slots
3018 // for code objects in |RecordMigratedSlotVisitor|.
3019 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3020 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3021 success =
3022 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3023 ArrayBufferTracker::ProcessBuffers(
3024 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3025 DCHECK(success);
3026 // We need to return failure here to indicate that we want this page
3027 // added to the sweeper.
3028 success = false;
3029 } else {
3030 ArrayBufferTracker::ProcessBuffers(
3031 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3032 }
3033 break;
3034 }
3035 }
3036 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3037 if (FLAG_trace_evacuation) {
3038 PrintIsolate(heap->isolate(),
3039 "evacuation[%p]: page=%p new_space=%d "
3040 "page_evacuation=%d executable=%d contains_age_mark=%d "
3041 "live_bytes=%d time=%f\n",
3042 static_cast<void*>(this), static_cast<void*>(page),
3043 page->InNewSpace(),
3044 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3045 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3046 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3047 page->Contains(heap->new_space()->age_mark()),
3048 saved_live_bytes, evacuation_time);
3049 }
3050 return success;
3051 }
3052
3053 void MarkCompactCollector::Evacuator::Finalize() {
3054 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3055 heap()->code_space()->MergeCompactionSpace(
3056 compaction_spaces_.Get(CODE_SPACE));
3057 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3058 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3059 new_to_old_page_visitor_.moved_bytes());
3060 heap()->IncrementSemiSpaceCopiedObjectSize(
3061 new_space_visitor_.semispace_copied_size() +
3062 new_to_new_page_visitor_.moved_bytes());
3063 heap()->IncrementYoungSurvivorsCounter(
3064 new_space_visitor_.promoted_size() +
3065 new_space_visitor_.semispace_copied_size() +
3066 new_to_old_page_visitor_.moved_bytes() +
3067 new_to_new_page_visitor_.moved_bytes());
3068 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3069 }
3070
3071 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3072 intptr_t live_bytes) {
3073 if (!FLAG_parallel_compaction) return 1;
3074 // Compute the number of needed tasks based on a target compaction time, the
3075 // profiled compaction speed and marked live memory.
3076 //
3077 // The number of parallel compaction tasks is limited by:
3078 // - #evacuation pages
3079 // - #cores
3080 const double kTargetCompactionTimeInMs = .5;
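// For example, at a profiled compaction speed of 1,000,000 bytes/ms and
// 4,000,000 live bytes this yields 1 + 4,000,000 / 1,000,000 / 0.5 = 9 tasks,
// before capping by the number of evacuation pages and available cores.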
3081
3082 double compaction_speed =
3083 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3084
3085 const int available_cores = Max(
3086 1, static_cast<int>(
3087 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
3088 int tasks;
3089 if (compaction_speed > 0) {
3090 tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
3091 kTargetCompactionTimeInMs);
3092 } else {
3093 tasks = pages;
3094 }
3095 const int tasks_capped_pages = Min(pages, tasks);
3096 return Min(available_cores, tasks_capped_pages);
3097 }
3098
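// Traits for the PageParallelJob that evacuates pages. Per-task data is the
// Evacuator used by that task; per-page data points to the counter of
// aborted pages.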
3099 class EvacuationJobTraits {
3100 public:
3101 typedef int* PerPageData; // Pointer to number of aborted pages.
3102 typedef MarkCompactCollector::Evacuator* PerTaskData;
3103
3104 static const bool NeedSequentialFinalization = true;
3105
3106 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3107 MemoryChunk* chunk, PerPageData) {
3108 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
3109 }
3110
3111 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3112 bool success, PerPageData data) {
3113 using Evacuator = MarkCompactCollector::Evacuator;
3114 Page* p = static_cast<Page*>(chunk);
3115 switch (Evacuator::ComputeEvacuationMode(p)) {
3116 case Evacuator::kPageNewToOld:
3117 break;
3118 case Evacuator::kPageNewToNew:
3119 DCHECK(success);
3120 break;
3121 case Evacuator::kObjectsNewToOld:
3122 DCHECK(success);
3123 break;
3124 case Evacuator::kObjectsOldToOld:
3125 if (success) {
3126 DCHECK(p->IsEvacuationCandidate());
3127 DCHECK(p->SweepingDone());
3128 p->Unlink();
3129 } else {
3130 // We have partially compacted the page, i.e., some objects may have
3131 // moved, others are still in place.
3132 p->ClearEvacuationCandidate();
3133 // Slots have already been recorded, so we just need to add the page
3134 // to the sweeper, which will happen after updating pointers.
3135 *data += 1;
3136 }
3137 break;
3138 default:
3139 UNREACHABLE();
3140 }
3141 }
3142 };
3143
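// Collects all evacuation candidates (old space and new space), flags new
// space pages above the page evacuation threshold for page-level promotion,
// and runs the evacuation job with a heuristically chosen number of tasks.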
3144 void MarkCompactCollector::EvacuatePagesInParallel() {
3145 PageParallelJob<EvacuationJobTraits> job(
3146 heap_, heap_->isolate()->cancelable_task_manager(),
3147 &page_parallel_job_semaphore_);
3148
3149 int abandoned_pages = 0;
3150 intptr_t live_bytes = 0;
3151 for (Page* page : evacuation_candidates_) {
3152 live_bytes += page->LiveBytes();
3153 job.AddPage(page, &abandoned_pages);
3154 }
3155
3156 const bool reduce_memory = heap()->ShouldReduceMemory();
3157 const Address age_mark = heap()->new_space()->age_mark();
3158 for (Page* page : newspace_evacuation_candidates_) {
3159 live_bytes += page->LiveBytes();
3160 if (!reduce_memory && !page->NeverEvacuate() &&
3161 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3162 !page->Contains(age_mark)) {
3163 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3164 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3165 } else {
3166 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3167 }
3168 }
3169
3170 job.AddPage(page, &abandoned_pages);
3171 }
3172 DCHECK_GE(job.NumberOfPages(), 1);
3173
3174 // Used for trace summary.
3175 double compaction_speed = 0;
3176 if (FLAG_trace_evacuation) {
3177 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3178 }
3179
3180 const int wanted_num_tasks =
3181 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
3182 Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
3183 for (int i = 0; i < wanted_num_tasks; i++) {
3184 evacuators[i] = new Evacuator(this);
3185 }
3186 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
3187 for (int i = 0; i < wanted_num_tasks; i++) {
3188 evacuators[i]->Finalize();
3189 delete evacuators[i];
3190 }
3191 delete[] evacuators;
3192
3193 if (FLAG_trace_evacuation) {
3194 PrintIsolate(isolate(),
3195 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
3196 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
3197 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
3198 isolate()->time_millis_since_init(),
3199 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
3200 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
3201 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
3202 live_bytes, compaction_speed);
3203 }
3204 }
3205
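// Retains weakly reachable objects across evacuation by following the
// forwarding pointers left in the map words of moved objects.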
3206 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3207 public:
3208 virtual Object* RetainAs(Object* object) {
3209 if (object->IsHeapObject()) {
3210 HeapObject* heap_object = HeapObject::cast(object);
3211 MapWord map_word = heap_object->map_word();
3212 if (map_word.IsForwardingAddress()) {
3213 return map_word.ToForwardingAddress();
3214 }
3215 }
3216 return object;
3217 }
3218 };
3219
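// Decides whether the sweeper has to clear regular or typed old-to-new slots
// that point into memory freed on the given page, based on the owning space.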
3220 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
3221 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
3222 AllocationSpace identity = p->owner()->identity();
3223 if (p->old_to_new_slots() &&
3224 (identity == OLD_SPACE || identity == MAP_SPACE)) {
3225 return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
3226 } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
3227 return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
3228 }
3229 return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
3230 }
3231
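// Sweeps a single page: iterates the black objects, turns the gaps between
// them into free-list entries or filler objects, clears old-to-new slots in
// the freed ranges, and rebuilds the skip list for code-space pages. Returns
// the largest guaranteed-allocatable freed block, or 0 if the free list is
// ignored.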
3232 int MarkCompactCollector::Sweeper::RawSweep(
3233 Page* p, FreeListRebuildingMode free_list_mode,
3234 FreeSpaceTreatmentMode free_space_mode) {
3235 Space* space = p->owner();
3236 DCHECK_NOT_NULL(space);
3237 DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
3238 space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
3239 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
3240
3241 // If there are old-to-new slots on that page, we have to filter out slots
3242 // that are in dead memory, which is freed by the sweeper.
3243 ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
3244
3245 // The free ranges map is used for filtering typed slots.
3246 std::map<uint32_t, uint32_t> free_ranges;
3247
3248 // Before we sweep objects on the page, we free dead array buffers, which
3249 // requires valid mark bits.
3250 ArrayBufferTracker::FreeDead(p);
3251
3252 Address free_start = p->area_start();
3253 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3254
3255 // If we use the skip list for code space pages, we have to lock the skip
3256 // list because it could be accessed concurrently by the runtime or the
3257 // deoptimizer.
3258 const bool rebuild_skip_list =
3259 space->identity() == CODE_SPACE && p->skip_list() != nullptr;
3260 SkipList* skip_list = p->skip_list();
3261 if (rebuild_skip_list) {
3262 skip_list->Clear();
3263 }
3264
3265 intptr_t freed_bytes = 0;
3266 intptr_t max_freed_bytes = 0;
3267 int curr_region = -1;
3268
3269 LiveObjectIterator<kBlackObjects> it(p);
3270 HeapObject* object = NULL;
3271
3272 while ((object = it.Next()) != NULL) {
3273 DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
3274 Address free_end = object->address();
3275 if (free_end != free_start) {
3276 CHECK_GT(free_end, free_start);
3277 size_t size = static_cast<size_t>(free_end - free_start);
3278 if (free_space_mode == ZAP_FREE_SPACE) {
3279 memset(free_start, 0xcc, size);
3280 }
3281 if (free_list_mode == REBUILD_FREE_LIST) {
3282 freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
3283 free_start, size);
3284 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3285 } else {
3286 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3287 ClearRecordedSlots::kNo);
3288 }
3289
3290 if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
3291 RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
3292 SlotSet::KEEP_EMPTY_BUCKETS);
3293 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3294 free_ranges.insert(std::pair<uint32_t, uint32_t>(
3295 static_cast<uint32_t>(free_start - p->address()),
3296 static_cast<uint32_t>(free_end - p->address())));
3297 }
3298 }
3299 Map* map = object->synchronized_map();
3300 int size = object->SizeFromMap(map);
3301 if (rebuild_skip_list) {
3302 int new_region_start = SkipList::RegionNumber(free_end);
3303 int new_region_end =
3304 SkipList::RegionNumber(free_end + size - kPointerSize);
3305 if (new_region_start != curr_region || new_region_end != curr_region) {
3306 skip_list->AddObject(free_end, size);
3307 curr_region = new_region_end;
3308 }
3309 }
3310 free_start = free_end + size;
3311 }
3312
3313 if (free_start != p->area_end()) {
3314 CHECK_GT(p->area_end(), free_start);
3315 size_t size = static_cast<size_t>(p->area_end() - free_start);
3316 if (free_space_mode == ZAP_FREE_SPACE) {
3317 memset(free_start, 0xcc, size);
3318 }
3319 if (free_list_mode == REBUILD_FREE_LIST) {
3320 freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
3321 free_start, size);
3322 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3323 } else {
3324 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3325 ClearRecordedSlots::kNo);
3326 }
3327
3328 if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
3329 RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
3330 SlotSet::KEEP_EMPTY_BUCKETS);
3331 } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3332 free_ranges.insert(std::pair<uint32_t, uint32_t>(
3333 static_cast<uint32_t>(free_start - p->address()),
3334 static_cast<uint32_t>(p->area_end() - p->address())));
3335 }
3336 }
3337
3338 // Clear invalid typed slots after collecting all free ranges.
3339 if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3340 p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
3341 }
3342
3343 // Clear the mark bits of that page and reset live bytes count.
3344 p->ClearLiveness();
3345
3346 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3347 if (free_list_mode == IGNORE_FREE_LIST) return 0;
3348 return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
3349 }
3350
3351 void MarkCompactCollector::InvalidateCode(Code* code) {
3352 Page* page = Page::FromAddress(code->address());
3353 Address start = code->instruction_start();
3354 Address end = code->address() + code->Size();
3355
3356 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
3357
3358 if (heap_->incremental_marking()->IsCompacting() &&
3359 !ShouldSkipEvacuationSlotRecording(code)) {
3360 DCHECK(compacting_);
3361
3362 // If the object is white, then no slots were recorded on it yet.
3363 MarkBit mark_bit = ObjectMarking::MarkBitFrom(code);
3364 if (Marking::IsWhite(mark_bit)) return;
3365
3366 // Ignore all slots that might have been recorded in the body of the
3367 // deoptimized code object. Assumption: no slots will be recorded for
3368 // this object after invalidating it.
3369 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
3370 }
3371 }
3372
3373
3374 // Return true if the given code is deoptimized or will be deoptimized.
3375 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3376 return code->is_optimized_code() && code->marked_for_deoptimization();
3377 }
3378
3379
3380 #ifdef VERIFY_HEAP
3381 static void VerifyAllBlackObjects(MemoryChunk* page) {
3382 LiveObjectIterator<kAllLiveObjects> it(page);
3383 HeapObject* object = NULL;
3384 while ((object = it.Next()) != NULL) {
3385 CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
3386 }
3387 }
3388 #endif // VERIFY_HEAP
3389
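// Visits all black objects on the page with the given visitor. If a visit
// fails under kClearMarkbits, the mark bits and old-to-new slots up to the
// failing object are cleared, live bytes are recomputed, and false is
// returned.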
3390 template <class Visitor>
3391 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
3392 IterationMode mode) {
3393 #ifdef VERIFY_HEAP
3394 VerifyAllBlackObjects(page);
3395 #endif // VERIFY_HEAP
3396
3397 LiveObjectIterator<kBlackObjects> it(page);
3398 HeapObject* object = nullptr;
3399 while ((object = it.Next()) != nullptr) {
3400 DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
3401 if (!visitor->Visit(object)) {
3402 if (mode == kClearMarkbits) {
3403 page->markbits()->ClearRange(
3404 page->AddressToMarkbitIndex(page->area_start()),
3405 page->AddressToMarkbitIndex(object->address()));
3406 if (page->old_to_new_slots() != nullptr) {
3407 page->old_to_new_slots()->RemoveRange(
3408 0, static_cast<int>(object->address() - page->address()),
3409 SlotSet::PREFREE_EMPTY_BUCKETS);
3410 }
3411 if (page->typed_old_to_new_slots() != nullptr) {
3412 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3413 object->address());
3414 }
3415 RecomputeLiveBytes(page);
3416 }
3417 return false;
3418 }
3419 }
3420 if (mode == kClearMarkbits) {
3421 page->ClearLiveness();
3422 }
3423 return true;
3424 }
3425
3426
3427 void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3428 LiveObjectIterator<kBlackObjects> it(page);
3429 int new_live_size = 0;
3430 HeapObject* object = nullptr;
3431 while ((object = it.Next()) != nullptr) {
3432 new_live_size += object->Size();
3433 }
3434 page->SetLiveBytes(new_live_size);
3435 }
3436
3437 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
3438 Page* page) {
3439 base::LockGuard<base::Mutex> guard(&mutex_);
3440 swept_list_[space->identity()].Add(page);
3441 }
3442
3443 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3444 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3445 Heap::RelocationLock relocation_lock(heap());
3446
3447 {
3448 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3449 EvacuationScope evacuation_scope(this);
3450
3451 EvacuateNewSpacePrologue();
3452 EvacuatePagesInParallel();
3453 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3454 }
3455
3456 UpdatePointersAfterEvacuation();
3457
3458 if (!heap()->new_space()->Rebalance()) {
3459 FatalProcessOutOfMemory("NewSpace::Rebalance");
3460 }
3461
3462 // Give pages that are queued to be freed back to the OS. Note that filtering
3463 // slots only handles old space (for unboxed doubles), and thus map space can
3464 // still contain stale pointers. We only free the chunks after pointer updates
3465 // to still have access to page headers.
3466 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3467
3468 {
3469 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3470
3471 for (Page* p : newspace_evacuation_candidates_) {
3472 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3473 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3474 sweeper().AddPage(p->owner()->identity(), p);
3475 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3476 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3477 p->ForAllFreeListCategories(
3478 [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3479 sweeper().AddPage(p->owner()->identity(), p);
3480 }
3481 }
3482 newspace_evacuation_candidates_.Rewind(0);
3483
3484 for (Page* p : evacuation_candidates_) {
3485 // Important: the skip list should be cleared only after roots have been
3486 // updated, because root iteration traverses the stack and might find code
3487 // objects from a non-updated pc pointing into an evacuation candidate.
3488 SkipList* list = p->skip_list();
3489 if (list != NULL) list->Clear();
3490 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3491 sweeper().AddPage(p->owner()->identity(), p);
3492 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3493 }
3494 }
3495
3496 // Deallocate evacuated candidate pages.
3497 ReleaseEvacuationCandidates();
3498 }
3499
3500 #ifdef VERIFY_HEAP
3501 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
3502 VerifyEvacuation(heap());
3503 }
3504 #endif
3505 }
3506
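// Traits for the PageParallelJob that updates slots recorded in the
// old-to-new or old-to-old remembered sets after evacuation.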
3507 template <PointerDirection direction>
3508 class PointerUpdateJobTraits {
3509 public:
3510 typedef int PerPageData; // Per page data is not used in this job.
3511 typedef int PerTaskData; // Per task data is not used in this job.
3512
3513 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
3514 PerPageData) {
3515 UpdateUntypedPointers(heap, chunk);
3516 UpdateTypedPointers(heap, chunk);
3517 return true;
3518 }
3519 static const bool NeedSequentialFinalization = false;
3520 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3521 }
3522
3523 private:
3524 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
3525 if (direction == OLD_TO_NEW) {
3526 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
3527 return CheckAndUpdateOldToNewSlot(heap, slot);
3528 });
3529 } else {
3530 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
3531 return UpdateSlot(reinterpret_cast<Object**>(slot));
3532 });
3533 }
3534 }
3535
3536 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
3537 if (direction == OLD_TO_OLD) {
3538 Isolate* isolate = heap->isolate();
3539 RememberedSet<OLD_TO_OLD>::IterateTyped(
3540 chunk, [isolate](SlotType type, Address host_addr, Address slot) {
3541 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
3542 UpdateSlot);
3543 });
3544 } else {
3545 Isolate* isolate = heap->isolate();
3546 RememberedSet<OLD_TO_NEW>::IterateTyped(
3547 chunk,
3548 [isolate, heap](SlotType type, Address host_addr, Address slot) {
3549 return UpdateTypedSlotHelper::UpdateTypedSlot(
3550 isolate, type, slot, [heap](Object** slot) {
3551 return CheckAndUpdateOldToNewSlot(
3552 heap, reinterpret_cast<Address>(slot));
3553 });
3554 });
3555 }
3556 }
3557
3558 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
3559 Address slot_address) {
3560 // There may be concurrent action on slots in dead objects. Concurrent
3561 // sweeper threads may overwrite the slot content with a free space object.
3562 // Moreover, the pointed-to object may also get concurrently overwritten
3563 // with a free space object. The sweeper always gets priority performing
3564 // these writes.
3565 base::NoBarrierAtomicValue<Object*>* slot =
3566 base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
3567 Object* slot_reference = slot->Value();
3568 if (heap->InFromSpace(slot_reference)) {
3569 HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
3570 DCHECK(heap_object->IsHeapObject());
3571 MapWord map_word = heap_object->map_word();
3572 // There could still be stale pointers in large object space, map space,
3573 // and old space for pages that have been promoted.
3574 if (map_word.IsForwardingAddress()) {
3575 // A sweeper thread may concurrently write a size value which looks like
3576 // a forwarding pointer. We have to ignore these values.
3577 if (map_word.ToRawValue() < Page::kPageSize) {
3578 return REMOVE_SLOT;
3579 }
3580 // Update the corresponding slot only if the slot content did not
3581 // change in the meantime. This may happen when a concurrent sweeper
3582 // thread stored a free space object at that memory location.
3583 slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
3584 }
3585 // If the object was in from space before and is in to space after
3586 // executing the callback, the object is still live.
3587 // Unfortunately, we do not know about the slot itself. It could be
3588 // inside a just-freed free space object.
3589 if (heap->InToSpace(slot->Value())) {
3590 return KEEP_SLOT;
3591 }
3592 } else if (heap->InToSpace(slot_reference)) {
3593 // Slots can point to "to" space if the page has been moved, or if the
3594 // slot has been recorded multiple times in the remembered set. Since
3595 // no forwarding information is present, we need to check the mark bits
3596 // to determine liveness.
3597 if (Marking::IsBlack(ObjectMarking::MarkBitFrom(
3598 reinterpret_cast<HeapObject*>(slot_reference))))
3599 return KEEP_SLOT;
3600 } else {
3601 DCHECK(!heap->InNewSpace(slot_reference));
3602 }
3603 return REMOVE_SLOT;
3604 }
3605 };
3606
3607 int NumberOfPointerUpdateTasks(int pages) {
3608 if (!FLAG_parallel_pointer_update) return 1;
3609 const int available_cores = Max(
3610 1, static_cast<int>(
3611 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
3612 const int kPagesPerTask = 4;
3613 return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
3614 }
3615
3616 template <PointerDirection direction>
3617 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3618 PageParallelJob<PointerUpdateJobTraits<direction> > job(
3619 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3620 RememberedSet<direction>::IterateMemoryChunks(
3621 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
3622 int num_pages = job.NumberOfPages();
3623 int num_tasks = NumberOfPointerUpdateTasks(num_pages);
3624 job.Run(num_tasks, [](int i) { return 0; });
3625 }
3626
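// Traits for the PageParallelJob that updates pointers inside to-space
// objects. Pages promoted new->new still contain garbage and are therefore
// iterated using mark bits rather than linearly.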
3627 class ToSpacePointerUpdateJobTraits {
3628 public:
3629 typedef std::pair<Address, Address> PerPageData;
3630 typedef PointersUpdatingVisitor* PerTaskData;
3631
3632 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3633 MemoryChunk* chunk, PerPageData limits) {
3634 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3635 // New->new promoted pages contain garbage so they require iteration
3636 // using markbits.
3637 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
3638 } else {
3639 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
3640 }
3641 return true;
3642 }
3643
3644 static const bool NeedSequentialFinalization = false;
3645 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3646 }
3647
3648 private:
3649 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
3650 MemoryChunk* chunk,
3651 PerPageData limits) {
3652 for (Address cur = limits.first; cur < limits.second;) {
3653 HeapObject* object = HeapObject::FromAddress(cur);
3654 Map* map = object->map();
3655 int size = object->SizeFromMap(map);
3656 object->IterateBody(map->instance_type(), size, visitor);
3657 cur += size;
3658 }
3659 }
3660
3661 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
3662 MemoryChunk* chunk,
3663 PerPageData limits) {
3664 LiveObjectIterator<kBlackObjects> it(chunk);
3665 HeapObject* object = NULL;
3666 while ((object = it.Next()) != NULL) {
3667 Map* map = object->map();
3668 int size = object->SizeFromMap(map);
3669 object->IterateBody(map->instance_type(), size, visitor);
3670 }
3671 }
3672 };
3673
3674 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3675 PageParallelJob<ToSpacePointerUpdateJobTraits> job(
3676 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3677 Address space_start = heap->new_space()->bottom();
3678 Address space_end = heap->new_space()->top();
3679 for (Page* page : NewSpacePageRange(space_start, space_end)) {
3680 Address start =
3681 page->Contains(space_start) ? space_start : page->area_start();
3682 Address end = page->Contains(space_end) ? space_end : page->area_end();
3683 job.AddPage(page, std::make_pair(start, end));
3684 }
3685 PointersUpdatingVisitor visitor;
3686 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
3687 job.Run(num_tasks, [&visitor](int i) { return &visitor; });
3688 }
3689
3690 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3691 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3692
3693 PointersUpdatingVisitor updating_visitor;
3694
3695 {
3696 TRACE_GC(heap()->tracer(),
3697 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
3698 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
3699 // Update roots.
3700 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3701 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
3702 }
3703
3704 {
3705 Heap* heap = this->heap();
3706 TRACE_GC(heap->tracer(),
3707 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
3708 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
3709 }
3710
3711 {
3712 TRACE_GC(heap()->tracer(),
3713 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3714 // Update pointers from external string table.
3715 heap_->UpdateReferencesInExternalStringTable(
3716 &UpdateReferenceInExternalStringTableEntry);
3717
3718 EvacuationWeakObjectRetainer evacuation_object_retainer;
3719 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3720 }
3721 }
3722
3723
3724 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3725 for (Page* p : evacuation_candidates_) {
3726 if (!p->IsEvacuationCandidate()) continue;
3727 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3728 p->ResetLiveBytes();
3729 CHECK(p->SweepingDone());
3730 space->ReleasePage(p);
3731 }
3732 evacuation_candidates_.Rewind(0);
3733 compacting_ = false;
3734 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3735 }
3736
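// Sweeps pages of the given space until either the requested amount of
// memory has been freed or the page limit is reached. Returns the largest
// freed block observed.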
3737 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
3738 int required_freed_bytes,
3739 int max_pages) {
3740 int max_freed = 0;
3741 int pages_freed = 0;
3742 Page* page = nullptr;
3743 while ((page = GetSweepingPageSafe(identity)) != nullptr) {
3744 int freed = ParallelSweepPage(page, identity);
3745 pages_freed += 1;
3746 DCHECK_GE(freed, 0);
3747 max_freed = Max(max_freed, freed);
3748 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
3749 return max_freed;
3750 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
3751 }
3752 return max_freed;
3753 }
3754
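// Sweeps a single page under its mutex so that it does not race with other
// sweeper tasks; pages that were already swept in the meantime are skipped.
// New space pages are swept without rebuilding a free list.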
3755 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
3756 AllocationSpace identity) {
3757 int max_freed = 0;
3758 {
3759 base::LockGuard<base::Mutex> guard(page->mutex());
3760 // If this page was already swept in the meantime, we can return here.
3761 if (page->SweepingDone()) return 0;
3762 DCHECK_EQ(Page::kSweepingPending,
3763 page->concurrent_sweeping_state().Value());
3764 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3765 const Sweeper::FreeSpaceTreatmentMode free_space_mode =
3766 Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
3767 if (identity == NEW_SPACE) {
3768 RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
3769 } else {
3770 max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
3771 }
3772 DCHECK(page->SweepingDone());
3773
3774 // After finishing sweeping a page, we clean up its remembered set.
3775 if (page->typed_old_to_new_slots()) {
3776 page->typed_old_to_new_slots()->FreeToBeFreedChunks();
3777 }
3778 if (page->old_to_new_slots()) {
3779 page->old_to_new_slots()->FreeToBeFreedBuckets();
3780 }
3781 }
3782
3783 {
3784 base::LockGuard<base::Mutex> guard(&mutex_);
3785 swept_list_[identity].Add(page);
3786 }
3787 return max_freed;
3788 }
3789
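// Registers a page for sweeping. Must not be called while concurrent sweeper
// tasks are running (see the DCHECK below).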
3790 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
3791 DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
3792 PrepareToBeSweptPage(space, page);
3793 sweeping_list_[space].push_back(page);
3794 }
3795
3796 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
3797 Page* page) {
3798 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3799 DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
3800 size_t to_sweep = page->area_size() - page->LiveBytes();
3801 if (space != NEW_SPACE)
3802 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
3803 }
3804
3805 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
3806 AllocationSpace space) {
3807 base::LockGuard<base::Mutex> guard(&mutex_);
3808 Page* page = nullptr;
3809 if (!sweeping_list_[space].empty()) {
3810 page = sweeping_list_[space].front();
3811 sweeping_list_[space].pop_front();
3812 }
3813 return page;
3814 }
3815
3816 void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
3817 Page* page) {
3818 base::LockGuard<base::Mutex> guard(&mutex_);
3819 sweeping_list_[space].push_back(page);
3820 }
3821
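// Prepares all pages of a paged space for sweeping: evacuation candidates
// are skipped, NEVER_ALLOCATE_ON_PAGE pages are swept eagerly to keep them
// iterable, at most one completely empty page is kept, and all remaining
// pages are handed to the sweeper.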
3822 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3823 space->ClearStats();
3824
3825 int will_be_swept = 0;
3826 bool unused_page_present = false;
3827
3828 // The loop needs to support deletion of pages whose live bytes are 0.
3829 for (auto it = space->begin(); it != space->end();) {
3830 Page* p = *(it++);
3831 DCHECK(p->SweepingDone());
3832
3833 if (p->IsEvacuationCandidate()) {
3834 // Will be processed in EvacuateNewSpaceAndCandidates.
3835 DCHECK(evacuation_candidates_.length() > 0);
3836 continue;
3837 }
3838
3839 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3840 // We need to sweep the page to get it into an iterable state again. Note
3841 // that this adds unusable memory to the free list, which is dropped from
3842 // the free list again later on. Since the flag is only used for testing,
3843 // this is fine.
3844 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3845 Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
3846 Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
3847 : Sweeper::IGNORE_FREE_SPACE);
3848 continue;
3849 }
3850
3851 // One unused page is kept; all further unused pages are released before sweeping them.
3852 if (p->LiveBytes() == 0) {
3853 if (unused_page_present) {
3854 if (FLAG_gc_verbose) {
3855 PrintIsolate(isolate(), "sweeping: released page: %p",
3856 static_cast<void*>(p));
3857 }
3858 ArrayBufferTracker::FreeAll(p);
3859 space->ReleasePage(p);
3860 continue;
3861 }
3862 unused_page_present = true;
3863 }
3864
3865 sweeper().AddPage(space->identity(), p);
3866 will_be_swept++;
3867 }
3868
3869 if (FLAG_gc_verbose) {
3870 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3871 AllocationSpaceName(space->identity()), will_be_swept);
3872 }
3873 }
3874
3875 void MarkCompactCollector::StartSweepSpaces() {
3876 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3877 #ifdef DEBUG
3878 state_ = SWEEP_SPACES;
3879 #endif
3880
3881 {
3882 {
3883 GCTracer::Scope sweep_scope(heap()->tracer(),
3884 GCTracer::Scope::MC_SWEEP_OLD);
3885 StartSweepSpace(heap()->old_space());
3886 }
3887 {
3888 GCTracer::Scope sweep_scope(heap()->tracer(),
3889 GCTracer::Scope::MC_SWEEP_CODE);
3890 StartSweepSpace(heap()->code_space());
3891 }
3892 {
3893 GCTracer::Scope sweep_scope(heap()->tracer(),
3894 GCTracer::Scope::MC_SWEEP_MAP);
3895 StartSweepSpace(heap()->map_space());
3896 }
3897 sweeper().StartSweeping();
3898 }
3899
3900 // Deallocate unmarked large objects.
3901 heap_->lo_space()->FreeUnmarkedObjects();
3902 }
3903
3904 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
3905
3906
3907 void MarkCompactCollector::Initialize() {
3908 MarkCompactMarkingVisitor::Initialize();
3909 IncrementalMarking::Initialize();
3910 }
3911
3912 void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
3913 Code* target) {
3914 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
3915 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
3916 if (target_page->IsEvacuationCandidate() &&
3917 !ShouldSkipEvacuationSlotRecording(host)) {
3918 // TODO(ulan): remove this check after investigating crbug.com/414964.
3919 CHECK(target->IsCode());
3920 RememberedSet<OLD_TO_OLD>::InsertTyped(
3921 source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
3922 }
3923 }
3924
3925
3926 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
3927 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
3928 if (is_compacting()) {
3929 Code* host =
3930 isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
3931 pc);
3932 MarkBit mark_bit = ObjectMarking::MarkBitFrom(host);
3933 if (Marking::IsBlack(mark_bit)) {
3934 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
3935 // The target is always in old space, so we don't have to record the slot
3936 // in the old-to-new remembered set.
3937 DCHECK(!heap()->InNewSpace(target));
3938 RecordRelocSlot(host, &rinfo, target);
3939 }
3940 }
3941 }
3942
3943 } // namespace internal
3944 } // namespace v8
3945