1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/heap.h"
6 
7 #include <unordered_map>
8 #include <unordered_set>
9 
10 #include "src/accessors.h"
11 #include "src/api-inl.h"
12 #include "src/assembler-inl.h"
13 #include "src/ast/context-slot-cache.h"
14 #include "src/base/bits.h"
15 #include "src/base/once.h"
16 #include "src/base/utils/random-number-generator.h"
17 #include "src/bootstrapper.h"
18 #include "src/code-stubs.h"
19 #include "src/compilation-cache.h"
20 #include "src/conversions.h"
21 #include "src/debug/debug.h"
22 #include "src/deoptimizer.h"
23 #include "src/feedback-vector.h"
24 #include "src/global-handles.h"
25 #include "src/heap/array-buffer-collector.h"
26 #include "src/heap/array-buffer-tracker-inl.h"
27 #include "src/heap/barrier.h"
28 #include "src/heap/code-stats.h"
29 #include "src/heap/concurrent-marking.h"
30 #include "src/heap/embedder-tracing.h"
31 #include "src/heap/gc-idle-time-handler.h"
32 #include "src/heap/gc-tracer.h"
33 #include "src/heap/heap-controller.h"
34 #include "src/heap/heap-write-barrier-inl.h"
35 #include "src/heap/incremental-marking.h"
36 #include "src/heap/item-parallel-job.h"
37 #include "src/heap/mark-compact-inl.h"
38 #include "src/heap/mark-compact.h"
39 #include "src/heap/memory-reducer.h"
40 #include "src/heap/object-stats.h"
41 #include "src/heap/objects-visiting-inl.h"
42 #include "src/heap/objects-visiting.h"
43 #include "src/heap/remembered-set.h"
44 #include "src/heap/scavenge-job.h"
45 #include "src/heap/scavenger-inl.h"
46 #include "src/heap/store-buffer.h"
47 #include "src/heap/stress-marking-observer.h"
48 #include "src/heap/stress-scavenge-observer.h"
49 #include "src/heap/sweeper.h"
50 #include "src/instruction-stream.h"
51 #include "src/interpreter/interpreter.h"
52 #include "src/objects/data-handler.h"
53 #include "src/objects/hash-table-inl.h"
54 #include "src/objects/maybe-object.h"
55 #include "src/objects/shared-function-info.h"
56 #include "src/regexp/jsregexp.h"
57 #include "src/runtime-profiler.h"
58 #include "src/snapshot/natives.h"
59 #include "src/snapshot/serializer-common.h"
60 #include "src/snapshot/snapshot.h"
61 #include "src/tracing/trace-event.h"
62 #include "src/unicode-decoder.h"
63 #include "src/unicode-inl.h"
64 #include "src/utils-inl.h"
65 #include "src/utils.h"
66 #include "src/v8.h"
67 #include "src/vm-state-inl.h"
68 
69 // Has to be the last include (doesn't have include guards):
70 #include "src/objects/object-macros.h"
71 
72 namespace v8 {
73 namespace internal {
74 
75 void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
76   DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset());
77   set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
78 }
79 
80 void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
81   DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
82   set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
83 }
84 
85 void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
86   DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
87   set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
88 }
89 
90 void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
91   DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset());
92   set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
93 }
94 
95 void Heap::SetSerializedObjects(FixedArray* objects) {
96   DCHECK(isolate()->serializer_enabled());
97   set_serialized_objects(objects);
98 }
99 
100 void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
101   DCHECK(isolate()->serializer_enabled());
102   set_serialized_global_proxy_sizes(sizes);
103 }
104 
105 bool Heap::GCCallbackTuple::operator==(
106     const Heap::GCCallbackTuple& other) const {
107   return other.callback == callback && other.data == data;
108 }
109 
110 Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=(
111     const Heap::GCCallbackTuple& other) {
112   callback = other.callback;
113   gc_type = other.gc_type;
114   data = other.data;
115   return *this;
116 }
117 
118 struct Heap::StrongRootsList {
119   Object** start;
120   Object** end;
121   StrongRootsList* next;
122 };
123 
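// Allocation observer that asks the heap to schedule an idle-time scavenge
// roughly every |step_size| bytes of new-space allocation.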
124 class IdleScavengeObserver : public AllocationObserver {
125  public:
126   IdleScavengeObserver(Heap& heap, intptr_t step_size)
127       : AllocationObserver(step_size), heap_(heap) {}
128 
129   void Step(int bytes_allocated, Address, size_t) override {
130     heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
131   }
132 
133  private:
134   Heap& heap_;
135 };
136 
137 Heap::Heap()
138     : external_memory_(0),
139       external_memory_limit_(kExternalAllocationSoftLimit),
140       external_memory_at_last_mark_compact_(0),
141       external_memory_concurrently_freed_(0),
142       isolate_(nullptr),
143       code_range_size_(0),
144       // semispace_size_ should be a power of 2 and old_generation_size_ should
145       // be a multiple of Page::kPageSize.
146       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
147       initial_semispace_size_(kMinSemiSpaceSizeInKB * KB),
148       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
149       initial_max_old_generation_size_(max_old_generation_size_),
150       initial_old_generation_size_(max_old_generation_size_ /
151                                    kInitalOldGenerationLimitFactor),
152       old_generation_size_configured_(false),
153       // Variables set based on semispace_size_ and old_generation_size_ in
154       // ConfigureHeap.
155       // Will be 4 * reserved_semispace_size_ to ensure that the young
156       // generation can be aligned to its size.
157       maximum_committed_(0),
158       survived_since_last_expansion_(0),
159       survived_last_scavenge_(0),
160       always_allocate_scope_count_(0),
161       memory_pressure_level_(MemoryPressureLevel::kNone),
162       contexts_disposed_(0),
163       number_of_disposed_maps_(0),
164       new_space_(nullptr),
165       old_space_(nullptr),
166       code_space_(nullptr),
167       map_space_(nullptr),
168       lo_space_(nullptr),
169       new_lo_space_(nullptr),
170       read_only_space_(nullptr),
171       write_protect_code_memory_(false),
172       code_space_memory_modification_scope_depth_(0),
173       gc_state_(NOT_IN_GC),
174       gc_post_processing_depth_(0),
175       allocations_count_(0),
176       raw_allocations_hash_(0),
177       stress_marking_observer_(nullptr),
178       stress_scavenge_observer_(nullptr),
179       allocation_step_in_progress_(false),
180       max_marking_limit_reached_(0.0),
181       ms_count_(0),
182       gc_count_(0),
183       consecutive_ineffective_mark_compacts_(0),
184       mmap_region_base_(0),
185       remembered_unmapped_pages_index_(0),
186       old_generation_allocation_limit_(initial_old_generation_size_),
187       inline_allocation_disabled_(false),
188       tracer_(nullptr),
189       promoted_objects_size_(0),
190       promotion_ratio_(0),
191       semi_space_copied_object_size_(0),
192       previous_semi_space_copied_object_size_(0),
193       semi_space_copied_rate_(0),
194       nodes_died_in_new_space_(0),
195       nodes_copied_in_new_space_(0),
196       nodes_promoted_(0),
197       maximum_size_scavenges_(0),
198       last_idle_notification_time_(0.0),
199       last_gc_time_(0.0),
200       mark_compact_collector_(nullptr),
201       minor_mark_compact_collector_(nullptr),
202       array_buffer_collector_(nullptr),
203       memory_allocator_(nullptr),
204       store_buffer_(nullptr),
205       incremental_marking_(nullptr),
206       concurrent_marking_(nullptr),
207       gc_idle_time_handler_(nullptr),
208       memory_reducer_(nullptr),
209       live_object_stats_(nullptr),
210       dead_object_stats_(nullptr),
211       scavenge_job_(nullptr),
212       parallel_scavenge_semaphore_(0),
213       idle_scavenge_observer_(nullptr),
214       new_space_allocation_counter_(0),
215       old_generation_allocation_counter_at_last_gc_(0),
216       old_generation_size_at_last_gc_(0),
217       global_pretenuring_feedback_(kInitialFeedbackCapacity),
218       is_marking_flag_(false),
219       ring_buffer_full_(false),
220       ring_buffer_end_(0),
221       configured_(false),
222       current_gc_flags_(Heap::kNoGCFlags),
223       current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
224       external_string_table_(this),
225       gc_callbacks_depth_(0),
226       deserialization_complete_(false),
227       strong_roots_list_(nullptr),
228       heap_iterator_depth_(0),
229       local_embedder_heap_tracer_(nullptr),
230       fast_promotion_mode_(false),
231       force_oom_(false),
232       delay_sweeper_tasks_for_testing_(false),
233       pending_layout_change_object_(nullptr),
234       unprotected_memory_chunks_registry_enabled_(false)
235 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
236       ,
237       allocation_timeout_(0)
238 #endif  // V8_ENABLE_ALLOCATION_TIMEOUT
239 {
240   // Ensure old_generation_size_ is a multiple of kPageSize.
241   DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1));
242 
243   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
244   set_native_contexts_list(nullptr);
245   set_allocation_sites_list(Smi::kZero);
246   // Put a dummy entry in the remembered pages so we can find the list in
247   // the minidump even if there are no real unmapped pages.
248   RememberUnmappedPage(kNullAddress, false);
249 }
250 
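// MaxReserved() below estimates the worst-case reservation: two semispaces
// plus the old generation, scaled up by the per-page overhead factor
// Page::kPageSize / Page::kAllocatableMemory.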
251 size_t Heap::MaxReserved() {
252   const double kFactor = Page::kPageSize * 1.0 / Page::kAllocatableMemory;
253   return static_cast<size_t>(
254       (2 * max_semi_space_size_ + max_old_generation_size_) * kFactor);
255 }
256 
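// Sizing heuristic: the computed old generation size is roughly a quarter of
// physical memory (in MB), scaled by kPointerMultiplier and clamped to
// [HeapController::kMinHeapSize, HeapController::kMaxHeapSize]. For example,
// 8 GB of physical memory yields 8192 / 4 * kPointerMultiplier before
// clamping.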
257 size_t Heap::ComputeMaxOldGenerationSize(uint64_t physical_memory) {
258   const size_t old_space_physical_memory_factor = 4;
259   size_t computed_size = static_cast<size_t>(physical_memory / i::MB /
260                                              old_space_physical_memory_factor *
261                                              kPointerMultiplier);
262   return Max(Min(computed_size, HeapController::kMaxHeapSize),
263              HeapController::kMinHeapSize);
264 }
265 
266 size_t Heap::Capacity() {
267   if (!HasBeenSetUp()) return 0;
268 
269   return new_space_->Capacity() + OldGenerationCapacity();
270 }
271 
272 size_t Heap::OldGenerationCapacity() {
273   if (!HasBeenSetUp()) return 0;
274   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
275   size_t total = 0;
276   for (PagedSpace* space = spaces.next(); space != nullptr;
277        space = spaces.next()) {
278     total += space->Capacity();
279   }
280   return total + lo_space_->SizeOfObjects();
281 }
282 
283 size_t Heap::CommittedOldGenerationMemory() {
284   if (!HasBeenSetUp()) return 0;
285 
286   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
287   size_t total = 0;
288   for (PagedSpace* space = spaces.next(); space != nullptr;
289        space = spaces.next()) {
290     total += space->CommittedMemory();
291   }
292   return total + lo_space_->Size();
293 }
294 
295 size_t Heap::CommittedMemoryOfHeapAndUnmapper() {
296   if (!HasBeenSetUp()) return 0;
297 
298   return CommittedMemory() +
299          memory_allocator()->unmapper()->CommittedBufferedMemory();
300 }
301 
302 size_t Heap::CommittedMemory() {
303   if (!HasBeenSetUp()) return 0;
304 
305   return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
306 }
307 
308 
309 size_t Heap::CommittedPhysicalMemory() {
310   if (!HasBeenSetUp()) return 0;
311 
312   size_t total = 0;
313   for (SpaceIterator it(this); it.has_next();) {
314     total += it.next()->CommittedPhysicalMemory();
315   }
316 
317   return total;
318 }
319 
320 size_t Heap::CommittedMemoryExecutable() {
321   if (!HasBeenSetUp()) return 0;
322 
323   return static_cast<size_t>(memory_allocator()->SizeExecutable());
324 }
325 
326 
327 void Heap::UpdateMaximumCommitted() {
328   if (!HasBeenSetUp()) return;
329 
330   const size_t current_committed_memory = CommittedMemory();
331   if (current_committed_memory > maximum_committed_) {
332     maximum_committed_ = current_committed_memory;
333   }
334 }
335 
336 size_t Heap::Available() {
337   if (!HasBeenSetUp()) return 0;
338 
339   size_t total = 0;
340 
341   for (SpaceIterator it(this); it.has_next();) {
342     total += it.next()->Available();
343   }
344   return total;
345 }
346 
347 bool Heap::CanExpandOldGeneration(size_t size) {
348   if (force_oom_) return false;
349   if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false;
350   // The OldGenerationCapacity does not account for compaction spaces used
351   // during evacuation. Ensure that expanding the old generation does not
352   // push the total allocated memory size over the maximum heap size.
353   return memory_allocator()->Size() + size <= MaxReserved();
354 }
355 
356 bool Heap::HasBeenSetUp() {
357   // We will always have a new space when the heap is set up.
358   return new_space_ != nullptr;
359 }
360 
361 
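// Collector selection: an explicit old-space request, the --gc-global or
// --stress-compaction flags, incremental marking that needs finalization
// after a large allocation-limit overshoot, or an old generation that could
// not absorb a worst-case scavenge all force a full MARK_COMPACTOR;
// otherwise the young generation collector is used.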
362 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
363                                               const char** reason) {
364   // Is global GC requested?
365   if (space != NEW_SPACE) {
366     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
367     *reason = "GC in old space requested";
368     return MARK_COMPACTOR;
369   }
370 
371   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
372     *reason = "GC in old space forced by flags";
373     return MARK_COMPACTOR;
374   }
375 
376   if (incremental_marking()->NeedsFinalization() &&
377       AllocationLimitOvershotByLargeMargin()) {
378     *reason = "Incremental marking needs finalization";
379     return MARK_COMPACTOR;
380   }
381 
382   // Over-estimate the new space size using capacity to allow some slack.
383   if (!CanExpandOldGeneration(new_space_->TotalCapacity())) {
384     isolate_->counters()
385         ->gc_compactor_caused_by_oldspace_exhaustion()
386         ->Increment();
387     *reason = "scavenge might not succeed";
388     return MARK_COMPACTOR;
389   }
390 
391   // Default
392   *reason = nullptr;
393   return YoungGenerationCollector();
394 }
395 
396 void Heap::SetGCState(HeapState state) {
397   gc_state_ = state;
398 }
399 
400 void Heap::PrintShortHeapStatistics() {
401   if (!FLAG_trace_gc_verbose) return;
402   PrintIsolate(isolate_,
403                "Memory allocator,       used: %6" PRIuS
404                " KB,"
405                " available: %6" PRIuS " KB\n",
406                memory_allocator()->Size() / KB,
407                memory_allocator()->Available() / KB);
408   PrintIsolate(isolate_,
409                "Read-only space,        used: %6" PRIuS
410                " KB"
411                ", available: %6" PRIuS
412                " KB"
413                ", committed: %6" PRIuS " KB\n",
414                read_only_space_->Size() / KB,
415                read_only_space_->Available() / KB,
416                read_only_space_->CommittedMemory() / KB);
417   PrintIsolate(isolate_,
418                "New space,              used: %6" PRIuS
419                " KB"
420                ", available: %6" PRIuS
421                " KB"
422                ", committed: %6" PRIuS " KB\n",
423                new_space_->Size() / KB, new_space_->Available() / KB,
424                new_space_->CommittedMemory() / KB);
425   PrintIsolate(isolate_,
426                "New large object space, used: %6" PRIuS
427                " KB"
428                ", available: %6" PRIuS
429                " KB"
430                ", committed: %6" PRIuS " KB\n",
431                new_lo_space_->SizeOfObjects() / KB,
432                new_lo_space_->Available() / KB,
433                new_lo_space_->CommittedMemory() / KB);
434   PrintIsolate(isolate_,
435                "Old space,              used: %6" PRIuS
436                " KB"
437                ", available: %6" PRIuS
438                " KB"
439                ", committed: %6" PRIuS " KB\n",
440                old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
441                old_space_->CommittedMemory() / KB);
442   PrintIsolate(isolate_,
443                "Code space,             used: %6" PRIuS
444                " KB"
445                ", available: %6" PRIuS
446                " KB"
447                ", committed: %6" PRIuS " KB\n",
448                code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
449                code_space_->CommittedMemory() / KB);
450   PrintIsolate(isolate_,
451                "Map space,              used: %6" PRIuS
452                " KB"
453                ", available: %6" PRIuS
454                " KB"
455                ", committed: %6" PRIuS " KB\n",
456                map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
457                map_space_->CommittedMemory() / KB);
458   PrintIsolate(isolate_,
459                "Large object space,     used: %6" PRIuS
460                " KB"
461                ", available: %6" PRIuS
462                " KB"
463                ", committed: %6" PRIuS " KB\n",
464                lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
465                lo_space_->CommittedMemory() / KB);
466   PrintIsolate(isolate_,
467                "All spaces,             used: %6" PRIuS
468                " KB"
469                ", available: %6" PRIuS
470                " KB"
471                ", committed: %6" PRIuS " KB\n",
472                this->SizeOfObjects() / KB, this->Available() / KB,
473                this->CommittedMemory() / KB);
474   PrintIsolate(isolate_,
475                "Unmapper buffering %d chunks of committed: %6" PRIuS " KB\n",
476                memory_allocator()->unmapper()->NumberOfChunks(),
477                CommittedMemoryOfHeapAndUnmapper() / KB);
478   PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
479                external_memory_ / KB);
480   PrintIsolate(isolate_, "External memory global %zu KB\n",
481                external_memory_callback_() / KB);
482   PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
483                total_gc_time_ms_);
484 }
485 
486 void Heap::ReportStatisticsAfterGC() {
487   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
488        ++i) {
489     int count = deferred_counters_[i];
490     deferred_counters_[i] = 0;
491     while (count > 0) {
492       count--;
493       isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
494     }
495   }
496 }
497 
498 void Heap::AddHeapObjectAllocationTracker(
499     HeapObjectAllocationTracker* tracker) {
500   if (allocation_trackers_.empty()) DisableInlineAllocation();
501   allocation_trackers_.push_back(tracker);
502 }
503 
504 void Heap::RemoveHeapObjectAllocationTracker(
505     HeapObjectAllocationTracker* tracker) {
506   allocation_trackers_.erase(std::remove(allocation_trackers_.begin(),
507                                          allocation_trackers_.end(), tracker),
508                              allocation_trackers_.end());
509   if (allocation_trackers_.empty()) EnableInlineAllocation();
510 }
511 
512 void Heap::AddRetainingPathTarget(Handle<HeapObject> object,
513                                   RetainingPathOption option) {
514   if (!FLAG_track_retaining_path) {
515     PrintF("Retaining path tracking requires --track-retaining-path\n");
516   } else {
517     Handle<WeakArrayList> array(retaining_path_targets(), isolate());
518     int index = array->length();
519     array = WeakArrayList::AddToEnd(isolate(), array,
520                                     MaybeObjectHandle::Weak(object));
521     set_retaining_path_targets(*array);
522     DCHECK_EQ(array->length(), index + 1);
523     retaining_path_target_option_[index] = option;
524   }
525 }
526 
527 bool Heap::IsRetainingPathTarget(HeapObject* object,
528                                  RetainingPathOption* option) {
529   WeakArrayList* targets = retaining_path_targets();
530   int length = targets->length();
531   MaybeObject* object_to_check = HeapObjectReference::Weak(object);
532   for (int i = 0; i < length; i++) {
533     MaybeObject* target = targets->Get(i);
534     DCHECK(target->IsWeakOrClearedHeapObject());
535     if (target == object_to_check) {
536       DCHECK(retaining_path_target_option_.count(i));
537       *option = retaining_path_target_option_[i];
538       return true;
539     }
540   }
541   return false;
542 }
543 
544 void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) {
545   PrintF("\n\n\n");
546   PrintF("#################################################\n");
547   PrintF("Retaining path for %p:\n", static_cast<void*>(target));
548   HeapObject* object = target;
549   std::vector<std::pair<HeapObject*, bool>> retaining_path;
550   Root root = Root::kUnknown;
551   bool ephemeron = false;
552   while (true) {
553     retaining_path.push_back(std::make_pair(object, ephemeron));
554     if (option == RetainingPathOption::kTrackEphemeronPath &&
555         ephemeron_retainer_.count(object)) {
556       object = ephemeron_retainer_[object];
557       ephemeron = true;
558     } else if (retainer_.count(object)) {
559       object = retainer_[object];
560       ephemeron = false;
561     } else {
562       if (retaining_root_.count(object)) {
563         root = retaining_root_[object];
564       }
565       break;
566     }
567   }
568   int distance = static_cast<int>(retaining_path.size());
569   for (auto node : retaining_path) {
570     HeapObject* object = node.first;
571     bool ephemeron = node.second;
572     PrintF("\n");
573     PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
574     PrintF("Distance from root %d%s: ", distance,
575            ephemeron ? " (ephemeron)" : "");
576     object->ShortPrint();
577     PrintF("\n");
578 #ifdef OBJECT_PRINT
579     object->Print();
580     PrintF("\n");
581 #endif
582     --distance;
583   }
584   PrintF("\n");
585   PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
586   PrintF("Root: %s\n", RootVisitor::RootName(root));
587   PrintF("-------------------------------------------------\n");
588 }
589 
590 void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) {
591   if (retainer_.count(object)) return;
592   retainer_[object] = retainer;
593   RetainingPathOption option = RetainingPathOption::kDefault;
594   if (IsRetainingPathTarget(object, &option)) {
595     // Check if the retaining path was already printed in
596     // AddEphemeronRetainer().
597     if (ephemeron_retainer_.count(object) == 0 ||
598         option == RetainingPathOption::kDefault) {
599       PrintRetainingPath(object, option);
600     }
601   }
602 }
603 
604 void Heap::AddEphemeronRetainer(HeapObject* retainer, HeapObject* object) {
605   if (ephemeron_retainer_.count(object)) return;
606   ephemeron_retainer_[object] = retainer;
607   RetainingPathOption option = RetainingPathOption::kDefault;
608   if (IsRetainingPathTarget(object, &option) &&
609       option == RetainingPathOption::kTrackEphemeronPath) {
610     // Check if the retaining path was already printed in AddRetainer().
611     if (retainer_.count(object) == 0) {
612       PrintRetainingPath(object, option);
613     }
614   }
615 }
616 
617 void Heap::AddRetainingRoot(Root root, HeapObject* object) {
618   if (retaining_root_.count(object)) return;
619   retaining_root_[object] = root;
620   RetainingPathOption option = RetainingPathOption::kDefault;
621   if (IsRetainingPathTarget(object, &option)) {
622     PrintRetainingPath(object, option);
623   }
624 }
625 
626 void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
627   deferred_counters_[feature]++;
628 }
629 
630 bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }
631 
632 void Heap::GarbageCollectionPrologue() {
633   TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE);
634   {
635     AllowHeapAllocation for_the_first_part_of_prologue;
636     gc_count_++;
637 
638 #ifdef VERIFY_HEAP
639     if (FLAG_verify_heap) {
640       Verify();
641     }
642 #endif
643   }
644 
645   // Reset GC statistics.
646   promoted_objects_size_ = 0;
647   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
648   semi_space_copied_object_size_ = 0;
649   nodes_died_in_new_space_ = 0;
650   nodes_copied_in_new_space_ = 0;
651   nodes_promoted_ = 0;
652 
653   UpdateMaximumCommitted();
654 
655 #ifdef DEBUG
656   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
657 
658   if (FLAG_gc_verbose) Print();
659 #endif  // DEBUG
660 
661   if (new_space_->IsAtMaximumCapacity()) {
662     maximum_size_scavenges_++;
663   } else {
664     maximum_size_scavenges_ = 0;
665   }
666   CheckNewSpaceExpansionCriteria();
667   UpdateNewSpaceAllocationCounter();
668   if (FLAG_track_retaining_path) {
669     retainer_.clear();
670     ephemeron_retainer_.clear();
671     retaining_root_.clear();
672   }
673 }
674 
675 size_t Heap::SizeOfObjects() {
676   size_t total = 0;
677 
678   for (SpaceIterator it(this); it.has_next();) {
679     total += it.next()->SizeOfObjects();
680   }
681   return total;
682 }
683 
684 
685 const char* Heap::GetSpaceName(int idx) {
686   switch (idx) {
687     case NEW_SPACE:
688       return "new_space";
689     case OLD_SPACE:
690       return "old_space";
691     case MAP_SPACE:
692       return "map_space";
693     case CODE_SPACE:
694       return "code_space";
695     case LO_SPACE:
696       return "large_object_space";
697     case NEW_LO_SPACE:
698       return "new_large_object_space";
699     case RO_SPACE:
700       return "read_only_space";
701     default:
702       UNREACHABLE();
703   }
704   return nullptr;
705 }
706 
707 void Heap::SetRootCodeStubs(SimpleNumberDictionary* value) {
708   roots_[kCodeStubsRootIndex] = value;
709 }
710 
711 void Heap::RepairFreeListsAfterDeserialization() {
712   PagedSpaces spaces(this);
713   for (PagedSpace* space = spaces.next(); space != nullptr;
714        space = spaces.next()) {
715     space->RepairFreeListsAfterDeserialization();
716   }
717 }
718 
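// Merges locally collected pretenuring feedback into the global table. A site
// may have been moved by a scavenge, so its forwarding address is followed
// before the memento-found count is bumped; when IncrementMementoFoundCount()
// requests it, the site is added to global_pretenuring_feedback_ with a dummy
// count of 0 (the real count lives on the site) for later processing in
// ProcessPretenuringFeedback().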
719 void Heap::MergeAllocationSitePretenuringFeedback(
720     const PretenuringFeedbackMap& local_pretenuring_feedback) {
721   AllocationSite* site = nullptr;
722   for (auto& site_and_count : local_pretenuring_feedback) {
723     site = site_and_count.first;
724     MapWord map_word = site_and_count.first->map_word();
725     if (map_word.IsForwardingAddress()) {
726       site = AllocationSite::cast(map_word.ToForwardingAddress());
727     }
728 
729     // We have not validated the allocation site yet, since we have not
730     // dereferenced the site while collecting information.
731     // This is an inlined check of AllocationMemento::IsValid.
732     if (!site->IsAllocationSite() || site->IsZombie()) continue;
733 
734     const int value = static_cast<int>(site_and_count.second);
735     DCHECK_LT(0, value);
736     if (site->IncrementMementoFoundCount(value)) {
737       // For sites in the global map the count is accessed through the site.
738       global_pretenuring_feedback_.insert(std::make_pair(site, 0));
739     }
740   }
741 }
742 
743 void Heap::AddAllocationObserversToAllSpaces(
744     AllocationObserver* observer, AllocationObserver* new_space_observer) {
745   DCHECK(observer && new_space_observer);
746 
747   for (SpaceIterator it(this); it.has_next();) {
748     Space* space = it.next();
749     if (space == new_space()) {
750       space->AddAllocationObserver(new_space_observer);
751     } else {
752       space->AddAllocationObserver(observer);
753     }
754   }
755 }
756 
757 void Heap::RemoveAllocationObserversFromAllSpaces(
758     AllocationObserver* observer, AllocationObserver* new_space_observer) {
759   DCHECK(observer && new_space_observer);
760 
761   for (SpaceIterator it(this); it.has_next();) {
762     Space* space = it.next();
763     if (space == new_space()) {
764       space->RemoveAllocationObserver(new_space_observer);
765     } else {
766       space->RemoveAllocationObserver(observer);
767     }
768   }
769 }
770 
771 class Heap::SkipStoreBufferScope {
772  public:
773   explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
774       : store_buffer_(store_buffer) {
775     store_buffer_->MoveAllEntriesToRememberedSet();
776     store_buffer_->SetMode(StoreBuffer::IN_GC);
777   }
778 
779   ~SkipStoreBufferScope() {
780     DCHECK(store_buffer_->Empty());
781     store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
782   }
783 
784  private:
785   StoreBuffer* store_buffer_;
786 };
787 
788 namespace {
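// Pretenuring heuristic used by DigestPretenuringFeedback() below:
//   ratio = memento_found_count / memento_create_count.
// Once at least kPretenureMinimumCreated mementos exist, a ratio of at least
// kPretenureRatio moves an undecided or maybe-tenure site to kMaybeTenure, or
// directly to kTenure (marking dependent code for deoptimization) when the
// scavenge ran with the semi-space at maximum capacity; a lower ratio flips
// the site to kDontTenure.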
789 inline bool MakePretenureDecision(
790     AllocationSite* site, AllocationSite::PretenureDecision current_decision,
791     double ratio, bool maximum_size_scavenge) {
792   // Here we just allow state transitions from undecided or maybe tenure
793   // to don't tenure, maybe tenure, or tenure.
794   if ((current_decision == AllocationSite::kUndecided ||
795        current_decision == AllocationSite::kMaybeTenure)) {
796     if (ratio >= AllocationSite::kPretenureRatio) {
797       // We only transition into the tenure state when the semi-space was
798       // at maximum capacity.
799       if (maximum_size_scavenge) {
800         site->set_deopt_dependent_code(true);
801         site->set_pretenure_decision(AllocationSite::kTenure);
802         // Currently we just need to deopt when we make a state transition to
803         // tenure.
804         return true;
805       }
806       site->set_pretenure_decision(AllocationSite::kMaybeTenure);
807     } else {
808       site->set_pretenure_decision(AllocationSite::kDontTenure);
809     }
810   }
811   return false;
812 }
813 
814 inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site,
815                                       bool maximum_size_scavenge) {
816   bool deopt = false;
817   int create_count = site->memento_create_count();
818   int found_count = site->memento_found_count();
819   bool minimum_mementos_created =
820       create_count >= AllocationSite::kPretenureMinimumCreated;
821   double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics
822                      ? static_cast<double>(found_count) / create_count
823                      : 0.0;
824   AllocationSite::PretenureDecision current_decision =
825       site->pretenure_decision();
826 
827   if (minimum_mementos_created) {
828     deopt = MakePretenureDecision(site, current_decision, ratio,
829                                   maximum_size_scavenge);
830   }
831 
832   if (FLAG_trace_pretenuring_statistics) {
833     PrintIsolate(isolate,
834                  "pretenuring: AllocationSite(%p): (created, found, ratio) "
835                  "(%d, %d, %f) %s => %s\n",
836                  static_cast<void*>(site), create_count, found_count, ratio,
837                  site->PretenureDecisionName(current_decision),
838                  site->PretenureDecisionName(site->pretenure_decision()));
839   }
840 
841   // Clear feedback calculation fields until the next gc.
842   site->set_memento_found_count(0);
843   site->set_memento_create_count(0);
844   return deopt;
845 }
846 }  // namespace
847 
848 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
849   global_pretenuring_feedback_.erase(site);
850 }
851 
852 bool Heap::DeoptMaybeTenuredAllocationSites() {
853   return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
854 }
855 
856 void Heap::ProcessPretenuringFeedback() {
857   bool trigger_deoptimization = false;
858   if (FLAG_allocation_site_pretenuring) {
859     int tenure_decisions = 0;
860     int dont_tenure_decisions = 0;
861     int allocation_mementos_found = 0;
862     int allocation_sites = 0;
863     int active_allocation_sites = 0;
864 
865     AllocationSite* site = nullptr;
866 
867     // Step 1: Digest feedback for recorded allocation sites.
868     bool maximum_size_scavenge = MaximumSizeScavenge();
869     for (auto& site_and_count : global_pretenuring_feedback_) {
870       allocation_sites++;
871       site = site_and_count.first;
872       // The count is always accessed through the site.
873       DCHECK_EQ(0, site_and_count.second);
874       int found_count = site->memento_found_count();
875       // An entry in the storage does not imply that the count is > 0 because
876       // allocation sites might have been reset due to too many objects dying
877       // in old space.
878       if (found_count > 0) {
879         DCHECK(site->IsAllocationSite());
880         active_allocation_sites++;
881         allocation_mementos_found += found_count;
882         if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) {
883           trigger_deoptimization = true;
884         }
885         if (site->GetPretenureMode() == TENURED) {
886           tenure_decisions++;
887         } else {
888           dont_tenure_decisions++;
889         }
890       }
891     }
892 
893     // Step 2: Deopt maybe tenured allocation sites if necessary.
894     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
895     if (deopt_maybe_tenured) {
896       ForeachAllocationSite(
897           allocation_sites_list(),
898           [&allocation_sites, &trigger_deoptimization](AllocationSite* site) {
899             DCHECK(site->IsAllocationSite());
900             allocation_sites++;
901             if (site->IsMaybeTenure()) {
902               site->set_deopt_dependent_code(true);
903               trigger_deoptimization = true;
904             }
905           });
906     }
907 
908     if (trigger_deoptimization) {
909       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
910     }
911 
912     if (FLAG_trace_pretenuring_statistics &&
913         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
914          dont_tenure_decisions > 0)) {
915       PrintIsolate(isolate(),
916                    "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
917                    "active_sites=%d "
918                    "mementos=%d tenured=%d not_tenured=%d\n",
919                    deopt_maybe_tenured ? 1 : 0, allocation_sites,
920                    active_allocation_sites, allocation_mementos_found,
921                    tenure_decisions, dont_tenure_decisions);
922     }
923 
924     global_pretenuring_feedback_.clear();
925     global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity);
926   }
927 }
928 
929 void Heap::InvalidateCodeEmbeddedObjects(Code* code) {
930   MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
931   CodePageMemoryModificationScope modification_scope(chunk);
932   code->InvalidateEmbeddedObjects(this);
933 }
934 
935 void Heap::InvalidateCodeDeoptimizationData(Code* code) {
936   MemoryChunk* chunk = MemoryChunk::FromAddress(code->address());
937   CodePageMemoryModificationScope modification_scope(chunk);
938   code->set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
939 }
940 
941 void Heap::DeoptMarkedAllocationSites() {
942   // TODO(hpayer): If iterating over the allocation sites list becomes a
943   // performance issue, use a cache data structure in heap instead.
944 
945   ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite* site) {
946     if (site->deopt_dependent_code()) {
947       site->dependent_code()->MarkCodeForDeoptimization(
948           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
949       site->set_deopt_dependent_code(false);
950     }
951   });
952 
953   Deoptimizer::DeoptimizeMarkedCode(isolate_);
954 }
955 
956 
957 void Heap::GarbageCollectionEpilogue() {
958   TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE);
959   if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) {
960     ZapFromSpace();
961   }
962 
963 #ifdef VERIFY_HEAP
964   if (FLAG_verify_heap) {
965     Verify();
966   }
967 #endif
968 
969   AllowHeapAllocation for_the_rest_of_the_epilogue;
970 
971 #ifdef DEBUG
972   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
973   if (FLAG_print_handles) PrintHandles();
974   if (FLAG_gc_verbose) Print();
975   if (FLAG_code_stats) ReportCodeStatistics("After GC");
976   if (FLAG_check_handle_count) CheckHandleCount();
977 #endif
978 
979   UpdateMaximumCommitted();
980 
981   isolate_->counters()->alive_after_last_gc()->Set(
982       static_cast<int>(SizeOfObjects()));
983 
984   isolate_->counters()->string_table_capacity()->Set(
985       string_table()->Capacity());
986   isolate_->counters()->number_of_symbols()->Set(
987       string_table()->NumberOfElements());
988 
989   if (CommittedMemory() > 0) {
990     isolate_->counters()->external_fragmentation_total()->AddSample(
991         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
992 
993     isolate_->counters()->heap_sample_total_committed()->AddSample(
994         static_cast<int>(CommittedMemory() / KB));
995     isolate_->counters()->heap_sample_total_used()->AddSample(
996         static_cast<int>(SizeOfObjects() / KB));
997     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
998         static_cast<int>(map_space()->CommittedMemory() / KB));
999     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
1000         static_cast<int>(code_space()->CommittedMemory() / KB));
1001 
1002     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
1003         static_cast<int>(MaximumCommittedMemory() / KB));
1004   }
1005 
1006 #define UPDATE_COUNTERS_FOR_SPACE(space)                \
1007   isolate_->counters()->space##_bytes_available()->Set( \
1008       static_cast<int>(space()->Available()));          \
1009   isolate_->counters()->space##_bytes_committed()->Set( \
1010       static_cast<int>(space()->CommittedMemory()));    \
1011   isolate_->counters()->space##_bytes_used()->Set(      \
1012       static_cast<int>(space()->SizeOfObjects()));
1013 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
1014   if (space()->CommittedMemory() > 0) {                                \
1015     isolate_->counters()->external_fragmentation_##space()->AddSample( \
1016         static_cast<int>(100 -                                         \
1017                          (space()->SizeOfObjects() * 100.0) /          \
1018                              space()->CommittedMemory()));             \
1019   }
1020 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
1021   UPDATE_COUNTERS_FOR_SPACE(space)                         \
1022   UPDATE_FRAGMENTATION_FOR_SPACE(space)
1023 
1024   UPDATE_COUNTERS_FOR_SPACE(new_space)
1025   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
1026   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
1027   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
1028   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
1029 #undef UPDATE_COUNTERS_FOR_SPACE
1030 #undef UPDATE_FRAGMENTATION_FOR_SPACE
1031 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
1032 
1033 #ifdef DEBUG
1034   ReportStatisticsAfterGC();
1035 #endif  // DEBUG
1036 
1037   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
1038 
1039   {
1040     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
1041     ReduceNewSpaceSize();
1042   }
1043 }
1044 
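// Tracks the nesting depth of GC callback invocations; CheckReenter() reports
// whether this is the outermost scope.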
1045 class GCCallbacksScope {
1046  public:
1047   explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
1048     heap_->gc_callbacks_depth_++;
1049   }
1050   ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
1051 
1052   bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
1053 
1054  private:
1055   Heap* heap_;
1056 };
1057 
1058 
1059 void Heap::HandleGCRequest() {
1060   if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) {
1061     CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting);
1062     stress_scavenge_observer_->RequestedGCDone();
1063   } else if (HighMemoryPressure()) {
1064     incremental_marking()->reset_request_type();
1065     CheckMemoryPressure();
1066   } else if (incremental_marking()->request_type() ==
1067              IncrementalMarking::COMPLETE_MARKING) {
1068     incremental_marking()->reset_request_type();
1069     CollectAllGarbage(current_gc_flags_,
1070                       GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
1071                       current_gc_callback_flags_);
1072   } else if (incremental_marking()->request_type() ==
1073                  IncrementalMarking::FINALIZATION &&
1074              incremental_marking()->IsMarking() &&
1075              !incremental_marking()->finalize_marking_completed()) {
1076     incremental_marking()->reset_request_type();
1077     FinalizeIncrementalMarkingIncrementally(
1078         GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
1079   }
1080 }
1081 
1082 
1083 void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
1084   scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
1085 }
1086 
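// Chooses the histogram timer matching the collector and the isolate's
// foreground/background state: scavenger timers for young-generation GCs,
// finalize timers (with reduce-memory variants) while incremental marking is
// active, and compactor timers for non-incremental full GCs.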
1087 HistogramTimer* Heap::GCTypePriorityTimer(GarbageCollector collector) {
1088   if (IsYoungGenerationCollector(collector)) {
1089     if (isolate_->IsIsolateInBackground()) {
1090       return isolate_->counters()->gc_scavenger_background();
1091     }
1092     return isolate_->counters()->gc_scavenger_foreground();
1093   } else {
1094     if (!incremental_marking()->IsStopped()) {
1095       if (ShouldReduceMemory()) {
1096         if (isolate_->IsIsolateInBackground()) {
1097           return isolate_->counters()->gc_finalize_reduce_memory_background();
1098         }
1099         return isolate_->counters()->gc_finalize_reduce_memory_foreground();
1100       } else {
1101         if (isolate_->IsIsolateInBackground()) {
1102           return isolate_->counters()->gc_finalize_background();
1103         }
1104         return isolate_->counters()->gc_finalize_foreground();
1105       }
1106     } else {
1107       if (isolate_->IsIsolateInBackground()) {
1108         return isolate_->counters()->gc_compactor_background();
1109       }
1110       return isolate_->counters()->gc_compactor_foreground();
1111     }
1112   }
1113 }
1114 
1115 HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
1116   if (IsYoungGenerationCollector(collector)) {
1117     return isolate_->counters()->gc_scavenger();
1118   } else {
1119     if (!incremental_marking()->IsStopped()) {
1120       if (ShouldReduceMemory()) {
1121         return isolate_->counters()->gc_finalize_reduce_memory();
1122       } else {
1123         return isolate_->counters()->gc_finalize();
1124       }
1125     } else {
1126       return isolate_->counters()->gc_compactor();
1127     }
1128   }
1129 }
1130 
1131 void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
1132                              const v8::GCCallbackFlags gc_callback_flags) {
1133   // Since we are ignoring the return value, the exact choice of space does
1134   // not matter, so long as we do not specify NEW_SPACE, which would not
1135   // cause a full GC.
1136   set_current_gc_flags(flags);
1137   CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
1138   set_current_gc_flags(kNoGCFlags);
1139 }
1140 
1141 namespace {
1142 
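// Helpers for FLAG_trace_duplicate_threshold_kb: CompareWords() orders equally
// sized objects by their raw contents so that byte-identical copies end up
// adjacent, and ReportDuplicates() prints every duplicate group whose wasted
// size is at least the configured threshold.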
1143 intptr_t CompareWords(int size, HeapObject* a, HeapObject* b) {
1144   int words = size / kPointerSize;
1145   DCHECK_EQ(a->Size(), size);
1146   DCHECK_EQ(b->Size(), size);
1147   intptr_t* slot_a = reinterpret_cast<intptr_t*>(a->address());
1148   intptr_t* slot_b = reinterpret_cast<intptr_t*>(b->address());
1149   for (int i = 0; i < words; i++) {
1150     if (*slot_a != *slot_b) {
1151       return *slot_a - *slot_b;
1152     }
1153     slot_a++;
1154     slot_b++;
1155   }
1156   return 0;
1157 }
1158 
1159 void ReportDuplicates(int size, std::vector<HeapObject*>& objects) {
1160   if (objects.size() == 0) return;
1161 
1162   sort(objects.begin(), objects.end(), [size](HeapObject* a, HeapObject* b) {
1163     intptr_t c = CompareWords(size, a, b);
1164     if (c != 0) return c < 0;
1165     return a < b;
1166   });
1167 
1168   std::vector<std::pair<int, HeapObject*>> duplicates;
1169   HeapObject* current = objects[0];
1170   int count = 1;
1171   for (size_t i = 1; i < objects.size(); i++) {
1172     if (CompareWords(size, current, objects[i]) == 0) {
1173       count++;
1174     } else {
1175       if (count > 1) {
1176         duplicates.push_back(std::make_pair(count - 1, current));
1177       }
1178       count = 1;
1179       current = objects[i];
1180     }
1181   }
1182   if (count > 1) {
1183     duplicates.push_back(std::make_pair(count - 1, current));
1184   }
1185 
1186   int threshold = FLAG_trace_duplicate_threshold_kb * KB;
1187 
1188   sort(duplicates.begin(), duplicates.end());
1189   for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) {
1190     int duplicate_bytes = it->first * size;
1191     if (duplicate_bytes < threshold) break;
1192     PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size,
1193            duplicate_bytes / KB);
1194     PrintF("Sample object: ");
1195     it->second->Print();
1196     PrintF("============================\n");
1197   }
1198 }
1199 }  // anonymous namespace
1200 
1201 void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
1202   // Since we are ignoring the return value, the exact choice of space does
1203   // not matter, so long as we do not specify NEW_SPACE, which would not
1204   // cause a full GC.
1205   // Major GC would invoke weak handle callbacks on weakly reachable
1206   // handles, but won't collect weakly reachable objects until the next
1207   // major GC. Therefore, if we collect aggressively and a weak handle
1208   // callback has been invoked, we rerun the major GC to release objects
1209   // that have become garbage.
1210   // Note: as weak callbacks can execute arbitrary code, we cannot
1211   // hope that eventually there will be no weak callback invocations.
1212   // Therefore stop recollecting after several attempts.
1213   if (gc_reason == GarbageCollectionReason::kLastResort) {
1214     InvokeNearHeapLimitCallback();
1215   }
1216   RuntimeCallTimerScope runtime_timer(
1217       isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
1218 
1219   // The optimizing compiler may be unnecessarily holding on to memory.
1220   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1221   isolate()->ClearSerializerData();
1222   set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
1223   isolate_->compilation_cache()->Clear();
1224   const int kMaxNumberOfAttempts = 7;
1225   const int kMinNumberOfAttempts = 2;
1226   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
1227     if (!CollectGarbage(OLD_SPACE, gc_reason,
1228                         v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
1229         attempt + 1 >= kMinNumberOfAttempts) {
1230       break;
1231     }
1232   }
1233 
1234   set_current_gc_flags(kNoGCFlags);
1235   new_space_->Shrink();
1236   UncommitFromSpace();
1237   memory_allocator()->unmapper()->EnsureUnmappingCompleted();
1238 
1239   if (FLAG_trace_duplicate_threshold_kb) {
1240     std::map<int, std::vector<HeapObject*>> objects_by_size;
1241     PagedSpaces spaces(this);
1242     for (PagedSpace* space = spaces.next(); space != nullptr;
1243          space = spaces.next()) {
1244       HeapObjectIterator it(space);
1245       for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
1246         objects_by_size[obj->Size()].push_back(obj);
1247       }
1248     }
1249     {
1250       LargeObjectIterator it(lo_space());
1251       for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
1252         objects_by_size[obj->Size()].push_back(obj);
1253       }
1254     }
1255     for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend();
1256          ++it) {
1257       ReportDuplicates(it->first, it->second);
1258     }
1259   }
1260 }
1261 
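// Responds to embedder-reported external memory growth: once usage exceeds
// the hard limit relative to the last mark-compact, a full GC is run
// immediately; otherwise incremental marking is started if possible, and if
// it is already running it is advanced by a small deadline (between
// kMinStepSize and kMaxStepSize milliseconds) that grows with the external
// memory pressure.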
1262 void Heap::ReportExternalMemoryPressure() {
1263   const GCCallbackFlags kGCCallbackFlagsForExternalMemory =
1264       static_cast<GCCallbackFlags>(
1265           kGCCallbackFlagSynchronousPhantomCallbackProcessing |
1266           kGCCallbackFlagCollectAllExternalMemory);
1267   if (external_memory_ >
1268       (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
1269     CollectAllGarbage(
1270         kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
1271         GarbageCollectionReason::kExternalMemoryPressure,
1272         static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
1273                                      kGCCallbackFlagsForExternalMemory));
1274     return;
1275   }
1276   if (incremental_marking()->IsStopped()) {
1277     if (incremental_marking()->CanBeActivated()) {
1278       StartIncrementalMarking(GCFlagsForIncrementalMarking(),
1279                               GarbageCollectionReason::kExternalMemoryPressure,
1280                               kGCCallbackFlagsForExternalMemory);
1281     } else {
1282       CollectAllGarbage(i::Heap::kNoGCFlags,
1283                         GarbageCollectionReason::kExternalMemoryPressure,
1284                         kGCCallbackFlagsForExternalMemory);
1285     }
1286   } else {
1287     // Incremental marking is turned on and has already been started.
1288     const double kMinStepSize = 5;
1289     const double kMaxStepSize = 10;
1290     const double ms_step =
1291         Min(kMaxStepSize,
1292             Max(kMinStepSize, static_cast<double>(external_memory_) /
1293                                   external_memory_limit_ * kMinStepSize));
1294     const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step;
1295     // Extend the gc callback flags with external memory flags.
1296     current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
1297         current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
1298     incremental_marking()->AdvanceIncrementalMarking(
1299         deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
1300   }
1301 }
1302 
1303 void Heap::EnsureFillerObjectAtTop() {
1304   // There may be an allocation memento behind objects in new space. Upon
1305   // evacuation of a non-full new space (or if we are on the last page) there
1306   // may be uninitialized memory behind top. We fill the remainder of the page
1307   // with a filler.
1308   Address to_top = new_space_->top();
1309   Page* page = Page::FromAddress(to_top - kPointerSize);
1310   if (page->Contains(to_top)) {
1311     int remaining_in_page = static_cast<int>(page->area_end() - to_top);
1312     CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
1313   }
1314 }
1315 
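// Main GC entry point: selects a collector via SelectGarbageCollector(), wraps
// PerformGarbageCollection() in the prologue/epilogue and tracing scopes, and
// returns whether the next GC is likely to collect more memory.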
1316 bool Heap::CollectGarbage(AllocationSpace space,
1317                           GarbageCollectionReason gc_reason,
1318                           const v8::GCCallbackFlags gc_callback_flags) {
1319   const char* collector_reason = nullptr;
1320   GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
1321 
1322   if (!CanExpandOldGeneration(new_space()->Capacity())) {
1323     InvokeNearHeapLimitCallback();
1324   }
1325 
1326   // Ensure that all pending phantom callbacks are invoked.
1327   isolate()->global_handles()->InvokeSecondPassPhantomCallbacks();
1328 
1329   // The VM is in the GC state until exiting this function.
1330   VMState<GC> state(isolate());
1331 
1332 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
1333   // Reset the allocation timeout, but make sure to allow at least a few
1334   // allocations after a collection. The reason for this is that we have a lot
1335   // of allocation sequences and we assume that a garbage collection will allow
1336   // the subsequent allocation attempts to go through.
1337   if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
1338     allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_));
1339   }
1340 #endif
1341 
1342   EnsureFillerObjectAtTop();
1343 
1344   if (IsYoungGenerationCollector(collector) &&
1345       !incremental_marking()->IsStopped()) {
1346     if (FLAG_trace_incremental_marking) {
1347       isolate()->PrintWithTimestamp(
1348           "[IncrementalMarking] Scavenge during marking.\n");
1349     }
1350   }
1351 
1352   bool next_gc_likely_to_collect_more = false;
1353   size_t committed_memory_before = 0;
1354 
1355   if (collector == MARK_COMPACTOR) {
1356     committed_memory_before = CommittedOldGenerationMemory();
1357   }
1358 
1359   {
1360     tracer()->Start(collector, gc_reason, collector_reason);
1361     DCHECK(AllowHeapAllocation::IsAllowed());
1362     DisallowHeapAllocation no_allocation_during_gc;
1363     GarbageCollectionPrologue();
1364 
1365     {
1366       HistogramTimer* gc_type_timer = GCTypeTimer(collector);
1367       HistogramTimerScope histogram_timer_scope(gc_type_timer);
1368       TRACE_EVENT0("v8", gc_type_timer->name());
1369 
1370       HistogramTimer* gc_type_priority_timer = GCTypePriorityTimer(collector);
1371       OptionalHistogramTimerScopeMode mode =
1372           isolate_->IsMemorySavingsModeActive()
1373               ? OptionalHistogramTimerScopeMode::DONT_TAKE_TIME
1374               : OptionalHistogramTimerScopeMode::TAKE_TIME;
1375       OptionalHistogramTimerScope histogram_timer_priority_scope(
1376           gc_type_priority_timer, mode);
1377 
1378       next_gc_likely_to_collect_more =
1379           PerformGarbageCollection(collector, gc_callback_flags);
1380       if (collector == MARK_COMPACTOR) {
1381         tracer()->RecordMarkCompactHistograms(gc_type_timer);
1382       }
1383     }
1384 
1385     GarbageCollectionEpilogue();
1386     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
1387       isolate()->CheckDetachedContextsAfterGC();
1388     }
1389 
1390     if (collector == MARK_COMPACTOR) {
1391       size_t committed_memory_after = CommittedOldGenerationMemory();
1392       size_t used_memory_after = OldGenerationSizeOfObjects();
1393       MemoryReducer::Event event;
1394       event.type = MemoryReducer::kMarkCompact;
1395       event.time_ms = MonotonicallyIncreasingTimeInMs();
1396       // Trigger one more GC if
1397       // - this GC decreased committed memory,
1398       // - there is high fragmentation,
1399       // - there are live detached contexts.
1400       event.next_gc_likely_to_collect_more =
1401           (committed_memory_before > committed_memory_after + MB) ||
1402           HasHighFragmentation(used_memory_after, committed_memory_after) ||
1403           (detached_contexts()->length() > 0);
1404       event.committed_memory = committed_memory_after;
1405       if (deserialization_complete_) {
1406         memory_reducer_->NotifyMarkCompact(event);
1407       }
1408     }
1409 
1410     tracer()->Stop(collector);
1411   }
1412 
1413   if (collector == MARK_COMPACTOR &&
1414       (gc_callback_flags & (kGCCallbackFlagForced |
1415                             kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
1416     isolate()->CountUsage(v8::Isolate::kForcedGC);
1417   }
1418 
1419   // Start incremental marking for the next cycle. The heap snapshot
1420   // generator needs incremental marking to stay off after it has aborted.
1421   // We do this only for the scavenger to avoid a loop where mark-compact
1422   // causes another mark-compact.
1423   if (IsYoungGenerationCollector(collector) &&
1424       !ShouldAbortIncrementalMarking()) {
1425     StartIncrementalMarkingIfAllocationLimitIsReached(
1426         GCFlagsForIncrementalMarking(),
1427         kGCCallbackScheduleIdleGarbageCollection);
1428   }
1429 
1430   return next_gc_likely_to_collect_more;
1431 }
1432 
1433 
1434 int Heap::NotifyContextDisposed(bool dependant_context) {
1435   if (!dependant_context) {
1436     tracer()->ResetSurvivalEvents();
1437     old_generation_size_configured_ = false;
1438     MemoryReducer::Event event;
1439     event.type = MemoryReducer::kPossibleGarbage;
1440     event.time_ms = MonotonicallyIncreasingTimeInMs();
1441     memory_reducer_->NotifyPossibleGarbage(event);
1442   }
1443   isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
1444 
1445   number_of_disposed_maps_ = retained_maps()->length();
1446   tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
1447   return ++contexts_disposed_;
1448 }
1449 
1450 void Heap::StartIncrementalMarking(int gc_flags,
1451                                    GarbageCollectionReason gc_reason,
1452                                    GCCallbackFlags gc_callback_flags) {
1453   DCHECK(incremental_marking()->IsStopped());
1454   set_current_gc_flags(gc_flags);
1455   current_gc_callback_flags_ = gc_callback_flags;
1456   incremental_marking()->Start(gc_reason);
1457 }
1458 
1459 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
1460     int gc_flags, const GCCallbackFlags gc_callback_flags) {
1461   if (incremental_marking()->IsStopped()) {
1462     IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
1463     if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
1464       incremental_marking()->incremental_marking_job()->ScheduleTask(this);
1465     } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
1466       StartIncrementalMarking(gc_flags,
1467                               GarbageCollectionReason::kAllocationLimit,
1468                               gc_callback_flags);
1469     }
1470   }
1471 }
1472 
1473 void Heap::StartIdleIncrementalMarking(
1474     GarbageCollectionReason gc_reason,
1475     const GCCallbackFlags gc_callback_flags) {
1476   gc_idle_time_handler_->ResetNoProgressCounter();
1477   StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
1478                           gc_callback_flags);
1479 }
1480 
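// MoveElements shifts |len| element slots inside |array| from |src_index| to
// |dst_index|. While the concurrent marker is running, the slots are copied
// one by one with relaxed atomic loads/stores (walking forwards or backwards
// depending on how the ranges overlap, like memmove) so the marker never
// observes a half-written pointer; otherwise a plain MemMove suffices. The
// write barrier for the destination range is emitted afterwards unless the
// caller passed SKIP_WRITE_BARRIER.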
1481 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
1482                         int len, WriteBarrierMode mode) {
1483   if (len == 0) return;
1484 
1485   DCHECK(array->map() != ReadOnlyRoots(this).fixed_cow_array_map());
1486   Object** dst = array->data_start() + dst_index;
1487   Object** src = array->data_start() + src_index;
1488   if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) {
1489     if (dst < src) {
1490       for (int i = 0; i < len; i++) {
1491         base::AsAtomicPointer::Relaxed_Store(
1492             dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
1493       }
1494     } else {
1495       for (int i = len - 1; i >= 0; i--) {
1496         base::AsAtomicPointer::Relaxed_Store(
1497             dst + i, base::AsAtomicPointer::Relaxed_Load(src + i));
1498       }
1499     }
1500   } else {
1501     MemMove(dst, src, len * kPointerSize);
1502   }
1503   if (mode == SKIP_WRITE_BARRIER) return;
1504   FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
1505 }
1506 
1507 
1508 #ifdef VERIFY_HEAP
1509 // Helper class for verifying the string table.
1510 class StringTableVerifier : public ObjectVisitor {
1511  public:
1512   explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {}
1513 
1514   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
1515     // Visit all HeapObject pointers in [start, end).
1516     for (Object** p = start; p < end; p++) {
1517       DCHECK(!HasWeakHeapObjectTag(*p));
1518       if ((*p)->IsHeapObject()) {
1519         HeapObject* object = HeapObject::cast(*p);
1520         // Check that the string is actually internalized.
1521         CHECK(object->IsTheHole(isolate_) || object->IsUndefined(isolate_) ||
1522               object->IsInternalizedString());
1523       }
1524     }
1525   }
1526   void VisitPointers(HeapObject* host, MaybeObject** start,
1527                      MaybeObject** end) override {
1528     UNREACHABLE();
1529   }
1530 
1531  private:
1532   Isolate* isolate_;
1533 };
1534 
1535 static void VerifyStringTable(Isolate* isolate) {
1536   StringTableVerifier verifier(isolate);
1537   isolate->heap()->string_table()->IterateElements(&verifier);
1538 }
1539 #endif  // VERIFY_HEAP
1540 
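// ReserveSpace pre-allocates the chunks requested by the deserializer. Map
// space is carved into individual Map-sized fillers to avoid fragmentation,
// large-object space is only checked against the old-generation limit, and
// all other spaces get real raw allocations marked with filler objects.
// Whenever a reservation fails, a GC is triggered (a scavenge for new space,
// otherwise a full GC that becomes more aggressive after the first retry) and
// the whole loop is retried, up to kThreshold attempts. Returns false if the
// reservations still cannot be satisfied.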
1541 bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) {
1542   bool gc_performed = true;
1543   int counter = 0;
1544   static const int kThreshold = 20;
1545   while (gc_performed && counter++ < kThreshold) {
1546     gc_performed = false;
1547     for (int space = FIRST_SPACE;
1548          space < SerializerDeserializer::kNumberOfSpaces; space++) {
1549       Reservation* reservation = &reservations[space];
1550       DCHECK_LE(1, reservation->size());
1551       if (reservation->at(0).size == 0) {
1552         DCHECK_EQ(1, reservation->size());
1553         continue;
1554       }
1555       bool perform_gc = false;
1556       if (space == MAP_SPACE) {
1557         // We allocate each map individually to avoid fragmentation.
1558         maps->clear();
1559         DCHECK_LE(reservation->size(), 2);
1560         int reserved_size = 0;
1561         for (const Chunk& c : *reservation) reserved_size += c.size;
1562         DCHECK_EQ(0, reserved_size % Map::kSize);
1563         int num_maps = reserved_size / Map::kSize;
1564         for (int i = 0; i < num_maps; i++) {
1565           // The deserializer will update the skip list.
1566           AllocationResult allocation = map_space()->AllocateRawUnaligned(
1567               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
1568           HeapObject* free_space = nullptr;
1569           if (allocation.To(&free_space)) {
1570             // Mark with a free list node, in case we have a GC before
1571             // deserializing.
1572             Address free_space_address = free_space->address();
1573             CreateFillerObjectAt(free_space_address, Map::kSize,
1574                                  ClearRecordedSlots::kNo);
1575             maps->push_back(free_space_address);
1576           } else {
1577             perform_gc = true;
1578             break;
1579           }
1580         }
1581       } else if (space == LO_SPACE) {
1582         // Just check that we can allocate during deserialization.
1583         DCHECK_LE(reservation->size(), 2);
1584         int reserved_size = 0;
1585         for (const Chunk& c : *reservation) reserved_size += c.size;
1586         perform_gc = !CanExpandOldGeneration(reserved_size);
1587       } else {
1588         for (auto& chunk : *reservation) {
1589           AllocationResult allocation;
1590           int size = chunk.size;
1591           DCHECK_LE(static_cast<size_t>(size),
1592                     MemoryAllocator::PageAreaSize(
1593                         static_cast<AllocationSpace>(space)));
1594           if (space == NEW_SPACE) {
1595             allocation = new_space()->AllocateRawUnaligned(size);
1596           } else {
1597             // The deserializer will update the skip list.
1598             allocation = paged_space(space)->AllocateRawUnaligned(
1599                 size, PagedSpace::IGNORE_SKIP_LIST);
1600           }
1601           HeapObject* free_space = nullptr;
1602           if (allocation.To(&free_space)) {
1603             // Mark with a free list node, in case we have a GC before
1604             // deserializing.
1605             Address free_space_address = free_space->address();
1606             CreateFillerObjectAt(free_space_address, size,
1607                                  ClearRecordedSlots::kNo);
1608             DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces,
1609                       space);
1610             chunk.start = free_space_address;
1611             chunk.end = free_space_address + size;
1612           } else {
1613             perform_gc = true;
1614             break;
1615           }
1616         }
1617       }
1618       if (perform_gc) {
1619         // We cannot perform a GC with an uninitialized isolate. This check
1620         // fails for example if the max old space size is chosen unwisely,
1621         // so that we cannot allocate space to deserialize the initial heap.
1622         if (!deserialization_complete_) {
1623           V8::FatalProcessOutOfMemory(
1624               isolate(), "insufficient memory to create an Isolate");
1625         }
1626         if (space == NEW_SPACE) {
1627           CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
1628         } else {
1629           if (counter > 1) {
1630             CollectAllGarbage(
1631                 kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
1632                 GarbageCollectionReason::kDeserializer);
1633           } else {
1634             CollectAllGarbage(kAbortIncrementalMarkingMask,
1635                               GarbageCollectionReason::kDeserializer);
1636           }
1637         }
1638         gc_performed = true;
1639         break;  // Abort for-loop over spaces and retry.
1640       }
1641     }
1642   }
1643 
1644   return !gc_performed;
1645 }
1646 
1647 
1648 void Heap::EnsureFromSpaceIsCommitted() {
1649   if (new_space_->CommitFromSpaceIfNeeded()) return;
1650 
1651   // Committing memory to from space failed.
1652   // Memory is exhausted and we will die.
1653   FatalProcessOutOfMemory("Committing semi space failed.");
1654 }
1655 
1656 
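// Recomputes the young-generation survival statistics after a GC, all as
// percentages of bytes:
//   promotion_ratio_        = promoted / new-space size at GC start
//   promotion_rate_         = promoted / bytes copied within the semispace in
//                             the previous GC (0 if nothing was copied)
//   semi_space_copied_rate_ = bytes copied in this GC / new-space size at start
// The survival ratio reported to the tracer is
// promotion_ratio_ + semi_space_copied_rate_.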
1657 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1658   if (start_new_space_size == 0) return;
1659 
1660   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1661                       static_cast<double>(start_new_space_size) * 100);
1662 
1663   if (previous_semi_space_copied_object_size_ > 0) {
1664     promotion_rate_ =
1665         (static_cast<double>(promoted_objects_size_) /
1666          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1667   } else {
1668     promotion_rate_ = 0;
1669   }
1670 
1671   semi_space_copied_rate_ =
1672       (static_cast<double>(semi_space_copied_object_size_) /
1673        static_cast<double>(start_new_space_size) * 100);
1674 
1675   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1676   tracer()->AddSurvivalRatio(survival_rate);
1677 }
1678 
1679 bool Heap::PerformGarbageCollection(
1680     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1681   int freed_global_handles = 0;
1682 
1683   if (!IsYoungGenerationCollector(collector)) {
1684     PROFILE(isolate_, CodeMovingGCEvent());
1685   }
1686 
1687 #ifdef VERIFY_HEAP
1688   if (FLAG_verify_heap) {
1689     VerifyStringTable(this->isolate());
1690   }
1691 #endif
1692 
1693   GCType gc_type =
1694       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1695 
1696   {
1697     GCCallbacksScope scope(this);
1698     if (scope.CheckReenter()) {
1699       AllowHeapAllocation allow_allocation;
1700       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE);
1701       VMState<EXTERNAL> state(isolate_);
1702       HandleScope handle_scope(isolate_);
1703       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1704     }
1705   }
1706 
1707   EnsureFromSpaceIsCommitted();
1708 
1709   size_t start_new_space_size = Heap::new_space()->Size();
1710 
1711   {
1712     Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
1713 
1714     switch (collector) {
1715       case MARK_COMPACTOR:
1716         UpdateOldGenerationAllocationCounter();
1717         // Perform mark-sweep with optional compaction.
1718         MarkCompact();
1719         old_generation_size_configured_ = true;
1720         // This should be updated before PostGarbageCollectionProcessing, which
1721         // can cause another GC. Take into account the objects promoted during
1722         // GC.
1723         old_generation_allocation_counter_at_last_gc_ +=
1724             static_cast<size_t>(promoted_objects_size_);
1725         old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects();
1726         break;
1727       case MINOR_MARK_COMPACTOR:
1728         MinorMarkCompact();
1729         break;
1730       case SCAVENGER:
1731         if ((fast_promotion_mode_ &&
1732              CanExpandOldGeneration(new_space()->Size()))) {
1733           tracer()->NotifyYoungGenerationHandling(
1734               YoungGenerationHandling::kFastPromotionDuringScavenge);
1735           EvacuateYoungGeneration();
1736         } else {
1737           tracer()->NotifyYoungGenerationHandling(
1738               YoungGenerationHandling::kRegularScavenge);
1739 
1740           Scavenge();
1741         }
1742         break;
1743     }
1744 
1745     ProcessPretenuringFeedback();
1746   }
1747 
1748   UpdateSurvivalStatistics(static_cast<int>(start_new_space_size));
1749   ConfigureInitialOldGenerationSize();
1750 
1751   if (collector != MARK_COMPACTOR) {
1752     // Objects that died in the new space might have been accounted
1753     // as bytes marked ahead of schedule by the incremental marker.
1754     incremental_marking()->UpdateMarkedBytesAfterScavenge(
1755         start_new_space_size - SurvivedNewSpaceObjectSize());
1756   }
1757 
1758   if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) {
1759     ComputeFastPromotionMode();
1760   }
1761 
1762   isolate_->counters()->objs_since_last_young()->Set(0);
1763 
1764   gc_post_processing_depth_++;
1765   {
1766     AllowHeapAllocation allow_allocation;
1767     TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES);
1768     freed_global_handles =
1769         isolate_->global_handles()->PostGarbageCollectionProcessing(
1770             collector, gc_callback_flags);
1771   }
1772   gc_post_processing_depth_--;
1773 
1774   isolate_->eternal_handles()->PostGarbageCollectionProcessing();
1775 
1776   // Update relocatables.
1777   Relocatable::PostGarbageCollectionProcessing(isolate_);
1778 
1779   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1780   double mutator_speed =
1781       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1782   size_t old_gen_size = OldGenerationSizeOfObjects();
1783   if (collector == MARK_COMPACTOR) {
1784     // Register the amount of external allocated memory.
1785     external_memory_at_last_mark_compact_ = external_memory_;
1786     external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
1787 
1788     size_t new_limit = heap_controller()->CalculateAllocationLimit(
1789         old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
1790         new_space()->Capacity(), CurrentHeapGrowingMode());
1791     old_generation_allocation_limit_ = new_limit;
1792 
1793     CheckIneffectiveMarkCompact(
1794         old_gen_size, tracer()->AverageMarkCompactMutatorUtilization());
1795   } else if (HasLowYoungGenerationAllocationRate() &&
1796              old_generation_size_configured_) {
1797     size_t new_limit = heap_controller()->CalculateAllocationLimit(
1798         old_gen_size, max_old_generation_size_, gc_speed, mutator_speed,
1799         new_space()->Capacity(), CurrentHeapGrowingMode());
1800     if (new_limit < old_generation_allocation_limit_) {
1801       old_generation_allocation_limit_ = new_limit;
1802     }
1803   }
1804 
1805   {
1806     GCCallbacksScope scope(this);
1807     if (scope.CheckReenter()) {
1808       AllowHeapAllocation allow_allocation;
1809       TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE);
1810       VMState<EXTERNAL> state(isolate_);
1811       HandleScope handle_scope(isolate_);
1812       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1813     }
1814   }
1815 
1816 #ifdef VERIFY_HEAP
1817   if (FLAG_verify_heap) {
1818     VerifyStringTable(this->isolate());
1819   }
1820 #endif
1821 
1822   return freed_global_handles > 0;
1823 }
1824 
1825 
1826 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1827   RuntimeCallTimerScope runtime_timer(
1828       isolate(), RuntimeCallCounterId::kGCPrologueCallback);
1829   for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
1830     if (gc_type & info.gc_type) {
1831       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1832       info.callback(isolate, gc_type, flags, info.data);
1833     }
1834   }
1835 }
1836 
1837 void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1838   RuntimeCallTimerScope runtime_timer(
1839       isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
1840   for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
1841     if (gc_type & info.gc_type) {
1842       v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1843       info.callback(isolate, gc_type, flags, info.data);
1844     }
1845   }
1846 }
1847 
1848 
1849 void Heap::MarkCompact() {
1850   PauseAllocationObserversScope pause_observers(this);
1851 
1852   SetGCState(MARK_COMPACT);
1853 
1854   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1855 
1856   uint64_t size_of_objects_before_gc = SizeOfObjects();
1857 
1858   CodeSpaceMemoryModificationScope code_modification(this);
1859 
1860   mark_compact_collector()->Prepare();
1861 
1862   ms_count_++;
1863 
1864   MarkCompactPrologue();
1865 
1866   mark_compact_collector()->CollectGarbage();
1867 
1868   LOG(isolate_, ResourceEvent("markcompact", "end"));
1869 
1870   MarkCompactEpilogue();
1871 
1872   if (FLAG_allocation_site_pretenuring) {
1873     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1874   }
1875 }
1876 
1877 void Heap::MinorMarkCompact() {
1878 #ifdef ENABLE_MINOR_MC
1879   DCHECK(FLAG_minor_mc);
1880 
1881   PauseAllocationObserversScope pause_observers(this);
1882   SetGCState(MINOR_MARK_COMPACT);
1883   LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
1884 
1885   TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
1886   AlwaysAllocateScope always_allocate(isolate());
1887   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
1888       incremental_marking());
1889   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1890 
1891   minor_mark_compact_collector()->CollectGarbage();
1892 
1893   LOG(isolate_, ResourceEvent("MinorMarkCompact", "end"));
1894   SetGCState(NOT_IN_GC);
1895 #else
1896   UNREACHABLE();
1897 #endif  // ENABLE_MINOR_MC
1898 }
1899 
1900 void Heap::MarkCompactEpilogue() {
1901   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
1902   SetGCState(NOT_IN_GC);
1903 
1904   isolate_->counters()->objs_since_last_full()->Set(0);
1905 
1906   incremental_marking()->Epilogue();
1907 
1908   DCHECK(incremental_marking()->IsStopped());
1909 }
1910 
1911 
1912 void Heap::MarkCompactPrologue() {
1913   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
1914   isolate_->context_slot_cache()->Clear();
1915   isolate_->descriptor_lookup_cache()->Clear();
1916   RegExpResultsCache::Clear(string_split_cache());
1917   RegExpResultsCache::Clear(regexp_multiple_cache());
1918 
1919   isolate_->compilation_cache()->MarkCompactPrologue();
1920 
1921   FlushNumberStringCache();
1922 }
1923 
1924 
1925 void Heap::CheckNewSpaceExpansionCriteria() {
1926   if (FLAG_experimental_new_space_growth_heuristic) {
1927     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1928         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
1929       // Grow the size of new space if there is room to grow, and more than 10%
1930       // have survived the last scavenge.
1931       new_space_->Grow();
1932       survived_since_last_expansion_ = 0;
1933     }
1934   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1935              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
1936     // Grow the size of new space if there is room to grow, and enough data
1937     // has survived scavenge since the last expansion.
1938     new_space_->Grow();
1939     survived_since_last_expansion_ = 0;
1940   }
1941 }
1942 
1943 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1944   return Heap::InFromSpace(*p) &&
1945          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1946 }
1947 
1948 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1949  public:
1950   virtual Object* RetainAs(Object* object) {
1951     if (!Heap::InFromSpace(object)) {
1952       return object;
1953     }
1954 
1955     MapWord map_word = HeapObject::cast(object)->map_word();
1956     if (map_word.IsForwardingAddress()) {
1957       return map_word.ToForwardingAddress();
1958     }
1959     return nullptr;
1960   }
1961 };
1962 
1963 void Heap::EvacuateYoungGeneration() {
1964   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE);
1965   base::LockGuard<base::Mutex> guard(relocation_mutex());
1966   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
1967   if (!FLAG_concurrent_marking) {
1968     DCHECK(fast_promotion_mode_);
1969     DCHECK(CanExpandOldGeneration(new_space()->Size()));
1970   }
1971 
1972   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
1973 
1974   SetGCState(SCAVENGE);
1975   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1976 
1977   // Move pages from new->old generation.
1978   PageRange range(new_space()->first_allocatable_address(), new_space()->top());
1979   for (auto it = range.begin(); it != range.end();) {
1980     Page* p = (*++it)->prev_page();
1981     new_space()->from_space().RemovePage(p);
1982     Page::ConvertNewToOld(p);
1983     if (incremental_marking()->IsMarking())
1984       mark_compact_collector()->RecordLiveSlotsOnPage(p);
1985   }
1986 
1987   // Reset new space.
1988   if (!new_space()->Rebalance()) {
1989     FatalProcessOutOfMemory("NewSpace::Rebalance");
1990   }
1991   new_space()->ResetLinearAllocationArea();
1992   new_space()->set_age_mark(new_space()->top());
1993 
1994   // Fix up special trackers.
1995   external_string_table_.PromoteAllNewSpaceStrings();
1996   // GlobalHandles are updated in PostGarbageCollectionProcessing.
1997 
1998   IncrementYoungSurvivorsCounter(new_space()->Size());
1999   IncrementPromotedObjectsSize(new_space()->Size());
2000   IncrementSemiSpaceCopiedObjectSize(0);
2001 
2002   LOG(isolate_, ResourceEvent("scavenge", "end"));
2003   SetGCState(NOT_IN_GC);
2004 }
2005 
2006 static bool IsLogging(Isolate* isolate) {
2007   return FLAG_verify_predictable || isolate->logger()->is_logging() ||
2008          isolate->is_profiling() ||
2009          (isolate->heap_profiler() != nullptr &&
2010           isolate->heap_profiler()->is_tracking_object_moves()) ||
2011          isolate->heap()->has_heap_object_allocation_tracker();
2012 }
2013 
2014 class PageScavengingItem final : public ItemParallelJob::Item {
2015  public:
2016   explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
2017   virtual ~PageScavengingItem() {}
2018 
2019   void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }
2020 
2021  private:
2022   MemoryChunk* const chunk_;
2023 };
2024 
2025 class ScavengingTask final : public ItemParallelJob::Task {
2026  public:
2027   ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
2028       : ItemParallelJob::Task(heap->isolate()),
2029         heap_(heap),
2030         scavenger_(scavenger),
2031         barrier_(barrier) {}
2032 
2033   void RunInParallel() final {
2034     TRACE_BACKGROUND_GC(
2035         heap_->tracer(),
2036         GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
2037     double scavenging_time = 0.0;
2038     {
2039       barrier_->Start();
2040       TimedScope scope(&scavenging_time);
2041       PageScavengingItem* item = nullptr;
2042       while ((item = GetItem<PageScavengingItem>()) != nullptr) {
2043         item->Process(scavenger_);
2044         item->MarkFinished();
2045       }
2046       do {
2047         scavenger_->Process(barrier_);
2048       } while (!barrier_->Wait());
2049       scavenger_->Process();
2050     }
2051     if (FLAG_trace_parallel_scavenge) {
2052       PrintIsolate(heap_->isolate(),
2053                    "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
2054                    static_cast<void*>(this), scavenging_time,
2055                    scavenger_->bytes_copied(), scavenger_->bytes_promoted());
2056     }
2057   };
2058 
2059  private:
2060   Heap* const heap_;
2061   Scavenger* const scavenger_;
2062   OneshotBarrier* const barrier_;
2063 };
2064 
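// Picks the number of parallel scavenger tasks: roughly one task per MB of
// new-space capacity, capped by kMaxScavengerTasks and by the number of
// worker threads + 1, with a minimum of one. For example, an 8 MB new space
// on a machine with 4 worker threads yields min(8, kMaxScavengerTasks, 5)
// tasks (assuming kMaxScavengerTasks is at least 5). Near the heap limit,
// where the old generation cannot grow by tasks * Page::kPageSize, a single
// task is used to keep memory overhead low.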
2065 int Heap::NumberOfScavengeTasks() {
2066   if (!FLAG_parallel_scavenge) return 1;
2067   const int num_scavenge_tasks =
2068       static_cast<int>(new_space()->TotalCapacity()) / MB;
2069   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
2070   int tasks =
2071       Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
2072   if (!CanExpandOldGeneration(static_cast<size_t>(tasks * Page::kPageSize))) {
2073     // Optimize for memory usage near the heap limit.
2074     tasks = 1;
2075   }
2076   return tasks;
2077 }
2078 
2079 void Heap::Scavenge() {
2080   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
2081   base::LockGuard<base::Mutex> guard(relocation_mutex());
2082   ConcurrentMarking::PauseScope pause_scope(concurrent_marking());
2083   // There are soft limits in the allocation code, designed to trigger a mark
2084   // sweep collection by failing allocations. There is no sense in trying to
2085   // trigger one during scavenge: scavenge allocations should always succeed.
2086   AlwaysAllocateScope scope(isolate());
2087 
2088   // Bump-pointer allocations done during scavenge are not real allocations.
2089   // Pause the inline allocation steps.
2090   PauseAllocationObserversScope pause_observers(this);
2091 
2092   IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
2093       incremental_marking());
2094 
2095 
2096   mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
2097 
2098   SetGCState(SCAVENGE);
2099 
2100   // Implements Cheney's copying algorithm
2101   LOG(isolate_, ResourceEvent("scavenge", "begin"));
2102 
2103   // Flip the semispaces.  After flipping, to space is empty, from space has
2104   // live objects.
2105   new_space_->Flip();
2106   new_space_->ResetLinearAllocationArea();
2107 
2108   ItemParallelJob job(isolate()->cancelable_task_manager(),
2109                       &parallel_scavenge_semaphore_);
2110   const int kMainThreadId = 0;
2111   Scavenger* scavengers[kMaxScavengerTasks];
2112   const bool is_logging = IsLogging(isolate());
2113   const int num_scavenge_tasks = NumberOfScavengeTasks();
2114   OneshotBarrier barrier;
2115   Scavenger::CopiedList copied_list(num_scavenge_tasks);
2116   Scavenger::PromotionList promotion_list(num_scavenge_tasks);
2117   for (int i = 0; i < num_scavenge_tasks; i++) {
2118     scavengers[i] =
2119         new Scavenger(this, is_logging, &copied_list, &promotion_list, i);
2120     job.AddTask(new ScavengingTask(this, scavengers[i], &barrier));
2121   }
2122 
2123   {
2124     Sweeper* sweeper = mark_compact_collector()->sweeper();
2125     // Pause the concurrent sweeper.
2126     Sweeper::PauseOrCompleteScope pause_scope(sweeper);
2127     // Filter out pages from the sweeper that need to be processed for old to
2128     // new slots by the Scavenger. After processing, the Scavenger adds back
2129     // pages that are still unswept. This way the Scavenger has exclusive
2130     // access to the slots of a page and can completely avoid any locks on
2131     // the page itself.
2132     Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
2133     filter_scope.FilterOldSpaceSweepingPages(
2134         [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
2135     RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
2136         this, [&job](MemoryChunk* chunk) {
2137           job.AddItem(new PageScavengingItem(chunk));
2138         });
2139 
2140     RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);
2141 
2142     {
2143       // Identify weak unmodified handles. Requires an unmodified graph.
2144       TRACE_GC(
2145           tracer(),
2146           GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
2147       isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
2148           &JSObject::IsUnmodifiedApiObject);
2149     }
2150     {
2151       // Copy roots.
2152       TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
2153       IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
2154     }
2155     {
2156       // Parallel phase scavenging all copied and promoted objects.
2157       TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
2158       job.Run(isolate()->async_counters());
2159       DCHECK(copied_list.IsEmpty());
2160       DCHECK(promotion_list.IsEmpty());
2161     }
2162     {
2163       // Scavenge weak global handles.
2164       TRACE_GC(tracer(),
2165                GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
2166       isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
2167           &IsUnscavengedHeapObject);
2168       isolate()
2169           ->global_handles()
2170           ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
2171               &root_scavenge_visitor);
2172       scavengers[kMainThreadId]->Process();
2173 
2174       DCHECK(copied_list.IsEmpty());
2175       DCHECK(promotion_list.IsEmpty());
2176       isolate()
2177           ->global_handles()
2178           ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
2179               &root_scavenge_visitor, &IsUnscavengedHeapObject);
2180     }
2181 
2182     for (int i = 0; i < num_scavenge_tasks; i++) {
2183       scavengers[i]->Finalize();
2184       delete scavengers[i];
2185     }
2186   }
2187 
2188   {
2189     // Update references into new space
2190     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
2191     UpdateNewSpaceReferencesInExternalStringTable(
2192         &UpdateNewSpaceReferenceInExternalStringTableEntry);
2193 
2194     incremental_marking()->UpdateMarkingWorklistAfterScavenge();
2195   }
2196 
2197   if (FLAG_concurrent_marking) {
2198     // Ensure that concurrent marker does not track pages that are
2199     // going to be unmapped.
2200     for (Page* p : PageRange(new_space()->from_space().first_page(), nullptr)) {
2201       concurrent_marking()->ClearLiveness(p);
2202     }
2203   }
2204 
2205   ScavengeWeakObjectRetainer weak_object_retainer;
2206   ProcessYoungWeakReferences(&weak_object_retainer);
2207 
2208   // Set age mark.
2209   new_space_->set_age_mark(new_space_->top());
2210 
2211   {
2212     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
2213     ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this);
2214   }
2215   array_buffer_collector()->FreeAllocationsOnBackgroundThread();
2216 
2217   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) {
2218     if (chunk->SweepingDone()) {
2219       RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
2220     } else {
2221       RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
2222     }
2223   });
2224 
2225   // Update how much has survived scavenge.
2226   IncrementYoungSurvivorsCounter(SurvivedNewSpaceObjectSize());
2227 
2228   // Scavenger may find new wrappers by iterating objects promoted onto a black
2229   // page.
2230   local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
2231 
2232   LOG(isolate_, ResourceEvent("scavenge", "end"));
2233 
2234   SetGCState(NOT_IN_GC);
2235 }
2236 
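// Decides whether the next scavenge may use fast promotion (see
// EvacuateYoungGeneration above): enabled when FLAG_fast_promotion_new_space
// is set, we are not optimizing for size or reducing memory, new space is at
// its maximum capacity, and at least kMinPromotedPercentForFastPromotionMode
// percent of it survived the last scavenge.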
2237 void Heap::ComputeFastPromotionMode() {
2238   const size_t survived_in_new_space =
2239       survived_last_scavenge_ * 100 / new_space_->Capacity();
2240   fast_promotion_mode_ =
2241       !FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
2242       !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
2243       survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
2244   if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
2245     PrintIsolate(
2246         isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n",
2247         fast_promotion_mode_ ? "true" : "false", survived_in_new_space);
2248   }
2249 }
2250 
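// Code-space pages are normally mapped read+execute. While the registry below
// is enabled, chunks that need to be written to are flipped to read+write and
// remembered in unprotected_memory_chunks_; ProtectUnprotectedMemoryChunks
// flips every remembered executable chunk back to read+execute and clears the
// set.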
2251 void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) {
2252   if (unprotected_memory_chunks_registry_enabled_) {
2253     base::LockGuard<base::Mutex> guard(&unprotected_memory_chunks_mutex_);
2254     if (unprotected_memory_chunks_.insert(chunk).second) {
2255       chunk->SetReadAndWritable();
2256     }
2257   }
2258 }
2259 
2260 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject* object) {
2261   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromAddress(object->address()));
2262 }
2263 
2264 void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) {
2265   unprotected_memory_chunks_.erase(chunk);
2266 }
2267 
2268 void Heap::ProtectUnprotectedMemoryChunks() {
2269   DCHECK(unprotected_memory_chunks_registry_enabled_);
2270   for (auto chunk = unprotected_memory_chunks_.begin();
2271        chunk != unprotected_memory_chunks_.end(); chunk++) {
2272     CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk));
2273     (*chunk)->SetReadAndExecutable();
2274   }
2275   unprotected_memory_chunks_.clear();
2276 }
2277 
2278 bool Heap::ExternalStringTable::Contains(HeapObject* obj) {
2279   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
2280     if (new_space_strings_[i] == obj) return true;
2281   }
2282   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
2283     if (old_space_strings_[i] == obj) return true;
2284   }
2285   return false;
2286 }
2287 
2288 void Heap::ProcessMovedExternalString(Page* old_page, Page* new_page,
2289                                       ExternalString* string) {
2290   size_t size = string->ExternalPayloadSize();
2291   new_page->IncrementExternalBackingStoreBytes(
2292       ExternalBackingStoreType::kExternalString, size);
2293   old_page->DecrementExternalBackingStoreBytes(
2294       ExternalBackingStoreType::kExternalString, size);
2295 }
2296 
2297 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2298                                                                 Object** p) {
2299   MapWord first_word = HeapObject::cast(*p)->map_word();
2300 
2301   if (!first_word.IsForwardingAddress()) {
2302     // Unreachable external string can be finalized.
2303     String* string = String::cast(*p);
2304     if (!string->IsExternalString()) {
2305       // Original external string has been internalized.
2306       DCHECK(string->IsThinString());
2307       return nullptr;
2308     }
2309     heap->FinalizeExternalString(string);
2310     return nullptr;
2311   }
2312 
2313   // String is still reachable.
2314   String* new_string = String::cast(first_word.ToForwardingAddress());
2315   String* original_string = reinterpret_cast<String*>(*p);
2316   // The length of the original string is used to disambiguate the scenario
2317   // of a ThinString being forwarded to an ExternalString (which already exists
2318   // in the OLD space), and an ExternalString being forwarded to its promoted
2319   // copy. See Scavenger::EvacuateThinString.
2320   if (new_string->IsThinString() || original_string->length() == 0) {
2321     // Filtering Thin strings out of the external string table.
2322     return nullptr;
2323   } else if (new_string->IsExternalString()) {
2324     heap->ProcessMovedExternalString(
2325         Page::FromAddress(reinterpret_cast<Address>(*p)),
2326         Page::FromHeapObject(new_string), ExternalString::cast(new_string));
2327     return new_string;
2328   }
2329 
2330   // Internalization can replace external strings with non-external strings.
2331   return new_string->IsExternalString() ? new_string : nullptr;
2332 }
2333 
2334 void Heap::ExternalStringTable::VerifyNewSpace() {
2335 #ifdef DEBUG
2336   std::set<String*> visited_map;
2337   std::map<MemoryChunk*, size_t> size_map;
2338   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2339   for (size_t i = 0; i < new_space_strings_.size(); ++i) {
2340     String* obj = String::cast(new_space_strings_[i]);
2341     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2342     DCHECK(mc->InNewSpace());
2343     DCHECK(heap_->InNewSpace(obj));
2344     DCHECK(!obj->IsTheHole(heap_->isolate()));
2345     DCHECK(obj->IsExternalString());
2346     // Note: we can have repeated elements in the table.
2347     DCHECK_EQ(0, visited_map.count(obj));
2348     visited_map.insert(obj);
2349     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
2350   }
2351   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2352        it != size_map.end(); it++)
2353     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2354 #endif
2355 }
2356 
2357 void Heap::ExternalStringTable::Verify() {
2358 #ifdef DEBUG
2359   std::set<String*> visited_map;
2360   std::map<MemoryChunk*, size_t> size_map;
2361   ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString;
2362   VerifyNewSpace();
2363   for (size_t i = 0; i < old_space_strings_.size(); ++i) {
2364     String* obj = String::cast(old_space_strings_[i]);
2365     MemoryChunk* mc = MemoryChunk::FromHeapObject(obj);
2366     DCHECK(!mc->InNewSpace());
2367     DCHECK(!heap_->InNewSpace(obj));
2368     DCHECK(!obj->IsTheHole(heap_->isolate()));
2369     DCHECK(obj->IsExternalString());
2370     // Note: we can have repeated elements in the table.
2371     DCHECK_EQ(0, visited_map.count(obj));
2372     visited_map.insert(obj);
2373     size_map[mc] += ExternalString::cast(obj)->ExternalPayloadSize();
2374   }
2375   for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin();
2376        it != size_map.end(); it++)
2377     DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second);
2378 #endif
2379 }
2380 
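// Applies |updater_func| to every entry in new_space_strings_. Entries whose
// string died return nullptr and are dropped, entries whose string is still
// in new space are compacted towards the front of the vector via the |last|
// cursor, and entries whose string was promoted are appended to
// old_space_strings_. Finally the vector is shrunk to the surviving prefix.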
2381 void Heap::ExternalStringTable::UpdateNewSpaceReferences(
2382     Heap::ExternalStringTableUpdaterCallback updater_func) {
2383   if (new_space_strings_.empty()) return;
2384 
2385   Object** start = new_space_strings_.data();
2386   Object** end = start + new_space_strings_.size();
2387   Object** last = start;
2388 
2389   for (Object** p = start; p < end; ++p) {
2390     String* target = updater_func(heap_, p);
2391 
2392     if (target == nullptr) continue;
2393 
2394     DCHECK(target->IsExternalString());
2395 
2396     if (InNewSpace(target)) {
2397       // String is still in new space. Update the table entry.
2398       *last = target;
2399       ++last;
2400     } else {
2401       // String got promoted. Move it to the old string list.
2402       old_space_strings_.push_back(target);
2403     }
2404   }
2405 
2406   DCHECK_LE(last, end);
2407   new_space_strings_.resize(static_cast<size_t>(last - start));
2408 #ifdef VERIFY_HEAP
2409   if (FLAG_verify_heap) {
2410     VerifyNewSpace();
2411   }
2412 #endif
2413 }
2414 
2415 void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() {
2416   old_space_strings_.reserve(old_space_strings_.size() +
2417                              new_space_strings_.size());
2418   std::move(std::begin(new_space_strings_), std::end(new_space_strings_),
2419             std::back_inserter(old_space_strings_));
2420   new_space_strings_.clear();
2421 }
2422 
2423 void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
2424   if (!new_space_strings_.empty()) {
2425     v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
2426                          new_space_strings_.data(),
2427                          new_space_strings_.data() + new_space_strings_.size());
2428   }
2429 }
2430 
2431 void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
2432   IterateNewSpaceStrings(v);
2433   if (!old_space_strings_.empty()) {
2434     v->VisitRootPointers(Root::kExternalStringsTable, nullptr,
2435                          old_space_strings_.data(),
2436                          old_space_strings_.data() + old_space_strings_.size());
2437   }
2438 }
2439 
2440 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
2441     ExternalStringTableUpdaterCallback updater_func) {
2442   external_string_table_.UpdateNewSpaceReferences(updater_func);
2443 }
2444 
2445 void Heap::ExternalStringTable::UpdateReferences(
2446     Heap::ExternalStringTableUpdaterCallback updater_func) {
2447   if (old_space_strings_.size() > 0) {
2448     Object** start = old_space_strings_.data();
2449     Object** end = start + old_space_strings_.size();
2450     for (Object** p = start; p < end; ++p) *p = updater_func(heap_, p);
2451   }
2452 
2453   UpdateNewSpaceReferences(updater_func);
2454 }
2455 
2456 void Heap::UpdateReferencesInExternalStringTable(
2457     ExternalStringTableUpdaterCallback updater_func) {
2458   external_string_table_.UpdateReferences(updater_func);
2459 }
2460 
2461 
2462 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
2463   ProcessNativeContexts(retainer);
2464   ProcessAllocationSites(retainer);
2465 }
2466 
2467 
2468 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
2469   ProcessNativeContexts(retainer);
2470 }
2471 
2472 
2473 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
2474   Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
2475   // Update the head of the list of contexts.
2476   set_native_contexts_list(head);
2477 }
2478 
2479 
2480 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
2481   Object* allocation_site_obj =
2482       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
2483   set_allocation_sites_list(allocation_site_obj);
2484 }
2485 
2486 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
2487   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
2488   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
2489 }
2490 
2491 void Heap::ForeachAllocationSite(Object* list,
2492                                  std::function<void(AllocationSite*)> visitor) {
2493   DisallowHeapAllocation disallow_heap_allocation;
2494   Object* current = list;
2495   while (current->IsAllocationSite()) {
2496     AllocationSite* site = AllocationSite::cast(current);
2497     visitor(site);
2498     Object* current_nested = site->nested_site();
2499     while (current_nested->IsAllocationSite()) {
2500       AllocationSite* nested_site = AllocationSite::cast(current_nested);
2501       visitor(nested_site);
2502       current_nested = nested_site->nested_site();
2503     }
2504     current = site->weak_next();
2505   }
2506 }
2507 
2508 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
2509   DisallowHeapAllocation no_allocation_scope;
2510   bool marked = false;
2511 
2512   ForeachAllocationSite(allocation_sites_list(),
2513                         [&marked, flag, this](AllocationSite* site) {
2514                           if (site->GetPretenureMode() == flag) {
2515                             site->ResetPretenureDecision();
2516                             site->set_deopt_dependent_code(true);
2517                             marked = true;
2518                             RemoveAllocationSitePretenuringFeedback(site);
2519                             return;
2520                           }
2521                         });
2522   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
2523 }
2524 
2525 
2526 void Heap::EvaluateOldSpaceLocalPretenuring(
2527     uint64_t size_of_objects_before_gc) {
2528   uint64_t size_of_objects_after_gc = SizeOfObjects();
2529   double old_generation_survival_rate =
2530       (static_cast<double>(size_of_objects_after_gc) * 100) /
2531       static_cast<double>(size_of_objects_before_gc);
2532 
2533   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2534     // Too many objects died in the old generation; pretenuring of the wrong
2535     // allocation sites may be the cause. We have to deopt all
2536     // dependent code registered in the allocation sites to re-evaluate
2537     // our pretenuring decisions.
2538     ResetAllAllocationSitesDependentCode(TENURED);
2539     if (FLAG_trace_pretenuring) {
2540       PrintF(
2541           "Deopt all allocation sites dependent code due to low survival "
2542           "rate in the old generation %f\n",
2543           old_generation_survival_rate);
2544     }
2545   }
2546 }
2547 
2548 
2549 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2550   DisallowHeapAllocation no_allocation;
2551   // All external strings are listed in the external string table.
2552 
2553   class ExternalStringTableVisitorAdapter : public RootVisitor {
2554    public:
2555     explicit ExternalStringTableVisitorAdapter(
2556         Isolate* isolate, v8::ExternalResourceVisitor* visitor)
2557         : isolate_(isolate), visitor_(visitor) {}
2558     virtual void VisitRootPointers(Root root, const char* description,
2559                                    Object** start, Object** end) {
2560       for (Object** p = start; p < end; p++) {
2561         DCHECK((*p)->IsExternalString());
2562         visitor_->VisitExternalString(
2563             Utils::ToLocal(Handle<String>(String::cast(*p), isolate_)));
2564       }
2565     }
2566 
2567    private:
2568     Isolate* isolate_;
2569     v8::ExternalResourceVisitor* visitor_;
2570   } external_string_table_visitor(isolate(), visitor);
2571 
2572   external_string_table_.IterateAll(&external_string_table_visitor);
2573 }
2574 
2575 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
2576               0);  // NOLINT
2577 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
2578               0);  // NOLINT
2579 #ifdef V8_HOST_ARCH_32_BIT
2580 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
2581               0);  // NOLINT
2582 #endif
2583 
2584 
2585 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
2586   switch (alignment) {
2587     case kWordAligned:
2588       return 0;
2589     case kDoubleAligned:
2590     case kDoubleUnaligned:
2591       return kDoubleSize - kPointerSize;
2592     default:
2593       UNREACHABLE();
2594   }
2595   return 0;
2596 }
2597 
2598 
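// Returns how many filler bytes must precede an allocation at |address| to
// satisfy |alignment|. As a rough illustration, on a 32-bit build where
// kPointerSize == 4 and kDoubleSize == 8, a kDoubleAligned request at an
// address with offset 4 modulo 8 needs a 4-byte filler, while an already
// 8-byte-aligned address needs none; kDoubleUnaligned is the mirror case.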
2599 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2600   intptr_t offset = OffsetFrom(address);
2601   if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
2602     return kPointerSize;
2603   if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
2604     return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
2605   return 0;
2606 }
2607 
2608 
2609 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
2610   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
2611   return HeapObject::FromAddress(object->address() + filler_size);
2612 }
2613 
2614 
2615 HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2616                                   int allocation_size,
2617                                   AllocationAlignment alignment) {
2618   int filler_size = allocation_size - object_size;
2619   DCHECK_LT(0, filler_size);
2620   int pre_filler = GetFillToAlign(object->address(), alignment);
2621   if (pre_filler) {
2622     object = PrecedeWithFiller(object, pre_filler);
2623     filler_size -= pre_filler;
2624   }
2625   if (filler_size)
2626     CreateFillerObjectAt(object->address() + object_size, filler_size,
2627                          ClearRecordedSlots::kNo);
2628   return object;
2629 }
2630 
2631 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
2632   ArrayBufferTracker::RegisterNew(this, buffer);
2633 }
2634 
2635 
2636 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2637   ArrayBufferTracker::Unregister(this, buffer);
2638 }
2639 
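// Once the tracer has recorded survival events and the old generation has not
// been explicitly configured, scale the initial old-generation allocation
// limit by the average survival ratio (in percent), but never let it drop
// below the heap controller's minimum allocation-limit growing step.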
2640 void Heap::ConfigureInitialOldGenerationSize() {
2641   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2642     old_generation_allocation_limit_ =
2643         Max(heap_controller()->MinimumAllocationLimitGrowingStep(
2644                 CurrentHeapGrowingMode()),
2645             static_cast<size_t>(
2646                 static_cast<double>(old_generation_allocation_limit_) *
2647                 (tracer()->AverageSurvivalRatio() / 100)));
2648   }
2649 }
2650 
2651 void Heap::CreateJSEntryStub() {
2652   JSEntryStub stub(isolate(), StackFrame::ENTRY);
2653   set_js_entry_code(*stub.GetCode());
2654 }
2655 
2656 
2657 void Heap::CreateJSConstructEntryStub() {
2658   JSEntryStub stub(isolate(), StackFrame::CONSTRUCT_ENTRY);
2659   set_js_construct_entry_code(*stub.GetCode());
2660 }
2661 
2662 void Heap::CreateJSRunMicrotasksEntryStub() {
2663   JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks);
2664   set_js_run_microtasks_entry_code(*stub.GetCode());
2665 }
2666 
2667 void Heap::CreateFixedStubs() {
2668   // Here we create roots for fixed stubs. They are needed at GC
2669   // for cooking and uncooking (check out frames.cc).
2670   // This eliminates the need for doing a dictionary lookup in the
2671   // stub cache for these stubs.
2672   HandleScope scope(isolate());
2673   // Canonicalize handles, so that we can share constant pool entries pointing
2674   // to code targets without dereferencing their handles.
2675   CanonicalHandleScope canonical(isolate());
2676 
2677   // Create stubs that should be there, so we don't unexpectedly have to
2678   // create them if we need them during the creation of another stub.
2679   // Stub creation mixes raw pointers and handles in an unsafe manner so
2680   // we cannot create stubs while we are creating stubs.
2681   CodeStub::GenerateStubsAheadOfTime(isolate());
2682 
2683   // gcc-4.4 has problem generating correct code of following snippet:
2684   // {  JSEntryStub stub;
2685   //    js_entry_code_ = *stub.GetCode();
2686   // }
2687   // {  JSConstructEntryStub stub;
2688   //    js_construct_entry_code_ = *stub.GetCode();
2689   // }
2690   // To workaround the problem, make separate functions without inlining.
2691   Heap::CreateJSEntryStub();
2692   Heap::CreateJSConstructEntryStub();
2693   Heap::CreateJSRunMicrotasksEntryStub();
2694 }
2695 
2696 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2697   switch (root_index) {
2698     case kNumberStringCacheRootIndex:
2699     case kCodeStubsRootIndex:
2700     case kScriptListRootIndex:
2701     case kMaterializedObjectsRootIndex:
2702     case kMicrotaskQueueRootIndex:
2703     case kDetachedContextsRootIndex:
2704     case kRetainedMapsRootIndex:
2705     case kRetainingPathTargetsRootIndex:
2706     case kFeedbackVectorsForProfilingToolsRootIndex:
2707     case kNoScriptSharedFunctionInfosRootIndex:
2708     case kSerializedObjectsRootIndex:
2709     case kSerializedGlobalProxySizesRootIndex:
2710     case kPublicSymbolTableRootIndex:
2711     case kApiSymbolTableRootIndex:
2712     case kApiPrivateSymbolTableRootIndex:
2713     case kMessageListenersRootIndex:
2714     case kDeserializeLazyHandlerRootIndex:
2715     case kDeserializeLazyHandlerWideRootIndex:
2716     case kDeserializeLazyHandlerExtraWideRootIndex:
2717 // Smi values
2718 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2719       SMI_ROOT_LIST(SMI_ENTRY)
2720 #undef SMI_ENTRY
2721     // String table
2722     case kStringTableRootIndex:
2723       return true;
2724 
2725     default:
2726       return false;
2727   }
2728 }
2729 
2730 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2731   bool can_be = !RootCanBeWrittenAfterInitialization(root_index) &&
2732                 !InNewSpace(root(root_index));
2733   DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index))));
2734   return can_be;
2735 }
2736 
2737 
2738 void Heap::FlushNumberStringCache() {
2739   // Flush the number to string cache.
2740   int len = number_string_cache()->length();
2741   for (int i = 0; i < len; i++) {
2742     number_string_cache()->set_undefined(i);
2743   }
2744 }
2745 
2746 namespace {
2747 
2748 Heap::RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type) {
2749   switch (array_type) {
2750 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
2751   case kExternal##Type##Array:                            \
2752     return Heap::kFixed##Type##ArrayMapRootIndex;
2753 
2754     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
2755 #undef ARRAY_TYPE_TO_ROOT_INDEX
2756   }
2757   UNREACHABLE();
2758 }
2759 
2760 Heap::RootListIndex RootIndexForFixedTypedArray(ElementsKind elements_kind) {
2761   switch (elements_kind) {
2762 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
2763   case TYPE##_ELEMENTS:                           \
2764     return Heap::kFixed##Type##ArrayMapRootIndex;
2765     TYPED_ARRAYS(TYPED_ARRAY_CASE)
2766     default:
2767       UNREACHABLE();
2768 #undef TYPED_ARRAY_CASE
2769   }
2770 }
2771 
2772 Heap::RootListIndex RootIndexForEmptyFixedTypedArray(
2773     ElementsKind elements_kind) {
2774   switch (elements_kind) {
2775 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype) \
2776   case TYPE##_ELEMENTS:                                     \
2777     return Heap::kEmptyFixed##Type##ArrayRootIndex;
2778 
2779     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
2780 #undef ELEMENT_KIND_TO_ROOT_INDEX
2781     default:
2782       UNREACHABLE();
2783   }
2784 }
2785 
2786 }  // namespace
2787 
2788 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2789   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2790 }
2791 
2792 Map* Heap::MapForFixedTypedArray(ElementsKind elements_kind) {
2793   return Map::cast(roots_[RootIndexForFixedTypedArray(elements_kind)]);
2794 }
2795 
2796 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) {
2797   return FixedTypedArrayBase::cast(
2798       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
2799 }
2800 
2801 HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
2802                                        ClearRecordedSlots clear_slots_mode,
2803                                        ClearFreedMemoryMode clear_memory_mode) {
2804   if (size == 0) return nullptr;
2805   HeapObject* filler = HeapObject::FromAddress(addr);
2806   if (size == kPointerSize) {
2807     filler->set_map_after_allocation(
2808         reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)),
2809         SKIP_WRITE_BARRIER);
2810   } else if (size == 2 * kPointerSize) {
2811     filler->set_map_after_allocation(
2812         reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
2813         SKIP_WRITE_BARRIER);
2814     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2815       Memory<Address>(addr + kPointerSize) =
2816           static_cast<Address>(kClearedFreeMemoryValue);
2817     }
2818   } else {
2819     DCHECK_GT(size, 2 * kPointerSize);
2820     filler->set_map_after_allocation(
2821         reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
2822         SKIP_WRITE_BARRIER);
2823     FreeSpace::cast(filler)->relaxed_write_size(size);
2824     if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
2825       memset(reinterpret_cast<void*>(addr + 2 * kPointerSize),
2826              kClearedFreeMemoryValue, size - 2 * kPointerSize);
2827     }
2828   }
2829   if (clear_slots_mode == ClearRecordedSlots::kYes) {
2830     ClearRecordedSlotRange(addr, addr + size);
2831   }
2832 
2833   // At this point, we may be deserializing the heap from a snapshot, and
2834   // none of the maps have been created yet, so they are still nullptr.
2835   DCHECK((filler->map() == nullptr && !deserialization_complete_) ||
2836          filler->map()->IsMap());
2837   return filler;
2838 }
2839 
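// The filler written by CreateFillerObjectAt above takes one of three
// shapes, selected purely by size: a one-word filler (map only), a two-word
// filler (map plus one payload word), or a FreeSpace object that also
// records its size so heap iteration and the sweeper can skip it. A hedged
// illustration of the resulting layouts (word diagrams, not exact offsets):
//
//   CreateFillerObjectAt(addr, kPointerSize, ClearRecordedSlots::kNo);
//       -> [ one_pointer_filler_map ]
//   CreateFillerObjectAt(addr, 2 * kPointerSize, ClearRecordedSlots::kNo);
//       -> [ two_pointer_filler_map | payload ]
//   CreateFillerObjectAt(addr, 8 * kPointerSize, ClearRecordedSlots::kNo);
//       -> [ free_space_map | size = 8 * kPointerSize | ... ]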
2840 
2841 bool Heap::CanMoveObjectStart(HeapObject* object) {
2842   if (!FLAG_move_object_start) return false;
2843 
2844   // Sampling heap profiler may have a reference to the object.
2845   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
2846 
2847   Address address = object->address();
2848 
2849   if (lo_space()->Contains(object)) return false;
2850 
2851   // We can move the object start if the page was already swept.
2852   return Page::FromAddress(address)->SweepingDone();
2853 }
2854 
2855 bool Heap::IsImmovable(HeapObject* object) {
2856   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
2857   return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
2858 }
2859 
2860 #ifdef ENABLE_SLOW_DCHECKS
2861 namespace {
2862 
2863 class LeftTrimmerVerifierRootVisitor : public RootVisitor {
2864  public:
2865   explicit LeftTrimmerVerifierRootVisitor(FixedArrayBase* to_check)
2866       : to_check_(to_check) {}
2867 
2868   virtual void VisitRootPointers(Root root, const char* description,
2869                                  Object** start, Object** end) {
2870     for (Object** p = start; p < end; ++p) {
2871       DCHECK_NE(*p, to_check_);
2872     }
2873   }
2874 
2875  private:
2876   FixedArrayBase* to_check_;
2877 
2878   DISALLOW_COPY_AND_ASSIGN(LeftTrimmerVerifierRootVisitor);
2879 };
2880 }  // namespace
2881 #endif  // ENABLE_SLOW_DCHECKS
2882 
2883 namespace {
2884 bool MayContainRecordedSlots(HeapObject* object) {
2885   // New space objects do not have recorded slots.
2886   if (MemoryChunk::FromHeapObject(object)->InNewSpace()) return false;
2887   // Whitelist objects that definitely do not have pointers.
2888   if (object->IsByteArray() || object->IsFixedDoubleArray()) return false;
2889   // Conservatively return true for other objects.
2890   return true;
2891 }
2892 }  // namespace
2893 
2894 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
2895                                          int elements_to_trim) {
2896   if (elements_to_trim == 0) {
2897     // This simplifies reasoning in the rest of the function.
2898     return object;
2899   }
2900   CHECK_NOT_NULL(object);
2901   DCHECK(CanMoveObjectStart(object));
2902   // Add a custom visitor to the concurrent marker if a new left-trimmable
2903   // type is added.
2904   DCHECK(object->IsFixedArray() || object->IsFixedDoubleArray());
2905   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
2906   const int bytes_to_trim = elements_to_trim * element_size;
2907   Map* map = object->map();
2908 
2909   // For now this trick is only applied to objects in new and paged space.
2910   // In large object space the object's start must coincide with the chunk,
2911   // and thus the trick is simply not applicable.
2912   DCHECK(!lo_space()->Contains(object));
2913   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
2914 
2915   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
2916   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
2917   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
2918 
2919   const int len = object->length();
2920   DCHECK(elements_to_trim <= len);
2921 
2922   // Calculate location of new array start.
2923   Address old_start = object->address();
2924   Address new_start = old_start + bytes_to_trim;
2925 
2926   if (incremental_marking()->IsMarking()) {
2927     incremental_marking()->NotifyLeftTrimming(
2928         object, HeapObject::FromAddress(new_start));
2929   }
2930 
2931   // Technically in new space this write might be omitted (except for
2932   // debug mode, which iterates through the heap), but to be safe
2933   // we still do it.
2934   HeapObject* filler =
2935       CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
2936 
2937   // Initialize header of the trimmed array. Since left trimming is only
2938   // performed on pages which are not concurrently swept, creating a filler
2939   // object does not require synchronization.
2940   RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
2941   RELAXED_WRITE_FIELD(object, bytes_to_trim + kPointerSize,
2942                       Smi::FromInt(len - elements_to_trim));
2943 
2944   FixedArrayBase* new_object =
2945       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
2946 
2947   // Remove recorded slots for the new map and length offset.
2948   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
2949   ClearRecordedSlot(new_object, HeapObject::RawField(
2950                                     new_object, FixedArrayBase::kLengthOffset));
2951 
2952   // Handle invalidated old-to-old slots.
2953   if (incremental_marking()->IsCompacting() &&
2954       MayContainRecordedSlots(new_object)) {
2955     // If the array was right-trimmed before, then it is registered in
2956     // the invalidated_slots.
2957     MemoryChunk::FromHeapObject(new_object)
2958         ->MoveObjectWithInvalidatedSlots(filler, new_object);
2959     // We have to clear slots in the free space to avoid stale old-to-old slots.
2960     // Note we cannot use ClearFreedMemoryMode of CreateFillerObjectAt because
2961     // we need pointer-granularity writes to avoid a race with the concurrent
2962     // marking.
2963     if (filler->Size() > FreeSpace::kSize) {
2964       MemsetPointer(HeapObject::RawField(filler, FreeSpace::kSize),
2965                     ReadOnlyRoots(this).undefined_value(),
2966                     (filler->Size() - FreeSpace::kSize) / kPointerSize);
2967     }
2968   }
2969   // Notify the heap profiler of the change in object layout.
2970   OnMoveEvent(new_object, object, new_object->Size());
2971 
2972 #ifdef ENABLE_SLOW_DCHECKS
2973   if (FLAG_enable_slow_asserts) {
2974     // Make sure the stack or other roots (e.g., Handles) don't contain pointers
2975     // to the original FixedArray (which is now the filler object).
2976     LeftTrimmerVerifierRootVisitor root_visitor(object);
2977     IterateRoots(&root_visitor, VISIT_ALL);
2978   }
2979 #endif  // ENABLE_SLOW_DCHECKS
2980 
2981   return new_object;
2982 }
2983 
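// Hedged sketch of the left trim performed by LeftTrimFixedArray above, for
// a FixedArray of length 6 trimmed by 2 elements (word diagram only):
//
//   before:  old_start -> [ map | length=6 | e0 | e1 | e2 | e3 | e4 | e5 ]
//   after:   old_start -> [ two-word filler ]
//            new_start -> [ map | length=4 | e2 | e3 | e4 | e5 ]
//
// The filler keeps the trimmed prefix iterable, and the recorded slots that
// used to cover the relocated map/length words are cleared.
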
2984 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
2985   const int len = object->length();
2986   DCHECK_LE(elements_to_trim, len);
2987   DCHECK_GE(elements_to_trim, 0);
2988 
2989   int bytes_to_trim;
2990   DCHECK(!object->IsFixedTypedArrayBase());
2991   if (object->IsByteArray()) {
2992     int new_size = ByteArray::SizeFor(len - elements_to_trim);
2993     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
2994     DCHECK_GE(bytes_to_trim, 0);
2995   } else if (object->IsFixedArray()) {
2996     CHECK_NE(elements_to_trim, len);
2997     bytes_to_trim = elements_to_trim * kPointerSize;
2998   } else {
2999     DCHECK(object->IsFixedDoubleArray());
3000     CHECK_NE(elements_to_trim, len);
3001     bytes_to_trim = elements_to_trim * kDoubleSize;
3002   }
3003 
3004   CreateFillerForArray<FixedArrayBase>(object, elements_to_trim, bytes_to_trim);
3005 }
3006 
3007 void Heap::RightTrimWeakFixedArray(WeakFixedArray* object,
3008                                    int elements_to_trim) {
3009   // This function is safe to use only at the end of the mark compact
3010   // collection: When marking, we record the weak slots, and shrinking
3011   // invalidates them.
3012   DCHECK_EQ(gc_state(), MARK_COMPACT);
3013   CreateFillerForArray<WeakFixedArray>(object, elements_to_trim,
3014                                        elements_to_trim * kPointerSize);
3015 }
3016 
3017 template <typename T>
3018 void Heap::CreateFillerForArray(T* object, int elements_to_trim,
3019                                 int bytes_to_trim) {
3020   DCHECK(object->IsFixedArrayBase() || object->IsByteArray() ||
3021          object->IsWeakFixedArray());
3022 
3023   // For now this trick is only applied to objects in new and paged space.
3024   DCHECK(object->map() != ReadOnlyRoots(this).fixed_cow_array_map());
3025 
3026   if (bytes_to_trim == 0) {
3027     DCHECK_EQ(elements_to_trim, 0);
3028     // No need to create filler and update live bytes counters.
3029     return;
3030   }
3031 
3032   // Calculate location of new array end.
3033   int old_size = object->Size();
3034   Address old_end = object->address() + old_size;
3035   Address new_end = old_end - bytes_to_trim;
3036 
3037   // Register the array as an object with invalidated old-to-old slots. We
3038   // cannot use NotifyObjectLayoutChange as it would mark the array black,
3039   // which is not safe for left-trimming because left-trimming re-pushes
3040   // only grey arrays onto the marking worklist.
3041   if (incremental_marking()->IsCompacting() &&
3042       MayContainRecordedSlots(object)) {
3043     // Ensure that the object survives because the InvalidatedSlotsFilter will
3044     // compute its size from its map during the pointers-updating phase.
3045     incremental_marking()->WhiteToGreyAndPush(object);
3046     MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
3047         object, old_size);
3048   }
3049 
3050   // Technically in new space this write might be omitted (except for
3051   // debug mode, which iterates through the heap), but to be safe
3052   // we still do it.
3053   // We do not create a filler for objects in large object space.
3054   // TODO(hpayer): We should shrink the large object page if the size
3055   // of the object changed significantly.
3056   if (!lo_space()->Contains(object)) {
3057     HeapObject* filler =
3058         CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
3059     DCHECK_NOT_NULL(filler);
3060     // Clear the mark bits of the black area that belongs now to the filler.
3061     // This is an optimization. The sweeper will release black fillers anyway.
3062     if (incremental_marking()->black_allocation() &&
3063         incremental_marking()->marking_state()->IsBlackOrGrey(filler)) {
3064       Page* page = Page::FromAddress(new_end);
3065       incremental_marking()->marking_state()->bitmap(page)->ClearRange(
3066           page->AddressToMarkbitIndex(new_end),
3067           page->AddressToMarkbitIndex(new_end + bytes_to_trim));
3068     }
3069   }
3070 
3071   // Initialize header of the trimmed array. We are storing the new length
3072   // using release store after creating a filler for the left-over space to
3073   // avoid races with the sweeper thread.
3074   object->synchronized_set_length(object->length() - elements_to_trim);
3075 
3076   // Notify the heap object allocation tracker of the change in object layout.
3077   // The array may not be moved during GC, but its size still has to be adjusted.
3078   for (auto& tracker : allocation_trackers_) {
3079     tracker->UpdateObjectSizeEvent(object->address(), object->Size());
3080   }
3081 }
3082 
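// The ordering used in CreateFillerForArray above (write the filler first,
// then publish the shrunken length with a release store) is the standard
// publish-after-initialize pattern. A generic, hedged C++ analogy, not V8
// code:
//
//   #include <atomic>
//   struct Buffer { int payload[16]; std::atomic<int> length{16}; };
//   void Shrink(Buffer* b, int new_len) {
//     for (int i = new_len; i < 16; i++) b->payload[i] = 0;  // fix up tail
//     b->length.store(new_len, std::memory_order_release);   // then publish
//   }
//
// A reader that pairs this with an acquire load of length never observes the
// shorter length without also seeing the repaired tail, which mirrors the
// race with the sweeper that the comment above is guarding against.
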
3083 void Heap::MakeHeapIterable() {
3084   mark_compact_collector()->EnsureSweepingCompleted();
3085 }
3086 
3087 
3088 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
3089   const double kMinMutatorUtilization = 0.0;
3090   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
3091   if (mutator_speed == 0) return kMinMutatorUtilization;
3092   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
3093   // Derivation:
3094   // mutator_utilization = mutator_time / (mutator_time + gc_time)
3095   // mutator_time = 1 / mutator_speed
3096   // gc_time = 1 / gc_speed
3097   // mutator_utilization = (1 / mutator_speed) /
3098   //                       (1 / mutator_speed + 1 / gc_speed)
3099   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
3100   return gc_speed / (mutator_speed + gc_speed);
3101 }
3102 
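// Hedged worked example for ComputeMutatorUtilization above: with a mutator
// allocation speed of 1000 bytes/ms and a GC speed of 200000 bytes/ms,
// utilization = 200000 / (1000 + 200000), roughly 0.995, i.e. about 0.5% of
// the time is attributed to GC. When the GC speed is unknown (0), the
// conservative 200000 bytes/ms constant is substituted before evaluating the
// formula.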
3103 
3104 double Heap::YoungGenerationMutatorUtilization() {
3105   double mutator_speed = static_cast<double>(
3106       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
3107   double gc_speed =
3108       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
3109   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
3110   if (FLAG_trace_mutator_utilization) {
3111     isolate()->PrintWithTimestamp(
3112         "Young generation mutator utilization = %.3f ("
3113         "mutator_speed=%.f, gc_speed=%.f)\n",
3114         result, mutator_speed, gc_speed);
3115   }
3116   return result;
3117 }
3118 
3119 
3120 double Heap::OldGenerationMutatorUtilization() {
3121   double mutator_speed = static_cast<double>(
3122       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
3123   double gc_speed = static_cast<double>(
3124       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
3125   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
3126   if (FLAG_trace_mutator_utilization) {
3127     isolate()->PrintWithTimestamp(
3128         "Old generation mutator utilization = %.3f ("
3129         "mutator_speed=%.f, gc_speed=%.f)\n",
3130         result, mutator_speed, gc_speed);
3131   }
3132   return result;
3133 }
3134 
3135 
3136 bool Heap::HasLowYoungGenerationAllocationRate() {
3137   const double high_mutator_utilization = 0.993;
3138   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
3139 }
3140 
3141 
3142 bool Heap::HasLowOldGenerationAllocationRate() {
3143   const double high_mutator_utilization = 0.993;
3144   return OldGenerationMutatorUtilization() > high_mutator_utilization;
3145 }
3146 
3147 
3148 bool Heap::HasLowAllocationRate() {
3149   return HasLowYoungGenerationAllocationRate() &&
3150          HasLowOldGenerationAllocationRate();
3151 }
3152 
3153 bool Heap::IsIneffectiveMarkCompact(size_t old_generation_size,
3154                                     double mutator_utilization) {
3155   const double kHighHeapPercentage = 0.8;
3156   const double kLowMutatorUtilization = 0.4;
3157   return old_generation_size >=
3158              kHighHeapPercentage * max_old_generation_size_ &&
3159          mutator_utilization < kLowMutatorUtilization;
3160 }
3161 
3162 void Heap::CheckIneffectiveMarkCompact(size_t old_generation_size,
3163                                        double mutator_utilization) {
3164   const int kMaxConsecutiveIneffectiveMarkCompacts = 4;
3165   if (!FLAG_detect_ineffective_gcs_near_heap_limit) return;
3166   if (!IsIneffectiveMarkCompact(old_generation_size, mutator_utilization)) {
3167     consecutive_ineffective_mark_compacts_ = 0;
3168     return;
3169   }
3170   ++consecutive_ineffective_mark_compacts_;
3171   if (consecutive_ineffective_mark_compacts_ ==
3172       kMaxConsecutiveIneffectiveMarkCompacts) {
3173     if (InvokeNearHeapLimitCallback()) {
3174       // The callback increased the heap limit.
3175       consecutive_ineffective_mark_compacts_ = 0;
3176       return;
3177     }
3178     FatalProcessOutOfMemory("Ineffective mark-compacts near heap limit");
3179   }
3180 }
3181 
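// Hedged example for the ineffective-GC detection above (the heap size is
// made up): with max_old_generation_size_ of 512 MB, a mark-compact that
// leaves at least 409.6 MB (80%) of old generation live while mutator
// utilization stays below 0.4 counts as ineffective; after four such GCs in
// a row the near-heap-limit callbacks get one chance to raise the limit
// before V8 reports the out-of-memory failure.
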
3182 bool Heap::HasHighFragmentation() {
3183   size_t used = OldGenerationSizeOfObjects();
3184   size_t committed = CommittedOldGenerationMemory();
3185   return HasHighFragmentation(used, committed);
3186 }
3187 
3188 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
3189   const size_t kSlack = 16 * MB;
3190   // Fragmentation is high if committed > 2 * used + kSlack.
3191   // Rewrite the expression to avoid overflow.
3192   DCHECK_GE(committed, used);
3193   return committed - used > used + kSlack;
3194 }
3195 
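// Hedged worked example for the fragmentation predicate above: with
// used = 100 MB and kSlack = 16 MB, fragmentation is considered high once
// committed exceeds 2 * 100 + 16 = 216 MB; the check is phrased as
// committed - used > used + kSlack so the doubled term cannot overflow
// size_t.
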
3196 bool Heap::ShouldOptimizeForMemoryUsage() {
3197   const size_t kOldGenerationSlack = max_old_generation_size_ / 8;
3198   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
3199          isolate()->IsMemorySavingsModeActive() || HighMemoryPressure() ||
3200          !CanExpandOldGeneration(kOldGenerationSlack);
3201 }
3202 
3203 void Heap::ActivateMemoryReducerIfNeeded() {
3204   // Activate memory reducer when switching to background if
3205   // - there was no mark compact since the start.
3206   // - the committed memory can be potentially reduced.
3207   // 2 pages for the old, code, and map space + 1 page for new space.
3208   const int kMinCommittedMemory = 7 * Page::kPageSize;
3209   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
3210       isolate()->IsIsolateInBackground()) {
3211     MemoryReducer::Event event;
3212     event.type = MemoryReducer::kPossibleGarbage;
3213     event.time_ms = MonotonicallyIncreasingTimeInMs();
3214     memory_reducer_->NotifyPossibleGarbage(event);
3215   }
3216 }
3217 
3218 void Heap::ReduceNewSpaceSize() {
3219   // TODO(ulan): Unify this constant with the similar constant in
3220   // GCIdleTimeHandler once the change is merged to 4.5.
3221   static const size_t kLowAllocationThroughput = 1000;
3222   const double allocation_throughput =
3223       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
3224 
3225   if (FLAG_predictable) return;
3226 
3227   if (ShouldReduceMemory() ||
3228       ((allocation_throughput != 0) &&
3229        (allocation_throughput < kLowAllocationThroughput))) {
3230     new_space_->Shrink();
3231     UncommitFromSpace();
3232   }
3233 }
3234 
3235 void Heap::FinalizeIncrementalMarkingIfComplete(
3236     GarbageCollectionReason gc_reason) {
3237   if (incremental_marking()->IsMarking() &&
3238       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
3239        (!incremental_marking()->finalize_marking_completed() &&
3240         mark_compact_collector()->marking_worklist()->IsEmpty() &&
3241         local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
3242     FinalizeIncrementalMarkingIncrementally(gc_reason);
3243   } else if (incremental_marking()->IsComplete() ||
3244              (mark_compact_collector()->marking_worklist()->IsEmpty() &&
3245               local_embedder_heap_tracer()
3246                   ->ShouldFinalizeIncrementalMarking())) {
3247     CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3248   }
3249 }
3250 
3251 void Heap::FinalizeIncrementalMarkingAtomically(
3252     GarbageCollectionReason gc_reason) {
3253   DCHECK(!incremental_marking()->IsStopped());
3254   CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
3255 }
3256 
3257 void Heap::FinalizeIncrementalMarkingIncrementally(
3258     GarbageCollectionReason gc_reason) {
3259   if (FLAG_trace_incremental_marking) {
3260     isolate()->PrintWithTimestamp(
3261         "[IncrementalMarking] (%s).\n",
3262         Heap::GarbageCollectionReasonToString(gc_reason));
3263   }
3264 
3265   HistogramTimerScope incremental_marking_scope(
3266       isolate()->counters()->gc_incremental_marking_finalize());
3267   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
3268   TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
3269 
3270   {
3271     GCCallbacksScope scope(this);
3272     if (scope.CheckReenter()) {
3273       AllowHeapAllocation allow_allocation;
3274       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
3275       VMState<EXTERNAL> state(isolate_);
3276       HandleScope handle_scope(isolate_);
3277       CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3278     }
3279   }
3280   incremental_marking()->FinalizeIncrementally();
3281   {
3282     GCCallbacksScope scope(this);
3283     if (scope.CheckReenter()) {
3284       AllowHeapAllocation allow_allocation;
3285       TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
3286       VMState<EXTERNAL> state(isolate_);
3287       HandleScope handle_scope(isolate_);
3288       CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
3289     }
3290   }
3291 }
3292 
3293 void Heap::RegisterDeserializedObjectsForBlackAllocation(
3294     Reservation* reservations, const std::vector<HeapObject*>& large_objects,
3295     const std::vector<Address>& maps) {
3296   // TODO(ulan): pause black allocation during deserialization to avoid
3297   // iterating all these objects in one go.
3298 
3299   if (!incremental_marking()->black_allocation()) return;
3300 
3301   // Iterate black objects in old space, code space, map space, and large
3302   // object space for side effects.
3303   IncrementalMarking::MarkingState* marking_state =
3304       incremental_marking()->marking_state();
3305   for (int i = OLD_SPACE; i < Serializer<>::kNumberOfSpaces; i++) {
3306     const Heap::Reservation& res = reservations[i];
3307     for (auto& chunk : res) {
3308       Address addr = chunk.start;
3309       while (addr < chunk.end) {
3310         HeapObject* obj = HeapObject::FromAddress(addr);
3311         // Objects can have any color because incremental marking can
3312         // start in the middle of Heap::ReserveSpace().
3313         if (marking_state->IsBlack(obj)) {
3314           incremental_marking()->ProcessBlackAllocatedObject(obj);
3315         }
3316         addr += obj->Size();
3317       }
3318     }
3319   }
3320   // We potentially deserialized wrappers which require registering with the
3321   // embedder as the marker will not find them.
3322   local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
3323 
3324   // Large object space doesn't use reservations, so it needs custom handling.
3325   for (HeapObject* object : large_objects) {
3326     incremental_marking()->ProcessBlackAllocatedObject(object);
3327   }
3328 
3329   // Map space doesn't use reservations, so it needs custom handling.
3330   for (Address addr : maps) {
3331     incremental_marking()->ProcessBlackAllocatedObject(
3332         HeapObject::FromAddress(addr));
3333   }
3334 }
3335 
3336 void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
3337                                     const DisallowHeapAllocation&) {
3338   if (incremental_marking()->IsMarking()) {
3339     incremental_marking()->MarkBlackAndPush(object);
3340     if (incremental_marking()->IsCompacting() &&
3341         MayContainRecordedSlots(object)) {
3342       MemoryChunk::FromHeapObject(object)->RegisterObjectWithInvalidatedSlots(
3343           object, size);
3344     }
3345   }
3346 #ifdef VERIFY_HEAP
3347   if (FLAG_verify_heap) {
3348     DCHECK_NULL(pending_layout_change_object_);
3349     pending_layout_change_object_ = object;
3350   }
3351 #endif
3352 }
3353 
3354 #ifdef VERIFY_HEAP
3355 // Helper class for collecting slot addresses.
3356 class SlotCollectingVisitor final : public ObjectVisitor {
3357  public:
3358   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
3359     VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
3360                   reinterpret_cast<MaybeObject**>(end));
3361   }
3362   void VisitPointers(HeapObject* host, MaybeObject** start,
3363                      MaybeObject** end) final {
3364     for (MaybeObject** p = start; p < end; p++) {
3365       slots_.push_back(p);
3366     }
3367   }
3368 
3369   int number_of_slots() { return static_cast<int>(slots_.size()); }
3370 
3371   MaybeObject** slot(int i) { return slots_[i]; }
3372 
3373  private:
3374   std::vector<MaybeObject**> slots_;
3375 };
3376 
3377 void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
3378   if (!FLAG_verify_heap) return;
3379 
3380   // Check that Heap::NotifyObjectLayoutChange was called for object transitions
3381   // that are not safe for concurrent marking.
3382   // If you see this check triggering for a freshly allocated object,
3383   // use object->set_map_after_allocation() to initialize its map.
3384   if (pending_layout_change_object_ == nullptr) {
3385     if (object->IsJSObject()) {
3386       DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
3387     } else {
3388       // Check that the set of slots before and after the transition match.
3389       SlotCollectingVisitor old_visitor;
3390       object->IterateFast(&old_visitor);
3391       MapWord old_map_word = object->map_word();
3392       // Temporarily set the new map to iterate new slots.
3393       object->set_map_word(MapWord::FromMap(new_map));
3394       SlotCollectingVisitor new_visitor;
3395       object->IterateFast(&new_visitor);
3396       // Restore the old map.
3397       object->set_map_word(old_map_word);
3398       DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
3399       for (int i = 0; i < new_visitor.number_of_slots(); i++) {
3400         DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
3401       }
3402     }
3403   } else {
3404     DCHECK_EQ(pending_layout_change_object_, object);
3405     pending_layout_change_object_ = nullptr;
3406   }
3407 }
3408 #endif
3409 
3410 GCIdleTimeHeapState Heap::ComputeHeapState() {
3411   GCIdleTimeHeapState heap_state;
3412   heap_state.contexts_disposed = contexts_disposed_;
3413   heap_state.contexts_disposal_rate =
3414       tracer()->ContextDisposalRateInMilliseconds();
3415   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
3416   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
3417   return heap_state;
3418 }
3419 
3420 
3421 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
3422                                  GCIdleTimeHeapState heap_state,
3423                                  double deadline_in_ms) {
3424   bool result = false;
3425   switch (action.type) {
3426     case DONE:
3427       result = true;
3428       break;
3429     case DO_INCREMENTAL_STEP: {
3430       const double remaining_idle_time_in_ms =
3431           incremental_marking()->AdvanceIncrementalMarking(
3432               deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
3433               StepOrigin::kTask);
3434       if (remaining_idle_time_in_ms > 0.0) {
3435         FinalizeIncrementalMarkingIfComplete(
3436             GarbageCollectionReason::kFinalizeMarkingViaTask);
3437       }
3438       result = incremental_marking()->IsStopped();
3439       break;
3440     }
3441     case DO_FULL_GC: {
3442       DCHECK_LT(0, contexts_disposed_);
3443       HistogramTimerScope scope(isolate_->counters()->gc_context());
3444       TRACE_EVENT0("v8", "V8.GCContext");
3445       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
3446       break;
3447     }
3448     case DO_NOTHING:
3449       break;
3450   }
3451 
3452   return result;
3453 }
3454 
3455 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
3456                                     GCIdleTimeHeapState heap_state,
3457                                     double start_ms, double deadline_in_ms) {
3458   double idle_time_in_ms = deadline_in_ms - start_ms;
3459   double current_time = MonotonicallyIncreasingTimeInMs();
3460   last_idle_notification_time_ = current_time;
3461   double deadline_difference = deadline_in_ms - current_time;
3462 
3463   contexts_disposed_ = 0;
3464 
3465   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
3466       FLAG_trace_idle_notification_verbose) {
3467     isolate_->PrintWithTimestamp(
3468         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
3469         "ms, deadline usage %.2f ms [",
3470         idle_time_in_ms, idle_time_in_ms - deadline_difference,
3471         deadline_difference);
3472     action.Print();
3473     PrintF("]");
3474     if (FLAG_trace_idle_notification_verbose) {
3475       PrintF("[");
3476       heap_state.Print();
3477       PrintF("]");
3478     }
3479     PrintF("\n");
3480   }
3481 }
3482 
3483 
3484 double Heap::MonotonicallyIncreasingTimeInMs() {
3485   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
3486          static_cast<double>(base::Time::kMillisecondsPerSecond);
3487 }
3488 
3489 
3490 bool Heap::IdleNotification(int idle_time_in_ms) {
3491   return IdleNotification(
3492       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
3493       (static_cast<double>(idle_time_in_ms) /
3494        static_cast<double>(base::Time::kMillisecondsPerSecond)));
3495 }
3496 
3497 
3498 bool Heap::IdleNotification(double deadline_in_seconds) {
3499   CHECK(HasBeenSetUp());
3500   double deadline_in_ms =
3501       deadline_in_seconds *
3502       static_cast<double>(base::Time::kMillisecondsPerSecond);
3503   HistogramTimerScope idle_notification_scope(
3504       isolate_->counters()->gc_idle_notification());
3505   TRACE_EVENT0("v8", "V8.GCIdleNotification");
3506   double start_ms = MonotonicallyIncreasingTimeInMs();
3507   double idle_time_in_ms = deadline_in_ms - start_ms;
3508 
3509   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
3510                              OldGenerationAllocationCounter());
3511 
3512   GCIdleTimeHeapState heap_state = ComputeHeapState();
3513 
3514   GCIdleTimeAction action =
3515       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
3516 
3517   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
3518 
3519   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
3520   return result;
3521 }
3522 
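// Embedder-side sketch of how the deadline variant above is usually driven
// (hedged: the surrounding scheduling and ScheduleMoreIdleWork are made up;
// v8::Isolate::IdleNotificationDeadline is the public entry point that
// forwards here):
//
//   void OnIdle(v8::Isolate* isolate, v8::Platform* platform,
//               double idle_budget_in_seconds) {
//     double deadline = platform->MonotonicallyIncreasingTime() +
//                       idle_budget_in_seconds;
//     if (!isolate->IdleNotificationDeadline(deadline)) {
//       ScheduleMoreIdleWork();  // hypothetical helper
//     }
//   }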
3523 
3524 bool Heap::RecentIdleNotificationHappened() {
3525   return (last_idle_notification_time_ +
3526           GCIdleTimeHandler::kMaxScheduledIdleTime) >
3527          MonotonicallyIncreasingTimeInMs();
3528 }
3529 
3530 class MemoryPressureInterruptTask : public CancelableTask {
3531  public:
3532   explicit MemoryPressureInterruptTask(Heap* heap)
3533       : CancelableTask(heap->isolate()), heap_(heap) {}
3534 
3535   virtual ~MemoryPressureInterruptTask() {}
3536 
3537  private:
3538   // v8::internal::CancelableTask overrides.
3539   void RunInternal() override { heap_->CheckMemoryPressure(); }
3540 
3541   Heap* heap_;
3542   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
3543 };
3544 
3545 void Heap::CheckMemoryPressure() {
3546   if (HighMemoryPressure()) {
3547     // The optimizing compiler may be unnecessarily holding on to memory.
3548     isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
3549   }
3550   MemoryPressureLevel memory_pressure_level = memory_pressure_level_;
3551   // Reset the memory pressure level to avoid recursive GCs triggered by
3552   // CheckMemoryPressure from AdjustAmountOfExternalMemory called by
3553   // the finalizers.
3554   memory_pressure_level_ = MemoryPressureLevel::kNone;
3555   if (memory_pressure_level == MemoryPressureLevel::kCritical) {
3556     CollectGarbageOnMemoryPressure();
3557   } else if (memory_pressure_level == MemoryPressureLevel::kModerate) {
3558     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3559       StartIncrementalMarking(kReduceMemoryFootprintMask,
3560                               GarbageCollectionReason::kMemoryPressure);
3561     }
3562   }
3563   if (memory_reducer_) {
3564     MemoryReducer::Event event;
3565     event.type = MemoryReducer::kPossibleGarbage;
3566     event.time_ms = MonotonicallyIncreasingTimeInMs();
3567     memory_reducer_->NotifyPossibleGarbage(event);
3568   }
3569 }
3570 
3571 void Heap::CollectGarbageOnMemoryPressure() {
3572   const int kGarbageThresholdInBytes = 8 * MB;
3573   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
3574   // This constant is the maximum response time in the RAIL performance model.
3575   const double kMaxMemoryPressurePauseMs = 100;
3576 
3577   double start = MonotonicallyIncreasingTimeInMs();
3578   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
3579                     GarbageCollectionReason::kMemoryPressure,
3580                     kGCCallbackFlagCollectAllAvailableGarbage);
3581   double end = MonotonicallyIncreasingTimeInMs();
3582 
3583   // Estimate how much memory we can free.
3584   int64_t potential_garbage =
3585       (CommittedMemory() - SizeOfObjects()) + external_memory_;
3586   // If we can potentially free a large amount of memory, then start a GC right
3587   // away instead of waiting for the memory reducer.
3588   if (potential_garbage >= kGarbageThresholdInBytes &&
3589       potential_garbage >=
3590           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
3591     // If we spent less than half of the time budget, then perform a full GC.
3592     // Otherwise, start incremental marking.
3593     if (end - start < kMaxMemoryPressurePauseMs / 2) {
3594       CollectAllGarbage(
3595           kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
3596           GarbageCollectionReason::kMemoryPressure,
3597           kGCCallbackFlagCollectAllAvailableGarbage);
3598     } else {
3599       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
3600         StartIncrementalMarking(kReduceMemoryFootprintMask,
3601                                 GarbageCollectionReason::kMemoryPressure);
3602       }
3603     }
3604   }
3605 }
3606 
3607 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
3608                                       bool is_isolate_locked) {
3609   MemoryPressureLevel previous = memory_pressure_level_;
3610   memory_pressure_level_ = level;
3611   if ((previous != MemoryPressureLevel::kCritical &&
3612        level == MemoryPressureLevel::kCritical) ||
3613       (previous == MemoryPressureLevel::kNone &&
3614        level == MemoryPressureLevel::kModerate)) {
3615     if (is_isolate_locked) {
3616       CheckMemoryPressure();
3617     } else {
3618       ExecutionAccess access(isolate());
3619       isolate()->stack_guard()->RequestGC();
3620       V8::GetCurrentPlatform()->CallOnForegroundThread(
3621           reinterpret_cast<v8::Isolate*>(isolate()),
3622           new MemoryPressureInterruptTask(this));
3623     }
3624   }
3625 }
3626 
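// Embedder-side sketch (hedged; the OS hook is made up, but
// v8::Isolate::MemoryPressureNotification is the public API that forwards to
// the method above):
//
//   void OnSystemMemoryWarning(v8::Isolate* isolate, bool critical) {
//     isolate->MemoryPressureNotification(
//         critical ? v8::MemoryPressureLevel::kCritical
//                  : v8::MemoryPressureLevel::kModerate);
//   }
//
// When the calling thread does not hold the isolate lock, the code above
// requests a GC interrupt and posts a MemoryPressureInterruptTask rather
// than collecting immediately.
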
3627 void Heap::AddNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3628                                     void* data) {
3629   const size_t kMaxCallbacks = 100;
3630   CHECK_LT(near_heap_limit_callbacks_.size(), kMaxCallbacks);
3631   for (auto callback_data : near_heap_limit_callbacks_) {
3632     CHECK_NE(callback_data.first, callback);
3633   }
3634   near_heap_limit_callbacks_.push_back(std::make_pair(callback, data));
3635 }
3636 
3637 void Heap::RemoveNearHeapLimitCallback(v8::NearHeapLimitCallback callback,
3638                                        size_t heap_limit) {
3639   for (size_t i = 0; i < near_heap_limit_callbacks_.size(); i++) {
3640     if (near_heap_limit_callbacks_[i].first == callback) {
3641       near_heap_limit_callbacks_.erase(near_heap_limit_callbacks_.begin() + i);
3642       if (heap_limit) {
3643         RestoreHeapLimit(heap_limit);
3644       }
3645       return;
3646     }
3647   }
3648   UNREACHABLE();
3649 }
3650 
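// Embedder-side sketch for the callback registry above (hedged; the growth
// policy is made up). A callback may return a larger limit to postpone the
// OOM, or return the current limit to decline:
//
//   size_t NearHeapLimit(void* data, size_t current_limit,
//                        size_t initial_limit) {
//     return current_limit + 16 * 1024 * 1024;  // grant 16 MB more
//   }
//   ...
//   isolate->AddNearHeapLimitCallback(NearHeapLimit, nullptr);
//
// Note that InvokeNearHeapLimitCallback below calls only the most recently
// added callback and treats any returned value that does not exceed
// max_old_generation_size_ as a refusal.
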
3651 bool Heap::InvokeNearHeapLimitCallback() {
3652   if (near_heap_limit_callbacks_.size() > 0) {
3653     HandleScope scope(isolate());
3654     v8::NearHeapLimitCallback callback =
3655         near_heap_limit_callbacks_.back().first;
3656     void* data = near_heap_limit_callbacks_.back().second;
3657     size_t heap_limit = callback(data, max_old_generation_size_,
3658                                  initial_max_old_generation_size_);
3659     if (heap_limit > max_old_generation_size_) {
3660       max_old_generation_size_ = heap_limit;
3661       return true;
3662     }
3663   }
3664   return false;
3665 }
3666 
3667 void Heap::CollectCodeStatistics() {
3668   TRACE_EVENT0("v8", "Heap::CollectCodeStatistics");
3669   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
3670   // We do not look for code in new space or map space. If code
3671   // somehow ends up in those spaces, we would miss it here.
3672   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
3673   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
3674   CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
3675 }
3676 
3677 #ifdef DEBUG
3678 
3679 void Heap::Print() {
3680   if (!HasBeenSetUp()) return;
3681   isolate()->PrintStack(stdout);
3682 
3683   for (SpaceIterator it(this); it.has_next();) {
3684     it.next()->Print();
3685   }
3686 }
3687 
3688 
3689 void Heap::ReportCodeStatistics(const char* title) {
3690   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3691   CollectCodeStatistics();
3692   CodeStatistics::ReportCodeStatistics(isolate());
3693 }
3694 
3695 #endif  // DEBUG
3696 
3697 const char* Heap::GarbageCollectionReasonToString(
3698     GarbageCollectionReason gc_reason) {
3699   switch (gc_reason) {
3700     case GarbageCollectionReason::kAllocationFailure:
3701       return "allocation failure";
3702     case GarbageCollectionReason::kAllocationLimit:
3703       return "allocation limit";
3704     case GarbageCollectionReason::kContextDisposal:
3705       return "context disposal";
3706     case GarbageCollectionReason::kCountersExtension:
3707       return "counters extension";
3708     case GarbageCollectionReason::kDebugger:
3709       return "debugger";
3710     case GarbageCollectionReason::kDeserializer:
3711       return "deserialize";
3712     case GarbageCollectionReason::kExternalMemoryPressure:
3713       return "external memory pressure";
3714     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
3715       return "finalize incremental marking via stack guard";
3716     case GarbageCollectionReason::kFinalizeMarkingViaTask:
3717       return "finalize incremental marking via task";
3718     case GarbageCollectionReason::kFullHashtable:
3719       return "full hash-table";
3720     case GarbageCollectionReason::kHeapProfiler:
3721       return "heap profiler";
3722     case GarbageCollectionReason::kIdleTask:
3723       return "idle task";
3724     case GarbageCollectionReason::kLastResort:
3725       return "last resort";
3726     case GarbageCollectionReason::kLowMemoryNotification:
3727       return "low memory notification";
3728     case GarbageCollectionReason::kMakeHeapIterable:
3729       return "make heap iterable";
3730     case GarbageCollectionReason::kMemoryPressure:
3731       return "memory pressure";
3732     case GarbageCollectionReason::kMemoryReducer:
3733       return "memory reducer";
3734     case GarbageCollectionReason::kRuntime:
3735       return "runtime";
3736     case GarbageCollectionReason::kSamplingProfiler:
3737       return "sampling profiler";
3738     case GarbageCollectionReason::kSnapshotCreator:
3739       return "snapshot creator";
3740     case GarbageCollectionReason::kTesting:
3741       return "testing";
3742     case GarbageCollectionReason::kExternalFinalize:
3743       return "external finalize";
3744     case GarbageCollectionReason::kUnknown:
3745       return "unknown";
3746   }
3747   UNREACHABLE();
3748 }
3749 
3750 bool Heap::Contains(HeapObject* value) {
3751   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
3752     return false;
3753   }
3754   return HasBeenSetUp() &&
3755          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
3756           code_space_->Contains(value) || map_space_->Contains(value) ||
3757           lo_space_->Contains(value) || read_only_space_->Contains(value));
3758 }
3759 
3760 bool Heap::ContainsSlow(Address addr) {
3761   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
3762     return false;
3763   }
3764   return HasBeenSetUp() &&
3765          (new_space_->ToSpaceContainsSlow(addr) ||
3766           old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
3767           map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr) ||
3768           read_only_space_->Contains(addr));
3769 }
3770 
3771 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3772   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
3773     return false;
3774   }
3775   if (!HasBeenSetUp()) return false;
3776 
3777   switch (space) {
3778     case NEW_SPACE:
3779       return new_space_->ToSpaceContains(value);
3780     case OLD_SPACE:
3781       return old_space_->Contains(value);
3782     case CODE_SPACE:
3783       return code_space_->Contains(value);
3784     case MAP_SPACE:
3785       return map_space_->Contains(value);
3786     case LO_SPACE:
3787       return lo_space_->Contains(value);
3788     case NEW_LO_SPACE:
3789       return new_lo_space_->Contains(value);
3790     case RO_SPACE:
3791       return read_only_space_->Contains(value);
3792   }
3793   UNREACHABLE();
3794 }
3795 
3796 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
3797   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
3798     return false;
3799   }
3800   if (!HasBeenSetUp()) return false;
3801 
3802   switch (space) {
3803     case NEW_SPACE:
3804       return new_space_->ToSpaceContainsSlow(addr);
3805     case OLD_SPACE:
3806       return old_space_->ContainsSlow(addr);
3807     case CODE_SPACE:
3808       return code_space_->ContainsSlow(addr);
3809     case MAP_SPACE:
3810       return map_space_->ContainsSlow(addr);
3811     case LO_SPACE:
3812       return lo_space_->ContainsSlow(addr);
3813     case NEW_LO_SPACE:
3814       return new_lo_space_->ContainsSlow(addr);
3815     case RO_SPACE:
3816       return read_only_space_->ContainsSlow(addr);
3817   }
3818   UNREACHABLE();
3819 }
3820 
3821 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
3822   switch (space) {
3823     case NEW_SPACE:
3824     case OLD_SPACE:
3825     case CODE_SPACE:
3826     case MAP_SPACE:
3827     case LO_SPACE:
3828     case NEW_LO_SPACE:
3829     case RO_SPACE:
3830       return true;
3831     default:
3832       return false;
3833   }
3834 }
3835 
3836 
3837 bool Heap::RootIsImmortalImmovable(int root_index) {
3838   switch (root_index) {
3839 #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
3840     IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
3841 #undef IMMORTAL_IMMOVABLE_ROOT
3842 #define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
3843     INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
3844 #undef INTERNALIZED_STRING
3845 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
3846     STRING_TYPE_LIST(STRING_TYPE)
3847 #undef STRING_TYPE
3848     return true;
3849     default:
3850       return false;
3851   }
3852 }
3853 
3854 #ifdef VERIFY_HEAP
3855 class VerifyReadOnlyPointersVisitor : public VerifyPointersVisitor {
3856  public:
3857   explicit VerifyReadOnlyPointersVisitor(Heap* heap)
3858       : VerifyPointersVisitor(heap) {}
3859 
3860  protected:
3861   void VerifyPointers(HeapObject* host, MaybeObject** start,
3862                       MaybeObject** end) override {
3863     if (host != nullptr) {
3864       CHECK(heap_->InReadOnlySpace(host->map()));
3865     }
3866     VerifyPointersVisitor::VerifyPointers(host, start, end);
3867 
3868     for (MaybeObject** current = start; current < end; current++) {
3869       HeapObject* object;
3870       if ((*current)->ToStrongOrWeakHeapObject(&object)) {
3871         CHECK(heap_->InReadOnlySpace(object));
3872       }
3873     }
3874   }
3875 };
3876 
3877 void Heap::Verify() {
3878   CHECK(HasBeenSetUp());
3879   HandleScope scope(isolate());
3880 
3881   // We have to wait here for the sweeper threads to have an iterable heap.
3882   mark_compact_collector()->EnsureSweepingCompleted();
3883 
3884   VerifyPointersVisitor visitor(this);
3885   IterateRoots(&visitor, VISIT_ONLY_STRONG);
3886 
3887   VerifySmisVisitor smis_visitor;
3888   IterateSmiRoots(&smis_visitor);
3889 
3890   new_space_->Verify(isolate());
3891 
3892   old_space_->Verify(isolate(), &visitor);
3893   map_space_->Verify(isolate(), &visitor);
3894 
3895   VerifyPointersVisitor no_dirty_regions_visitor(this);
3896   code_space_->Verify(isolate(), &no_dirty_regions_visitor);
3897 
3898   lo_space_->Verify(isolate());
3899 
3900   VerifyReadOnlyPointersVisitor read_only_visitor(this);
3901   read_only_space_->Verify(isolate(), &read_only_visitor);
3902 }
3903 
3904 class SlotVerifyingVisitor : public ObjectVisitor {
3905  public:
3906   SlotVerifyingVisitor(std::set<Address>* untyped,
3907                        std::set<std::pair<SlotType, Address> >* typed)
3908       : untyped_(untyped), typed_(typed) {}
3909 
3910   virtual bool ShouldHaveBeenRecorded(HeapObject* host,
3911                                       MaybeObject* target) = 0;
3912 
3913   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
3914 #ifdef DEBUG
3915     for (Object** slot = start; slot < end; slot++) {
3916       DCHECK(!HasWeakHeapObjectTag(*slot));
3917     }
3918 #endif  // DEBUG
3919     VisitPointers(host, reinterpret_cast<MaybeObject**>(start),
3920                   reinterpret_cast<MaybeObject**>(end));
3921   }
3922 
3923   void VisitPointers(HeapObject* host, MaybeObject** start,
3924                      MaybeObject** end) final {
3925     for (MaybeObject** slot = start; slot < end; slot++) {
3926       if (ShouldHaveBeenRecorded(host, *slot)) {
3927         CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
3928       }
3929     }
3930   }
3931 
3932   void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
3933     Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
3934     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
3935       CHECK(
3936           InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
3937           (rinfo->IsInConstantPool() &&
3938            InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
3939     }
3940   }
3941 
3942   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
3943     Object* target = rinfo->target_object();
3944     if (ShouldHaveBeenRecorded(host, MaybeObject::FromObject(target))) {
3945       CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
3946             (rinfo->IsInConstantPool() &&
3947              InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
3948     }
3949   }
3950 
3951  private:
3952   bool InTypedSet(SlotType type, Address slot) {
3953     return typed_->count(std::make_pair(type, slot)) > 0;
3954   }
3955   std::set<Address>* untyped_;
3956   std::set<std::pair<SlotType, Address> >* typed_;
3957 };
3958 
3959 class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
3960  public:
3961   OldToNewSlotVerifyingVisitor(std::set<Address>* untyped,
3962                                std::set<std::pair<SlotType, Address>>* typed)
3963       : SlotVerifyingVisitor(untyped, typed) {}
3964 
3965   bool ShouldHaveBeenRecorded(HeapObject* host, MaybeObject* target) override {
3966     DCHECK_IMPLIES(
3967         target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target),
3968         Heap::InToSpace(target));
3969     return target->IsStrongOrWeakHeapObject() && Heap::InNewSpace(target) &&
3970            !Heap::InNewSpace(host);
3971   }
3972 };
3973 
3974 template <RememberedSetType direction>
3975 void CollectSlots(MemoryChunk* chunk, Address start, Address end,
3976                   std::set<Address>* untyped,
3977                   std::set<std::pair<SlotType, Address> >* typed) {
3978   RememberedSet<direction>::Iterate(chunk,
3979                                     [start, end, untyped](Address slot) {
3980                                       if (start <= slot && slot < end) {
3981                                         untyped->insert(slot);
3982                                       }
3983                                       return KEEP_SLOT;
3984                                     },
3985                                     SlotSet::PREFREE_EMPTY_BUCKETS);
3986   RememberedSet<direction>::IterateTyped(
3987       chunk, [start, end, typed](SlotType type, Address host, Address slot) {
3988         if (start <= slot && slot < end) {
3989           typed->insert(std::make_pair(type, slot));
3990         }
3991         return KEEP_SLOT;
3992       });
3993 }
3994 
3995 void Heap::VerifyRememberedSetFor(HeapObject* object) {
3996   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
3997   DCHECK_IMPLIES(chunk->mutex() == nullptr, InReadOnlySpace(object));
3998   // In RO_SPACE chunk->mutex() may be nullptr, so just ignore it.
3999   base::LockGuard<base::Mutex, base::NullBehavior::kIgnoreIfNull> lock_guard(
4000       chunk->mutex());
4001   Address start = object->address();
4002   Address end = start + object->Size();
4003   std::set<Address> old_to_new;
4004   std::set<std::pair<SlotType, Address> > typed_old_to_new;
4005   if (!InNewSpace(object)) {
4006     store_buffer()->MoveAllEntriesToRememberedSet();
4007     CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
4008     OldToNewSlotVerifyingVisitor visitor(&old_to_new, &typed_old_to_new);
4009     object->IterateBody(&visitor);
4010   }
4011   // TODO(ulan): Add old to old slot set verification once all weak objects
4012 // have their own instance types and slots are recorded for all weak fields.
4013 }
4014 #endif
4015 
4016 #ifdef DEBUG
4017 void Heap::VerifyCountersAfterSweeping() {
4018   PagedSpaces spaces(this);
4019   for (PagedSpace* space = spaces.next(); space != nullptr;
4020        space = spaces.next()) {
4021     space->VerifyCountersAfterSweeping();
4022   }
4023 }
4024 
4025 void Heap::VerifyCountersBeforeConcurrentSweeping() {
4026   PagedSpaces spaces(this);
4027   for (PagedSpace* space = spaces.next(); space != nullptr;
4028        space = spaces.next()) {
4029     space->VerifyCountersBeforeConcurrentSweeping();
4030   }
4031 }
4032 #endif
4033 
4034 void Heap::ZapFromSpace() {
4035   if (!new_space_->IsFromSpaceCommitted()) return;
4036   for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
4037     memory_allocator()->ZapBlock(page->area_start(),
4038                                  page->HighWaterMark() - page->area_start(),
4039                                  ZapValue());
4040   }
4041 }
4042 
4043 void Heap::ZapCodeObject(Address start_address, int size_in_bytes) {
4044 #ifdef DEBUG
4045   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
4046     reinterpret_cast<Object**>(start_address)[i] = Smi::FromInt(kCodeZapValue);
4047   }
4048 #endif
4049 }
4050 
4051 Code* Heap::builtin(int index) {
4052   DCHECK(Builtins::IsBuiltinId(index));
4053   // Code::cast cannot be used here since we access builtins
4054   // during the marking phase of mark sweep. See IC::Clear.
4055   return reinterpret_cast<Code*>(builtins_[index]);
4056 }
4057 
4058 Address Heap::builtin_address(int index) {
4059   DCHECK(Builtins::IsBuiltinId(index) || index == Builtins::builtin_count);
4060   return reinterpret_cast<Address>(&builtins_[index]);
4061 }
4062 
4063 void Heap::set_builtin(int index, HeapObject* builtin) {
4064   DCHECK(Builtins::IsBuiltinId(index));
4065   DCHECK(Internals::HasHeapObjectTag(builtin));
4066   // The given builtin may be completely uninitialized, thus we cannot check
4067   // its type here.
4068   builtins_[index] = builtin;
4069 }
4070 
4071 void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
4072   IterateStrongRoots(v, mode);
4073   IterateWeakRoots(v, mode);
4074 }
4075 
4076 void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
4077   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
4078                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
4079                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
4080   v->VisitRootPointer(
4081       Root::kStringTable, nullptr,
4082       reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4083   v->Synchronize(VisitorSynchronization::kStringTable);
4084   if (!isMinorGC && mode != VISIT_ALL_IN_SWEEP_NEWSPACE &&
4085       mode != VISIT_FOR_SERIALIZATION) {
4086     // Scavenge collections have special processing for this.
4087     // Do not visit for serialization, since the external string table will
4088     // be populated from scratch upon deserialization.
4089     external_string_table_.IterateAll(v);
4090   }
4091   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4092 }
4093 
4094 void Heap::IterateSmiRoots(RootVisitor* v) {
4095   // Acquire execution access since we are going to read stack limit values.
4096   ExecutionAccess access(isolate());
4097   v->VisitRootPointers(Root::kSmiRootList, nullptr, &roots_[kSmiRootsStart],
4098                        &roots_[kRootListLength]);
4099   v->Synchronize(VisitorSynchronization::kSmiRootList);
4100 }
4101 
4102 // We cannot avoid stale handles to left-trimmed objects, but we can only make
4103 // sure that all handles still needed are updated. Filter out stale pointers
4104 // and clear the slots to allow post-processing of handles (needed because the
4105 // sweeper might actually free the underlying page).
4106 class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
4107  public:
4108   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
4109     USE(heap_);
4110   }
4111 
4112   void VisitRootPointer(Root root, const char* description,
4113                         Object** p) override {
4114     FixHandle(p);
4115   }
4116 
4117   void VisitRootPointers(Root root, const char* description, Object** start,
4118                          Object** end) override {
4119     for (Object** p = start; p < end; p++) FixHandle(p);
4120   }
4121 
4122  private:
4123   inline void FixHandle(Object** p) {
4124     if (!(*p)->IsHeapObject()) return;
4125     HeapObject* current = reinterpret_cast<HeapObject*>(*p);
4126     const MapWord map_word = current->map_word();
4127     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
4128 #ifdef DEBUG
4129       // We need to find a FixedArrayBase map after walking the fillers.
4130       while (current->IsFiller()) {
4131         Address next = reinterpret_cast<Address>(current);
4132         if (current->map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
4133           next += kPointerSize;
4134         } else if (current->map() ==
4135                    ReadOnlyRoots(heap_).two_pointer_filler_map()) {
4136           next += 2 * kPointerSize;
4137         } else {
4138           next += current->Size();
4139         }
4140         current = reinterpret_cast<HeapObject*>(next);
4141       }
4142       DCHECK(current->IsFixedArrayBase());
4143 #endif  // DEBUG
4144       *p = nullptr;
4145     }
4146   }
4147 
4148   Heap* heap_;
4149 };
4150 
4151 void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
4152   const bool isMinorGC = mode == VISIT_ALL_IN_SCAVENGE ||
4153                          mode == VISIT_ALL_IN_MINOR_MC_MARK ||
4154                          mode == VISIT_ALL_IN_MINOR_MC_UPDATE;
4155   v->VisitRootPointers(Root::kStrongRootList, nullptr, &roots_[0],
4156                        &roots_[kStrongRootListLength]);
4157   v->Synchronize(VisitorSynchronization::kStrongRootList);
4158 
4159   isolate_->bootstrapper()->Iterate(v);
4160   v->Synchronize(VisitorSynchronization::kBootstrapper);
4161   isolate_->Iterate(v);
4162   v->Synchronize(VisitorSynchronization::kTop);
4163   Relocatable::Iterate(isolate_, v);
4164   v->Synchronize(VisitorSynchronization::kRelocatable);
4165   isolate_->debug()->Iterate(v);
4166   v->Synchronize(VisitorSynchronization::kDebug);
4167 
4168   isolate_->compilation_cache()->Iterate(v);
4169   v->Synchronize(VisitorSynchronization::kCompilationCache);
4170 
4171   // Iterate over local handles in handle scopes.
4172   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
4173   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
4174   isolate_->handle_scope_implementer()->Iterate(v);
4175   isolate_->IterateDeferredHandles(v);
4176   v->Synchronize(VisitorSynchronization::kHandleScope);
4177 
4178   // Iterate over the builtin code objects and code stubs in the
4179   // heap. Note that it is not necessary to iterate over code objects
4180   // on scavenge collections.
4181   if (!isMinorGC) {
4182     IterateBuiltins(v);
4183     v->Synchronize(VisitorSynchronization::kBuiltins);
4184     isolate_->interpreter()->IterateDispatchTable(v);
4185     v->Synchronize(VisitorSynchronization::kDispatchTable);
4186   }
4187 
4188   // Iterate over global handles.
4189   switch (mode) {
4190     case VISIT_FOR_SERIALIZATION:
4191       // Global handles are not iterated by the serializer. Values referenced by
4192       // global handles need to be added manually.
4193       break;
4194     case VISIT_ONLY_STRONG:
4195       isolate_->global_handles()->IterateStrongRoots(v);
4196       break;
4197     case VISIT_ALL_IN_SCAVENGE:
4198       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4199       break;
4200     case VISIT_ALL_IN_MINOR_MC_MARK:
4201       // Global handles are processed manually by the minor MC.
4202       break;
4203     case VISIT_ALL_IN_MINOR_MC_UPDATE:
4204       // Global handles are processed manually by the minor MC.
4205       break;
4206     case VISIT_ALL_IN_SWEEP_NEWSPACE:
4207     case VISIT_ALL:
4208       isolate_->global_handles()->IterateAllRoots(v);
4209       break;
4210   }
4211   v->Synchronize(VisitorSynchronization::kGlobalHandles);
4212 
4213   // Iterate over eternal handles. Eternal handles are not iterated by the
4214   // serializer. Values referenced by eternal handles need to be added manually.
4215   if (mode != VISIT_FOR_SERIALIZATION) {
4216     if (isMinorGC) {
4217       isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4218     } else {
4219       isolate_->eternal_handles()->IterateAllRoots(v);
4220     }
4221   }
4222   v->Synchronize(VisitorSynchronization::kEternalHandles);
4223 
4224   // Iterate over pointers being held by inactive threads.
4225   isolate_->thread_manager()->Iterate(v);
4226   v->Synchronize(VisitorSynchronization::kThreadManager);
4227 
4228   // Iterate over other strong roots (currently only identity maps).
4229   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
4230     v->VisitRootPointers(Root::kStrongRoots, nullptr, list->start, list->end);
4231   }
4232   v->Synchronize(VisitorSynchronization::kStrongRoots);
4233 
4234   // Iterate over the partial snapshot cache unless serializing.
4235   if (mode != VISIT_FOR_SERIALIZATION) {
4236     SerializerDeserializer::Iterate(isolate_, v);
4237     // We don't do a v->Synchronize call here because the serializer and the
4238     // deserializer are deliberately out of sync here.
4239   }
4240 }
4241 
4242 void Heap::IterateWeakGlobalHandles(RootVisitor* v) {
4243   isolate_->global_handles()->IterateWeakRoots(v);
4244 }
4245 
4246 void Heap::IterateBuiltins(RootVisitor* v) {
4247   for (int i = 0; i < Builtins::builtin_count; i++) {
4248     v->VisitRootPointer(Root::kBuiltins, Builtins::name(i), &builtins_[i]);
4249   }
4250 }
4251 
4252 // TODO(1236194): Since the heap size is configurable on the command line
4253 // and through the API, we should gracefully handle the case that the heap
4254 // size is not big enough to fit all the initial objects.
4255 void Heap::ConfigureHeap(size_t max_semi_space_size_in_kb,
4256                          size_t max_old_generation_size_in_mb,
4257                          size_t code_range_size_in_mb) {
4258   // Overwrite default configuration.
4259   if (max_semi_space_size_in_kb != 0) {
4260     max_semi_space_size_ =
4261         RoundUp<Page::kPageSize>(max_semi_space_size_in_kb * KB);
4262   }
4263   if (max_old_generation_size_in_mb != 0) {
4264     max_old_generation_size_ = max_old_generation_size_in_mb * MB;
4265   }
4266 
4267   // If max space size flags are specified overwrite the configuration.
4268   if (FLAG_max_semi_space_size > 0) {
4269     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
4270   }
4271   if (FLAG_max_old_space_size > 0) {
4272     max_old_generation_size_ =
4273         static_cast<size_t>(FLAG_max_old_space_size) * MB;
4274   }
4275 
4276   if (Page::kPageSize > MB) {
4277     max_semi_space_size_ = RoundUp<Page::kPageSize>(max_semi_space_size_);
4278     max_old_generation_size_ =
4279         RoundUp<Page::kPageSize>(max_old_generation_size_);
4280   }
4281 
4282   if (FLAG_stress_compaction) {
4283     // This will cause more frequent GCs when stressing.
4284     max_semi_space_size_ = MB;
4285   }
4286 
4287   // The new space size must be a power of two to support single-bit testing
4288   // for containment.
4289   max_semi_space_size_ = static_cast<size_t>(base::bits::RoundUpToPowerOfTwo64(
4290       static_cast<uint64_t>(max_semi_space_size_)));
4291 
4292   if (max_semi_space_size_ == kMaxSemiSpaceSizeInKB * KB) {
4293     // Start with at least 1*MB semi-space on machines with a lot of memory.
4294     initial_semispace_size_ =
4295         Max(initial_semispace_size_, static_cast<size_t>(1 * MB));
4296   }
4297 
4298   if (FLAG_min_semi_space_size > 0) {
4299     size_t initial_semispace_size =
4300         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
4301     if (initial_semispace_size > max_semi_space_size_) {
4302       initial_semispace_size_ = max_semi_space_size_;
4303       if (FLAG_trace_gc) {
4304         PrintIsolate(isolate_,
4305                      "Min semi-space size cannot be more than the maximum "
4306                      "semi-space size of %" PRIuS " MB\n",
4307                      max_semi_space_size_ / MB);
4308       }
4309     } else {
4310       initial_semispace_size_ =
4311           RoundUp<Page::kPageSize>(initial_semispace_size);
4312     }
4313   }
4314 
4315   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4316 
4317   if (FLAG_semi_space_growth_factor < 2) {
4318     FLAG_semi_space_growth_factor = 2;
4319   }
4320 
4321   // The old generation is paged and needs at least one page for each space.
4322   int paged_space_count =
4323       LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
4324   initial_max_old_generation_size_ = max_old_generation_size_ =
4325       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
4326           max_old_generation_size_);
4327 
4328   if (FLAG_initial_old_space_size > 0) {
4329     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
4330   } else {
4331     initial_old_generation_size_ =
4332         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
4333   }
4334   old_generation_allocation_limit_ = initial_old_generation_size_;
4335 
4336   // We rely on being able to allocate new arrays in paged spaces.
4337   DCHECK(kMaxRegularHeapObjectSize >=
4338          (JSArray::kSize +
4339           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
4340           AllocationMemento::kSize));
4341 
4342   code_range_size_ = code_range_size_in_mb * MB;
4343 
4344   configured_ = true;
4345 }
4346 
4347 
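// Appends |string| to the fixed-size trace ring buffer. Once the end of the
// buffer is reached the write wraps around to the start and ring_buffer_full_
// is set, so only the most recent kTraceRingBufferSize bytes are retained.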
4348 void Heap::AddToRingBuffer(const char* string) {
4349   size_t first_part =
4350       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4351   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4352   ring_buffer_end_ += first_part;
4353   if (first_part < strlen(string)) {
4354     ring_buffer_full_ = true;
4355     size_t second_part = strlen(string) - first_part;
4356     memcpy(trace_ring_buffer_, string + first_part, second_part);
4357     ring_buffer_end_ = second_part;
4358   }
4359 }
4360 
4361 
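// Copies the ring buffer contents into |buffer| in chronological order: when
// the buffer has wrapped, the oldest bytes (those after ring_buffer_end_) are
// copied first, followed by the newest. |buffer| must be able to hold at
// least kTraceRingBufferSize bytes.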
4362 void Heap::GetFromRingBuffer(char* buffer) {
4363   size_t copied = 0;
4364   if (ring_buffer_full_) {
4365     copied = kTraceRingBufferSize - ring_buffer_end_;
4366     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4367   }
4368   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4369 }
4370 
4371 void Heap::ConfigureHeapDefault() { ConfigureHeap(0, 0, 0); }
4372 
4373 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4374   *stats->start_marker = HeapStats::kStartMarker;
4375   *stats->end_marker = HeapStats::kEndMarker;
4376   *stats->ro_space_size = read_only_space_->Size();
4377   *stats->ro_space_capacity = read_only_space_->Capacity();
4378   *stats->new_space_size = new_space_->Size();
4379   *stats->new_space_capacity = new_space_->Capacity();
4380   *stats->old_space_size = old_space_->SizeOfObjects();
4381   *stats->old_space_capacity = old_space_->Capacity();
4382   *stats->code_space_size = code_space_->SizeOfObjects();
4383   *stats->code_space_capacity = code_space_->Capacity();
4384   *stats->map_space_size = map_space_->SizeOfObjects();
4385   *stats->map_space_capacity = map_space_->Capacity();
4386   *stats->lo_space_size = lo_space_->Size();
4387   isolate_->global_handles()->RecordStats(stats);
4388   *stats->memory_allocator_size = memory_allocator()->Size();
4389   *stats->memory_allocator_capacity =
4390       memory_allocator()->Size() + memory_allocator()->Available();
4391   *stats->os_error = base::OS::GetLastError();
4392   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
4393   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
4394   if (take_snapshot) {
4395     HeapIterator iterator(this);
4396     for (HeapObject* obj = iterator.next(); obj != nullptr;
4397          obj = iterator.next()) {
4398       InstanceType type = obj->map()->instance_type();
4399       DCHECK(0 <= type && type <= LAST_TYPE);
4400       stats->objects_per_type[type]++;
4401       stats->size_per_type[type] += obj->Size();
4402     }
4403   }
4404   if (stats->last_few_messages != nullptr)
4405     GetFromRingBuffer(stats->last_few_messages);
4406   if (stats->js_stacktrace != nullptr) {
4407     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
4408     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
4409     if (gc_state() == Heap::NOT_IN_GC) {
4410       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
4411     } else {
4412       accumulator.Add("Cannot get stack trace in GC.");
4413     }
4414   }
4415 }
4416 
4417 size_t Heap::OldGenerationSizeOfObjects() {
4418   PagedSpaces spaces(this, PagedSpaces::SpacesSpecifier::kAllPagedSpaces);
4419   size_t total = 0;
4420   for (PagedSpace* space = spaces.next(); space != nullptr;
4421        space = spaces.next()) {
4422     total += space->SizeOfObjects();
4423   }
4424   return total + lo_space_->SizeOfObjects();
4425 }
4426 
4427 uint64_t Heap::PromotedExternalMemorySize() {
4428   if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
4429   return static_cast<uint64_t>(external_memory_ -
4430                                external_memory_at_last_mark_compact_);
4431 }
4432 
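// Returns true while the embedder has signalled a PERFORMANCE_LOAD RAIL mode,
// the allocation limit has not been overshot by a large margin, and less than
// kMaxLoadTimeMs have elapsed since the load started; callers below use this
// to defer GC work while a load is in progress.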
4433 bool Heap::ShouldOptimizeForLoadTime() {
4434   return isolate()->rail_mode() == PERFORMANCE_LOAD &&
4435          !AllocationLimitOvershotByLargeMargin() &&
4436          MonotonicallyIncreasingTimeInMs() <
4437              isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
4438 }
4439 
4440 // This predicate is called when an old generation space cannot allocate from
4441 // the free list and is about to add a new page. Returning false will cause a
4442 // major GC. It happens when the old generation allocation limit is reached and
4443 // - either we need to optimize for memory usage,
4444 // - or the incremental marking is not in progress and we cannot start it.
4445 bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
4446   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
4447   // We reached the old generation allocation limit.
4448 
4449   if (ShouldOptimizeForMemoryUsage()) return false;
4450 
4451   if (ShouldOptimizeForLoadTime()) return true;
4452 
4453   if (incremental_marking()->NeedsFinalization()) {
4454     return !AllocationLimitOvershotByLargeMargin();
4455   }
4456 
4457   if (incremental_marking()->IsStopped() &&
4458       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
4459     // We cannot start incremental marking.
4460     return false;
4461   }
4462   return true;
4463 }
4464 
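// Chooses how aggressively the heap may grow: minimal when memory should be
// reduced or --stress-compaction is enabled, conservative when optimizing for
// memory usage, slow when the memory reducer asks for it, and the default
// mode otherwise.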
4465 Heap::HeapGrowingMode Heap::CurrentHeapGrowingMode() {
4466   if (ShouldReduceMemory() || FLAG_stress_compaction) {
4467     return Heap::HeapGrowingMode::kMinimal;
4468   }
4469 
4470   if (ShouldOptimizeForMemoryUsage()) {
4471     return Heap::HeapGrowingMode::kConservative;
4472   }
4473 
4474   if (memory_reducer()->ShouldGrowHeapSlowly()) {
4475     return Heap::HeapGrowingMode::kSlow;
4476   }
4477 
4478   return Heap::HeapGrowingMode::kDefault;
4479 }
4480 
4481 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
4482 // The kNoLimit means that either incremental marking is disabled or it is too
4483 // early to start incremental marking.
4484 // The kSoftLimit means that incremental marking should be started soon.
4485 // The kHardLimit means that incremental marking should be started immediately.
4486 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
4487   // Code using an AlwaysAllocateScope assumes that the GC state does not
4488   // change; that implies that no marking steps must be performed.
4489   if (!incremental_marking()->CanBeActivated() || always_allocate()) {
4490     // Incremental marking is disabled or it is too early to start.
4491     return IncrementalMarkingLimit::kNoLimit;
4492   }
4493   if (FLAG_stress_incremental_marking) {
4494     return IncrementalMarkingLimit::kHardLimit;
4495   }
4496   if (OldGenerationSizeOfObjects() <=
4497       IncrementalMarking::kActivationThreshold) {
4498     // It is too early to start incremental marking.
4499     return IncrementalMarkingLimit::kNoLimit;
4500   }
4501   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
4502       HighMemoryPressure()) {
4503     // If there is high memory pressure or stress testing is enabled, then
4504     // start marking immediately.
4505     return IncrementalMarkingLimit::kHardLimit;
4506   }
4507 
4508   if (FLAG_stress_marking > 0) {
4509     double gained_since_last_gc =
4510         PromotedSinceLastGC() +
4511         (external_memory_ - external_memory_at_last_mark_compact_);
4512     double size_before_gc =
4513         OldGenerationObjectsAndPromotedExternalMemorySize() -
4514         gained_since_last_gc;
4515     double bytes_to_limit = old_generation_allocation_limit_ - size_before_gc;
4516     if (bytes_to_limit > 0) {
4517       double current_percent = (gained_since_last_gc / bytes_to_limit) * 100.0;
4518 
4519       if (FLAG_trace_stress_marking) {
4520         isolate()->PrintWithTimestamp(
4521             "[IncrementalMarking] %.2lf%% of the memory limit reached\n",
4522             current_percent);
4523       }
4524 
4525       if (FLAG_fuzzer_gc_analysis) {
4526         // Skips values >=100% since they already trigger marking.
4527         if (current_percent < 100.0) {
4528           max_marking_limit_reached_ =
4529               std::max(max_marking_limit_reached_, current_percent);
4530         }
4531       } else if (static_cast<int>(current_percent) >=
4532                  stress_marking_percentage_) {
4533         stress_marking_percentage_ = NextStressMarkingLimit();
4534         return IncrementalMarkingLimit::kHardLimit;
4535       }
4536     }
4537   }
4538 
4539   size_t old_generation_space_available = OldGenerationSpaceAvailable();
4540 
4541   if (old_generation_space_available > new_space_->Capacity()) {
4542     return IncrementalMarkingLimit::kNoLimit;
4543   }
4544   if (ShouldOptimizeForMemoryUsage()) {
4545     return IncrementalMarkingLimit::kHardLimit;
4546   }
4547   if (ShouldOptimizeForLoadTime()) {
4548     return IncrementalMarkingLimit::kNoLimit;
4549   }
4550   if (old_generation_space_available == 0) {
4551     return IncrementalMarkingLimit::kHardLimit;
4552   }
4553   return IncrementalMarkingLimit::kSoftLimit;
4554 }
4555 
4556 void Heap::EnableInlineAllocation() {
4557   if (!inline_allocation_disabled_) return;
4558   inline_allocation_disabled_ = false;
4559 
4560   // Update inline allocation limit for new space.
4561   new_space()->UpdateInlineAllocationLimit(0);
4562 }
4563 
4564 
4565 void Heap::DisableInlineAllocation() {
4566   if (inline_allocation_disabled_) return;
4567   inline_allocation_disabled_ = true;
4568 
4569   // Update inline allocation limit for new space.
4570   new_space()->UpdateInlineAllocationLimit(0);
4571 
4572   // Update inline allocation limit for old spaces.
4573   PagedSpaces spaces(this);
4574   CodeSpaceMemoryModificationScope modification_scope(this);
4575   for (PagedSpace* space = spaces.next(); space != nullptr;
4576        space = spaces.next()) {
4577     space->FreeLinearAllocationArea();
4578   }
4579 }
4580 
4581 HeapObject* Heap::EnsureImmovableCode(HeapObject* heap_object,
4582                                       int object_size) {
4583   // Code objects that must stay at a fixed address are either placed on a
4584   // page that is marked as never-evacuate (the first page of code space, or
4585   // any page during snapshot creation) or re-allocated in large object space.
4586   DCHECK(heap_object);
4587   DCHECK(code_space_->Contains(heap_object));
4588   DCHECK_GE(object_size, 0);
4589   if (!Heap::IsImmovable(heap_object)) {
4590     if (isolate()->serializer_enabled() ||
4591         code_space_->first_page()->Contains(heap_object->address())) {
4592       MemoryChunk::FromAddress(heap_object->address())->MarkNeverEvacuate();
4593     } else {
4594       // Discard the first code allocation, which was on a page where it could
4595       // be moved.
4596       CreateFillerObjectAt(heap_object->address(), object_size,
4597                            ClearRecordedSlots::kNo);
4598       heap_object = AllocateRawCodeInLargeObjectSpace(object_size);
4599       UnprotectAndRegisterMemoryChunk(heap_object);
4600       ZapCodeObject(heap_object->address(), object_size);
4601       OnAllocationEvent(heap_object, object_size);
4602     }
4603   }
4604   return heap_object;
4605 }
4606 
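// Allocates a raw object, retrying after up to two garbage collections
// triggered by allocation failure. Returns nullptr instead of aborting if the
// allocation still fails.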
4607 HeapObject* Heap::AllocateRawWithLightRetry(int size, AllocationSpace space,
4608                                             AllocationAlignment alignment) {
4609   HeapObject* result;
4610   AllocationResult alloc = AllocateRaw(size, space, alignment);
4611   if (alloc.To(&result)) {
4612     DCHECK(result != ReadOnlyRoots(this).exception());
4613     return result;
4614   }
4615   // Two GCs before panicking. In new space this will almost always succeed.
4616   for (int i = 0; i < 2; i++) {
4617     CollectGarbage(alloc.RetrySpace(),
4618                    GarbageCollectionReason::kAllocationFailure);
4619     alloc = AllocateRaw(size, space, alignment);
4620     if (alloc.To(&result)) {
4621       DCHECK(result != ReadOnlyRoots(this).exception());
4622       return result;
4623     }
4624   }
4625   return nullptr;
4626 }
4627 
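// Like AllocateRawWithLightRetry, but as a last resort collects all available
// garbage and retries under an AlwaysAllocateScope. If even that fails, the
// process is terminated with an out-of-memory error.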
4628 HeapObject* Heap::AllocateRawWithRetryOrFail(int size, AllocationSpace space,
4629                                              AllocationAlignment alignment) {
4630   AllocationResult alloc;
4631   HeapObject* result = AllocateRawWithLightRetry(size, space, alignment);
4632   if (result) return result;
4633 
4634   isolate()->counters()->gc_last_resort_from_handles()->Increment();
4635   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
4636   {
4637     AlwaysAllocateScope scope(isolate());
4638     alloc = AllocateRaw(size, space, alignment);
4639   }
4640   if (alloc.To(&result)) {
4641     DCHECK(result != ReadOnlyRoots(this).exception());
4642     return result;
4643   }
4644   // TODO(1181417): Fix this.
4645   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
4646   return nullptr;
4647 }
4648 
4649 // TODO(jkummerow): Refactor this. AllocateRaw should take an "immovability"
4650 // parameter and just do what's necessary.
4651 HeapObject* Heap::AllocateRawCodeInLargeObjectSpace(int size) {
4652   AllocationResult alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
4653   HeapObject* result;
4654   if (alloc.To(&result)) {
4655     DCHECK(result != ReadOnlyRoots(this).exception());
4656     return result;
4657   }
4658   // Two GCs before panicking.
4659   for (int i = 0; i < 2; i++) {
4660     CollectGarbage(alloc.RetrySpace(),
4661                    GarbageCollectionReason::kAllocationFailure);
4662     alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
4663     if (alloc.To(&result)) {
4664       DCHECK(result != ReadOnlyRoots(this).exception());
4665       return result;
4666     }
4667   }
4668   isolate()->counters()->gc_last_resort_from_handles()->Increment();
4669   CollectAllAvailableGarbage(GarbageCollectionReason::kLastResort);
4670   {
4671     AlwaysAllocateScope scope(isolate());
4672     alloc = lo_space()->AllocateRaw(size, EXECUTABLE);
4673   }
4674   if (alloc.To(&result)) {
4675     DCHECK(result != ReadOnlyRoots(this).exception());
4676     return result;
4677   }
4678   // TODO(1181417): Fix this.
4679   FatalProcessOutOfMemory("CALL_AND_RETRY_LAST");
4680   return nullptr;
4681 }
4682 
4683 void Heap::SetUp() {
4684 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
4685   allocation_timeout_ = NextAllocationTimeout();
4686 #endif
4687 
4688   // Initialize heap spaces and initial maps and objects.
4689   //
4690   // If the heap is not yet configured (e.g. through the API), configure it.
4691   // Configuration is based on the flags new-space-size (really the semispace
4692   // size) and old-space-size if set or the initial values of semispace_size_
4693   // and old_generation_size_ otherwise.
4694   if (!configured_) ConfigureHeapDefault();
4695 
4696   mmap_region_base_ =
4697       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
4698       ~kMmapRegionMask;
4699 
4700   // Set up memory allocator.
4701   memory_allocator_ =
4702       new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
4703 
4704   store_buffer_ = new StoreBuffer(this);
4705 
4706   heap_controller_ = new HeapController(this);
4707 
4708   mark_compact_collector_ = new MarkCompactCollector(this);
4709   incremental_marking_ =
4710       new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
4711                              mark_compact_collector_->weak_objects());
4712 
4713   if (FLAG_concurrent_marking) {
4714     MarkCompactCollector::MarkingWorklist* marking_worklist =
4715         mark_compact_collector_->marking_worklist();
4716     concurrent_marking_ = new ConcurrentMarking(
4717         this, marking_worklist->shared(), marking_worklist->bailout(),
4718         marking_worklist->on_hold(), mark_compact_collector_->weak_objects());
4719   } else {
4720     concurrent_marking_ =
4721         new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
4722   }
4723 
4724   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
4725     space_[i] = nullptr;
4726   }
4727 
4728   space_[RO_SPACE] = read_only_space_ = new ReadOnlySpace(this);
4729   space_[NEW_SPACE] = new_space_ =
4730       new NewSpace(this, initial_semispace_size_, max_semi_space_size_);
4731   space_[OLD_SPACE] = old_space_ = new OldSpace(this);
4732   space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
4733   space_[MAP_SPACE] = map_space_ = new MapSpace(this);
4734   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this);
4735   space_[NEW_LO_SPACE] = new_lo_space_ = new NewLargeObjectSpace(this);
4736 
4737   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
4738        i++) {
4739     deferred_counters_[i] = 0;
4740   }
4741 
4742   tracer_ = new GCTracer(this);
4743 #ifdef ENABLE_MINOR_MC
4744   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
4745 #else
4746   minor_mark_compact_collector_ = nullptr;
4747 #endif  // ENABLE_MINOR_MC
4748   array_buffer_collector_ = new ArrayBufferCollector(this);
4749   gc_idle_time_handler_ = new GCIdleTimeHandler();
4750   memory_reducer_ = new MemoryReducer(this);
4751   if (V8_UNLIKELY(FLAG_gc_stats)) {
4752     live_object_stats_ = new ObjectStats(this);
4753     dead_object_stats_ = new ObjectStats(this);
4754   }
4755   scavenge_job_ = new ScavengeJob();
4756   local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
4757 
4758   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
4759   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
4760 
4761   store_buffer()->SetUp();
4762 
4763   mark_compact_collector()->SetUp();
4764 #ifdef ENABLE_MINOR_MC
4765   if (minor_mark_compact_collector() != nullptr) {
4766     minor_mark_compact_collector()->SetUp();
4767   }
4768 #endif  // ENABLE_MINOR_MC
4769 
4770   idle_scavenge_observer_ = new IdleScavengeObserver(
4771       *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
4772   new_space()->AddAllocationObserver(idle_scavenge_observer_);
4773 
4774   SetGetExternallyAllocatedMemoryInBytesCallback(
4775       DefaultGetExternallyAllocatedMemoryInBytesCallback);
4776 
4777   if (FLAG_stress_marking > 0) {
4778     stress_marking_percentage_ = NextStressMarkingLimit();
4779     stress_marking_observer_ = new StressMarkingObserver(*this);
4780     AddAllocationObserversToAllSpaces(stress_marking_observer_,
4781                                       stress_marking_observer_);
4782   }
4783   if (FLAG_stress_scavenge > 0) {
4784     stress_scavenge_observer_ = new StressScavengeObserver(*this);
4785     new_space()->AddAllocationObserver(stress_scavenge_observer_);
4786   }
4787 
4788   write_protect_code_memory_ = FLAG_write_protect_code_memory;
4789 
4790   external_reference_table_.Init(isolate_);
4791 }
4792 
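// Initializes the string hash seed from the --hash-seed flag or, when the
// flag is zero, from the isolate's random number generator.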
4793 void Heap::InitializeHashSeed() {
4794   uint64_t new_hash_seed;
4795   if (FLAG_hash_seed == 0) {
4796     int64_t rnd = isolate()->random_number_generator()->NextInt64();
4797     new_hash_seed = static_cast<uint64_t>(rnd);
4798   } else {
4799     new_hash_seed = static_cast<uint64_t>(FLAG_hash_seed);
4800   }
4801   hash_seed()->copy_in(0, reinterpret_cast<byte*>(&new_hash_seed), kInt64Size);
4802 }
4803 
4804 void Heap::SetStackLimits() {
4805   DCHECK_NOT_NULL(isolate_);
4806   DCHECK(isolate_ == isolate());
4807   // On 64-bit machines, pointers are generally out of range of Smis. We write
4808   // something that looks like an out-of-range Smi to the GC.
4809 
4810   // Set up the special root array entries containing the stack limits.
4811   // These are actually addresses, but the tag makes the GC ignore it.
4812   roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
4813       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
4814   roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
4815       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
4816 }
4817 
4818 void Heap::ClearStackLimits() {
4819   roots_[kStackLimitRootIndex] = Smi::kZero;
4820   roots_[kRealStackLimitRootIndex] = Smi::kZero;
4821 }
4822 
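// Computes the allocation countdown until the next forced GC when
// --random-gc-interval or --gc-interval is set. A timeout that has not yet
// expired is left unchanged so that unrelated GCs do not reset it.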
4823 int Heap::NextAllocationTimeout(int current_timeout) {
4824   if (FLAG_random_gc_interval > 0) {
4825     // If the current timeout hasn't reached 0, the GC was caused by something
4826     // other than the --stress-atomic-gc flag and we don't update the timeout.
4827     if (current_timeout <= 0) {
4828       return isolate()->fuzzer_rng()->NextInt(FLAG_random_gc_interval + 1);
4829     } else {
4830       return current_timeout;
4831     }
4832   }
4833   return FLAG_gc_interval;
4834 }
4835 
4836 void Heap::PrintAllocationsHash() {
4837   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
4838   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
4839 }
4840 
4841 void Heap::PrintMaxMarkingLimitReached() {
4842   PrintF("\n### Maximum marking limit reached = %.02lf\n",
4843          max_marking_limit_reached_);
4844 }
4845 
4846 void Heap::PrintMaxNewSpaceSizeReached() {
4847   PrintF("\n### Maximum new space size reached = %.02lf\n",
4848          stress_scavenge_observer_->MaxNewSpaceSizeReached());
4849 }
4850 
4851 int Heap::NextStressMarkingLimit() {
4852   return isolate()->fuzzer_rng()->NextInt(FLAG_stress_marking + 1);
4853 }
4854 
4855 void Heap::NotifyDeserializationComplete() {
4856   PagedSpaces spaces(this);
4857   for (PagedSpace* s = spaces.next(); s != nullptr; s = spaces.next()) {
4858     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
4859 #ifdef DEBUG
4860     // All pages right after bootstrapping must be marked as never-evacuate.
4861     for (Page* p : *s) {
4862       DCHECK(p->NeverEvacuate());
4863     }
4864 #endif  // DEBUG
4865   }
4866 
4867   read_only_space()->MarkAsReadOnly();
4868   deserialization_complete_ = true;
4869 }
4870 
4871 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
4872   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
4873   local_embedder_heap_tracer()->SetRemoteTracer(tracer);
4874 }
4875 
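// Reports |js_object| to the embedder heap tracer as a wrapper candidate if
// it has at least two embedder fields, the first field is non-null, and
// neither of the first two fields is the undefined value.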
4876 void Heap::TracePossibleWrapper(JSObject* js_object) {
4877   DCHECK(js_object->IsApiWrapper());
4878   if (js_object->GetEmbedderFieldCount() >= 2 &&
4879       js_object->GetEmbedderField(0) &&
4880       js_object->GetEmbedderField(0) != ReadOnlyRoots(this).undefined_value() &&
4881       js_object->GetEmbedderField(1) != ReadOnlyRoots(this).undefined_value()) {
4882     DCHECK_EQ(0,
4883               reinterpret_cast<intptr_t>(js_object->GetEmbedderField(0)) % 2);
4884     local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
4885         reinterpret_cast<void*>(js_object->GetEmbedderField(0)),
4886         reinterpret_cast<void*>(js_object->GetEmbedderField(1))));
4887   }
4888 }
4889 
4890 void Heap::RegisterExternallyReferencedObject(Object** object) {
4891   // The embedder is not aware of whether numbers are materialized as heap
4892   // objects or just passed around as Smis.
4893   if (!(*object)->IsHeapObject()) return;
4894   HeapObject* heap_object = HeapObject::cast(*object);
4895   DCHECK(Contains(heap_object));
4896   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
4897     incremental_marking()->WhiteToGreyAndPush(heap_object);
4898   } else {
4899     DCHECK(mark_compact_collector()->in_use());
4900     mark_compact_collector()->MarkExternallyReferencedObject(heap_object);
4901   }
4902 }
4903 
4904 void Heap::StartTearDown() { SetGCState(TEAR_DOWN); }
4905 
4906 void Heap::TearDown() {
4907   DCHECK_EQ(gc_state_, TEAR_DOWN);
4908 #ifdef VERIFY_HEAP
4909   if (FLAG_verify_heap) {
4910     Verify();
4911   }
4912 #endif
4913 
4914   UpdateMaximumCommitted();
4915 
4916   if (FLAG_verify_predictable || FLAG_fuzzer_gc_analysis) {
4917     PrintAllocationsHash();
4918   }
4919 
4920   if (FLAG_fuzzer_gc_analysis) {
4921     if (FLAG_stress_marking > 0) {
4922       PrintMaxMarkingLimitReached();
4923     }
4924     if (FLAG_stress_scavenge > 0) {
4925       PrintMaxNewSpaceSizeReached();
4926     }
4927   }
4928 
4929   new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
4930   delete idle_scavenge_observer_;
4931   idle_scavenge_observer_ = nullptr;
4932 
4933   if (FLAG_stress_marking > 0) {
4934     RemoveAllocationObserversFromAllSpaces(stress_marking_observer_,
4935                                            stress_marking_observer_);
4936     delete stress_marking_observer_;
4937     stress_marking_observer_ = nullptr;
4938   }
4939   if (FLAG_stress_scavenge > 0) {
4940     new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
4941     delete stress_scavenge_observer_;
4942     stress_scavenge_observer_ = nullptr;
4943   }
4944 
4945   if (heap_controller_ != nullptr) {
4946     delete heap_controller_;
4947     heap_controller_ = nullptr;
4948   }
4949 
4950   if (mark_compact_collector_ != nullptr) {
4951     mark_compact_collector_->TearDown();
4952     delete mark_compact_collector_;
4953     mark_compact_collector_ = nullptr;
4954   }
4955 
4956 #ifdef ENABLE_MINOR_MC
4957   if (minor_mark_compact_collector_ != nullptr) {
4958     minor_mark_compact_collector_->TearDown();
4959     delete minor_mark_compact_collector_;
4960     minor_mark_compact_collector_ = nullptr;
4961   }
4962 #endif  // ENABLE_MINOR_MC
4963 
4964   if (array_buffer_collector_ != nullptr) {
4965     delete array_buffer_collector_;
4966     array_buffer_collector_ = nullptr;
4967   }
4968 
4969   delete incremental_marking_;
4970   incremental_marking_ = nullptr;
4971 
4972   delete concurrent_marking_;
4973   concurrent_marking_ = nullptr;
4974 
4975   delete gc_idle_time_handler_;
4976   gc_idle_time_handler_ = nullptr;
4977 
4978   if (memory_reducer_ != nullptr) {
4979     memory_reducer_->TearDown();
4980     delete memory_reducer_;
4981     memory_reducer_ = nullptr;
4982   }
4983 
4984   if (live_object_stats_ != nullptr) {
4985     delete live_object_stats_;
4986     live_object_stats_ = nullptr;
4987   }
4988 
4989   if (dead_object_stats_ != nullptr) {
4990     delete dead_object_stats_;
4991     dead_object_stats_ = nullptr;
4992   }
4993 
4994   delete local_embedder_heap_tracer_;
4995   local_embedder_heap_tracer_ = nullptr;
4996 
4997   delete scavenge_job_;
4998   scavenge_job_ = nullptr;
4999 
5000   isolate_->global_handles()->TearDown();
5001 
5002   external_string_table_.TearDown();
5003 
5004   // Tear down all ArrayBuffers before tearing down the heap since their
5005   // byte_length may be a HeapNumber which is required for freeing the backing
5006   // store.
5007   ArrayBufferTracker::TearDown(this);
5008 
5009   delete tracer_;
5010   tracer_ = nullptr;
5011 
5012   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
5013     delete space_[i];
5014     space_[i] = nullptr;
5015   }
5016 
5017   store_buffer()->TearDown();
5018 
5019   memory_allocator()->TearDown();
5020 
5021   StrongRootsList* next = nullptr;
5022   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5023     next = list->next;
5024     delete list;
5025   }
5026   strong_roots_list_ = nullptr;
5027 
5028   delete store_buffer_;
5029   store_buffer_ = nullptr;
5030 
5031   delete memory_allocator_;
5032   memory_allocator_ = nullptr;
5033 }
5034 
5035 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5036                                  GCType gc_type, void* data) {
5037   DCHECK_NOT_NULL(callback);
5038   DCHECK(gc_prologue_callbacks_.end() ==
5039          std::find(gc_prologue_callbacks_.begin(), gc_prologue_callbacks_.end(),
5040                    GCCallbackTuple(callback, gc_type, data)));
5041   gc_prologue_callbacks_.emplace_back(callback, gc_type, data);
5042 }
5043 
5044 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
5045                                     void* data) {
5046   DCHECK_NOT_NULL(callback);
5047   for (size_t i = 0; i < gc_prologue_callbacks_.size(); i++) {
5048     if (gc_prologue_callbacks_[i].callback == callback &&
5049         gc_prologue_callbacks_[i].data == data) {
5050       gc_prologue_callbacks_[i] = gc_prologue_callbacks_.back();
5051       gc_prologue_callbacks_.pop_back();
5052       return;
5053     }
5054   }
5055   UNREACHABLE();
5056 }
5057 
5058 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5059                                  GCType gc_type, void* data) {
5060   DCHECK_NOT_NULL(callback);
5061   DCHECK(gc_epilogue_callbacks_.end() ==
5062          std::find(gc_epilogue_callbacks_.begin(), gc_epilogue_callbacks_.end(),
5063                    GCCallbackTuple(callback, gc_type, data)));
5064   gc_epilogue_callbacks_.emplace_back(callback, gc_type, data);
5065 }
5066 
5067 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallbackWithData callback,
5068                                     void* data) {
5069   DCHECK_NOT_NULL(callback);
5070   for (size_t i = 0; i < gc_epilogue_callbacks_.size(); i++) {
5071     if (gc_epilogue_callbacks_[i].callback == callback &&
5072         gc_epilogue_callbacks_[i].data == data) {
5073       gc_epilogue_callbacks_[i] = gc_epilogue_callbacks_.back();
5074       gc_epilogue_callbacks_.pop_back();
5075       return;
5076     }
5077   }
5078   UNREACHABLE();
5079 }
5080 
5081 namespace {
5082 Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
5083                                            Handle<WeakArrayList> array,
5084                                            PretenureFlag pretenure) {
5085   if (array->length() == 0) {
5086     return array;
5087   }
5088   int new_length = array->CountLiveWeakReferences();
5089   if (new_length == array->length()) {
5090     return array;
5091   }
5092 
5093   Handle<WeakArrayList> new_array = WeakArrayList::EnsureSpace(
5094       heap->isolate(),
5095       handle(ReadOnlyRoots(heap).empty_weak_array_list(), heap->isolate()),
5096       new_length, pretenure);
5097   // Allocation might have caused GC and turned some of the elements into
5098   // cleared weak heap objects. Count the number of live references again and
5099   // fill in the new array.
5100   int copy_to = 0;
5101   for (int i = 0; i < array->length(); i++) {
5102     MaybeObject* element = array->Get(i);
5103     if (element->IsClearedWeakHeapObject()) continue;
5104     new_array->Set(copy_to++, element);
5105   }
5106   new_array->set_length(copy_to);
5107   return new_array;
5108 }
5109 
5110 }  // anonymous namespace
5111 
5112 void Heap::CompactWeakArrayLists(PretenureFlag pretenure) {
5113   // Find known PrototypeUsers and compact them.
5114   std::vector<Handle<PrototypeInfo>> prototype_infos;
5115   {
5116     HeapIterator iterator(this);
5117     for (HeapObject* o = iterator.next(); o != nullptr; o = iterator.next()) {
5118       if (o->IsPrototypeInfo()) {
5119         PrototypeInfo* prototype_info = PrototypeInfo::cast(o);
5120         if (prototype_info->prototype_users()->IsWeakArrayList()) {
5121           prototype_infos.emplace_back(handle(prototype_info, isolate()));
5122         }
5123       }
5124     }
5125   }
5126   for (auto& prototype_info : prototype_infos) {
5127     Handle<WeakArrayList> array(
5128         WeakArrayList::cast(prototype_info->prototype_users()), isolate());
5129     DCHECK_IMPLIES(pretenure == TENURED,
5130                    InOldSpace(*array) ||
5131                        *array == ReadOnlyRoots(this).empty_weak_array_list());
5132     WeakArrayList* new_array = PrototypeUsers::Compact(
5133         array, this, JSObject::PrototypeRegistryCompactionCallback, pretenure);
5134     prototype_info->set_prototype_users(new_array);
5135   }
5136 
5137   // Find known WeakArrayLists and compact them.
5138   Handle<WeakArrayList> scripts(script_list(), isolate());
5139   DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*scripts));
5140   scripts = CompactWeakArrayList(this, scripts, pretenure);
5141   set_script_list(*scripts);
5142 
5143   Handle<WeakArrayList> no_script_list(noscript_shared_function_infos(),
5144                                        isolate());
5145   DCHECK_IMPLIES(pretenure == TENURED, InOldSpace(*no_script_list));
5146   no_script_list = CompactWeakArrayList(this, no_script_list, pretenure);
5147   set_noscript_shared_function_infos(*no_script_list);
5148 }
5149 
5150 void Heap::AddRetainedMap(Handle<Map> map) {
5151   if (map->is_in_retained_map_list()) {
5152     return;
5153   }
5154   Handle<WeakArrayList> array(retained_maps(), isolate());
5155   if (array->IsFull()) {
5156     CompactRetainedMaps(*array);
5157   }
5158   array =
5159       WeakArrayList::AddToEnd(isolate(), array, MaybeObjectHandle::Weak(map));
5160   array = WeakArrayList::AddToEnd(
5161       isolate(), array,
5162       MaybeObjectHandle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()));
5163   if (*array != retained_maps()) {
5164     set_retained_maps(*array);
5165   }
5166   map->set_is_in_retained_map_list(true);
5167 }
5168 
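// retained_maps() stores (weak map reference, age) pairs. Compaction drops
// pairs whose map has been cleared, shifts the surviving pairs to the front,
// and pads the freed tail with strong references to undefined.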
5169 void Heap::CompactRetainedMaps(WeakArrayList* retained_maps) {
5170   DCHECK_EQ(retained_maps, this->retained_maps());
5171   int length = retained_maps->length();
5172   int new_length = 0;
5173   int new_number_of_disposed_maps = 0;
5174   // This loop compacts the array by removing cleared weak cells.
5175   for (int i = 0; i < length; i += 2) {
5176     MaybeObject* maybe_object = retained_maps->Get(i);
5177     if (maybe_object->IsClearedWeakHeapObject()) {
5178       continue;
5179     }
5180 
5181     DCHECK(maybe_object->IsWeakHeapObject());
5182 
5183     MaybeObject* age = retained_maps->Get(i + 1);
5184     DCHECK(age->IsSmi());
5185     if (i != new_length) {
5186       retained_maps->Set(new_length, maybe_object);
5187       retained_maps->Set(new_length + 1, age);
5188     }
5189     if (i < number_of_disposed_maps_) {
5190       new_number_of_disposed_maps += 2;
5191     }
5192     new_length += 2;
5193   }
5194   number_of_disposed_maps_ = new_number_of_disposed_maps;
5195   HeapObject* undefined = ReadOnlyRoots(this).undefined_value();
5196   for (int i = new_length; i < length; i++) {
5197     retained_maps->Set(i, HeapObjectReference::Strong(undefined));
5198   }
5199   if (new_length != length) retained_maps->set_length(new_length);
5200 }
5201 
5202 void Heap::FatalProcessOutOfMemory(const char* location) {
5203   v8::internal::V8::FatalProcessOutOfMemory(isolate(), location, true);
5204 }
5205 
5206 #ifdef DEBUG
5207 
5208 class PrintHandleVisitor : public RootVisitor {
5209  public:
5210   void VisitRootPointers(Root root, const char* description, Object** start,
5211                          Object** end) override {
5212     for (Object** p = start; p < end; p++)
5213       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
5214              reinterpret_cast<void*>(*p));
5215   }
5216 };
5217 
5218 
5219 void Heap::PrintHandles() {
5220   PrintF("Handles:\n");
5221   PrintHandleVisitor v;
5222   isolate_->handle_scope_implementer()->Iterate(&v);
5223 }
5224 
5225 #endif
5226 
5227 class CheckHandleCountVisitor : public RootVisitor {
5228  public:
5229   CheckHandleCountVisitor() : handle_count_(0) {}
5230   ~CheckHandleCountVisitor() override {
5231     CHECK_GT(HandleScope::kCheckHandleThreshold, handle_count_);
5232   }
5233   void VisitRootPointers(Root root, const char* description, Object** start,
5234                          Object** end) override {
5235     handle_count_ += end - start;
5236   }
5237 
5238  private:
5239   ptrdiff_t handle_count_;
5240 };
5241 
5242 
5243 void Heap::CheckHandleCount() {
5244   CheckHandleCountVisitor v;
5245   isolate_->handle_scope_implementer()->Iterate(&v);
5246 }
5247 
5248 Address* Heap::store_buffer_top_address() {
5249   return store_buffer()->top_address();
5250 }
5251 
5252 // static
5253 intptr_t Heap::store_buffer_mask_constant() {
5254   return StoreBuffer::kStoreBufferMask;
5255 }
5256 
5257 // static
5258 Address Heap::store_buffer_overflow_function_address() {
5259   return FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow);
5260 }
5261 
5262 void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
5263   Address slot_addr = reinterpret_cast<Address>(slot);
5264   Page* page = Page::FromAddress(slot_addr);
5265   if (!page->InNewSpace()) {
5266     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5267     store_buffer()->DeleteEntry(slot_addr);
5268   }
5269 }
5270 
5271 bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
5272   if (InNewSpace(object)) {
5273     return false;
5274   }
5275   Address slot_addr = reinterpret_cast<Address>(slot);
5276   Page* page = Page::FromAddress(slot_addr);
5277   DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5278   store_buffer()->MoveAllEntriesToRememberedSet();
5279   return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
5280          RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
5281 }
5282 
5283 void Heap::ClearRecordedSlotRange(Address start, Address end) {
5284   Page* page = Page::FromAddress(start);
5285   if (!page->InNewSpace()) {
5286     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5287     store_buffer()->DeleteEntry(start, end);
5288   }
5289 }
5290 
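// Advances to the next paged space. RO_SPACE, OLD_SPACE, CODE_SPACE and
// MAP_SPACE are handled here; NEW_SPACE is skipped and nullptr marks the end
// of the iteration.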
5291 PagedSpace* PagedSpaces::next() {
5292   switch (counter_++) {
5293     case RO_SPACE:
5294       // skip NEW_SPACE
5295       counter_++;
5296       return heap_->read_only_space();
5297     case OLD_SPACE:
5298       return heap_->old_space();
5299     case CODE_SPACE:
5300       return heap_->code_space();
5301     case MAP_SPACE:
5302       return heap_->map_space();
5303     default:
5304       return nullptr;
5305   }
5306 }
5307 
5308 SpaceIterator::SpaceIterator(Heap* heap)
5309     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
5310 
5311 SpaceIterator::~SpaceIterator() {
5312 }
5313 
5314 
5315 bool SpaceIterator::has_next() {
5316   // Iterate until no more spaces.
5317   return current_space_ != LAST_SPACE;
5318 }
5319 
5320 Space* SpaceIterator::next() {
5321   DCHECK(has_next());
5322   return heap_->space(++current_space_);
5323 }
5324 
5325 
5326 class HeapObjectsFilter {
5327  public:
5328   virtual ~HeapObjectsFilter() {}
5329   virtual bool SkipObject(HeapObject* object) = 0;
5330 };
5331 
5332 
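// Filters out objects that are unreachable from the roots. The constructor
// marks everything reachable, treating weak references as strong; SkipObject
// then reports fillers and any object that was not marked.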
5333 class UnreachableObjectsFilter : public HeapObjectsFilter {
5334  public:
5335   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5336     MarkReachableObjects();
5337   }
5338 
~UnreachableObjectsFilter()5339   ~UnreachableObjectsFilter() {
5340     for (auto it : reachable_) {
5341       delete it.second;
5342       it.second = nullptr;
5343     }
5344   }
5345 
SkipObject(HeapObject * object)5346   bool SkipObject(HeapObject* object) {
5347     if (object->IsFiller()) return true;
5348     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5349     if (reachable_.count(chunk) == 0) return true;
5350     return reachable_[chunk]->count(object) == 0;
5351   }
5352 
5353  private:
MarkAsReachable(HeapObject * object)5354   bool MarkAsReachable(HeapObject* object) {
5355     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
5356     if (reachable_.count(chunk) == 0) {
5357       reachable_[chunk] = new std::unordered_set<HeapObject*>();
5358     }
5359     if (reachable_[chunk]->count(object)) return false;
5360     reachable_[chunk]->insert(object);
5361     return true;
5362   }
5363 
5364   class MarkingVisitor : public ObjectVisitor, public RootVisitor {
5365    public:
MarkingVisitor(UnreachableObjectsFilter * filter)5366     explicit MarkingVisitor(UnreachableObjectsFilter* filter)
5367         : filter_(filter) {}
5368 
VisitPointers(HeapObject * host,Object ** start,Object ** end)5369     void VisitPointers(HeapObject* host, Object** start,
5370                        Object** end) override {
5371       MarkPointers(reinterpret_cast<MaybeObject**>(start),
5372                    reinterpret_cast<MaybeObject**>(end));
5373     }
5374 
VisitPointers(HeapObject * host,MaybeObject ** start,MaybeObject ** end)5375     void VisitPointers(HeapObject* host, MaybeObject** start,
5376                        MaybeObject** end) final {
5377       MarkPointers(start, end);
5378     }
5379 
VisitRootPointers(Root root,const char * description,Object ** start,Object ** end)5380     void VisitRootPointers(Root root, const char* description, Object** start,
5381                            Object** end) override {
5382       MarkPointers(reinterpret_cast<MaybeObject**>(start),
5383                    reinterpret_cast<MaybeObject**>(end));
5384     }
5385 
TransitiveClosure()5386     void TransitiveClosure() {
5387       while (!marking_stack_.empty()) {
5388         HeapObject* obj = marking_stack_.back();
5389         marking_stack_.pop_back();
5390         obj->Iterate(this);
5391       }
5392     }
5393 
5394    private:
MarkPointers(MaybeObject ** start,MaybeObject ** end)5395     void MarkPointers(MaybeObject** start, MaybeObject** end) {
5396       // Treat weak references as strong.
5397       for (MaybeObject** p = start; p < end; p++) {
5398         HeapObject* heap_object;
5399         if ((*p)->ToStrongOrWeakHeapObject(&heap_object)) {
5400           if (filter_->MarkAsReachable(heap_object)) {
5401             marking_stack_.push_back(heap_object);
5402           }
5403         }
5404       }
5405     }
5406     UnreachableObjectsFilter* filter_;
5407     std::vector<HeapObject*> marking_stack_;
5408   };
5409 
5410   friend class MarkingVisitor;
5411 
MarkReachableObjects()5412   void MarkReachableObjects() {
5413     MarkingVisitor visitor(this);
5414     heap_->IterateRoots(&visitor, VISIT_ALL);
5415     visitor.TransitiveClosure();
5416   }
5417 
5418   Heap* heap_;
5419   DisallowHeapAllocation no_allocation_;
5420   std::unordered_map<MemoryChunk*, std::unordered_set<HeapObject*>*> reachable_;
5421 };
5422 
HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : no_heap_allocation_(),
      heap_(heap),
      filtering_(filtering),
      filter_(nullptr),
      space_iterator_(nullptr),
      object_iterator_(nullptr) {
  heap_->MakeHeapIterable();
  heap_->heap_iterator_start();
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next()->GetObjectIterator();
}


HeapIterator::~HeapIterator() {
  heap_->heap_iterator_end();
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    DCHECK_NULL(object_iterator_);
  }
#endif
  delete space_iterator_;
  delete filter_;
}


HeapObject* HeapIterator::next() {
  if (filter_ == nullptr) return NextObject();

  HeapObject* obj = NextObject();
  while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
  return obj;
}


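// Returns the next object from the current space's object iterator, advancing
// to the next space whenever the current iterator is exhausted, and nullptr
// once all spaces have been fully iterated.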
HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_.get() == nullptr) return nullptr;

  if (HeapObject* obj = object_iterator_.get()->Next()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next()->GetObjectIterator();
      if (HeapObject* obj = object_iterator_.get()->Next()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_.reset(nullptr);
  return nullptr;
}


void Heap::UpdateTotalGCTime(double duration) {
  if (FLAG_trace_gc_verbose) {
    total_gc_time_ms_ += duration;
  }
}

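// Compacts new_space_strings_ in place: holes and ThinStrings are dropped,
// external strings that are still in new space are kept, and strings that
// have left new space are moved to old_space_strings_.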
void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    Object* o = new_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    // The real external string is already in one of these vectors and was or
    // will be processed. Re-processing it will add a duplicate to the vector.
    if (o->IsThinString()) continue;
    DCHECK(o->IsExternalString());
    if (InNewSpace(o)) {
      new_space_strings_[last++] = o;
    } else {
      old_space_strings_.push_back(o);
    }
  }
  new_space_strings_.resize(last);
}

void Heap::ExternalStringTable::CleanUpAll() {
  CleanUpNewSpaceStrings();
  int last = 0;
  Isolate* isolate = heap_->isolate();
  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    Object* o = old_space_strings_[i];
    if (o->IsTheHole(isolate)) {
      continue;
    }
    // The real external string is already in one of these vectors and was or
    // will be processed. Re-processing it will add a duplicate to the vector.
    if (o->IsThinString()) continue;
    DCHECK(o->IsExternalString());
    DCHECK(!InNewSpace(o));
    old_space_strings_[last++] = o;
  }
  old_space_strings_.resize(last);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

void Heap::ExternalStringTable::TearDown() {
  for (size_t i = 0; i < new_space_strings_.size(); ++i) {
    Object* o = new_space_strings_[i];
    // Don't finalize thin strings.
    if (o->IsThinString()) continue;
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  new_space_strings_.clear();
  for (size_t i = 0; i < old_space_strings_.size(); ++i) {
    Object* o = old_space_strings_[i];
    // Don't finalize thin strings.
    if (o->IsThinString()) continue;
    heap_->FinalizeExternalString(ExternalString::cast(o));
  }
  old_space_strings_.clear();
}


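// Records the (tagged) address of a page that is about to be unmapped in a
// small ring buffer so that recently released pages can be identified in a
// crash dump.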
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    page ^= 0xC1EAD & (Page::kPageSize - 1);  // Cleared.
  } else {
    page ^= 0x1D1ED & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] = page;
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}

void Heap::RegisterStrongRoots(Object** start, Object** end) {
  StrongRootsList* list = new StrongRootsList();
  list->next = strong_roots_list_;
  list->start = start;
  list->end = end;
  strong_roots_list_ = list;
}


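// Unlinks and frees every StrongRootsList entry whose start pointer matches
// the given one.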
void Heap::UnregisterStrongRoots(Object** start) {
  StrongRootsList* prev = nullptr;
  StrongRootsList* list = strong_roots_list_;
  while (list != nullptr) {
    StrongRootsList* next = list->next;
    if (list->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        strong_roots_list_ = next;
      }
      delete list;
    } else {
      prev = list;
    }
    list = next;
  }
}

bool Heap::IsDeserializeLazyHandler(Code* code) {
  return (code == deserialize_lazy_handler() ||
          code == deserialize_lazy_handler_wide() ||
          code == deserialize_lazy_handler_extra_wide());
}

void Heap::SetDeserializeLazyHandler(Code* code) {
  set_deserialize_lazy_handler(code);
}

void Heap::SetDeserializeLazyHandlerWide(Code* code) {
  set_deserialize_lazy_handler_wide(code);
}

void Heap::SetDeserializeLazyHandlerExtraWide(Code* code) {
  set_deserialize_lazy_handler_extra_wide(code);
}

void Heap::SetBuiltinsConstantsTable(FixedArray* cache) {
  set_builtins_constants_table(cache);
}

size_t Heap::NumberOfTrackedHeapObjectTypes() {
  return ObjectStats::OBJECT_STATS_COUNT;
}


size_t Heap::ObjectCountAtLastGC(size_t index) {
  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    return 0;
  return live_object_stats_->object_count_last_gc(index);
}


size_t Heap::ObjectSizeAtLastGC(size_t index) {
  if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
    return 0;
  return live_object_stats_->object_size_last_gc(index);
}


bool Heap::GetObjectTypeName(size_t index, const char** object_type,
                             const char** object_sub_type) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;

  switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
  case name:                          \
    *object_type = #name;             \
    *object_sub_type = "";            \
    return true;
    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME

#define COMPARE_AND_RETURN_NAME(name)                       \
  case ObjectStats::FIRST_VIRTUAL_TYPE + ObjectStats::name: \
    *object_type = #name;                                   \
    *object_sub_type = "";                                  \
    return true;
    VIRTUAL_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return false;
}

size_t Heap::NumberOfNativeContexts() {
  int result = 0;
  Object* context = native_contexts_list();
  while (!context->IsUndefined(isolate())) {
    ++result;
    Context* native_context = Context::cast(context);
    context = native_context->next_context_link();
  }
  return result;
}

size_t Heap::NumberOfDetachedContexts() {
  // The detached_contexts() array has two entries per detached context.
  return detached_contexts()->length() / 2;
}

const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    case NEW_LO_SPACE:
      return "NEW_LO_SPACE";
    case RO_SPACE:
      return "RO_SPACE";
    default:
      UNREACHABLE();
  }
  return nullptr;
}

void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
                                          Object** end) {
  VerifyPointers(host, reinterpret_cast<MaybeObject**>(start),
                 reinterpret_cast<MaybeObject**>(end));
}

void VerifyPointersVisitor::VisitPointers(HeapObject* host, MaybeObject** start,
                                          MaybeObject** end) {
  VerifyPointers(host, start, end);
}

void VerifyPointersVisitor::VisitRootPointers(Root root,
                                              const char* description,
                                              Object** start, Object** end) {
  VerifyPointers(nullptr, reinterpret_cast<MaybeObject**>(start),
                 reinterpret_cast<MaybeObject**>(end));
}

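// Checks that every strong or weak reference in the given range points to a
// heap object that is contained in this heap and has a valid map; anything
// else must be a Smi or a cleared weak reference.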
void VerifyPointersVisitor::VerifyPointers(HeapObject* host,
                                           MaybeObject** start,
                                           MaybeObject** end) {
  for (MaybeObject** current = start; current < end; current++) {
    HeapObject* object;
    if ((*current)->ToStrongOrWeakHeapObject(&object)) {
      CHECK(heap_->Contains(object));
      CHECK(object->map()->IsMap());
    } else {
      CHECK((*current)->IsSmi() || (*current)->IsClearedWeakHeapObject());
    }
  }
}

void VerifySmisVisitor::VisitRootPointers(Root root, const char* description,
                                          Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    CHECK((*current)->IsSmi());
  }
}

bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
  // Object migration is governed by the following rules:
  //
  // 1) Objects in new-space can be migrated to the old space
  //    that matches their target space or they stay in new-space.
  // 2) Objects in old-space stay in the same space when migrating.
  // 3) Fillers (two or more words) can migrate due to left-trimming of
  //    fixed arrays in new-space or old space.
  // 4) Fillers (one word) can never migrate; they are skipped by
  //    incremental marking explicitly to prevent an invalid pattern.
  //
  // Since this function is used for debugging only, we do not place
  // asserts here, but check everything explicitly.
  if (obj->map() == ReadOnlyRoots(this).one_pointer_filler_map()) return false;
  InstanceType type = obj->map()->instance_type();
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  AllocationSpace src = chunk->owner()->identity();
  switch (src) {
    case NEW_SPACE:
      return dst == NEW_SPACE || dst == OLD_SPACE;
    case OLD_SPACE:
      return dst == OLD_SPACE;
    case CODE_SPACE:
      return dst == CODE_SPACE && type == CODE_TYPE;
    case MAP_SPACE:
    case LO_SPACE:
    case NEW_LO_SPACE:
    case RO_SPACE:
      return false;
  }
  UNREACHABLE();
}

void Heap::CreateObjectStats() {
  if (V8_LIKELY(FLAG_gc_stats == 0)) return;
  if (!live_object_stats_) {
    live_object_stats_ = new ObjectStats(this);
  }
  if (!dead_object_stats_) {
    dead_object_stats_ = new ObjectStats(this);
  }
}

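// Notifies the observer about newly allocated bytes. Once the accumulated
// allocation reaches the current step size, Step() is invoked with the number
// of bytes allocated since the last step, and a fresh step size is requested
// via GetNextStepSize().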
void AllocationObserver::AllocationStep(int bytes_allocated,
                                        Address soon_object, size_t size) {
  DCHECK_GE(bytes_allocated, 0);
  bytes_to_next_step_ -= bytes_allocated;
  if (bytes_to_next_step_ <= 0) {
    Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object, size);
    step_size_ = GetNextStepSize();
    bytes_to_next_step_ = step_size_;
  }
  DCHECK_GE(bytes_to_next_step_, 0);
}

namespace {

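// Returns the map of an object even while a GC is in progress: if the object
// has already been evacuated, the map is read via its forwarding address.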
Map* GcSafeMapOfCodeSpaceObject(HeapObject* object) {
  MapWord map_word = object->map_word();
  return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress()->map()
                                        : map_word.ToMap();
}

int GcSafeSizeOfCodeSpaceObject(HeapObject* object) {
  return object->SizeFromMap(GcSafeMapOfCodeSpaceObject(object));
}

Code* GcSafeCastToCode(Heap* heap, HeapObject* object, Address inner_pointer) {
  Code* code = reinterpret_cast<Code*>(object);
  DCHECK_NOT_NULL(code);
  DCHECK(heap->GcSafeCodeContains(code, inner_pointer));
  return code;
}

}  // namespace

bool Heap::GcSafeCodeContains(HeapObject* code, Address addr) {
  Map* map = GcSafeMapOfCodeSpaceObject(code);
  DCHECK(map == ReadOnlyRoots(this).code_map());
  if (InstructionStream::TryLookupCode(isolate(), addr) == code) return true;
  Address start = code->address();
  Address end = code->address() + code->SizeFromMap(map);
  return start <= addr && addr < end;
}

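// Locates the Code object containing the given inner pointer. The lookup
// first consults InstructionStream::TryLookupCode, then the large object
// space, and finally scans the owning code-space page linearly, starting at
// the skip-list entry for the pointer.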
Code* Heap::GcSafeFindCodeForInnerPointer(Address inner_pointer) {
  Code* code = InstructionStream::TryLookupCode(isolate(), inner_pointer);
  if (code != nullptr) return code;

  // Check if the inner pointer points into a large object chunk.
  LargePage* large_page = lo_space()->FindPage(inner_pointer);
  if (large_page != nullptr) {
    return GcSafeCastToCode(this, large_page->GetObject(), inner_pointer);
  }

  DCHECK(code_space()->Contains(inner_pointer));

  // Iterate through the page's objects until we find the object that contains
  // the inner pointer.
  Page* page = Page::FromAddress(inner_pointer);
  DCHECK_EQ(page->owner(), code_space());
  mark_compact_collector()->sweeper()->EnsurePageIsIterable(page);

  Address addr = page->skip_list()->StartFor(inner_pointer);
  Address top = code_space()->top();
  Address limit = code_space()->limit();

  while (true) {
    if (addr == top && addr != limit) {
      addr = limit;
      continue;
    }

    HeapObject* obj = HeapObject::FromAddress(addr);
    int obj_size = GcSafeSizeOfCodeSpaceObject(obj);
    Address next_addr = addr + obj_size;
    if (next_addr > inner_pointer)
      return GcSafeCastToCode(this, obj, inner_pointer);
    addr = next_addr;
  }
}

void Heap::WriteBarrierForCodeSlow(Code* code) {
  for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
       !it.done(); it.next()) {
    GenerationalBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
    MarkingBarrierForCode(code, it.rinfo(), it.rinfo()->target_object());
  }
}

void Heap::GenerationalBarrierSlow(HeapObject* object, Address slot,
                                   HeapObject* value) {
  Heap* heap = Heap::FromWritableHeapObject(object);
  heap->store_buffer()->InsertEntry(slot);
}

void Heap::GenerationalBarrierForElementsSlow(Heap* heap, FixedArray* array,
                                              int offset, int length) {
  for (int i = 0; i < length; i++) {
    if (!InNewSpace(array->get(offset + i))) continue;
    heap->store_buffer()->InsertEntry(
        reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
  }
}

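// Records a typed OLD_TO_NEW slot for a RelocInfo in a Code object. For
// constant pool entries, the slot address and slot type are taken from the
// constant pool instead of the instruction stream.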
void Heap::GenerationalBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
                                          HeapObject* object) {
  DCHECK(InNewSpace(object));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  RelocInfo::Mode rmode = rinfo->rmode();
  Address addr = rinfo->pc();
  SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
  if (rinfo->IsInConstantPool()) {
    addr = rinfo->constant_pool_entry_address();
    if (RelocInfo::IsCodeTargetMode(rmode)) {
      slot_type = CODE_ENTRY_SLOT;
    } else {
      DCHECK(RelocInfo::IsEmbeddedObject(rmode));
      slot_type = OBJECT_SLOT;
    }
  }
  RememberedSet<OLD_TO_NEW>::InsertTyped(
      source_page, reinterpret_cast<Address>(host), slot_type, addr);
}

void Heap::MarkingBarrierSlow(HeapObject* object, Address slot,
                              HeapObject* value) {
  Heap* heap = Heap::FromWritableHeapObject(object);
  heap->incremental_marking()->RecordWriteSlow(
      object, reinterpret_cast<HeapObjectReference**>(slot), value);
}

void Heap::MarkingBarrierForElementsSlow(Heap* heap, HeapObject* object) {
  if (FLAG_concurrent_marking ||
      heap->incremental_marking()->marking_state()->IsBlack(object)) {
    heap->incremental_marking()->RevisitObject(object);
  }
}

void Heap::MarkingBarrierForCodeSlow(Code* host, RelocInfo* rinfo,
                                     HeapObject* object) {
  Heap* heap = Heap::FromWritableHeapObject(host);
  DCHECK(heap->incremental_marking()->IsMarking());
  heap->incremental_marking()->RecordWriteIntoCode(host, rinfo, object);
}

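// Checks that the full MemoryChunk header and the slim
// heap_internals::MemoryChunk view used by the write barrier agree on the
// new-space and incremental-marking state of the object's page.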
bool Heap::PageFlagsAreConsistent(HeapObject* object) {
  Heap* heap = Heap::FromWritableHeapObject(object);
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
  heap_internals::MemoryChunk* slim_chunk =
      heap_internals::MemoryChunk::FromHeapObject(object);

  const bool generation_consistency =
      chunk->owner()->identity() != NEW_SPACE ||
      (chunk->InNewSpace() && slim_chunk->InNewSpace());
  const bool marking_consistency =
      !heap->incremental_marking()->IsMarking() ||
      (chunk->IsFlagSet(MemoryChunk::INCREMENTAL_MARKING) &&
       slim_chunk->IsMarking());

  return generation_consistency && marking_consistency;
}

static_assert(MemoryChunk::Flag::INCREMENTAL_MARKING ==
                  heap_internals::MemoryChunk::kMarkingBit,
              "Incremental marking flag inconsistent");
static_assert(MemoryChunk::Flag::IN_FROM_SPACE ==
                  heap_internals::MemoryChunk::kFromSpaceBit,
              "From space flag inconsistent");
static_assert(MemoryChunk::Flag::IN_TO_SPACE ==
                  heap_internals::MemoryChunk::kToSpaceBit,
              "To space flag inconsistent");
static_assert(MemoryChunk::kFlagsOffset ==
                  heap_internals::MemoryChunk::kFlagsOffset,
              "Flag offset inconsistent");

void Heap::SetEmbedderStackStateForNextFinalizaton(
    EmbedderHeapTracer::EmbedderStackState stack_state) {
  local_embedder_heap_tracer()->SetEmbedderStackStateForNextFinalization(
      stack_state);
}

}  // namespace internal
}  // namespace v8