// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap.h"

#include "src/accessors.h"
#include "src/api.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-stats.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/remembered-set.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/type-feedback-vector.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {


struct Heap::StrongRootsList {
  Object** start;
  Object** end;
  StrongRootsList* next;
};

class IdleScavengeObserver : public AllocationObserver {
 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : AllocationObserver(step_size), heap_(heap) {}

  void Step(int bytes_allocated, Address, size_t) override {
    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
  }

 private:
  Heap& heap_;
};

Heap::Heap()
    : external_memory_(0),
      external_memory_limit_(kExternalAllocationSoftLimit),
      external_memory_at_last_mark_compact_(0),
      isolate_(nullptr),
      code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_ should
      // be a multiple of Page::kPageSize.
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(MB),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
      old_generation_size_configured_(false),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      survived_last_scavenge_(0),
      always_allocate_scope_count_(0),
      memory_pressure_level_(MemoryPressureLevel::kNone),
      contexts_disposed_(0),
      number_of_disposed_maps_(0),
      global_ic_age_(0),
      new_space_(nullptr),
      old_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      allocations_count_(0),
      raw_allocations_hash_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
#ifdef DEBUG
      allocation_timeout_(0),
#endif  // DEBUG
      old_generation_allocation_limit_(initial_old_generation_size_),
      inline_allocation_disabled_(false),
      total_regexp_code_generated_(0),
      tracer_(nullptr),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
      previous_semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      maximum_size_scavenges_(0),
      last_idle_notification_time_(0.0),
      last_gc_time_(0.0),
      scavenge_collector_(nullptr),
      mark_compact_collector_(nullptr),
      memory_allocator_(nullptr),
      store_buffer_(nullptr),
      incremental_marking_(nullptr),
      gc_idle_time_handler_(nullptr),
      memory_reducer_(nullptr),
      live_object_stats_(nullptr),
      dead_object_stats_(nullptr),
      scavenge_job_(nullptr),
      idle_scavenge_observer_(nullptr),
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_at_last_gc_(0),
      old_generation_size_at_last_gc_(0),
      gcs_since_last_deopt_(0),
      global_pretenuring_feedback_(nullptr),
      ring_buffer_full_(false),
      ring_buffer_end_(0),
      promotion_queue_(this),
      configured_(false),
      current_gc_flags_(Heap::kNoGCFlags),
      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
      external_string_table_(this),
      gc_callbacks_depth_(0),
      deserialization_complete_(false),
      strong_roots_list_(NULL),
      heap_iterator_depth_(0),
      embedder_heap_tracer_(nullptr),
      force_oom_(false),
      delay_sweeper_tasks_for_testing_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(NULL);
  set_allocation_sites_list(Smi::kZero);
  set_encountered_weak_collections(Smi::kZero);
  set_encountered_weak_cells(Smi::kZero);
  set_encountered_transition_arrays(Smi::kZero);
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);
}

size_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->Capacity() + OldGenerationCapacity();
}

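// Returns the combined capacity of the old generation spaces; the large
// object space has no fixed capacity, so its current object size is used.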
size_t Heap::OldGenerationCapacity() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->Capacity() + code_space_->Capacity() +
         map_space_->Capacity() + lo_space_->SizeOfObjects();
}

size_t Heap::CommittedOldGenerationMemory() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
         map_space_->CommittedMemory() + lo_space_->Size();
}

size_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->CommittedMemory() + CommittedOldGenerationMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_->CommittedPhysicalMemory() +
         old_space_->CommittedPhysicalMemory() +
         code_space_->CommittedPhysicalMemory() +
         map_space_->CommittedPhysicalMemory() +
         lo_space_->CommittedPhysicalMemory();
}

size_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return static_cast<size_t>(memory_allocator()->SizeExecutable());
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  const size_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}

size_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  size_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Available();
  }
  return total;
}


bool Heap::HasBeenSetUp() {
  return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
         lo_space_ != NULL;
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  if (incremental_marking()->NeedsFinalization() &&
      AllocationLimitOvershotByLargeMargin()) {
    *reason = "Incremental marking needs finalization";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (memory_allocator()->MaxAvailable() <= new_space_->Size()) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return YoungGenerationCollector();
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// --log-gc is set in debug builds. The following logic is used to avoid
// double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_->ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_->CollectStatistics();
    new_space_->ReportStatistics();
    new_space_->ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_, "Memory allocator,   used: %6" PRIuS
                         " KB,"
                         " available: %6" PRIuS " KB\n",
               memory_allocator()->Size() / KB,
               memory_allocator()->Available() / KB);
  PrintIsolate(isolate_, "New space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               new_space_->Size() / KB, new_space_->Available() / KB,
               new_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Old space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space,         used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space,          used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces,         used: %6" PRIuS
                         " KB"
                         ", available: %6" PRIuS
                         " KB"
                         ", committed: %6" PRIuS " KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n",
               external_memory_ / KB);
  PrintIsolate(isolate_, "Total time spent in GC  : %.1f ms\n",
               total_gc_time_ms_);
}

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
// As with the pre-GC report, some care is needed to ensure that NewSpace
// statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_->CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_->ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_->ReportStatistics();
#endif  // DEBUG
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}


void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}

bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); }

void Heap::GarbageCollectionPrologue() {
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
    gc_count_++;

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  if (new_space_->IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
  UpdateNewSpaceAllocationCounter();
  store_buffer()->MoveAllEntriesToRememberedSet();
}

size_t Heap::SizeOfObjects() {
  size_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


const char* Heap::GetSpaceName(int idx) {
  switch (idx) {
    case NEW_SPACE:
      return "new_space";
    case OLD_SPACE:
      return "old_space";
    case MAP_SPACE:
      return "map_space";
    case CODE_SPACE:
      return "code_space";
    case LO_SPACE:
      return "large_object_space";
    default:
      UNREACHABLE();
  }
  return nullptr;
}


void Heap::RepairFreeListsAfterDeserialization() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterDeserialization();
  }
}

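// Merges locally collected pretenuring feedback into the global feedback
// table. Allocation sites may have been moved by the GC, so forwarding
// pointers are followed, and entries whose sites are no longer valid
// allocation sites (or are zombies) are skipped.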
void Heap::MergeAllocationSitePretenuringFeedback(
    const base::HashMap& local_pretenuring_feedback) {
  AllocationSite* site = nullptr;
  for (base::HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
       local_entry != nullptr;
       local_entry = local_pretenuring_feedback.Next(local_entry)) {
    site = reinterpret_cast<AllocationSite*>(local_entry->key);
    MapWord map_word = site->map_word();
    if (map_word.IsForwardingAddress()) {
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }

    // The allocation site has not been validated yet, since the site was not
    // dereferenced while the feedback was collected.
    // This is an inlined check of AllocationMemento::IsValid.
    if (!site->IsAllocationSite() || site->IsZombie()) continue;

    int value =
        static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
    DCHECK_GT(value, 0);

    if (site->IncrementMementoFoundCount(value)) {
      global_pretenuring_feedback_->LookupOrInsert(site,
                                                   ObjectHash(site->address()));
    }
  }
}


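// Installs a fresh global pretenuring feedback table for the duration of a
// garbage collection and tears it down again afterwards.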
class Heap::PretenuringScope {
 public:
  explicit PretenuringScope(Heap* heap) : heap_(heap) {
    heap_->global_pretenuring_feedback_ =
        new base::HashMap(kInitialFeedbackCapacity);
  }

  ~PretenuringScope() {
    delete heap_->global_pretenuring_feedback_;
    heap_->global_pretenuring_feedback_ = nullptr;
  }

 private:
  Heap* heap_;
};


void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite* site = nullptr;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (base::HashMap::Entry* e = global_pretenuring_feedback_->Start();
         e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
      allocation_sites++;
      site = reinterpret_cast<AllocationSite*>(e->key);
      int found_count = site->memento_found_count();
      // An entry in the storage does not imply that the count is > 0 because
      // allocation sites might have been reset due to too many objects dying
      // in old space.
      if (found_count > 0) {
        DCHECK(site->IsAllocationSite());
        active_allocation_sites++;
        allocation_mementos_found += found_count;
        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
          trigger_deoptimization = true;
        }
        if (site->GetPretenureMode() == TENURED) {
          tenure_decisions++;
        } else {
          dont_tenure_decisions++;
        }
      }
    }

    // Step 2: Deopt maybe tenured allocation sites if necessary.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    if (deopt_maybe_tenured) {
      Object* list_element = allocation_sites_list();
      while (list_element->IsAllocationSite()) {
        site = AllocationSite::cast(list_element);
        DCHECK(site->IsAllocationSite());
        allocation_sites++;
        if (site->IsMaybeTenure()) {
          site->set_deopt_dependent_code(true);
          trigger_deoptimization = true;
        }
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintIsolate(isolate(),
                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
                   "active_sites=%d "
                   "mementos=%d tenured=%d not_tenured=%d\n",
                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
                   active_allocation_sites, allocation_mementos_found,
                   tenure_decisions, dont_tenure_decisions);
    }
  }
}


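// Deoptimizes code that depends on allocation sites which were marked as
// requiring deoptimization, e.g. because their tenuring decision changed.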
void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cache data structure in heap instead.
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}


void Heap::GarbageCollectionEpilogue() {
  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
  if (FLAG_check_handle_count) CheckHandleCount();
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
    // the topmost optimized frame can be deoptimized safely, because it
    // might not have a lazy bailout point right after its current PC.
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_old_space()->AddSample(static_cast<int>(
        (old_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->AddSample(
        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available()));          \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory()));    \
  isolate_->counters()->space##_bytes_used()->Set(      \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
  if (space()->CommittedMemory() > 0) {                                \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 -                                         \
                         (space()->SizeOfObjects() * 100.0) /          \
                             space()->CommittedMemory()));             \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space)                         \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG

  // Remember the last top pointer so that we can later find out
  // whether we allocated in new space since the last GC.
  new_space_top_after_last_gc_ = new_space()->top();
  last_gc_time_ = MonotonicallyIncreasingTimeInMs();

  ReduceNewSpaceSize();
}


void Heap::PreprocessStackTraces() {
  WeakFixedArray::Iterator iterator(weak_stack_trace_list());
  FixedArray* elements;
  while ((elements = iterator.Next<FixedArray>())) {
    for (int j = 1; j < elements->length(); j += 4) {
      Object* maybe_code = elements->get(j + 2);
      // If GC happens while adding a stack trace to the weak fixed array,
      // which has been copied into a larger backing store, we may run into
      // a stack trace that has already been preprocessed. Guard against this.
      if (!maybe_code->IsAbstractCode()) break;
      AbstractCode* abstract_code = AbstractCode::cast(maybe_code);
      int offset = Smi::cast(elements->get(j + 3))->value();
      int pos = abstract_code->SourcePosition(offset);
      elements->set(j + 2, Smi::FromInt(pos));
    }
  }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it when the GC was triggered. Instead, we reset the root
  // value.
  set_weak_stack_trace_list(Smi::kZero);
}


class GCCallbacksScope {
 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    heap_->gc_callbacks_depth_++;
  }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }

  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }

 private:
  Heap* heap_;
};


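// Services a pending GC request: reacts to high memory pressure, completes
// incremental marking with a full collection, or finalizes incremental
// marking, depending on what was requested.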
void Heap::HandleGCRequest() {
  if (HighMemoryPressure()) {
    incremental_marking()->reset_request_type();
    CheckMemoryPressure();
  } else if (incremental_marking()->request_type() ==
             IncrementalMarking::COMPLETE_MARKING) {
    incremental_marking()->reset_request_type();
    CollectAllGarbage(current_gc_flags_,
                      GarbageCollectionReason::kFinalizeMarkingViaStackGuard,
                      current_gc_callback_flags_);
  } else if (incremental_marking()->request_type() ==
                 IncrementalMarking::FINALIZATION &&
             incremental_marking()->IsMarking() &&
             !incremental_marking()->finalize_marking_completed()) {
    incremental_marking()->reset_request_type();
    FinalizeIncrementalMarking(
        GarbageCollectionReason::kFinalizeMarkingViaStackGuard);
  }
}


void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}

void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    isolate()->PrintWithTimestamp(
        "[IncrementalMarking] (%s).\n",
        Heap::GarbageCollectionReasonToString(gc_reason));
  }

  HistogramTimerScope incremental_marking_scope(
      isolate()->counters()->gc_incremental_marking_finalize());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize");
  TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
  incremental_marking()->FinalizeIncrementally();
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
}


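// Returns the histogram timer matching the collection that is about to run:
// scavenges, incremental-marking finalization (with or without memory
// reduction), and plain mark-compact GCs are timed separately.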
HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
  if (IsYoungGenerationCollector(collector)) {
    return isolate_->counters()->gc_scavenger();
  } else {
    if (!incremental_marking()->IsStopped()) {
      if (ShouldReduceMemory()) {
        return isolate_->counters()->gc_finalize_reduce_memory();
      } else {
        return isolate_->counters()->gc_finalize();
      }
    } else {
      return isolate_->counters()->gc_compactor();
    }
  }
}

void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  set_current_gc_flags(flags);
  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
  set_current_gc_flags(kNoGCFlags);
}

void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  if (isolate()->concurrent_recompilation_enabled()) {
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
    isolate()->optimizing_compile_dispatcher()->Flush(
        OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
  }
  isolate()->ClearSerializerData();
  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
                        v8::kGCCallbackFlagCollectAllAvailableGarbage) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  set_current_gc_flags(kNoGCFlags);
  new_space_->Shrink();
  UncommitFromSpace();
}

void Heap::ReportExternalMemoryPressure() {
  if (external_memory_ >
      (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) {
    CollectAllGarbage(
        kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask,
        GarbageCollectionReason::kExternalMemoryPressure,
        static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage |
                                     kGCCallbackFlagCollectAllExternalMemory));
    return;
  }
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->CanBeActivated()) {
      StartIncrementalMarking(
          i::Heap::kNoGCFlags, GarbageCollectionReason::kExternalMemoryPressure,
          static_cast<GCCallbackFlags>(
              kGCCallbackFlagSynchronousPhantomCallbackProcessing |
              kGCCallbackFlagCollectAllExternalMemory));
    } else {
      CollectAllGarbage(i::Heap::kNoGCFlags,
                        GarbageCollectionReason::kExternalMemoryPressure,
                        kGCCallbackFlagSynchronousPhantomCallbackProcessing);
    }
  } else {
    // Incremental marking is turned on and has already been started.
    const double pressure =
        static_cast<double>(external_memory_ -
                            external_memory_at_last_mark_compact_ -
                            kExternalAllocationSoftLimit) /
        external_memory_hard_limit();
    DCHECK_GE(1, pressure);
    const double kMaxStepSizeOnExternalLimit = 25;
    const double deadline = MonotonicallyIncreasingTimeInMs() +
                            pressure * kMaxStepSizeOnExternalLimit;
    incremental_marking()->AdvanceIncrementalMarking(
        deadline, IncrementalMarking::GC_VIA_STACK_GUARD,
        IncrementalMarking::FORCE_COMPLETION, StepOrigin::kV8);
  }
}


void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind objects in new space. Upon
  // evacuation of a non-full new space (or if we are on the last page) there
  // may be uninitialized memory behind top. We fill the remainder of the page
  // with a filler.
  Address to_top = new_space_->top();
  Page* page = Page::FromAddress(to_top - kPointerSize);
  if (page->Contains(to_top)) {
    int remaining_in_page = static_cast<int>(page->area_end() - to_top);
    CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
  }
}

bool Heap::CollectGarbage(GarbageCollector collector,
                          GarbageCollectionReason gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  EnsureFillerObjectAtTop();

  if (IsYoungGenerationCollector(collector) &&
      !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR && FLAG_incremental_marking &&
      !ShouldFinalizeIncrementalMarking() && !ShouldAbortIncrementalMarking() &&
      !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() &&
      !incremental_marking()->NeedsFinalization() &&
      !IsCloseToOutOfMemory(new_space_->Capacity())) {
    if (!incremental_marking()->IsComplete() &&
        !mark_compact_collector()->marking_deque()->IsEmpty() &&
        !FLAG_gc_global) {
      if (FLAG_trace_incremental_marking) {
        isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = YoungGenerationCollector();
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;
  size_t committed_memory_before = 0;

  if (collector == MARK_COMPACTOR) {
    committed_memory_before = CommittedOldGenerationMemory();
  }

  {
    tracer()->Start(collector, gc_reason, collector_reason);
    DCHECK(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();

    {
      HistogramTimer* gc_type_timer = GCTypeTimer(collector);
      HistogramTimerScope histogram_timer_scope(gc_type_timer);
      TRACE_EVENT0("v8", gc_type_timer->name());

      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, gc_callback_flags);
    }

    GarbageCollectionEpilogue();
    if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
      isolate()->CheckDetachedContextsAfterGC();
    }

    if (collector == MARK_COMPACTOR) {
      size_t committed_memory_after = CommittedOldGenerationMemory();
      size_t used_memory_after = PromotedSpaceSizeOfObjects();
      MemoryReducer::Event event;
      event.type = MemoryReducer::kMarkCompact;
      event.time_ms = MonotonicallyIncreasingTimeInMs();
      // Trigger one more GC if
      // - this GC decreased committed memory,
      // - there is high fragmentation,
      // - there are live detached contexts.
      event.next_gc_likely_to_collect_more =
          (committed_memory_before > committed_memory_after + MB) ||
          HasHighFragmentation(used_memory_after, committed_memory_after) ||
          (detached_contexts()->length() > 0);
      if (deserialization_complete_) {
        memory_reducer_->NotifyMarkCompact(event);
      }
      memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
    }

    tracer()->Stop(collector);
  }

  if (collector == MARK_COMPACTOR &&
      (gc_callback_flags & (kGCCallbackFlagForced |
                            kGCCallbackFlagCollectAllAvailableGarbage)) != 0) {
    isolate()->CountUsage(v8::Isolate::kForcedGC);
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  // We do this only for scavenger to avoid a loop where mark-compact
  // causes another mark-compact.
  if (IsYoungGenerationCollector(collector) &&
      !ShouldAbortIncrementalMarking()) {
    StartIncrementalMarkingIfAllocationLimitIsReached(kNoGCFlags,
                                                      kNoGCCallbackFlags);
  }

  return next_gc_likely_to_collect_more;
}


int Heap::NotifyContextDisposed(bool dependant_context) {
  if (!dependant_context) {
    tracer()->ResetSurvivalEvents();
    old_generation_size_configured_ = false;
    MemoryReducer::Event event;
    event.type = MemoryReducer::kPossibleGarbage;
    event.time_ms = MonotonicallyIncreasingTimeInMs();
    memory_reducer_->NotifyPossibleGarbage(event);
  }
  if (isolate()->concurrent_recompilation_enabled()) {
    // Flush the queued recompilation tasks.
    isolate()->optimizing_compile_dispatcher()->Flush(
        OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
  }
  AgeInlineCaches();
  number_of_disposed_maps_ = retained_maps()->Length();
  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
  return ++contexts_disposed_;
}

void Heap::StartIncrementalMarking(int gc_flags,
                                   GarbageCollectionReason gc_reason,
                                   GCCallbackFlags gc_callback_flags) {
  DCHECK(incremental_marking()->IsStopped());
  set_current_gc_flags(gc_flags);
  current_gc_callback_flags_ = gc_callback_flags;
  incremental_marking()->Start(gc_reason);
}

void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
    int gc_flags, const GCCallbackFlags gc_callback_flags) {
  if (incremental_marking()->IsStopped()) {
    IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
      StartIncrementalMarking(gc_flags,
                              GarbageCollectionReason::kAllocationLimit,
                              gc_callback_flags);
    }
  }
}

void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
  gc_idle_time_handler_->ResetNoProgressCounter();
  StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
                          kNoGCCallbackFlags);
}


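// Moves |len| elements of |array| from |src_index| to |dst_index| and emits
// the write barrier for the destination range.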
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                        int len) {
  if (len == 0) return;

  DCHECK(array->map() != fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
  FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len);
}


#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*p);
        Isolate* isolate = object->GetIsolate();
        // Check that the string is actually internalized.
        CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) ||
              object->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable(Heap* heap) {
  StringTableVerifier verifier;
  heap->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP

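// Reserves the memory chunks requested by the deserializer, performing
// garbage collections and retrying a bounded number of times if the
// allocations do not succeed at first. Returns false if space could not be
// reserved.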
bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
         space++) {
      Reservation* reservation = &reservations[space];
      DCHECK_LE(1, reservation->length());
      if (reservation->at(0).size == 0) continue;
      bool perform_gc = false;
      if (space == MAP_SPACE) {
        // We allocate each map individually to avoid fragmentation.
        maps->Clear();
        DCHECK_EQ(1, reservation->length());
        int num_maps = reservation->at(0).size / Map::kSize;
        for (int i = 0; i < num_maps; i++) {
          // The deserializer will update the skip list.
          AllocationResult allocation = map_space()->AllocateRawUnaligned(
              Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
          HeapObject* free_space = nullptr;
          if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, Map::kSize,
                                 ClearRecordedSlots::kNo, ClearBlackArea::kNo);
            maps->Add(free_space_address);
          } else {
            perform_gc = true;
            break;
          }
        }
      } else if (space == LO_SPACE) {
        // Just check that we can allocate during deserialization.
        DCHECK_EQ(1, reservation->length());
        perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
      } else {
        for (auto& chunk : *reservation) {
          AllocationResult allocation;
          int size = chunk.size;
          DCHECK_LE(static_cast<size_t>(size),
                    MemoryAllocator::PageAreaSize(
                        static_cast<AllocationSpace>(space)));
          if (space == NEW_SPACE) {
            allocation = new_space()->AllocateRawUnaligned(size);
          } else {
            // The deserializer will update the skip list.
            allocation = paged_space(space)->AllocateRawUnaligned(
                size, PagedSpace::IGNORE_SKIP_LIST);
          }
          HeapObject* free_space = nullptr;
          if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, size,
                                 ClearRecordedSlots::kNo, ClearBlackArea::kNo);
            DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
            chunk.start = free_space_address;
            chunk.end = free_space_address + size;
          } else {
            perform_gc = true;
            break;
          }
        }
      }
      if (perform_gc) {
        if (space == NEW_SPACE) {
          CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer);
        } else {
          if (counter > 1) {
            CollectAllGarbage(
                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
                GarbageCollectionReason::kDeserializer);
          } else {
            CollectAllGarbage(kAbortIncrementalMarkingMask,
                              GarbageCollectionReason::kDeserializer);
          }
        }
        gc_performed = true;
        break;  // Abort for-loop over spaces and retry.
      }
    }
  }

  return !gc_performed;
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_->CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list();
  while (!context->IsUndefined(isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(isolate())) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->next_context_link();
  }
}


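// Recomputes promotion and semi-space copy ratios relative to the new-space
// size at the start of the collection and reports the resulting survival
// rate to the GC tracer.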
UpdateSurvivalStatistics(int start_new_space_size)1260 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1261   if (start_new_space_size == 0) return;
1262 
1263   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1264                       static_cast<double>(start_new_space_size) * 100);
1265 
1266   if (previous_semi_space_copied_object_size_ > 0) {
1267     promotion_rate_ =
1268         (static_cast<double>(promoted_objects_size_) /
1269          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1270   } else {
1271     promotion_rate_ = 0;
1272   }
1273 
1274   semi_space_copied_rate_ =
1275       (static_cast<double>(semi_space_copied_object_size_) /
1276        static_cast<double>(start_new_space_size) * 100);
1277 
1278   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1279   tracer()->AddSurvivalRatio(survival_rate);
1280 }
1281 
PerformGarbageCollection(GarbageCollector collector,const v8::GCCallbackFlags gc_callback_flags)1282 bool Heap::PerformGarbageCollection(
1283     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1284   int freed_global_handles = 0;
1285 
1286   if (!IsYoungGenerationCollector(collector)) {
1287     PROFILE(isolate_, CodeMovingGCEvent());
1288   }
1289 
1290 #ifdef VERIFY_HEAP
1291   if (FLAG_verify_heap) {
1292     VerifyStringTable(this);
1293   }
1294 #endif
1295 
1296   GCType gc_type =
1297       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1298 
1299   {
1300     GCCallbacksScope scope(this);
1301     if (scope.CheckReenter()) {
1302       AllowHeapAllocation allow_allocation;
1303       TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_PROLOGUE);
1304       VMState<EXTERNAL> state(isolate_);
1305       HandleScope handle_scope(isolate_);
1306       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1307     }
1308   }
1309 
1310   EnsureFromSpaceIsCommitted();
1311 
1312   int start_new_space_size = static_cast<int>(Heap::new_space()->Size());
1313 
1314   {
1315     Heap::PretenuringScope pretenuring_scope(this);
1316 
1317     switch (collector) {
1318       case MARK_COMPACTOR:
1319         UpdateOldGenerationAllocationCounter();
1320         // Perform mark-sweep with optional compaction.
1321         MarkCompact();
1322         old_generation_size_configured_ = true;
1323         // This should be updated before PostGarbageCollectionProcessing, which
1324         // can cause another GC. Take into account the objects promoted during
1325         // GC.
1326         old_generation_allocation_counter_at_last_gc_ +=
1327             static_cast<size_t>(promoted_objects_size_);
1328         old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
1329         break;
1330       case MINOR_MARK_COMPACTOR:
1331         MinorMarkCompact();
1332         break;
1333       case SCAVENGER:
1334         Scavenge();
1335         break;
1336     }
1337 
1338     ProcessPretenuringFeedback();
1339   }
1340 
1341   UpdateSurvivalStatistics(start_new_space_size);
1342   ConfigureInitialOldGenerationSize();
1343 
1344   isolate_->counters()->objs_since_last_young()->Set(0);
1345 
1346   gc_post_processing_depth_++;
1347   {
1348     AllowHeapAllocation allow_allocation;
1349     TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_WEAK_GLOBAL_HANDLES);
1350     freed_global_handles =
1351         isolate_->global_handles()->PostGarbageCollectionProcessing(
1352             collector, gc_callback_flags);
1353   }
1354   gc_post_processing_depth_--;
1355 
1356   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1357 
1358   // Update relocatables.
1359   Relocatable::PostGarbageCollectionProcessing(isolate_);
1360 
1361   double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
1362   double mutator_speed =
1363       tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond();
1364   size_t old_gen_size = PromotedSpaceSizeOfObjects();
1365   if (collector == MARK_COMPACTOR) {
1366     // Register the amount of external allocated memory.
1367     external_memory_at_last_mark_compact_ = external_memory_;
1368     external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
1369     SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1370   } else if (HasLowYoungGenerationAllocationRate() &&
1371              old_generation_size_configured_) {
1372     DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
1373   }
1374 
1375   {
1376     GCCallbacksScope scope(this);
1377     if (scope.CheckReenter()) {
1378       AllowHeapAllocation allow_allocation;
1379       TRACE_GC(tracer(), GCTracer::Scope::EXTERNAL_EPILOGUE);
1380       VMState<EXTERNAL> state(isolate_);
1381       HandleScope handle_scope(isolate_);
1382       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1383     }
1384   }
1385 
1386 #ifdef VERIFY_HEAP
1387   if (FLAG_verify_heap) {
1388     VerifyStringTable(this);
1389   }
1390 #endif
1391 
1392   return freed_global_handles > 0;
1393 }
1394 
1395 
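// Dispatches the registered GC prologue callbacks that match |gc_type|.
// Callbacks registered through the isolate-aware API receive the isolate as
// their first argument; legacy callbacks (pass_isolate == false) are invoked
// with (gc_type, flags) only. Illustrative embedder-side registration sketch
// (the embedder callback name is hypothetical):
//
//   void OnFullGCPrologue(v8::Isolate* isolate, v8::GCType type,
//                         v8::GCCallbackFlags flags) { /* ... */ }
//   isolate->AddGCPrologueCallback(OnFullGCPrologue,
//                                  v8::kGCTypeMarkSweepCompact);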
1396 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1397   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1398     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1399       if (!gc_prologue_callbacks_[i].pass_isolate) {
1400         v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
1401             gc_prologue_callbacks_[i].callback);
1402         callback(gc_type, flags);
1403       } else {
1404         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1405         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1406       }
1407     }
1408   }
1409   if (FLAG_trace_object_groups && (gc_type == kGCTypeIncrementalMarking ||
1410                                    gc_type == kGCTypeMarkSweepCompact)) {
1411     isolate_->global_handles()->PrintObjectGroups();
1412   }
1413 }
1414 
1415 
1416 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1417                                    GCCallbackFlags gc_callback_flags) {
1418   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1419     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1420       if (!gc_epilogue_callbacks_[i].pass_isolate) {
1421         v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
1422             gc_epilogue_callbacks_[i].callback);
1423         callback(gc_type, gc_callback_flags);
1424       } else {
1425         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1426         gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1427       }
1428     }
1429   }
1430 }
1431 
1432 
1433 void Heap::MarkCompact() {
1434   PauseAllocationObserversScope pause_observers(this);
1435 
1436   gc_state_ = MARK_COMPACT;
1437   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1438 
1439   uint64_t size_of_objects_before_gc = SizeOfObjects();
1440 
1441   mark_compact_collector()->Prepare();
1442 
1443   ms_count_++;
1444 
1445   MarkCompactPrologue();
1446 
1447   mark_compact_collector()->CollectGarbage();
1448 
1449   LOG(isolate_, ResourceEvent("markcompact", "end"));
1450 
1451   MarkCompactEpilogue();
1452 
1453   if (FLAG_allocation_site_pretenuring) {
1454     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1455   }
1456 }
1457 
1458 void Heap::MinorMarkCompact() { UNREACHABLE(); }
1459 
1460 void Heap::MarkCompactEpilogue() {
1461   TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
1462   gc_state_ = NOT_IN_GC;
1463 
1464   isolate_->counters()->objs_since_last_full()->Set(0);
1465 
1466   incremental_marking()->Epilogue();
1467 
1468   PreprocessStackTraces();
1469   DCHECK(incremental_marking()->IsStopped());
1470 
1471   mark_compact_collector()->marking_deque()->StopUsing();
1472 }
1473 
1474 
1475 void Heap::MarkCompactPrologue() {
1476   TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE);
1477   isolate_->context_slot_cache()->Clear();
1478   isolate_->descriptor_lookup_cache()->Clear();
1479   RegExpResultsCache::Clear(string_split_cache());
1480   RegExpResultsCache::Clear(regexp_multiple_cache());
1481 
1482   isolate_->compilation_cache()->MarkCompactPrologue();
1483 
1484   CompletelyClearInstanceofCache();
1485 
1486   FlushNumberStringCache();
1487   ClearNormalizedMapCaches();
1488 }
1489 
1490 
1491 void Heap::CheckNewSpaceExpansionCriteria() {
1492   if (FLAG_experimental_new_space_growth_heuristic) {
1493     if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1494         survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) {
1495       // Grow the size of new space if there is room to grow, and at least 10%
1496       // of its total capacity survived the last scavenge.
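      // For example (illustrative numbers): with a 2 MB total capacity and
      // 256 KB surviving the last scavenge, 256 KB * 100 / 2 MB == 12 >= 10,
      // so the new space is grown.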
1497       new_space_->Grow();
1498       survived_since_last_expansion_ = 0;
1499     }
1500   } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() &&
1501              survived_since_last_expansion_ > new_space_->TotalCapacity()) {
1502     // Grow the size of new space if there is room to grow, and enough data
1503     // has survived scavenge since the last expansion.
1504     new_space_->Grow();
1505     survived_since_last_expansion_ = 0;
1506   }
1507 }
1508 
1509 
1510 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1511   return heap->InNewSpace(*p) &&
1512          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1513 }
1514 
1515 
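// An object is treated as unmodified if it was constructed from an API
// function and still uses that constructor's initial map, i.e. its map has not
// transitioned since construction (typically because no properties were added).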
1516 static bool IsUnmodifiedHeapObject(Object** p) {
1517   Object* object = *p;
1518   if (object->IsSmi()) return false;
1519   HeapObject* heap_object = HeapObject::cast(object);
1520   if (!object->IsJSObject()) return false;
1521   JSObject* js_object = JSObject::cast(object);
1522   if (!js_object->WasConstructedFromApiFunction()) return false;
1523   JSFunction* constructor =
1524       JSFunction::cast(js_object->map()->GetConstructor());
1525 
1526   return constructor->initial_map() == heap_object->map();
1527 }
1528 
1529 
1530 void PromotionQueue::Initialize() {
1531   // The last to-space page may be used for the promotion queue. On promotion
1532   // conflict, we use the emergency stack.
1533   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1534          0);
1535   front_ = rear_ =
1536       reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
1537   limit_ = reinterpret_cast<struct Entry*>(
1538       Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
1539           ->area_start());
1540   emergency_stack_ = NULL;
1541 }
1542 
1543 void PromotionQueue::Destroy() {
1544   DCHECK(is_empty());
1545   delete emergency_stack_;
1546   emergency_stack_ = NULL;
1547 }
1548 
1549 void PromotionQueue::RelocateQueueHead() {
1550   DCHECK(emergency_stack_ == NULL);
1551 
1552   Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
1553   struct Entry* head_start = rear_;
1554   struct Entry* head_end =
1555       Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
1556 
1557   int entries_count =
1558       static_cast<int>(head_end - head_start) / sizeof(struct Entry);
1559 
1560   emergency_stack_ = new List<Entry>(2 * entries_count);
1561 
1562   while (head_start != head_end) {
1563     struct Entry* entry = head_start++;
1564     // New space allocation in SemiSpaceCopyObject marked the region
1565     // overlapping with promotion queue as uninitialized.
1566     MSAN_MEMORY_IS_INITIALIZED(entry, sizeof(struct Entry));
1567     emergency_stack_->Add(*entry);
1568   }
1569   rear_ = head_end;
1570 }
1571 
1572 
1573 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1574  public:
1575   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1576 
1577   virtual Object* RetainAs(Object* object) {
1578     if (!heap_->InFromSpace(object)) {
1579       return object;
1580     }
1581 
1582     MapWord map_word = HeapObject::cast(object)->map_word();
1583     if (map_word.IsForwardingAddress()) {
1584       return map_word.ToForwardingAddress();
1585     }
1586     return NULL;
1587   }
1588 
1589  private:
1590   Heap* heap_;
1591 };
1592 
1593 
1594 void Heap::Scavenge() {
1595   TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
1596   RelocationLock relocation_lock(this);
1597   // There are soft limits in the allocation code, designed to trigger a
1598   // mark-sweep collection by failing allocations. There is no sense in trying
1599   // to trigger one during scavenge: scavenge allocations should always succeed.
1600   AlwaysAllocateScope scope(isolate());
1601 
1602   // Bump-pointer allocations done during scavenge are not real allocations.
1603   // Pause the inline allocation steps.
1604   PauseAllocationObserversScope pause_observers(this);
1605 
1606   mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
1607 
1608   gc_state_ = SCAVENGE;
1609 
1610   // Implements Cheney's copying algorithm
1611   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1612 
1613   // Used for updating survived_since_last_expansion_ at function end.
1614   size_t survived_watermark = PromotedSpaceSizeOfObjects();
1615 
1616   scavenge_collector_->SelectScavengingVisitorsTable();
1617 
1618   if (UsingEmbedderHeapTracer()) {
1619     // Register found wrappers with the embedder so it can add them to its
1620     // marking deque and correctly handle the case where the V8 scavenger
1621     // collects the wrappers: either keep the wrappables alive or clean the deque.
1622     RegisterWrappersWithEmbedderHeapTracer();
1623   }
1624 
1625   // Flip the semispaces.  After flipping, to space is empty, from space has
1626   // live objects.
1627   new_space_->Flip();
1628   new_space_->ResetAllocationInfo();
1629 
1630   // We need to sweep newly copied objects which can be either in the
1631   // to space or promoted to the old generation.  For to-space
1632   // objects, we treat the bottom of the to space as a queue.  Newly
1633   // copied and unswept objects lie between a 'front' mark and the
1634   // allocation pointer.
1635   //
1636   // Promoted objects can go into various old-generation spaces, and
1637   // can be allocated internally in the spaces (from the free list).
1638   // We treat the top of the to space as a queue of addresses of
1639   // promoted objects.  The addresses of newly promoted and unswept
1640   // objects lie between a 'front' mark and a 'rear' mark that is
1641   // updated as a side effect of promoting an object.
1642   //
1643   // There is guaranteed to be enough room at the top of the to space
1644   // for the addresses of promoted objects: every object promoted
1645   // frees up its size in bytes from the top of the new space, and
1646   // objects are at least one pointer in size.
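  // Schematically, while scavenging the to-space looks like this (addresses
  // grow to the right):
  //
  //   | processed copies | unswept copies | free space | promoted addresses |
  //   ^ToSpaceStart      ^new_space_front ^allocation top       ToSpaceEnd ^
  //
  // The promoted-address queue grows downwards from ToSpaceEnd as objects are
  // promoted (see PromotionQueue).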
1647   Address new_space_front = new_space_->ToSpaceStart();
1648   promotion_queue_.Initialize();
1649 
1650   ScavengeVisitor scavenge_visitor(this);
1651 
1652   isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
1653       &IsUnmodifiedHeapObject);
1654 
1655   {
1656     // Copy roots.
1657     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
1658     IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1659   }
1660 
1661   {
1662     // Copy objects reachable from the old generation.
1663     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
1664     RememberedSet<OLD_TO_NEW>::Iterate(this, [this](Address addr) {
1665       return Scavenger::CheckAndScavengeObject(this, addr);
1666     });
1667 
1668     RememberedSet<OLD_TO_NEW>::IterateTyped(
1669         this, [this](SlotType type, Address host_addr, Address addr) {
1670           return UpdateTypedSlotHelper::UpdateTypedSlot(
1671               isolate(), type, addr, [this](Object** addr) {
1672                 // We expect that objects referenced by code are long-lived.
1673                 // If we do not force promotion, then we need to clear
1674                 // old_to_new slots in dead code objects after mark-compact.
1675                 return Scavenger::CheckAndScavengeObject(
1676                     this, reinterpret_cast<Address>(addr));
1677               });
1678         });
1679   }
1680 
1681   {
1682     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
1683     // Copy objects reachable from the encountered weak collections list.
1684     scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1685   }
1686 
1687   {
1688     // Copy objects reachable from the code flushing candidates list.
1689     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
1690     MarkCompactCollector* collector = mark_compact_collector();
1691     if (collector->is_code_flushing_enabled()) {
1692       collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1693     }
1694   }
1695 
1696   {
1697     TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
1698     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1699   }
1700 
1701   isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
1702       &IsUnscavengedHeapObject);
1703 
1704   isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
1705       &scavenge_visitor);
1706   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1707 
1708   UpdateNewSpaceReferencesInExternalStringTable(
1709       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1710 
1711   promotion_queue_.Destroy();
1712 
1713   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1714 
1715   ScavengeWeakObjectRetainer weak_object_retainer(this);
1716   ProcessYoungWeakReferences(&weak_object_retainer);
1717 
1718   DCHECK(new_space_front == new_space_->top());
1719 
1720   // Set age mark.
1721   new_space_->set_age_mark(new_space_->top());
1722 
1723   ArrayBufferTracker::FreeDeadInNewSpace(this);
1724 
1725   // Update how much has survived scavenge.
1726   DCHECK_GE(PromotedSpaceSizeOfObjects(), survived_watermark);
1727   IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
1728                                  new_space_->Size() - survived_watermark);
1729 
1730   LOG(isolate_, ResourceEvent("scavenge", "end"));
1731 
1732   gc_state_ = NOT_IN_GC;
1733 }
1734 
1735 
1736 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1737                                                                 Object** p) {
1738   MapWord first_word = HeapObject::cast(*p)->map_word();
1739 
1740   if (!first_word.IsForwardingAddress()) {
1741     // An unreachable external string can be finalized.
1742     heap->FinalizeExternalString(String::cast(*p));
1743     return NULL;
1744   }
1745 
1746   // String is still reachable.
1747   return String::cast(first_word.ToForwardingAddress());
1748 }
1749 
1750 
1751 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1752     ExternalStringTableUpdaterCallback updater_func) {
1753   if (external_string_table_.new_space_strings_.is_empty()) return;
1754 
1755   Object** start = &external_string_table_.new_space_strings_[0];
1756   Object** end = start + external_string_table_.new_space_strings_.length();
1757   Object** last = start;
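  // |last| is the write cursor used to compact the list in place: entries that
  // are still in new space are kept (copied down to |last|), promoted strings
  // are moved to the old-space list, and the new-space list is shrunk to the
  // surviving prefix afterwards.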
1758 
1759   for (Object** p = start; p < end; ++p) {
1760     String* target = updater_func(this, p);
1761 
1762     if (target == NULL) continue;
1763 
1764     DCHECK(target->IsExternalString());
1765 
1766     if (InNewSpace(target)) {
1767       // String is still in new space.  Update the table entry.
1768       *last = target;
1769       ++last;
1770     } else {
1771       // String got promoted.  Move it to the old string list.
1772       external_string_table_.AddOldString(target);
1773     }
1774   }
1775 
1776   DCHECK(last <= end);
1777   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1778 }
1779 
1780 
1781 void Heap::UpdateReferencesInExternalStringTable(
1782     ExternalStringTableUpdaterCallback updater_func) {
1783   // Update old space string references.
1784   if (external_string_table_.old_space_strings_.length() > 0) {
1785     Object** start = &external_string_table_.old_space_strings_[0];
1786     Object** end = start + external_string_table_.old_space_strings_.length();
1787     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1788   }
1789 
1790   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1791 }
1792 
1793 
1794 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
1795   ProcessNativeContexts(retainer);
1796   ProcessAllocationSites(retainer);
1797 }
1798 
1799 
1800 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1801   ProcessNativeContexts(retainer);
1802 }
1803 
1804 
1805 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1806   Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1807   // Update the head of the list of contexts.
1808   set_native_contexts_list(head);
1809 }
1810 
1811 
1812 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1813   Object* allocation_site_obj =
1814       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1815   set_allocation_sites_list(allocation_site_obj);
1816 }
1817 
1818 void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) {
1819   set_native_contexts_list(retainer->RetainAs(native_contexts_list()));
1820   set_allocation_sites_list(retainer->RetainAs(allocation_sites_list()));
1821 }
1822 
1823 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1824   DisallowHeapAllocation no_allocation_scope;
1825   Object* cur = allocation_sites_list();
1826   bool marked = false;
1827   while (cur->IsAllocationSite()) {
1828     AllocationSite* casted = AllocationSite::cast(cur);
1829     if (casted->GetPretenureMode() == flag) {
1830       casted->ResetPretenureDecision();
1831       casted->set_deopt_dependent_code(true);
1832       marked = true;
1833       RemoveAllocationSitePretenuringFeedback(casted);
1834     }
1835     cur = casted->weak_next();
1836   }
1837   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1838 }
1839 
1840 
1841 void Heap::EvaluateOldSpaceLocalPretenuring(
1842     uint64_t size_of_objects_before_gc) {
1843   uint64_t size_of_objects_after_gc = SizeOfObjects();
1844   double old_generation_survival_rate =
1845       (static_cast<double>(size_of_objects_after_gc) * 100) /
1846       static_cast<double>(size_of_objects_before_gc);
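  // Illustrative numbers: a SizeOfObjects() of 100 MB before the GC and 20 MB
  // afterwards gives an old generation survival rate of 20.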
1847 
1848   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1849     // Too many objects died in the old generation; pretenuring of the wrong
1850     // allocation sites may be the cause. We have to deopt all dependent code
1851     // registered in the allocation sites so that we re-evaluate our
1852     // pretenuring decisions.
1853     ResetAllAllocationSitesDependentCode(TENURED);
1854     if (FLAG_trace_pretenuring) {
1855       PrintF(
1856           "Deopt all allocation sites dependent code due to low survival "
1857           "rate in the old generation %f\n",
1858           old_generation_survival_rate);
1859     }
1860   }
1861 }
1862 
1863 
1864 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1865   DisallowHeapAllocation no_allocation;
1866   // All external strings are listed in the external string table.
1867 
1868   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1869    public:
1870     explicit ExternalStringTableVisitorAdapter(
1871         v8::ExternalResourceVisitor* visitor)
1872         : visitor_(visitor) {}
1873     virtual void VisitPointers(Object** start, Object** end) {
1874       for (Object** p = start; p < end; p++) {
1875         DCHECK((*p)->IsExternalString());
1876         visitor_->VisitExternalString(
1877             Utils::ToLocal(Handle<String>(String::cast(*p))));
1878       }
1879     }
1880 
1881    private:
1882     v8::ExternalResourceVisitor* visitor_;
1883   } external_string_table_visitor(visitor);
1884 
1885   external_string_table_.Iterate(&external_string_table_visitor);
1886 }
1887 
1888 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1889                          Address new_space_front) {
1890   do {
1891     SemiSpace::AssertValidRange(new_space_front, new_space_->top());
1892     // The addresses new_space_front and new_space_->top() define a
1893     // queue of unprocessed copied objects.  Process them until the
1894     // queue is empty.
1895     while (new_space_front != new_space_->top()) {
1896       if (!Page::IsAlignedToPageSize(new_space_front)) {
1897         HeapObject* object = HeapObject::FromAddress(new_space_front);
1898         new_space_front +=
1899             StaticScavengeVisitor::IterateBody(object->map(), object);
1900       } else {
1901         new_space_front = Page::FromAllocationAreaAddress(new_space_front)
1902                               ->next_page()
1903                               ->area_start();
1904       }
1905     }
1906 
1907     // Promote and process all the to-be-promoted objects.
1908     {
1909       while (!promotion_queue()->is_empty()) {
1910         HeapObject* target;
1911         int32_t size;
1912         bool was_marked_black;
1913         promotion_queue()->remove(&target, &size, &was_marked_black);
1914 
1915         // A promoted object might already have been partially visited
1916         // during old-space pointer iteration. Thus we search specifically
1917         // for pointers into the from-semispace instead of looking for
1918         // pointers into new space.
1919         DCHECK(!target->IsMap());
1920 
1921         IterateAndScavengePromotedObject(target, static_cast<int>(size),
1922                                          was_marked_black);
1923       }
1924     }
1925 
1926     // Take another spin if there are now unswept objects in new space
1927     // (there are currently no more unswept promoted objects).
1928   } while (new_space_front != new_space_->top());
1929 
1930   return new_space_front;
1931 }
1932 
1933 
1934 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1935               0);  // NOLINT
1936 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
1937               0);  // NOLINT
1938 #ifdef V8_HOST_ARCH_32_BIT
1939 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
1940               0);  // NOLINT
1941 #endif
1942 
1943 
1944 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
1945   switch (alignment) {
1946     case kWordAligned:
1947       return 0;
1948     case kDoubleAligned:
1949     case kDoubleUnaligned:
1950       return kDoubleSize - kPointerSize;
1951     case kSimd128Unaligned:
1952       return kSimd128Size - kPointerSize;
1953     default:
1954       UNREACHABLE();
1955   }
1956   return 0;
1957 }
1958 
1959 
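// Returns the number of filler bytes needed at |address| so that an object
// allocated right after the filler satisfies |alignment|. Worked example for
// the double cases, assuming a 32-bit build (kPointerSize == 4,
// kDoubleSize == 8): for kDoubleAligned at an address ending in ...4 the fill
// is 4 bytes, so the object starts 8-byte aligned; for kDoubleUnaligned at an
// address ending in ...0 the fill is also 4 bytes, so the value field of a
// HeapNumber (which is 4 mod 8, see the STATIC_ASSERT above) ends up 8-byte
// aligned. The worst case matches GetMaximumFillToAlign().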
1960 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
1961   intptr_t offset = OffsetFrom(address);
1962   if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
1963     return kPointerSize;
1964   if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
1965     return kDoubleSize - kPointerSize;  // No fill if double is always aligned.
1966   if (alignment == kSimd128Unaligned) {
1967     return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
1968            kSimd128AlignmentMask;
1969   }
1970   return 0;
1971 }
1972 
1973 
1974 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
1975   CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo);
1976   return HeapObject::FromAddress(object->address() + filler_size);
1977 }
1978 
1979 
1980 HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
1981                                   int allocation_size,
1982                                   AllocationAlignment alignment) {
1983   int filler_size = allocation_size - object_size;
1984   DCHECK(filler_size > 0);
1985   int pre_filler = GetFillToAlign(object->address(), alignment);
1986   if (pre_filler) {
1987     object = PrecedeWithFiller(object, pre_filler);
1988     filler_size -= pre_filler;
1989   }
1990   if (filler_size)
1991     CreateFillerObjectAt(object->address() + object_size, filler_size,
1992                          ClearRecordedSlots::kNo);
1993   return object;
1994 }
1995 
1996 
1997 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
1998   return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
1999 }
2000 
2001 
2002 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
2003   ArrayBufferTracker::RegisterNew(this, buffer);
2004 }
2005 
2006 
2007 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2008   ArrayBufferTracker::Unregister(this, buffer);
2009 }
2010 
2011 
2012 void Heap::ConfigureInitialOldGenerationSize() {
2013   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
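    // Scale the initial limit by the average survival ratio observed so far.
    // Illustrative numbers: with a 256 MB initial limit and a 25% average
    // survival ratio, the limit becomes
    // Max(MinimumAllocationLimitGrowingStep(), 64 MB).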
2014     old_generation_allocation_limit_ =
2015         Max(MinimumAllocationLimitGrowingStep(),
2016             static_cast<size_t>(
2017                 static_cast<double>(old_generation_allocation_limit_) *
2018                 (tracer()->AverageSurvivalRatio() / 100)));
2019   }
2020 }
2021 
2022 
2023 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2024                                           int instance_size) {
2025   Object* result = nullptr;
2026   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2027   if (!allocation.To(&result)) return allocation;
2028 
2029   // Map::cast cannot be used due to uninitialized map field.
2030   reinterpret_cast<Map*>(result)->set_map(
2031       reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
2032   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2033   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2034   // Initialize to only containing tagged fields.
2035   reinterpret_cast<Map*>(result)->set_visitor_id(
2036       StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2037   if (FLAG_unbox_double_fields) {
2038     reinterpret_cast<Map*>(result)
2039         ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2040   }
2041   reinterpret_cast<Map*>(result)->clear_unused();
2042   reinterpret_cast<Map*>(result)
2043       ->set_inobject_properties_or_constructor_function_index(0);
2044   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2045   reinterpret_cast<Map*>(result)->set_bit_field(0);
2046   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2047   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2048                    Map::OwnsDescriptors::encode(true) |
2049                    Map::ConstructionCounter::encode(Map::kNoSlackTracking);
2050   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2051   reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::kZero);
2052   return result;
2053 }
2054 
2055 
2056 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2057                                    int instance_size,
2058                                    ElementsKind elements_kind) {
2059   HeapObject* result = nullptr;
2060   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2061   if (!allocation.To(&result)) return allocation;
2062 
2063   isolate()->counters()->maps_created()->Increment();
2064   result->set_map_no_write_barrier(meta_map());
2065   Map* map = Map::cast(result);
2066   map->set_instance_type(instance_type);
2067   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2068   map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
2069   map->set_instance_size(instance_size);
2070   map->clear_unused();
2071   map->set_inobject_properties_or_constructor_function_index(0);
2072   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2073   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2074                           SKIP_WRITE_BARRIER);
2075   map->set_weak_cell_cache(Smi::kZero);
2076   map->set_raw_transitions(Smi::kZero);
2077   map->set_unused_property_fields(0);
2078   map->set_instance_descriptors(empty_descriptor_array());
2079   if (FLAG_unbox_double_fields) {
2080     map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2081   }
2082   // Must be called only after |instance_type|, |instance_size| and
2083   // |layout_descriptor| are set.
2084   map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
2085   map->set_bit_field(0);
2086   map->set_bit_field2(1 << Map::kIsExtensible);
2087   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2088                    Map::OwnsDescriptors::encode(true) |
2089                    Map::ConstructionCounter::encode(Map::kNoSlackTracking);
2090   map->set_bit_field3(bit_field3);
2091   map->set_elements_kind(elements_kind);
2092   map->set_new_target_is_base(true);
2093 
2094   return map;
2095 }
2096 
2097 
2098 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2099                                             AllocationSpace space) {
2100   HeapObject* obj = nullptr;
2101   {
2102     AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
2103     AllocationResult allocation = AllocateRaw(size, space, align);
2104     if (!allocation.To(&obj)) return allocation;
2105   }
2106 #ifdef DEBUG
2107   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2108   DCHECK(chunk->owner()->identity() == space);
2109 #endif
2110   CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo,
2111                        ClearBlackArea::kNo);
2112   return obj;
2113 }
2114 
2115 
2116 const Heap::StringTypeTable Heap::string_type_table[] = {
2117 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2118   { type, size, k##camel_name##MapRootIndex }             \
2119   ,
2120     STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2121 #undef STRING_TYPE_ELEMENT
2122 };
2123 
2124 
2125 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2126     {"", kempty_stringRootIndex},
2127 #define CONSTANT_STRING_ELEMENT(name, contents) \
2128   { contents, k##name##RootIndex }              \
2129   ,
2130     INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2131 #undef CONSTANT_STRING_ELEMENT
2132 };
2133 
2134 
2135 const Heap::StructTable Heap::struct_table[] = {
2136 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
2137   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2138   ,
2139     STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2140 #undef STRUCT_TABLE_ELEMENT
2141 };
2142 
2143 namespace {
2144 
2145 void FinalizePartialMap(Heap* heap, Map* map) {
2146   map->set_code_cache(heap->empty_fixed_array());
2147   map->set_dependent_code(DependentCode::cast(heap->empty_fixed_array()));
2148   map->set_raw_transitions(Smi::kZero);
2149   map->set_instance_descriptors(heap->empty_descriptor_array());
2150   if (FLAG_unbox_double_fields) {
2151     map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2152   }
2153   map->set_prototype(heap->null_value());
2154   map->set_constructor_or_backpointer(heap->null_value());
2155 }
2156 
2157 }  // namespace
2158 
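// Bootstrapping note: maps are themselves described by maps, so the meta map
// is allocated first with only a partial set of fields and with its map
// pointer set to itself. The empty fixed array, the basic oddballs and the
// empty descriptor array are created next, and FinalizePartialMap() then
// patches the partial maps to reference them.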
2159 bool Heap::CreateInitialMaps() {
2160   HeapObject* obj = nullptr;
2161   {
2162     AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2163     if (!allocation.To(&obj)) return false;
2164   }
2165   // Map::cast cannot be used due to uninitialized map field.
2166   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2167   set_meta_map(new_meta_map);
2168   new_meta_map->set_map(new_meta_map);
2169 
2170   {  // Partial map allocation
2171 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
2172   {                                                                          \
2173     Map* map;                                                                \
2174     if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2175     set_##field_name##_map(map);                                             \
2176   }
2177 
2178     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2179     fixed_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
2180     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2181     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2182     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2183 
2184 #undef ALLOCATE_PARTIAL_MAP
2185   }
2186 
2187   // Allocate the empty array.
2188   {
2189     AllocationResult allocation = AllocateEmptyFixedArray();
2190     if (!allocation.To(&obj)) return false;
2191   }
2192   set_empty_fixed_array(FixedArray::cast(obj));
2193 
2194   {
2195     AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
2196     if (!allocation.To(&obj)) return false;
2197   }
2198   set_null_value(Oddball::cast(obj));
2199   Oddball::cast(obj)->set_kind(Oddball::kNull);
2200 
2201   {
2202     AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
2203     if (!allocation.To(&obj)) return false;
2204   }
2205   set_undefined_value(Oddball::cast(obj));
2206   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2207   DCHECK(!InNewSpace(undefined_value()));
2208   {
2209     AllocationResult allocation = Allocate(the_hole_map(), OLD_SPACE);
2210     if (!allocation.To(&obj)) return false;
2211   }
2212   set_the_hole_value(Oddball::cast(obj));
2213   Oddball::cast(obj)->set_kind(Oddball::kTheHole);
2214 
2215   // Set preliminary exception sentinel value before actually initializing it.
2216   set_exception(null_value());
2217 
2218   // Allocate the empty descriptor array.
2219   {
2220     AllocationResult allocation = AllocateEmptyFixedArray();
2221     if (!allocation.To(&obj)) return false;
2222   }
2223   set_empty_descriptor_array(DescriptorArray::cast(obj));
2224 
2225   // Fix the instance_descriptors for the existing maps.
2226   FinalizePartialMap(this, meta_map());
2227   FinalizePartialMap(this, fixed_array_map());
2228   FinalizePartialMap(this, undefined_map());
2229   undefined_map()->set_is_undetectable();
2230   FinalizePartialMap(this, null_map());
2231   null_map()->set_is_undetectable();
2232   FinalizePartialMap(this, the_hole_map());
2233 
2234   {  // Map allocation
2235 #define ALLOCATE_MAP(instance_type, size, field_name)               \
2236   {                                                                 \
2237     Map* map;                                                       \
2238     if (!AllocateMap((instance_type), size).To(&map)) return false; \
2239     set_##field_name##_map(map);                                    \
2240   }
2241 
2242 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2243   ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2244 
2245 #define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
2246                                constructor_function_index)      \
2247   {                                                             \
2248     ALLOCATE_MAP((instance_type), (size), field_name);          \
2249     field_name##_map()->SetConstructorFunctionIndex(            \
2250         (constructor_function_index));                          \
2251   }
2252 
2253     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2254     fixed_cow_array_map()->set_elements_kind(FAST_HOLEY_ELEMENTS);
2255     DCHECK_NE(fixed_array_map(), fixed_cow_array_map());
2256 
2257     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2258     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
2259     ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
2260                            Context::NUMBER_FUNCTION_INDEX)
2261     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2262                  mutable_heap_number)
2263     ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
2264                            Context::SYMBOL_FUNCTION_INDEX)
2265 #define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
2266   ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type,       \
2267                          Context::TYPE##_FUNCTION_INDEX)
2268     SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
2269 #undef ALLOCATE_SIMD128_MAP
2270     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2271 
2272     ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
2273                            Context::BOOLEAN_FUNCTION_INDEX);
2274     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2275     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2276     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2277     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2278     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2279     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
2280     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
2281 
2282     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2283       const StringTypeTable& entry = string_type_table[i];
2284       {
2285         AllocationResult allocation = AllocateMap(entry.type, entry.size);
2286         if (!allocation.To(&obj)) return false;
2287       }
2288       Map* map = Map::cast(obj);
2289       map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2290       // Mark cons string maps as unstable, because their objects can change
2291       // maps during GC.
2292       if (StringShape(entry.type).IsCons()) map->mark_unstable();
2293       roots_[entry.index] = map;
2294     }
2295 
2296     {  // Create a separate external one byte string map for native sources.
2297       AllocationResult allocation =
2298           AllocateMap(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE,
2299                       ExternalOneByteString::kShortSize);
2300       if (!allocation.To(&obj)) return false;
2301       Map* map = Map::cast(obj);
2302       map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2303       set_native_source_string_map(map);
2304     }
2305 
2306     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2307     fixed_double_array_map()->set_elements_kind(FAST_HOLEY_DOUBLE_ELEMENTS);
2308     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2309     ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
2310     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2311 
2312 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2313   ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2314 
2315     TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2316 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2317 
2318     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2319 
2320     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2321 
2322     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2323     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2324     ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
2325     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2326     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2327 
2328     ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
2329 
2330     for (unsigned i = 0; i < arraysize(struct_table); i++) {
2331       const StructTable& entry = struct_table[i];
2332       Map* map;
2333       if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2334       roots_[entry.index] = map;
2335     }
2336 
2337     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2338     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2339     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, unseeded_number_dictionary)
2340 
2341     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2342     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2343     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2344     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
2345     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2346     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2347     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2348     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
2349 
2350     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2351     native_context_map()->set_dictionary_map(true);
2352     native_context_map()->set_visitor_id(
2353         StaticVisitorBase::kVisitNativeContext);
2354 
2355     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2356                  shared_function_info)
2357 
2358     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2359     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2360     external_map()->set_is_extensible(false);
2361 #undef ALLOCATE_PRIMITIVE_MAP
2362 #undef ALLOCATE_VARSIZE_MAP
2363 #undef ALLOCATE_MAP
2364   }
2365 
2366   {
2367     AllocationResult allocation = AllocateEmptyScopeInfo();
2368     if (!allocation.To(&obj)) return false;
2369   }
2370 
2371   set_empty_scope_info(ScopeInfo::cast(obj));
2372   {
2373     AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
2374     if (!allocation.To(&obj)) return false;
2375   }
2376   set_true_value(Oddball::cast(obj));
2377   Oddball::cast(obj)->set_kind(Oddball::kTrue);
2378 
2379   {
2380     AllocationResult allocation = Allocate(boolean_map(), OLD_SPACE);
2381     if (!allocation.To(&obj)) return false;
2382   }
2383   set_false_value(Oddball::cast(obj));
2384   Oddball::cast(obj)->set_kind(Oddball::kFalse);
2385 
2386   {  // Empty arrays
2387     {
2388       ByteArray* byte_array;
2389       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2390       set_empty_byte_array(byte_array);
2391     }
2392 
2393 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2394   {                                                                     \
2395     FixedTypedArrayBase* obj;                                           \
2396     if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2397       return false;                                                     \
2398     set_empty_fixed_##type##_array(obj);                                \
2399   }
2400 
2401     TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2402 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2403   }
2404   DCHECK(!InNewSpace(empty_fixed_array()));
2405   return true;
2406 }
2407 
2408 
2409 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2410                                           PretenureFlag pretenure) {
2411   // Statically ensure that it is safe to allocate heap numbers in paged
2412   // spaces.
2413   int size = HeapNumber::kSize;
2414   STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize);
2415 
2416   AllocationSpace space = SelectSpace(pretenure);
2417 
2418   HeapObject* result = nullptr;
2419   {
2420     AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
2421     if (!allocation.To(&result)) return allocation;
2422   }
2423 
2424   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2425   HeapObject::cast(result)->set_map_no_write_barrier(map);
2426   HeapNumber::cast(result)->set_value(value);
2427   return result;
2428 }
2429 
2430 #define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
2431   AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count],      \
2432                                         PretenureFlag pretenure) {        \
2433     int size = Type::kSize;                                               \
2434     STATIC_ASSERT(Type::kSize <= kMaxRegularHeapObjectSize);              \
2435                                                                           \
2436     AllocationSpace space = SelectSpace(pretenure);                       \
2437                                                                           \
2438     HeapObject* result = nullptr;                                         \
2439     {                                                                     \
2440       AllocationResult allocation =                                       \
2441           AllocateRaw(size, space, kSimd128Unaligned);                    \
2442       if (!allocation.To(&result)) return allocation;                     \
2443     }                                                                     \
2444                                                                           \
2445     result->set_map_no_write_barrier(type##_map());                       \
2446     Type* instance = Type::cast(result);                                  \
2447     for (int i = 0; i < lane_count; i++) {                                \
2448       instance->set_lane(i, lanes[i]);                                    \
2449     }                                                                     \
2450     return result;                                                        \
2451   }
2452 SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
2453 #undef SIMD_ALLOCATE_DEFINITION
2454 
2455 
2456 AllocationResult Heap::AllocateCell(Object* value) {
2457   int size = Cell::kSize;
2458   STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize);
2459 
2460   HeapObject* result = nullptr;
2461   {
2462     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2463     if (!allocation.To(&result)) return allocation;
2464   }
2465   result->set_map_no_write_barrier(cell_map());
2466   Cell::cast(result)->set_value(value);
2467   return result;
2468 }
2469 
2470 AllocationResult Heap::AllocatePropertyCell() {
2471   int size = PropertyCell::kSize;
2472   STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize);
2473 
2474   HeapObject* result = nullptr;
2475   AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2476   if (!allocation.To(&result)) return allocation;
2477 
2478   result->set_map_no_write_barrier(global_property_cell_map());
2479   PropertyCell* cell = PropertyCell::cast(result);
2480   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2481                            SKIP_WRITE_BARRIER);
2482   cell->set_property_details(PropertyDetails(Smi::kZero));
2483   cell->set_value(the_hole_value());
2484   return result;
2485 }
2486 
2487 
2488 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2489   int size = WeakCell::kSize;
2490   STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize);
2491   HeapObject* result = nullptr;
2492   {
2493     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2494     if (!allocation.To(&result)) return allocation;
2495   }
2496   result->set_map_no_write_barrier(weak_cell_map());
2497   WeakCell::cast(result)->initialize(value);
2498   WeakCell::cast(result)->clear_next(the_hole_value());
2499   return result;
2500 }
2501 
2502 
2503 AllocationResult Heap::AllocateTransitionArray(int capacity) {
2504   DCHECK(capacity > 0);
2505   HeapObject* raw_array = nullptr;
2506   {
2507     AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
2508     if (!allocation.To(&raw_array)) return allocation;
2509   }
2510   raw_array->set_map_no_write_barrier(transition_array_map());
2511   TransitionArray* array = TransitionArray::cast(raw_array);
2512   array->set_length(capacity);
2513   MemsetPointer(array->data_start(), undefined_value(), capacity);
2514   // Transition arrays are tenured. When black allocation is on we have to
2515   // add the transition array to the list of encountered_transition_arrays.
2516   if (incremental_marking()->black_allocation()) {
2517     array->set_next_link(encountered_transition_arrays(),
2518                          UPDATE_WEAK_WRITE_BARRIER);
2519     set_encountered_transition_arrays(array);
2520   } else {
2521     array->set_next_link(undefined_value(), SKIP_WRITE_BARRIER);
2522   }
2523   return array;
2524 }
2525 
2526 
2527 void Heap::CreateApiObjects() {
2528   HandleScope scope(isolate());
2529   set_message_listeners(*TemplateList::New(isolate(), 2));
2530 }
2531 
2532 
2533 void Heap::CreateJSEntryStub() {
2534   JSEntryStub stub(isolate(), StackFrame::ENTRY);
2535   set_js_entry_code(*stub.GetCode());
2536 }
2537 
2538 
2539 void Heap::CreateJSConstructEntryStub() {
2540   JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2541   set_js_construct_entry_code(*stub.GetCode());
2542 }
2543 
2544 
2545 void Heap::CreateFixedStubs() {
2546   // Here we create roots for fixed stubs. They are needed at GC
2547   // for cooking and uncooking (check out frames.cc).
2548   // This eliminates the need for doing a dictionary lookup in the
2549   // stub cache for these stubs.
2550   HandleScope scope(isolate());
2551 
2552   // Create stubs that should be there, so we don't unexpectedly have to
2553   // create them if we need them during the creation of another stub.
2554   // Stub creation mixes raw pointers and handles in an unsafe manner so
2555   // we cannot create stubs while we are creating stubs.
2556   CodeStub::GenerateStubsAheadOfTime(isolate());
2557 
2558   // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2559   // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2560   // is created.
2561 
2562   // gcc-4.4 has a problem generating correct code for the following snippet:
2563   // {  JSEntryStub stub;
2564   //    js_entry_code_ = *stub.GetCode();
2565   // }
2566   // {  JSConstructEntryStub stub;
2567   //    js_construct_entry_code_ = *stub.GetCode();
2568   // }
2569   // To work around the problem, make separate functions without inlining.
2570   Heap::CreateJSEntryStub();
2571   Heap::CreateJSConstructEntryStub();
2572 }
2573 
2574 
2575 void Heap::CreateInitialObjects() {
2576   HandleScope scope(isolate());
2577   Factory* factory = isolate()->factory();
2578 
2579   // The -0 value must be set before NewNumber works.
2580   set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2581   DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2582 
2583   set_nan_value(*factory->NewHeapNumber(
2584       std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
2585   set_hole_nan_value(*factory->NewHeapNumber(bit_cast<double>(kHoleNanInt64),
2586                                              IMMUTABLE, TENURED));
2587   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2588   set_minus_infinity_value(
2589       *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
2590 
2591   // Allocate initial string table.
2592   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2593 
2594   // Allocate
2595 
2596   // Finish initializing oddballs after creating the string table.
2597   Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2598                       factory->nan_value(), "undefined", Oddball::kUndefined);
2599 
2600   // Initialize the null_value.
2601   Oddball::Initialize(isolate(), factory->null_value(), "null",
2602                       handle(Smi::kZero, isolate()), "object", Oddball::kNull);
2603 
2604   // Initialize the_hole_value.
2605   Oddball::Initialize(isolate(), factory->the_hole_value(), "hole",
2606                       factory->hole_nan_value(), "undefined",
2607                       Oddball::kTheHole);
2608 
2609   // Initialize the true_value.
2610   Oddball::Initialize(isolate(), factory->true_value(), "true",
2611                       handle(Smi::FromInt(1), isolate()), "boolean",
2612                       Oddball::kTrue);
2613 
2614   // Initialize the false_value.
2615   Oddball::Initialize(isolate(), factory->false_value(), "false",
2616                       handle(Smi::kZero, isolate()), "boolean",
2617                       Oddball::kFalse);
2618 
2619   set_uninitialized_value(
2620       *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
2621                            handle(Smi::FromInt(-1), isolate()), "undefined",
2622                            Oddball::kUninitialized));
2623 
2624   set_arguments_marker(
2625       *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
2626                            handle(Smi::FromInt(-4), isolate()), "undefined",
2627                            Oddball::kArgumentsMarker));
2628 
2629   set_no_interceptor_result_sentinel(*factory->NewOddball(
2630       factory->no_interceptor_result_sentinel_map(),
2631       "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2632       "undefined", Oddball::kOther));
2633 
2634   set_termination_exception(*factory->NewOddball(
2635       factory->termination_exception_map(), "termination_exception",
2636       handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
2637 
2638   set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2639                                      handle(Smi::FromInt(-5), isolate()),
2640                                      "undefined", Oddball::kException));
2641 
2642   set_optimized_out(*factory->NewOddball(factory->optimized_out_map(),
2643                                          "optimized_out",
2644                                          handle(Smi::FromInt(-6), isolate()),
2645                                          "undefined", Oddball::kOptimizedOut));
2646 
2647   set_stale_register(
2648       *factory->NewOddball(factory->stale_register_map(), "stale_register",
2649                            handle(Smi::FromInt(-7), isolate()), "undefined",
2650                            Oddball::kStaleRegister));
2651 
2652   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2653     Handle<String> str =
2654         factory->InternalizeUtf8String(constant_string_table[i].contents);
2655     roots_[constant_string_table[i].index] = *str;
2656   }
2657 
2658   // Create the code_stubs dictionary. The initial size is set to avoid
2659   // expanding the dictionary during bootstrapping.
2660   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2661 
2662   set_instanceof_cache_function(Smi::kZero);
2663   set_instanceof_cache_map(Smi::kZero);
2664   set_instanceof_cache_answer(Smi::kZero);
2665 
2666   {
2667     HandleScope scope(isolate());
2668 #define SYMBOL_INIT(name)                                              \
2669   {                                                                    \
2670     Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
2671     Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol());   \
2672     symbol->set_name(*name##d);                                        \
2673     roots_[k##name##RootIndex] = *symbol;                              \
2674   }
2675     PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
2676 #undef SYMBOL_INIT
2677   }
2678 
2679   {
2680     HandleScope scope(isolate());
2681 #define SYMBOL_INIT(name, description)                                      \
2682   Handle<Symbol> name = factory->NewSymbol();                               \
2683   Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2684   name->set_name(*name##d);                                                 \
2685   roots_[k##name##RootIndex] = *name;
2686     PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
2687 #undef SYMBOL_INIT
2688 
2689 #define SYMBOL_INIT(name, description)                                      \
2690   Handle<Symbol> name = factory->NewSymbol();                               \
2691   Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2692   name->set_is_well_known_symbol(true);                                     \
2693   name->set_name(*name##d);                                                 \
2694   roots_[k##name##RootIndex] = *name;
2695     WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
2696 #undef SYMBOL_INIT
2697   }
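  // Illustrative expansion of the SYMBOL_INIT X-macro above. The entry
  // (example_symbol, Example.symbol) is hypothetical and not taken from the
  // real symbol lists; for such an entry the public-symbol variant expands
  // roughly to:
  //
  //   Handle<Symbol> example_symbol = factory->NewSymbol();
  //   Handle<String> example_symbold =
  //       factory->NewStringFromStaticChars("Example.symbol");
  //   example_symbol->set_name(*example_symbold);
  //   roots_[kexample_symbolRootIndex] = *example_symbol;
  //
  // i.e. each list entry allocates one Symbol, names it, and anchors it in
  // the root list (the well-known variant additionally calls
  // set_is_well_known_symbol(true)).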
2698 
2699   Handle<NameDictionary> empty_properties_dictionary =
2700       NameDictionary::New(isolate(), 0, TENURED);
2701   empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
2702   set_empty_properties_dictionary(*empty_properties_dictionary);
2703 
2704   set_number_string_cache(
2705       *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2706 
2707   // Allocate the cache for single-character one-byte strings.
2708   set_single_character_string_cache(
2709       *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2710 
2711   // Allocate cache for string split and regexp-multiple.
2712   set_string_split_cache(*factory->NewFixedArray(
2713       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2714   set_regexp_multiple_cache(*factory->NewFixedArray(
2715       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2716 
2717   // Allocate cache for external strings pointing to native source code.
2718   set_natives_source_cache(
2719       *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2720 
2721   set_experimental_natives_source_cache(
2722       *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));
2723 
2724   set_extra_natives_source_cache(
2725       *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
2726 
2727   set_experimental_extra_natives_source_cache(
2728       *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
2729 
2730   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2731 
2732   // The symbol registry is initialized lazily.
2733   set_symbol_registry(Smi::kZero);
2734 
2735   // The microtask queue uses the empty fixed array as a sentinel for "empty".
2736   // The number of queued microtasks is stored in Isolate::pending_microtask_count().
2737   set_microtask_queue(empty_fixed_array());
2738 
2739   {
2740     StaticFeedbackVectorSpec spec;
2741     FeedbackVectorSlot slot = spec.AddLoadICSlot();
2742     DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
2743 
2744     slot = spec.AddKeyedLoadICSlot();
2745     DCHECK_EQ(slot,
2746               FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
2747 
2748     slot = spec.AddStoreICSlot();
2749     DCHECK_EQ(slot, FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
2750 
2751     slot = spec.AddKeyedStoreICSlot();
2752     DCHECK_EQ(slot,
2753               FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
2754 
2755     Handle<TypeFeedbackMetadata> dummy_metadata =
2756         TypeFeedbackMetadata::New(isolate(), &spec);
2757     Handle<TypeFeedbackVector> dummy_vector =
2758         TypeFeedbackVector::New(isolate(), dummy_metadata);
2759 
2760     set_dummy_vector(*dummy_vector);
2761 
2762     // Now initialize dummy vector's entries.
2763     LoadICNexus(isolate()).ConfigureMegamorphic();
2764     StoreICNexus(isolate()).ConfigureMegamorphic();
2765     KeyedLoadICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
2766     KeyedStoreICNexus(isolate()).ConfigureMegamorphicKeyed(PROPERTY);
2767   }
2768 
2769   {
2770     // Create a canonical empty TypeFeedbackVector, which is shared by all
2771     // functions that don't need actual type feedback slots. Note, however,
2772     // that all these functions will share the same invocation count. That
2773     // shouldn't matter, since the invocation count is only used to
2774     // relativize absolute call counts, and call counts only exist for
2775     // functions that have actual feedback slots.
2776     Handle<FixedArray> empty_type_feedback_vector = factory->NewFixedArray(
2777         TypeFeedbackVector::kReservedIndexCount, TENURED);
2778     empty_type_feedback_vector->set(TypeFeedbackVector::kMetadataIndex,
2779                                     empty_fixed_array());
2780     empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
2781                                     Smi::kZero);
2782     set_empty_type_feedback_vector(*empty_type_feedback_vector);
2783 
2784     // We use a canonical empty LiteralsArray for all functions that neither
2785     // have literals nor need a TypeFeedbackVector (besides the invocation
2786     // count special slot).
2787     Handle<FixedArray> empty_literals_array =
2788         factory->NewFixedArray(1, TENURED);
2789     empty_literals_array->set(0, *empty_type_feedback_vector);
2790     set_empty_literals_array(*empty_literals_array);
2791   }
2792 
2793   {
2794     Handle<FixedArray> empty_sloppy_arguments_elements =
2795         factory->NewFixedArray(2, TENURED);
2796     empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
2797     set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
2798   }
2799 
2800   {
2801     Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
2802     set_empty_weak_cell(*cell);
2803     cell->clear();
2804   }
2805 
2806   set_detached_contexts(empty_fixed_array());
2807   set_retained_maps(ArrayList::cast(empty_fixed_array()));
2808 
2809   set_weak_object_to_code_table(
2810       *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
2811                           TENURED));
2812 
2813   set_weak_new_space_object_to_code_list(
2814       ArrayList::cast(*(factory->NewFixedArray(16, TENURED))));
2815   weak_new_space_object_to_code_list()->SetLength(0);
2816 
2817   set_script_list(Smi::kZero);
2818 
2819   Handle<SeededNumberDictionary> slow_element_dictionary =
2820       SeededNumberDictionary::New(isolate(), 0, TENURED);
2821   slow_element_dictionary->set_requires_slow_elements();
2822   set_empty_slow_element_dictionary(*slow_element_dictionary);
2823 
2824   set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2825 
2826   // Handling of script id generation is in Heap::NextScriptId().
2827   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2828   set_next_template_serial_number(Smi::kZero);
2829 
2830   // Allocate the empty script.
2831   Handle<Script> script = factory->NewScript(factory->empty_string());
2832   script->set_type(Script::TYPE_NATIVE);
2833   set_empty_script(*script);
2834 
2835   Handle<PropertyCell> cell = factory->NewPropertyCell();
2836   cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
2837   set_array_protector(*cell);
2838 
2839   cell = factory->NewPropertyCell();
2840   cell->set_value(the_hole_value());
2841   set_empty_property_cell(*cell);
2842 
2843   cell = factory->NewPropertyCell();
2844   cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
2845   set_has_instance_protector(*cell);
2846 
2847   Handle<Cell> is_concat_spreadable_cell = factory->NewCell(
2848       handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
2849   set_is_concat_spreadable_protector(*is_concat_spreadable_cell);
2850 
2851   Handle<Cell> species_cell = factory->NewCell(
2852       handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
2853   set_species_protector(*species_cell);
2854 
2855   cell = factory->NewPropertyCell();
2856   cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
2857   set_string_length_protector(*cell);
2858 
2859   Handle<Cell> fast_array_iteration_cell = factory->NewCell(
2860       handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
2861   set_fast_array_iteration_protector(*fast_array_iteration_cell);
2862 
2863   Handle<Cell> array_iterator_cell = factory->NewCell(
2864       handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
2865   set_array_iterator_protector(*array_iterator_cell);
2866 
2867   set_serialized_templates(empty_fixed_array());
2868 
2869   set_weak_stack_trace_list(Smi::kZero);
2870 
2871   set_noscript_shared_function_infos(Smi::kZero);
2872 
2873   // Initialize context slot cache.
2874   isolate_->context_slot_cache()->Clear();
2875 
2876   // Initialize descriptor cache.
2877   isolate_->descriptor_lookup_cache()->Clear();
2878 
2879   // Initialize compilation cache.
2880   isolate_->compilation_cache()->Clear();
2881 }
2882 
2883 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2884   switch (root_index) {
2885     case kNumberStringCacheRootIndex:
2886     case kInstanceofCacheFunctionRootIndex:
2887     case kInstanceofCacheMapRootIndex:
2888     case kInstanceofCacheAnswerRootIndex:
2889     case kCodeStubsRootIndex:
2890     case kEmptyScriptRootIndex:
2891     case kSymbolRegistryRootIndex:
2892     case kScriptListRootIndex:
2893     case kMaterializedObjectsRootIndex:
2894     case kMicrotaskQueueRootIndex:
2895     case kDetachedContextsRootIndex:
2896     case kWeakObjectToCodeTableRootIndex:
2897     case kWeakNewSpaceObjectToCodeListRootIndex:
2898     case kRetainedMapsRootIndex:
2899     case kNoScriptSharedFunctionInfosRootIndex:
2900     case kWeakStackTraceListRootIndex:
2901     case kSerializedTemplatesRootIndex:
2902 // Smi values
2903 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2904       SMI_ROOT_LIST(SMI_ENTRY)
2905 #undef SMI_ENTRY
2906     // String table
2907     case kStringTableRootIndex:
2908       return true;
2909 
2910     default:
2911       return false;
2912   }
2913 }
2914 
2915 
2916 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2917   return !RootCanBeWrittenAfterInitialization(root_index) &&
2918          !InNewSpace(root(root_index));
2919 }
2920 
2921 
2922 int Heap::FullSizeNumberStringCacheLength() {
2923   // Compute the size of the number string cache based on the max new-space
2924   // size. The cache has a minimum size of twice the initial cache size to
2925   // ensure that it is bigger after being made 'full size'.
2926   size_t number_string_cache_size = max_semi_space_size_ / 512;
2927   number_string_cache_size =
2928       Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2),
2929           Min<size_t>(0x4000u, number_string_cache_size));
2930   // There is a string and a number per entry so the length is twice the number
2931   // of entries.
2932   return static_cast<int>(number_string_cache_size * 2);
2933 }
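// Worked example: assuming a 16 MB max semispace, the computation above
// yields 16 MB / 512 = 32768 candidate entries, which Min() clamps to
// 0x4000 (16384); Max() only raises this if kInitialNumberStringCacheSize * 2
// is larger. The returned length is then 2 * 16384 = 32768 slots, one string
// plus one number per entry.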
2934 
2935 
2936 void Heap::FlushNumberStringCache() {
2937   // Flush the number to string cache.
2938   int len = number_string_cache()->length();
2939   for (int i = 0; i < len; i++) {
2940     number_string_cache()->set_undefined(i);
2941   }
2942 }
2943 
2944 
2945 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2946   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2947 }
2948 
2949 
2950 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
2951     ExternalArrayType array_type) {
2952   switch (array_type) {
2953 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2954   case kExternal##Type##Array:                                  \
2955     return kFixed##Type##ArrayMapRootIndex;
2956 
2957     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
2958 #undef ARRAY_TYPE_TO_ROOT_INDEX
2959 
2960     default:
2961       UNREACHABLE();
2962       return kUndefinedValueRootIndex;
2963   }
2964 }
2965 
2966 
2967 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
2968     ElementsKind elementsKind) {
2969   switch (elementsKind) {
2970 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2971   case TYPE##_ELEMENTS:                                           \
2972     return kEmptyFixed##Type##ArrayRootIndex;
2973 
2974     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
2975 #undef ELEMENT_KIND_TO_ROOT_INDEX
2976     default:
2977       UNREACHABLE();
2978       return kUndefinedValueRootIndex;
2979   }
2980 }
2981 
2982 
2983 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
2984   return FixedTypedArrayBase::cast(
2985       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
2986 }
2987 
2988 
2989 AllocationResult Heap::AllocateForeign(Address address,
2990                                        PretenureFlag pretenure) {
2991   // Statically ensure that it is safe to allocate foreigns in paged spaces.
2992   STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize);
2993   AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
2994   Foreign* result = nullptr;
2995   AllocationResult allocation = Allocate(foreign_map(), space);
2996   if (!allocation.To(&result)) return allocation;
2997   result->set_foreign_address(address);
2998   return result;
2999 }
3000 
3001 
3002 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3003   if (length < 0 || length > ByteArray::kMaxLength) {
3004     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3005   }
3006   int size = ByteArray::SizeFor(length);
3007   AllocationSpace space = SelectSpace(pretenure);
3008   HeapObject* result = nullptr;
3009   {
3010     AllocationResult allocation = AllocateRaw(size, space);
3011     if (!allocation.To(&result)) return allocation;
3012   }
3013 
3014   result->set_map_no_write_barrier(byte_array_map());
3015   ByteArray::cast(result)->set_length(length);
3016   return result;
3017 }
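// Caller-side sketch (illustrative only; the retry machinery is assumed and
// not shown here): an AllocationResult either yields the object via To() or
// signals a retry, e.g.
//
//   HeapObject* obj = nullptr;
//   AllocationResult res = heap->AllocateByteArray(length, TENURED);
//   if (!res.To(&obj)) {
//     // Failed; the caller is expected to collect garbage and retry.
//   }
//   ByteArray* bytes = ByteArray::cast(obj);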
3018 
3019 
3020 AllocationResult Heap::AllocateBytecodeArray(int length,
3021                                              const byte* const raw_bytecodes,
3022                                              int frame_size,
3023                                              int parameter_count,
3024                                              FixedArray* constant_pool) {
3025   if (length < 0 || length > BytecodeArray::kMaxLength) {
3026     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3027   }
3028   // The bytecode array is pretenured, so the constant pool should be too.
3029   DCHECK(!InNewSpace(constant_pool));
3030 
3031   int size = BytecodeArray::SizeFor(length);
3032   HeapObject* result = nullptr;
3033   {
3034     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3035     if (!allocation.To(&result)) return allocation;
3036   }
3037 
3038   result->set_map_no_write_barrier(bytecode_array_map());
3039   BytecodeArray* instance = BytecodeArray::cast(result);
3040   instance->set_length(length);
3041   instance->set_frame_size(frame_size);
3042   instance->set_parameter_count(parameter_count);
3043   instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
3044   instance->set_osr_loop_nesting_level(0);
3045   instance->set_constant_pool(constant_pool);
3046   instance->set_handler_table(empty_fixed_array());
3047   instance->set_source_position_table(empty_byte_array());
3048   CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
3049 
3050   return result;
3051 }
3052 
3053 void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
3054                                 ClearBlackArea black_area_mode) {
3055   if (size == 0) return;
3056   HeapObject* filler = HeapObject::FromAddress(addr);
3057   if (size == kPointerSize) {
3058     filler->set_map_no_write_barrier(
3059         reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
3060   } else if (size == 2 * kPointerSize) {
3061     filler->set_map_no_write_barrier(
3062         reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
3063   } else {
3064     DCHECK_GT(size, 2 * kPointerSize);
3065     filler->set_map_no_write_barrier(
3066         reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
3067     FreeSpace::cast(filler)->nobarrier_set_size(size);
3068   }
3069   if (mode == ClearRecordedSlots::kYes) {
3070     ClearRecordedSlotRange(addr, addr + size);
3071   }
3072 
3073   // If the location where the filler is created is within a black area we have
3074   // to clear the mark bits of the filler space.
3075   if (black_area_mode == ClearBlackArea::kYes &&
3076       incremental_marking()->black_allocation() &&
3077       Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
3078     Page* page = Page::FromAddress(addr);
3079     page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
3080                                  page->AddressToMarkbitIndex(addr + size));
3081   }
3082 
3083   // At this point, we may be deserializing the heap from a snapshot, in
3084   // which case none of the maps have been created yet and are still NULL.
3085   DCHECK((filler->map() == NULL && !deserialization_complete_) ||
3086          filler->map()->IsMap());
3087 }
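// Example use within this file: the trimming helpers below shrink live
// arrays by carving a filler out of the freed range, e.g.
//
//   CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
//
// so that heap iterators still see a contiguous sequence of valid objects.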
3088 
3089 
3090 bool Heap::CanMoveObjectStart(HeapObject* object) {
3091   if (!FLAG_move_object_start) return false;
3092 
3093   // Sampling heap profiler may have a reference to the object.
3094   if (isolate()->heap_profiler()->is_sampling_allocations()) return false;
3095 
3096   Address address = object->address();
3097 
3098   if (lo_space()->Contains(object)) return false;
3099 
3100   // We can move the object start if the page was already swept.
3101   return Page::FromAddress(address)->SweepingDone();
3102 }
3103 
3104 
3105 void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
3106   // As long as the inspected object is black and we are currently not iterating
3107   // the heap using HeapIterator, we can update the live byte count. We cannot
3108   // update while using HeapIterator because the iterator is temporarily
3109   // marking the whole object graph, without updating live bytes.
3110   if (lo_space()->Contains(object)) {
3111     lo_space()->AdjustLiveBytes(by);
3112   } else if (!in_heap_iterator() &&
3113              !mark_compact_collector()->sweeping_in_progress() &&
3114              Marking::IsBlack(ObjectMarking::MarkBitFrom(object->address()))) {
3115     if (mode == SEQUENTIAL_TO_SWEEPER) {
3116       MemoryChunk::IncrementLiveBytesFromGC(object, by);
3117     } else {
3118       MemoryChunk::IncrementLiveBytesFromMutator(object, by);
3119     }
3120   }
3121 }
3122 
3123 
3124 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3125                                          int elements_to_trim) {
3126   CHECK_NOT_NULL(object);
3127   DCHECK(!object->IsFixedTypedArrayBase());
3128   DCHECK(!object->IsByteArray());
3129   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3130   const int bytes_to_trim = elements_to_trim * element_size;
3131   Map* map = object->map();
3132 
3133   // For now this trick is only applied to objects in new and paged space.
3134   // In large object space the object's start must coincide with the chunk
3135   // start, so the trick is not applicable there.
3136   DCHECK(!lo_space()->Contains(object));
3137   DCHECK(object->map() != fixed_cow_array_map());
3138 
3139   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3140   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3141   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3142 
3143   const int len = object->length();
3144   DCHECK(elements_to_trim <= len);
3145 
3146   // Calculate location of new array start.
3147   Address old_start = object->address();
3148   Address new_start = old_start + bytes_to_trim;
3149 
3150   // Transfer the mark bits to their new location if the object is not within
3151   // a black area.
3152   if (!incremental_marking()->black_allocation() ||
3153       !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
3154     IncrementalMarking::TransferMark(this, old_start, new_start);
3155   }
3156 
3157   // Technically in new space this write might be omitted (except for
3158   // debug mode, which iterates through the heap), but to be safe
3159   // we still do it.
3160   CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
3161   // Initialize the header of the trimmed array. Since left trimming is only
3162   // performed on pages which are not concurrently swept, creating a filler
3163   // object does not require synchronization.
3164   DCHECK(CanMoveObjectStart(object));
3165   Object** former_start = HeapObject::RawField(object, 0);
3166   int new_start_index = elements_to_trim * (element_size / kPointerSize);
3167   former_start[new_start_index] = map;
3168   former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3169 
3170   FixedArrayBase* new_object =
3171       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3172 
3173   // Maintain consistency of live bytes during incremental marking
3174   AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
3175 
3176   // Remove recorded slots for the new map and length offset.
3177   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
3178   ClearRecordedSlot(new_object, HeapObject::RawField(
3179                                     new_object, FixedArrayBase::kLengthOffset));
3180 
3181   // Notify the heap profiler of change in object layout.
3182   OnMoveEvent(new_object, object, new_object->Size());
3183   return new_object;
3184 }
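// Worked example (pointer-sized elements): left-trimming 2 elements from a
// FixedArray of length 10 gives bytes_to_trim = 2 * kPointerSize. A
// two-pointer filler is written over the old header, and the map plus
// Smi::FromInt(8) are stored at former_start[2] and former_start[3], so the
// array now starts two slots later with length 8.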
3185 
3186 
3187 // Force instantiation of templatized method.
3188 template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
3189     FixedArrayBase*, int);
3190 template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
3191     FixedArrayBase*, int);
3192 
3193 
3194 template<Heap::InvocationMode mode>
3195 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3196   const int len = object->length();
3197   DCHECK_LE(elements_to_trim, len);
3198   DCHECK_GE(elements_to_trim, 0);
3199 
3200   int bytes_to_trim;
3201   if (object->IsFixedTypedArrayBase()) {
3202     InstanceType type = object->map()->instance_type();
3203     bytes_to_trim =
3204         FixedTypedArrayBase::TypedArraySize(type, len) -
3205         FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
3206   } else if (object->IsByteArray()) {
3207     int new_size = ByteArray::SizeFor(len - elements_to_trim);
3208     bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3209     DCHECK_GE(bytes_to_trim, 0);
3210   } else {
3211     const int element_size =
3212         object->IsFixedArray() ? kPointerSize : kDoubleSize;
3213     bytes_to_trim = elements_to_trim * element_size;
3214   }
3215 
3216 
3217   // For now this trick is only applied to objects in new and paged space.
3218   DCHECK(object->map() != fixed_cow_array_map());
3219 
3220   if (bytes_to_trim == 0) {
3221     // No need to create filler and update live bytes counters, just initialize
3222     // header of the trimmed array.
3223     object->synchronized_set_length(len - elements_to_trim);
3224     return;
3225   }
3226 
3227   // Calculate location of new array end.
3228   Address old_end = object->address() + object->Size();
3229   Address new_end = old_end - bytes_to_trim;
3230 
3231   // Technically in new space this write might be omitted (except for
3232   // debug mode, which iterates through the heap), but to be safe
3233   // we still do it.
3234   // We do not create a filler for objects in large object space.
3235   // TODO(hpayer): We should shrink the large object page if the size
3236   // of the object changed significantly.
3237   if (!lo_space()->Contains(object)) {
3238     CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
3239   }
3240 
3241   // Initialize header of the trimmed array. We are storing the new length
3242   // using release store after creating a filler for the left-over space to
3243   // avoid races with the sweeper thread.
3244   object->synchronized_set_length(len - elements_to_trim);
3245 
3246   // Maintain consistency of live bytes during incremental marking
3247   AdjustLiveBytes(object, -bytes_to_trim, mode);
3248 
3249   // Notify the heap profiler of the change in object layout. The array may
3250   // not be moved during GC, but its size still has to be adjusted.
3251   HeapProfiler* profiler = isolate()->heap_profiler();
3252   if (profiler->is_tracking_allocations()) {
3253     profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3254   }
3255 }
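// Worked example: right-trimming 8 elements from a ByteArray of length 24
// gives bytes_to_trim = ByteArray::SizeFor(24) - ByteArray::SizeFor(16),
// i.e. 8 bytes once both sizes are pointer-aligned. A filler of that size is
// written at old_end - 8 and the new length 16 is release-stored afterwards.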
3256 
3257 
3258 AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
3259     int length, ExternalArrayType array_type, void* external_pointer,
3260     PretenureFlag pretenure) {
3261   int size = FixedTypedArrayBase::kHeaderSize;
3262   AllocationSpace space = SelectSpace(pretenure);
3263   HeapObject* result = nullptr;
3264   {
3265     AllocationResult allocation = AllocateRaw(size, space);
3266     if (!allocation.To(&result)) return allocation;
3267   }
3268 
3269   result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3270   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
3271   elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
3272   elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
3273   elements->set_length(length);
3274   return elements;
3275 }
3276 
3277 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3278                                ElementsKind* element_kind) {
3279   switch (array_type) {
3280 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3281   case kExternal##Type##Array:                          \
3282     *element_size = size;                               \
3283     *element_kind = TYPE##_ELEMENTS;                    \
3284     return;
3285 
3286     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3287 #undef TYPED_ARRAY_CASE
3288 
3289     default:
3290       *element_size = 0;               // Bogus
3291       *element_kind = UINT8_ELEMENTS;  // Bogus
3292       UNREACHABLE();
3293   }
3294 }
3295 
3296 
3297 AllocationResult Heap::AllocateFixedTypedArray(int length,
3298                                                ExternalArrayType array_type,
3299                                                bool initialize,
3300                                                PretenureFlag pretenure) {
3301   int element_size;
3302   ElementsKind elements_kind;
3303   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3304   int size = OBJECT_POINTER_ALIGN(length * element_size +
3305                                   FixedTypedArrayBase::kDataOffset);
3306   AllocationSpace space = SelectSpace(pretenure);
3307 
3308   HeapObject* object = nullptr;
3309   AllocationResult allocation = AllocateRaw(
3310       size, space,
3311       array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
3312   if (!allocation.To(&object)) return allocation;
3313 
3314   object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3315   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3316   elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
3317   elements->set_external_pointer(
3318       ExternalReference::fixed_typed_array_base_data_offset().address(),
3319       SKIP_WRITE_BARRIER);
3320   elements->set_length(length);
3321   if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
3322   return elements;
3323 }
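// Sizing example: for a kExternalFloat64Array of length 4 the element size
// is 8, so size = OBJECT_POINTER_ALIGN(4 * 8 + FixedTypedArrayBase::kDataOffset),
// the raw allocation is requested with kDoubleAligned, and with
// initialize == true the data bytes are cleared by the memset above.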
3324 
3325 
3326 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3327   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3328   AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
3329 
3330   HeapObject* result = nullptr;
3331   if (!allocation.To(&result)) return allocation;
3332   if (immovable) {
3333     Address address = result->address();
3334     // Code objects which should stay at a fixed address are allocated either
3335     // in the first page of code space (objects on the first page of each space
3336     // are never moved) or in large object space.
3337     if (!code_space_->FirstPage()->Contains(address) &&
3338         MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3339       // Discard the first code allocation, which was on a page where it could
3340       // be moved.
3341       CreateFillerObjectAt(result->address(), object_size,
3342                            ClearRecordedSlots::kNo);
3343       allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3344       if (!allocation.To(&result)) return allocation;
3345       OnAllocationEvent(result, object_size);
3346     }
3347   }
3348 
3349   result->set_map_no_write_barrier(code_map());
3350   Code* code = Code::cast(result);
3351   DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
3352   DCHECK(!memory_allocator()->code_range()->valid() ||
3353          memory_allocator()->code_range()->contains(code->address()) ||
3354          object_size <= code_space()->AreaSize());
3355   code->set_gc_metadata(Smi::kZero);
3356   code->set_ic_age(global_ic_age_);
3357   return code;
3358 }
3359 
3360 
3361 AllocationResult Heap::CopyCode(Code* code) {
3362   AllocationResult allocation;
3363 
3364   HeapObject* result = nullptr;
3365   // Allocate an object the same size as the code object.
3366   int obj_size = code->Size();
3367   allocation = AllocateRaw(obj_size, CODE_SPACE);
3368   if (!allocation.To(&result)) return allocation;
3369 
3370   // Copy code object.
3371   Address old_addr = code->address();
3372   Address new_addr = result->address();
3373   CopyBlock(new_addr, old_addr, obj_size);
3374   Code* new_code = Code::cast(result);
3375 
3376   // Relocate the copy.
3377   DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3378   DCHECK(!memory_allocator()->code_range()->valid() ||
3379          memory_allocator()->code_range()->contains(code->address()) ||
3380          obj_size <= code_space()->AreaSize());
3381   new_code->Relocate(new_addr - old_addr);
3382   // We have to iterate over the object and process its pointers when black
3383   // allocation is on.
3384   incremental_marking()->IterateBlackObject(new_code);
3385   // Record all references to embedded objects in the new code object.
3386   RecordWritesIntoCode(new_code);
3387   return new_code;
3388 }
3389 
3390 AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
3391   int size = BytecodeArray::SizeFor(bytecode_array->length());
3392   HeapObject* result = nullptr;
3393   {
3394     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3395     if (!allocation.To(&result)) return allocation;
3396   }
3397 
3398   result->set_map_no_write_barrier(bytecode_array_map());
3399   BytecodeArray* copy = BytecodeArray::cast(result);
3400   copy->set_length(bytecode_array->length());
3401   copy->set_frame_size(bytecode_array->frame_size());
3402   copy->set_parameter_count(bytecode_array->parameter_count());
3403   copy->set_constant_pool(bytecode_array->constant_pool());
3404   copy->set_handler_table(bytecode_array->handler_table());
3405   copy->set_source_position_table(bytecode_array->source_position_table());
3406   copy->set_interrupt_budget(bytecode_array->interrupt_budget());
3407   copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
3408   bytecode_array->CopyBytecodesTo(copy);
3409   return copy;
3410 }
3411 
3412 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3413                                        AllocationSite* allocation_site) {
3414   memento->set_map_no_write_barrier(allocation_memento_map());
3415   DCHECK(allocation_site->map() == allocation_site_map());
3416   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3417   if (FLAG_allocation_site_pretenuring) {
3418     allocation_site->IncrementMementoCreateCount();
3419   }
3420 }
3421 
3422 
3423 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3424                                 AllocationSite* allocation_site) {
3425   DCHECK(gc_state_ == NOT_IN_GC);
3426   DCHECK(map->instance_type() != MAP_TYPE);
3427   int size = map->instance_size();
3428   if (allocation_site != NULL) {
3429     size += AllocationMemento::kSize;
3430   }
3431   HeapObject* result = nullptr;
3432   AllocationResult allocation = AllocateRaw(size, space);
3433   if (!allocation.To(&result)) return allocation;
3434   // No need for write barrier since object is white and map is in old space.
3435   result->set_map_no_write_barrier(map);
3436   if (allocation_site != NULL) {
3437     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3438         reinterpret_cast<Address>(result) + map->instance_size());
3439     InitializeAllocationMemento(alloc_memento, allocation_site);
3440   }
3441   return result;
3442 }
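// Resulting layout when an AllocationSite is passed (sketch):
//
//   result                                result + map->instance_size()
//   |                                     |
//   v                                     v
//   +-------------------------------------+---------------------------+
//   | object body (map->instance_size())  | AllocationMemento (kSize) |
//   +-------------------------------------+---------------------------+
//
// The memento is carved out of the same raw allocation directly behind the
// object, which is why |size| is bumped by AllocationMemento::kSize above.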
3443 
3444 
3445 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3446                                      Map* map) {
3447   obj->set_properties(properties);
3448   obj->initialize_elements();
3449   // TODO(1240798): Initialize the object's body using valid initial values
3450   // according to the object's initial map.  For example, if the map's
3451   // instance type is JS_ARRAY_TYPE, the length field should be initialized
3452   // to a number (e.g. Smi::kZero) and the elements initialized to a
3453   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
3454   // verification code has to cope with (temporarily) invalid objects. See,
3455   // for example, JSArray::JSArrayVerify().
3456   InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
3457 }
3458 
3459 
3460 void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
3461   if (start_offset == map->instance_size()) return;
3462   DCHECK_LT(start_offset, map->instance_size());
3463 
3464   // We cannot always fill with one_pointer_filler_map because objects
3465   // created from API functions expect their internal fields to be initialized
3466   // with undefined_value.
3467   // Pre-allocated fields need to be initialized with undefined_value as well
3468   // so that object accesses before the constructor completes (e.g. in the
3469   // debugger) will not cause a crash.
3470 
3471   // In case of Array subclassing, the |map| could already have transitioned
3472   // to a different elements kind than the initial map on which we track slack.
3473   bool in_progress = map->IsInobjectSlackTrackingInProgress();
3474   Object* filler;
3475   if (in_progress) {
3476     filler = one_pointer_filler_map();
3477   } else {
3478     filler = undefined_value();
3479   }
3480   obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
3481   if (in_progress) {
3482     map->FindRootMap()->InobjectSlackTrackingStep();
3483   }
3484 }
3485 
3486 
3487 AllocationResult Heap::AllocateJSObjectFromMap(
3488     Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
3489   // JSFunctions should be allocated using AllocateFunction to be
3490   // properly initialized.
3491   DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3492 
3493   // Both types of global objects should be allocated using
3494   // AllocateGlobalObject to be properly initialized.
3495   DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3496 
3497   // Allocate the backing storage for the properties.
3498   FixedArray* properties = empty_fixed_array();
3499 
3500   // Allocate the JSObject.
3501   AllocationSpace space = SelectSpace(pretenure);
3502   JSObject* js_obj = nullptr;
3503   AllocationResult allocation = Allocate(map, space, allocation_site);
3504   if (!allocation.To(&js_obj)) return allocation;
3505 
3506   // Initialize the JSObject.
3507   InitializeJSObjectFromMap(js_obj, properties, map);
3508   DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements() ||
3509          js_obj->HasFastStringWrapperElements() ||
3510          js_obj->HasFastArgumentsElements());
3511   return js_obj;
3512 }
3513 
3514 
3515 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3516                                         PretenureFlag pretenure,
3517                                         AllocationSite* allocation_site) {
3518   DCHECK(constructor->has_initial_map());
3519 
3520   // Allocate the object based on the constructor's initial map.
3521   AllocationResult allocation = AllocateJSObjectFromMap(
3522       constructor->initial_map(), pretenure, allocation_site);
3523 #ifdef DEBUG
3524   // Make sure the result is NOT a global object if the allocation succeeded.
3525   HeapObject* obj = nullptr;
3526   DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
3527 #endif
3528   return allocation;
3529 }
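// Illustrative call (the local names are hypothetical):
//
//   JSFunction* ctor = ...;  // must satisfy ctor->has_initial_map()
//   JSObject* obj = nullptr;
//   if (heap->AllocateJSObject(ctor, NOT_TENURED).To(&obj)) {
//     // obj starts with empty_fixed_array() as its properties backing
//     // store, per AllocateJSObjectFromMap() above.
//   }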
3530 
3531 
3532 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3533   // Make the clone.
3534   Map* map = source->map();
3535 
3536   // We can only clone regexps, normal objects, api objects, errors or arrays.
3537   // Copying anything else will break invariants.
3538   CHECK(map->instance_type() == JS_REGEXP_TYPE ||
3539         map->instance_type() == JS_OBJECT_TYPE ||
3540         map->instance_type() == JS_ERROR_TYPE ||
3541         map->instance_type() == JS_ARRAY_TYPE ||
3542         map->instance_type() == JS_API_OBJECT_TYPE ||
3543         map->instance_type() == JS_SPECIAL_API_OBJECT_TYPE);
3544 
3545   int object_size = map->instance_size();
3546   HeapObject* clone = nullptr;
3547 
3548   DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3549 
3550   int adjusted_object_size =
3551       site != NULL ? object_size + AllocationMemento::kSize : object_size;
3552   AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
3553   if (!allocation.To(&clone)) return allocation;
3554 
3555   SLOW_DCHECK(InNewSpace(clone));
3556   // Since we know the clone is allocated in new space, we can copy
3557   // the contents without worrying about updating the write barrier.
3558   CopyBlock(clone->address(), source->address(), object_size);
3559 
3560   if (site != NULL) {
3561     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3562         reinterpret_cast<Address>(clone) + object_size);
3563     InitializeAllocationMemento(alloc_memento, site);
3564   }
3565 
3566   SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3567               source->GetElementsKind());
3568   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3569   FixedArray* properties = FixedArray::cast(source->properties());
3570   // Update elements if necessary.
3571   if (elements->length() > 0) {
3572     FixedArrayBase* elem = nullptr;
3573     {
3574       AllocationResult allocation;
3575       if (elements->map() == fixed_cow_array_map()) {
3576         allocation = FixedArray::cast(elements);
3577       } else if (source->HasFastDoubleElements()) {
3578         allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3579       } else {
3580         allocation = CopyFixedArray(FixedArray::cast(elements));
3581       }
3582       if (!allocation.To(&elem)) return allocation;
3583     }
3584     JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
3585   }
3586   // Update properties if necessary.
3587   if (properties->length() > 0) {
3588     FixedArray* prop = nullptr;
3589     {
3590       AllocationResult allocation = CopyFixedArray(properties);
3591       if (!allocation.To(&prop)) return allocation;
3592     }
3593     JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
3594   }
3595   // Return the new clone.
3596   return clone;
3597 }
3598 
3599 
3600 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3601                                     int len) {
3602   // Only works for one-byte strings.
3603   DCHECK(vector.length() == len);
3604   MemCopy(chars, vector.start(), len);
3605 }
3606 
3607 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3608                                     int len) {
3609   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3610   size_t stream_length = vector.length();
3611   while (stream_length != 0) {
3612     size_t consumed = 0;
3613     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3614     DCHECK(c != unibrow::Utf8::kBadChar);
3615     DCHECK(consumed <= stream_length);
3616     stream_length -= consumed;
3617     stream += consumed;
3618     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3619       len -= 2;
3620       if (len < 0) break;
3621       *chars++ = unibrow::Utf16::LeadSurrogate(c);
3622       *chars++ = unibrow::Utf16::TrailSurrogate(c);
3623     } else {
3624       len -= 1;
3625       if (len < 0) break;
3626       *chars++ = c;
3627     }
3628   }
3629   DCHECK(stream_length == 0);
3630   DCHECK(len == 0);
3631 }
3632 
3633 
3634 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3635   DCHECK(s->length() == len);
3636   String::WriteToFlat(s, chars, 0, len);
3637 }
3638 
3639 
3640 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3641   DCHECK(s->length() == len);
3642   String::WriteToFlat(s, chars, 0, len);
3643 }
3644 
3645 
3646 template <bool is_one_byte, typename T>
3647 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3648                                                       uint32_t hash_field) {
3649   DCHECK(chars >= 0);
3650   // Compute map and object size.
3651   int size;
3652   Map* map;
3653 
3654   DCHECK_LE(0, chars);
3655   DCHECK_GE(String::kMaxLength, chars);
3656   if (is_one_byte) {
3657     map = one_byte_internalized_string_map();
3658     size = SeqOneByteString::SizeFor(chars);
3659   } else {
3660     map = internalized_string_map();
3661     size = SeqTwoByteString::SizeFor(chars);
3662   }
3663 
3664   // Allocate string.
3665   HeapObject* result = nullptr;
3666   {
3667     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3668     if (!allocation.To(&result)) return allocation;
3669   }
3670 
3671   result->set_map_no_write_barrier(map);
3672   // Set length and hash fields of the allocated string.
3673   String* answer = String::cast(result);
3674   answer->set_length(chars);
3675   answer->set_hash_field(hash_field);
3676 
3677   DCHECK_EQ(size, answer->Size());
3678 
3679   if (is_one_byte) {
3680     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3681   } else {
3682     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3683   }
3684   return answer;
3685 }
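// Sizing example: an 8-character one-byte internalized string uses
// one_byte_internalized_string_map() and size = SeqOneByteString::SizeFor(8),
// while the two-byte variant uses internalized_string_map() and
// SeqTwoByteString::SizeFor(8); both are allocated directly in OLD_SPACE.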
3686 
3687 
3688 // Need explicit instantiations.
3689 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3690                                                                      int,
3691                                                                      uint32_t);
3692 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3693                                                                       int,
3694                                                                       uint32_t);
3695 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3696     Vector<const char>, int, uint32_t);
3697 
3698 
3699 AllocationResult Heap::AllocateRawOneByteString(int length,
3700                                                 PretenureFlag pretenure) {
3701   DCHECK_LE(0, length);
3702   DCHECK_GE(String::kMaxLength, length);
3703   int size = SeqOneByteString::SizeFor(length);
3704   DCHECK(size <= SeqOneByteString::kMaxSize);
3705   AllocationSpace space = SelectSpace(pretenure);
3706 
3707   HeapObject* result = nullptr;
3708   {
3709     AllocationResult allocation = AllocateRaw(size, space);
3710     if (!allocation.To(&result)) return allocation;
3711   }
3712 
3713   // Partially initialize the object.
3714   result->set_map_no_write_barrier(one_byte_string_map());
3715   String::cast(result)->set_length(length);
3716   String::cast(result)->set_hash_field(String::kEmptyHashField);
3717   DCHECK_EQ(size, HeapObject::cast(result)->Size());
3718 
3719   return result;
3720 }
3721 
3722 
3723 AllocationResult Heap::AllocateRawTwoByteString(int length,
3724                                                 PretenureFlag pretenure) {
3725   DCHECK_LE(0, length);
3726   DCHECK_GE(String::kMaxLength, length);
3727   int size = SeqTwoByteString::SizeFor(length);
3728   DCHECK(size <= SeqTwoByteString::kMaxSize);
3729   AllocationSpace space = SelectSpace(pretenure);
3730 
3731   HeapObject* result = nullptr;
3732   {
3733     AllocationResult allocation = AllocateRaw(size, space);
3734     if (!allocation.To(&result)) return allocation;
3735   }
3736 
3737   // Partially initialize the object.
3738   result->set_map_no_write_barrier(string_map());
3739   String::cast(result)->set_length(length);
3740   String::cast(result)->set_hash_field(String::kEmptyHashField);
3741   DCHECK_EQ(size, HeapObject::cast(result)->Size());
3742   return result;
3743 }
3744 
3745 
3746 AllocationResult Heap::AllocateEmptyFixedArray() {
3747   int size = FixedArray::SizeFor(0);
3748   HeapObject* result = nullptr;
3749   {
3750     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3751     if (!allocation.To(&result)) return allocation;
3752   }
3753   // Initialize the object.
3754   result->set_map_no_write_barrier(fixed_array_map());
3755   FixedArray::cast(result)->set_length(0);
3756   return result;
3757 }
3758 
3759 AllocationResult Heap::AllocateEmptyScopeInfo() {
3760   int size = FixedArray::SizeFor(0);
3761   HeapObject* result = nullptr;
3762   {
3763     AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3764     if (!allocation.To(&result)) return allocation;
3765   }
3766   // Initialize the object.
3767   result->set_map_no_write_barrier(scope_info_map());
3768   FixedArray::cast(result)->set_length(0);
3769   return result;
3770 }
3771 
3772 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3773   if (!InNewSpace(src)) {
3774     return src;
3775   }
3776 
3777   int len = src->length();
3778   HeapObject* obj = nullptr;
3779   {
3780     AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3781     if (!allocation.To(&obj)) return allocation;
3782   }
3783   obj->set_map_no_write_barrier(fixed_array_map());
3784   FixedArray* result = FixedArray::cast(obj);
3785   result->set_length(len);
3786 
3787   // Copy the content.
3788   DisallowHeapAllocation no_gc;
3789   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3790   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3791 
3792   // TODO(mvstanton): The map is set twice because of the protection against
3793   // calling set() on a COW FixedArray. Issue v8:3221 was created to track
3794   // this; once it is resolved we might be able to remove this whole method.
3795   HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3796   return result;
3797 }
3798 
3799 
3800 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3801     ExternalArrayType array_type) {
3802   return AllocateFixedTypedArray(0, array_type, false, TENURED);
3803 }
3804 
3805 
3806 AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
3807                                              PretenureFlag pretenure) {
3808   int old_len = src->length();
3809   int new_len = old_len + grow_by;
3810   DCHECK(new_len >= old_len);
3811   HeapObject* obj = nullptr;
3812   {
3813     AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3814     if (!allocation.To(&obj)) return allocation;
3815   }
3816 
3817   obj->set_map_no_write_barrier(fixed_array_map());
3818   FixedArray* result = FixedArray::cast(obj);
3819   result->set_length(new_len);
3820 
3821   // Copy the content.
3822   DisallowHeapAllocation no_gc;
3823   WriteBarrierMode mode = obj->GetWriteBarrierMode(no_gc);
3824   for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
3825   MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
3826   return result;
3827 }
3828 
3829 AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
3830                                           PretenureFlag pretenure) {
3831   if (new_len == 0) return empty_fixed_array();
3832 
3833   DCHECK_LE(new_len, src->length());
3834 
3835   HeapObject* obj = nullptr;
3836   {
3837     AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3838     if (!allocation.To(&obj)) return allocation;
3839   }
3840   obj->set_map_no_write_barrier(fixed_array_map());
3841 
3842   FixedArray* result = FixedArray::cast(obj);
3843   result->set_length(new_len);
3844 
3845   // Copy the content.
3846   DisallowHeapAllocation no_gc;
3847   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3848   for (int i = 0; i < new_len; i++) result->set(i, src->get(i), mode);
3849   return result;
3850 }
3851 
3852 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3853   int len = src->length();
3854   HeapObject* obj = nullptr;
3855   {
3856     AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3857     if (!allocation.To(&obj)) return allocation;
3858   }
3859   obj->set_map_no_write_barrier(map);
3860 
3861   FixedArray* result = FixedArray::cast(obj);
3862   DisallowHeapAllocation no_gc;
3863   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3864 
3865   // Eliminate the write barrier if possible.
3866   if (mode == SKIP_WRITE_BARRIER) {
3867     CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3868               FixedArray::SizeFor(len) - kPointerSize);
3869     return obj;
3870   }
3871 
3872   // Slow case: Just copy the content one-by-one.
3873   result->set_length(len);
3874   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3875   return result;
3876 }
3877 
3878 
3879 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3880                                                    Map* map) {
3881   int len = src->length();
3882   HeapObject* obj = nullptr;
3883   {
3884     AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3885     if (!allocation.To(&obj)) return allocation;
3886   }
3887   obj->set_map_no_write_barrier(map);
3888   CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
3889             src->address() + FixedDoubleArray::kLengthOffset,
3890             FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
3891   return obj;
3892 }
3893 
3894 
3895 AllocationResult Heap::AllocateRawFixedArray(int length,
3896                                              PretenureFlag pretenure) {
3897   if (length < 0 || length > FixedArray::kMaxLength) {
3898     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3899   }
3900   int size = FixedArray::SizeFor(length);
3901   AllocationSpace space = SelectSpace(pretenure);
3902 
3903   AllocationResult result = AllocateRaw(size, space);
3904   if (!result.IsRetry() && size > kMaxRegularHeapObjectSize &&
3905       FLAG_use_marking_progress_bar) {
3906     MemoryChunk* chunk =
3907         MemoryChunk::FromAddress(result.ToObjectChecked()->address());
3908     chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
3909   }
3910   return result;
3911 }
3912 
3913 
3914 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
3915                                                     PretenureFlag pretenure,
3916                                                     Object* filler) {
3917   DCHECK(length >= 0);
3918   DCHECK(empty_fixed_array()->IsFixedArray());
3919   if (length == 0) return empty_fixed_array();
3920 
3921   DCHECK(!InNewSpace(filler));
3922   HeapObject* result = nullptr;
3923   {
3924     AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
3925     if (!allocation.To(&result)) return allocation;
3926   }
3927 
3928   result->set_map_no_write_barrier(fixed_array_map());
3929   FixedArray* array = FixedArray::cast(result);
3930   array->set_length(length);
3931   MemsetPointer(array->data_start(), filler, length);
3932   return array;
3933 }
3934 
3935 
3936 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3937   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3938 }
3939 
3940 
3941 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
3942   if (length == 0) return empty_fixed_array();
3943 
3944   HeapObject* obj = nullptr;
3945   {
3946     AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
3947     if (!allocation.To(&obj)) return allocation;
3948   }
3949 
3950   obj->set_map_no_write_barrier(fixed_array_map());
3951   FixedArray::cast(obj)->set_length(length);
3952   return obj;
3953 }
3954 
3955 
3956 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
3957     int length, PretenureFlag pretenure) {
3958   if (length == 0) return empty_fixed_array();
3959 
3960   HeapObject* elements = nullptr;
3961   AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
3962   if (!allocation.To(&elements)) return allocation;
3963 
3964   elements->set_map_no_write_barrier(fixed_double_array_map());
3965   FixedDoubleArray::cast(elements)->set_length(length);
3966   return elements;
3967 }
3968 
3969 
3970 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
3971                                                    PretenureFlag pretenure) {
3972   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
3973     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3974   }
3975   int size = FixedDoubleArray::SizeFor(length);
3976   AllocationSpace space = SelectSpace(pretenure);
3977 
3978   HeapObject* object = nullptr;
3979   {
3980     AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
3981     if (!allocation.To(&object)) return allocation;
3982   }
3983 
3984   return object;
3985 }
3986 
3987 
3988 AllocationResult Heap::AllocateSymbol() {
3989   // Statically ensure that it is safe to allocate symbols in paged spaces.
3990   STATIC_ASSERT(Symbol::kSize <= kMaxRegularHeapObjectSize);
3991 
3992   HeapObject* result = nullptr;
3993   AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
3994   if (!allocation.To(&result)) return allocation;
3995 
3996   result->set_map_no_write_barrier(symbol_map());
3997 
3998   // Generate a random hash value.
3999   int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
4000 
4001   Symbol::cast(result)
4002       ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4003   Symbol::cast(result)->set_name(undefined_value());
4004   Symbol::cast(result)->set_flags(0);
4005 
4006   DCHECK(!Symbol::cast(result)->is_private());
4007   return result;
4008 }
4009 
4010 
4011 AllocationResult Heap::AllocateStruct(InstanceType type) {
4012   Map* map;
4013   switch (type) {
4014 #define MAKE_CASE(NAME, Name, name) \
4015   case NAME##_TYPE:                 \
4016     map = name##_map();             \
4017     break;
4018     STRUCT_LIST(MAKE_CASE)
4019 #undef MAKE_CASE
4020     default:
4021       UNREACHABLE();
4022       return exception();
4023   }
4024   int size = map->instance_size();
4025   Struct* result = nullptr;
4026   {
4027     AllocationResult allocation = Allocate(map, OLD_SPACE);
4028     if (!allocation.To(&result)) return allocation;
4029   }
4030   result->InitializeBody(size);
4031   return result;
4032 }
4033 
4034 
4035 bool Heap::IsHeapIterable() {
4036   // TODO(hpayer): This function is not correct. Allocation folding in old
4037   // space breaks the iterability.
4038   return new_space_top_after_last_gc_ == new_space()->top();
4039 }
4040 
4041 
4042 void Heap::MakeHeapIterable() {
4043   DCHECK(AllowHeapAllocation::IsAllowed());
4044   if (!IsHeapIterable()) {
4045     CollectAllGarbage(kMakeHeapIterableMask,
4046                       GarbageCollectionReason::kMakeHeapIterable);
4047   }
4048   if (mark_compact_collector()->sweeping_in_progress()) {
4049     mark_compact_collector()->EnsureSweepingCompleted();
4050   }
4051   DCHECK(IsHeapIterable());
4052 }
4053 
4054 
4055 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
4056   const double kMinMutatorUtilization = 0.0;
4057   const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
4058   if (mutator_speed == 0) return kMinMutatorUtilization;
4059   if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
4060   // Derivation:
4061   // mutator_utilization = mutator_time / (mutator_time + gc_time)
4062   // mutator_time = 1 / mutator_speed
4063   // gc_time = 1 / gc_speed
4064   // mutator_utilization = (1 / mutator_speed) /
4065   //                       (1 / mutator_speed + 1 / gc_speed)
4066   // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
4067   return gc_speed / (mutator_speed + gc_speed);
4068 }
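// As a rough illustration of the formula above (hypothetical numbers, not
// measured): with mutator_speed = 1000 bytes/ms and gc_speed = 200000
// bytes/ms, mutator_utilization = 200000 / (1000 + 200000) ~= 0.995, i.e.
// the mutator would get about 99.5% of the time if both speeds stayed
// constant. When gc_speed is unknown (0), the conservative 200000 bytes/ms
// value above is substituted before applying the same formula.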
4069 
4070 
4071 double Heap::YoungGenerationMutatorUtilization() {
4072   double mutator_speed = static_cast<double>(
4073       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4074   double gc_speed =
4075       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects);
4076   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4077   if (FLAG_trace_mutator_utilization) {
4078     isolate()->PrintWithTimestamp(
4079         "Young generation mutator utilization = %.3f ("
4080         "mutator_speed=%.f, gc_speed=%.f)\n",
4081         result, mutator_speed, gc_speed);
4082   }
4083   return result;
4084 }
4085 
4086 
4087 double Heap::OldGenerationMutatorUtilization() {
4088   double mutator_speed = static_cast<double>(
4089       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
4090   double gc_speed = static_cast<double>(
4091       tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
4092   double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4093   if (FLAG_trace_mutator_utilization) {
4094     isolate()->PrintWithTimestamp(
4095         "Old generation mutator utilization = %.3f ("
4096         "mutator_speed=%.f, gc_speed=%.f)\n",
4097         result, mutator_speed, gc_speed);
4098   }
4099   return result;
4100 }
4101 
4102 
4103 bool Heap::HasLowYoungGenerationAllocationRate() {
4104   const double high_mutator_utilization = 0.993;
4105   return YoungGenerationMutatorUtilization() > high_mutator_utilization;
4106 }
4107 
4108 
4109 bool Heap::HasLowOldGenerationAllocationRate() {
4110   const double high_mutator_utilization = 0.993;
4111   return OldGenerationMutatorUtilization() > high_mutator_utilization;
4112 }
4113 
4114 
4115 bool Heap::HasLowAllocationRate() {
4116   return HasLowYoungGenerationAllocationRate() &&
4117          HasLowOldGenerationAllocationRate();
4118 }
4119 
4120 
4121 bool Heap::HasHighFragmentation() {
4122   size_t used = PromotedSpaceSizeOfObjects();
4123   size_t committed = CommittedOldGenerationMemory();
4124   return HasHighFragmentation(used, committed);
4125 }
4126 
4127 bool Heap::HasHighFragmentation(size_t used, size_t committed) {
4128   const size_t kSlack = 16 * MB;
4129   // Fragmentation is high if committed > 2 * used + kSlack.
4130   // Rewrite the expression to avoid overflow.
4131   DCHECK_GE(committed, used);
4132   return committed - used > used + kSlack;
4133 }
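// Illustrative check of the rewritten condition (hypothetical sizes): with
// used = 100 MB and committed = 230 MB, the original form asks whether
// 230 MB > 2 * 100 MB + 16 MB and the rewritten form asks whether
// 230 MB - 100 MB > 100 MB + 16 MB; both report high fragmentation, but the
// rewritten form never computes 2 * used, which could overflow size_t on
// very large heaps.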
4134 
4135 bool Heap::ShouldOptimizeForMemoryUsage() {
4136   return FLAG_optimize_for_size || isolate()->IsIsolateInBackground() ||
4137          HighMemoryPressure() || IsLowMemoryDevice();
4138 }
4139 
4140 void Heap::ActivateMemoryReducerIfNeeded() {
4141   // Activate the memory reducer when switching to the background if
4142   // - there has been no mark-compact since start-up, and
4143   // - the committed memory can potentially be reduced.
4144   // 2 pages each for the old, code, and map spaces + 1 page for new space.
4145   const int kMinCommittedMemory = 7 * Page::kPageSize;
4146   if (ms_count_ == 0 && CommittedMemory() > kMinCommittedMemory &&
4147       isolate()->IsIsolateInBackground()) {
4148     MemoryReducer::Event event;
4149     event.type = MemoryReducer::kPossibleGarbage;
4150     event.time_ms = MonotonicallyIncreasingTimeInMs();
4151     memory_reducer_->NotifyPossibleGarbage(event);
4152   }
4153 }
4154 
4155 void Heap::ReduceNewSpaceSize() {
4156   // TODO(ulan): Unify this constant with the similar constant in
4157   // GCIdleTimeHandler once the change is merged to 4.5.
4158   static const size_t kLowAllocationThroughput = 1000;
4159   const double allocation_throughput =
4160       tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
4161 
4162   if (FLAG_predictable) return;
4163 
4164   if (ShouldReduceMemory() ||
4165       ((allocation_throughput != 0) &&
4166        (allocation_throughput < kLowAllocationThroughput))) {
4167     new_space_->Shrink();
4168     UncommitFromSpace();
4169   }
4170 }
4171 
4172 bool Heap::MarkingDequesAreEmpty() {
4173   return mark_compact_collector()->marking_deque()->IsEmpty() &&
4174          (!UsingEmbedderHeapTracer() ||
4175           (wrappers_to_trace() == 0 &&
4176            embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
4177 }
4178 
4179 void Heap::FinalizeIncrementalMarkingIfComplete(
4180     GarbageCollectionReason gc_reason) {
4181   if (incremental_marking()->IsMarking() &&
4182       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4183        (!incremental_marking()->finalize_marking_completed() &&
4184         MarkingDequesAreEmpty()))) {
4185     FinalizeIncrementalMarking(gc_reason);
4186   } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
4187     CollectAllGarbage(current_gc_flags_, gc_reason);
4188   }
4189 }
4190 
4191 bool Heap::TryFinalizeIdleIncrementalMarking(
4192     double idle_time_in_ms, GarbageCollectionReason gc_reason) {
4193   size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
4194   double final_incremental_mark_compact_speed_in_bytes_per_ms =
4195       tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
4196   if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4197       (!incremental_marking()->finalize_marking_completed() &&
4198        MarkingDequesAreEmpty() &&
4199        gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
4200            idle_time_in_ms))) {
4201     FinalizeIncrementalMarking(gc_reason);
4202     return true;
4203   } else if (incremental_marking()->IsComplete() ||
4204              (MarkingDequesAreEmpty() &&
4205               gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
4206                   idle_time_in_ms, size_of_objects,
4207                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4208     CollectAllGarbage(current_gc_flags_, gc_reason);
4209     return true;
4210   }
4211   return false;
4212 }
4213 
4214 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
4215   // TODO(hpayer): We do not have to iterate reservations on black objects
4216   // for marking. We just have to execute the special visiting side effect
4217   // code that adds objects to global data structures, e.g. for array buffers.
4218 
4219   // Code space, map space, and large object space do not use black pages.
4220   // Hence we have to color all objects of the reservation black first to
4221   // avoid unnecessary marking deque load.
4222   if (incremental_marking()->black_allocation()) {
4223     for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4224       const Heap::Reservation& res = reservations[i];
4225       for (auto& chunk : res) {
4226         Address addr = chunk.start;
4227         while (addr < chunk.end) {
4228           HeapObject* obj = HeapObject::FromAddress(addr);
4229           Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
4230           addr += obj->Size();
4231         }
4232       }
4233     }
4234     for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
4235       const Heap::Reservation& res = reservations[i];
4236       for (auto& chunk : res) {
4237         Address addr = chunk.start;
4238         while (addr < chunk.end) {
4239           HeapObject* obj = HeapObject::FromAddress(addr);
4240           incremental_marking()->IterateBlackObject(obj);
4241           addr += obj->Size();
4242         }
4243       }
4244     }
4245   }
4246 }
4247 
4248 GCIdleTimeHeapState Heap::ComputeHeapState() {
4249   GCIdleTimeHeapState heap_state;
4250   heap_state.contexts_disposed = contexts_disposed_;
4251   heap_state.contexts_disposal_rate =
4252       tracer()->ContextDisposalRateInMilliseconds();
4253   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4254   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4255   return heap_state;
4256 }
4257 
4258 
4259 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
4260                                  GCIdleTimeHeapState heap_state,
4261                                  double deadline_in_ms) {
4262   bool result = false;
4263   switch (action.type) {
4264     case DONE:
4265       result = true;
4266       break;
4267     case DO_INCREMENTAL_STEP: {
4268       const double remaining_idle_time_in_ms =
4269           incremental_marking()->AdvanceIncrementalMarking(
4270               deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4271               IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
4272       if (remaining_idle_time_in_ms > 0.0) {
4273         TryFinalizeIdleIncrementalMarking(
4274             remaining_idle_time_in_ms,
4275             GarbageCollectionReason::kFinalizeMarkingViaTask);
4276       }
4277       result = incremental_marking()->IsStopped();
4278       break;
4279     }
4280     case DO_FULL_GC: {
4281       DCHECK(contexts_disposed_ > 0);
4282       HistogramTimerScope scope(isolate_->counters()->gc_context());
4283       TRACE_EVENT0("v8", "V8.GCContext");
4284       CollectAllGarbage(kNoGCFlags, GarbageCollectionReason::kContextDisposal);
4285       break;
4286     }
4287     case DO_NOTHING:
4288       break;
4289   }
4290 
4291   return result;
4292 }
4293 
4294 
4295 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
4296                                     GCIdleTimeHeapState heap_state,
4297                                     double start_ms, double deadline_in_ms) {
4298   double idle_time_in_ms = deadline_in_ms - start_ms;
4299   double current_time = MonotonicallyIncreasingTimeInMs();
4300   last_idle_notification_time_ = current_time;
4301   double deadline_difference = deadline_in_ms - current_time;
4302 
4303   contexts_disposed_ = 0;
4304 
4305   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4306       static_cast<int>(idle_time_in_ms));
4307 
4308   if (deadline_in_ms - start_ms >
4309       GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4310     int committed_memory = static_cast<int>(CommittedMemory() / KB);
4311     int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
4312     isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
4313         start_ms, committed_memory);
4314     isolate()->counters()->aggregated_memory_heap_used()->AddSample(
4315         start_ms, used_memory);
4316   }
4317 
4318   if (deadline_difference >= 0) {
4319     if (action.type != DONE && action.type != DO_NOTHING) {
4320       isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4321           static_cast<int>(deadline_difference));
4322     }
4323   } else {
4324     isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4325         static_cast<int>(-deadline_difference));
4326   }
4327 
4328   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4329       FLAG_trace_idle_notification_verbose) {
4330     isolate_->PrintWithTimestamp(
4331         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4332         "ms, deadline usage %.2f ms [",
4333         idle_time_in_ms, idle_time_in_ms - deadline_difference,
4334         deadline_difference);
4335     action.Print();
4336     PrintF("]");
4337     if (FLAG_trace_idle_notification_verbose) {
4338       PrintF("[");
4339       heap_state.Print();
4340       PrintF("]");
4341     }
4342     PrintF("\n");
4343   }
4344 }
4345 
4346 
4347 double Heap::MonotonicallyIncreasingTimeInMs() {
4348   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4349          static_cast<double>(base::Time::kMillisecondsPerSecond);
4350 }
4351 
4352 
4353 bool Heap::IdleNotification(int idle_time_in_ms) {
4354   return IdleNotification(
4355       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4356       (static_cast<double>(idle_time_in_ms) /
4357        static_cast<double>(base::Time::kMillisecondsPerSecond)));
4358 }
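// For example (hypothetical timing): a call to IdleNotification(16) made
// when the platform clock reads 2.000 s forwards a deadline of 2.016 s to
// the deadline-based overload below.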
4359 
4360 
4361 bool Heap::IdleNotification(double deadline_in_seconds) {
4362   CHECK(HasBeenSetUp());
4363   double deadline_in_ms =
4364       deadline_in_seconds *
4365       static_cast<double>(base::Time::kMillisecondsPerSecond);
4366   HistogramTimerScope idle_notification_scope(
4367       isolate_->counters()->gc_idle_notification());
4368   TRACE_EVENT0("v8", "V8.GCIdleNotification");
4369   double start_ms = MonotonicallyIncreasingTimeInMs();
4370   double idle_time_in_ms = deadline_in_ms - start_ms;
4371 
4372   tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4373                              OldGenerationAllocationCounter());
4374 
4375   GCIdleTimeHeapState heap_state = ComputeHeapState();
4376 
4377   GCIdleTimeAction action =
4378       gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4379 
4380   bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4381 
4382   IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
4383   return result;
4384 }
4385 
4386 
4387 bool Heap::RecentIdleNotificationHappened() {
4388   return (last_idle_notification_time_ +
4389           GCIdleTimeHandler::kMaxScheduledIdleTime) >
4390          MonotonicallyIncreasingTimeInMs();
4391 }
4392 
4393 class MemoryPressureInterruptTask : public CancelableTask {
4394  public:
4395   explicit MemoryPressureInterruptTask(Heap* heap)
4396       : CancelableTask(heap->isolate()), heap_(heap) {}
4397 
4398   virtual ~MemoryPressureInterruptTask() {}
4399 
4400  private:
4401   // v8::internal::CancelableTask overrides.
4402   void RunInternal() override { heap_->CheckMemoryPressure(); }
4403 
4404   Heap* heap_;
4405   DISALLOW_COPY_AND_ASSIGN(MemoryPressureInterruptTask);
4406 };
4407 
4408 void Heap::CheckMemoryPressure() {
4409   if (HighMemoryPressure()) {
4410     if (isolate()->concurrent_recompilation_enabled()) {
4411       // The optimizing compiler may be unnecessarily holding on to memory.
4412       DisallowHeapAllocation no_recursive_gc;
4413       isolate()->optimizing_compile_dispatcher()->Flush(
4414           OptimizingCompileDispatcher::BlockingBehavior::kDontBlock);
4415     }
4416   }
4417   if (memory_pressure_level_.Value() == MemoryPressureLevel::kCritical) {
4418     CollectGarbageOnMemoryPressure();
4419   } else if (memory_pressure_level_.Value() == MemoryPressureLevel::kModerate) {
4420     if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4421       StartIncrementalMarking(kReduceMemoryFootprintMask,
4422                               GarbageCollectionReason::kMemoryPressure);
4423     }
4424   }
4425   MemoryReducer::Event event;
4426   event.type = MemoryReducer::kPossibleGarbage;
4427   event.time_ms = MonotonicallyIncreasingTimeInMs();
4428   memory_reducer_->NotifyPossibleGarbage(event);
4429 }
4430 
4431 void Heap::CollectGarbageOnMemoryPressure() {
4432   const int kGarbageThresholdInBytes = 8 * MB;
4433   const double kGarbageThresholdAsFractionOfTotalMemory = 0.1;
4434   // This constant is the maximum response time in the RAIL performance model.
4435   const double kMaxMemoryPressurePauseMs = 100;
4436 
4437   double start = MonotonicallyIncreasingTimeInMs();
4438   CollectAllGarbage(kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
4439                     GarbageCollectionReason::kMemoryPressure,
4440                     kGCCallbackFlagCollectAllAvailableGarbage);
4441   double end = MonotonicallyIncreasingTimeInMs();
4442 
4443   // Estimate how much memory we can free.
4444   int64_t potential_garbage =
4445       (CommittedMemory() - SizeOfObjects()) + external_memory_;
4446   // If we can potentially free a large amount of memory, then start a GC
4447   // right away instead of waiting for the memory reducer.
4448   if (potential_garbage >= kGarbageThresholdInBytes &&
4449       potential_garbage >=
4450           CommittedMemory() * kGarbageThresholdAsFractionOfTotalMemory) {
4451     // If we spent less than half of the time budget, then perform a full GC.
4452     // Otherwise, start incremental marking.
4453     if (end - start < kMaxMemoryPressurePauseMs / 2) {
4454       CollectAllGarbage(
4455           kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
4456           GarbageCollectionReason::kMemoryPressure,
4457           kGCCallbackFlagCollectAllAvailableGarbage);
4458     } else {
4459       if (FLAG_incremental_marking && incremental_marking()->IsStopped()) {
4460         StartIncrementalMarking(kReduceMemoryFootprintMask,
4461                                 GarbageCollectionReason::kMemoryPressure);
4462       }
4463     }
4464   }
4465 }
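// Worked example of the thresholds above (hypothetical numbers): with
// CommittedMemory() = 120 MB, SizeOfObjects() = 100 MB and external_memory_
// = 0, potential_garbage is 20 MB, which exceeds both kGarbageThresholdInBytes
// (8 MB) and 10% of committed memory (12 MB), so either a second collection
// or an incremental marking start is triggered, depending on how much of the
// 100 ms budget the first collection consumed.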
4466 
4467 void Heap::MemoryPressureNotification(MemoryPressureLevel level,
4468                                       bool is_isolate_locked) {
4469   MemoryPressureLevel previous = memory_pressure_level_.Value();
4470   memory_pressure_level_.SetValue(level);
4471   if ((previous != MemoryPressureLevel::kCritical &&
4472        level == MemoryPressureLevel::kCritical) ||
4473       (previous == MemoryPressureLevel::kNone &&
4474        level == MemoryPressureLevel::kModerate)) {
4475     if (is_isolate_locked) {
4476       CheckMemoryPressure();
4477     } else {
4478       ExecutionAccess access(isolate());
4479       isolate()->stack_guard()->RequestGC();
4480       V8::GetCurrentPlatform()->CallOnForegroundThread(
4481           reinterpret_cast<v8::Isolate*>(isolate()),
4482           new MemoryPressureInterruptTask(this));
4483     }
4484   }
4485 }
4486 
4487 void Heap::CollectCodeStatistics() {
4488   CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
4489   // We do not look for code in new space or map space. If code somehow ends
4490   // up in those spaces, we would miss it here.
4491   CodeStatistics::CollectCodeStatistics(code_space_, isolate());
4492   CodeStatistics::CollectCodeStatistics(old_space_, isolate());
4493   CodeStatistics::CollectCodeStatistics(lo_space_, isolate());
4494 }
4495 
4496 #ifdef DEBUG
4497 
4498 void Heap::Print() {
4499   if (!HasBeenSetUp()) return;
4500   isolate()->PrintStack(stdout);
4501   AllSpaces spaces(this);
4502   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4503     space->Print();
4504   }
4505 }
4506 
4507 
4508 void Heap::ReportCodeStatistics(const char* title) {
4509   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4510   CollectCodeStatistics();
4511   CodeStatistics::ReportCodeStatistics(isolate());
4512 }
4513 
4514 
4515 // This function expects that NewSpace's allocated objects histogram is
4516 // populated (via a call to CollectStatistics or else as a side effect of a
4517 // just-completed scavenge collection).
4518 void Heap::ReportHeapStatistics(const char* title) {
4519   USE(title);
4520   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4521          gc_count_);
4522   PrintF("old_generation_allocation_limit_ %" V8PRIdPTR "\n",
4523          old_generation_allocation_limit_);
4524 
4525   PrintF("\n");
4526   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4527   isolate_->global_handles()->PrintStats();
4528   PrintF("\n");
4529 
4530   PrintF("Heap statistics : ");
4531   memory_allocator()->ReportStatistics();
4532   PrintF("To space : ");
4533   new_space_->ReportStatistics();
4534   PrintF("Old space : ");
4535   old_space_->ReportStatistics();
4536   PrintF("Code space : ");
4537   code_space_->ReportStatistics();
4538   PrintF("Map space : ");
4539   map_space_->ReportStatistics();
4540   PrintF("Large object space : ");
4541   lo_space_->ReportStatistics();
4542   PrintF(">>>>>> ========================================= >>>>>>\n");
4543 }
4544 
4545 #endif  // DEBUG
4546 
4547 const char* Heap::GarbageCollectionReasonToString(
4548     GarbageCollectionReason gc_reason) {
4549   switch (gc_reason) {
4550     case GarbageCollectionReason::kAllocationFailure:
4551       return "allocation failure";
4552     case GarbageCollectionReason::kAllocationLimit:
4553       return "allocation limit";
4554     case GarbageCollectionReason::kContextDisposal:
4555       return "context disposal";
4556     case GarbageCollectionReason::kCountersExtension:
4557       return "counters extension";
4558     case GarbageCollectionReason::kDebugger:
4559       return "debugger";
4560     case GarbageCollectionReason::kDeserializer:
4561       return "deserialize";
4562     case GarbageCollectionReason::kExternalMemoryPressure:
4563       return "external memory pressure";
4564     case GarbageCollectionReason::kFinalizeMarkingViaStackGuard:
4565       return "finalize incremental marking via stack guard";
4566     case GarbageCollectionReason::kFinalizeMarkingViaTask:
4567       return "finalize incremental marking via task";
4568     case GarbageCollectionReason::kFullHashtable:
4569       return "full hash-table";
4570     case GarbageCollectionReason::kHeapProfiler:
4571       return "heap profiler";
4572     case GarbageCollectionReason::kIdleTask:
4573       return "idle task";
4574     case GarbageCollectionReason::kLastResort:
4575       return "last resort";
4576     case GarbageCollectionReason::kLowMemoryNotification:
4577       return "low memory notification";
4578     case GarbageCollectionReason::kMakeHeapIterable:
4579       return "make heap iterable";
4580     case GarbageCollectionReason::kMemoryPressure:
4581       return "memory pressure";
4582     case GarbageCollectionReason::kMemoryReducer:
4583       return "memory reducer";
4584     case GarbageCollectionReason::kRuntime:
4585       return "runtime";
4586     case GarbageCollectionReason::kSamplingProfiler:
4587       return "sampling profiler";
4588     case GarbageCollectionReason::kSnapshotCreator:
4589       return "snapshot creator";
4590     case GarbageCollectionReason::kTesting:
4591       return "testing";
4592     case GarbageCollectionReason::kUnknown:
4593       return "unknown";
4594   }
4595   UNREACHABLE();
4596   return "";
4597 }
4598 
4599 bool Heap::Contains(HeapObject* value) {
4600   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
4601     return false;
4602   }
4603   return HasBeenSetUp() &&
4604          (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
4605           code_space_->Contains(value) || map_space_->Contains(value) ||
4606           lo_space_->Contains(value));
4607 }
4608 
4609 bool Heap::ContainsSlow(Address addr) {
4610   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4611     return false;
4612   }
4613   return HasBeenSetUp() &&
4614          (new_space_->ToSpaceContainsSlow(addr) ||
4615           old_space_->ContainsSlow(addr) || code_space_->ContainsSlow(addr) ||
4616           map_space_->ContainsSlow(addr) || lo_space_->ContainsSlow(addr));
4617 }
4618 
4619 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4620   if (memory_allocator()->IsOutsideAllocatedSpace(value->address())) {
4621     return false;
4622   }
4623   if (!HasBeenSetUp()) return false;
4624 
4625   switch (space) {
4626     case NEW_SPACE:
4627       return new_space_->ToSpaceContains(value);
4628     case OLD_SPACE:
4629       return old_space_->Contains(value);
4630     case CODE_SPACE:
4631       return code_space_->Contains(value);
4632     case MAP_SPACE:
4633       return map_space_->Contains(value);
4634     case LO_SPACE:
4635       return lo_space_->Contains(value);
4636   }
4637   UNREACHABLE();
4638   return false;
4639 }
4640 
4641 bool Heap::InSpaceSlow(Address addr, AllocationSpace space) {
4642   if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
4643     return false;
4644   }
4645   if (!HasBeenSetUp()) return false;
4646 
4647   switch (space) {
4648     case NEW_SPACE:
4649       return new_space_->ToSpaceContainsSlow(addr);
4650     case OLD_SPACE:
4651       return old_space_->ContainsSlow(addr);
4652     case CODE_SPACE:
4653       return code_space_->ContainsSlow(addr);
4654     case MAP_SPACE:
4655       return map_space_->ContainsSlow(addr);
4656     case LO_SPACE:
4657       return lo_space_->ContainsSlow(addr);
4658   }
4659   UNREACHABLE();
4660   return false;
4661 }
4662 
4663 
4664 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4665   switch (space) {
4666     case NEW_SPACE:
4667     case OLD_SPACE:
4668     case CODE_SPACE:
4669     case MAP_SPACE:
4670     case LO_SPACE:
4671       return true;
4672     default:
4673       return false;
4674   }
4675 }
4676 
4677 
4678 bool Heap::RootIsImmortalImmovable(int root_index) {
4679   switch (root_index) {
4680 #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
4681     IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
4682 #undef IMMORTAL_IMMOVABLE_ROOT
4683 #define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
4684     INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
4685 #undef INTERNALIZED_STRING
4686 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
4687     STRING_TYPE_LIST(STRING_TYPE)
4688 #undef STRING_TYPE
4689     return true;
4690     default:
4691       return false;
4692   }
4693 }
4694 
4695 
4696 #ifdef VERIFY_HEAP
4697 void Heap::Verify() {
4698   CHECK(HasBeenSetUp());
4699   HandleScope scope(isolate());
4700 
4701   if (mark_compact_collector()->sweeping_in_progress()) {
4702     // We have to wait here for the sweeper threads to have an iterable heap.
4703     mark_compact_collector()->EnsureSweepingCompleted();
4704   }
4705 
4706   VerifyPointersVisitor visitor;
4707   IterateRoots(&visitor, VISIT_ONLY_STRONG);
4708 
4709   VerifySmisVisitor smis_visitor;
4710   IterateSmiRoots(&smis_visitor);
4711 
4712   new_space_->Verify();
4713 
4714   old_space_->Verify(&visitor);
4715   map_space_->Verify(&visitor);
4716 
4717   VerifyPointersVisitor no_dirty_regions_visitor;
4718   code_space_->Verify(&no_dirty_regions_visitor);
4719 
4720   lo_space_->Verify();
4721 
4722   mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
4723   if (FLAG_omit_map_checks_for_leaf_maps) {
4724     mark_compact_collector()->VerifyOmittedMapChecks();
4725   }
4726 }
4727 #endif
4728 
4729 
4730 void Heap::ZapFromSpace() {
4731   if (!new_space_->IsFromSpaceCommitted()) return;
4732   for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
4733                                       new_space_->FromSpaceEnd())) {
4734     for (Address cursor = page->area_start(), limit = page->area_end();
4735          cursor < limit; cursor += kPointerSize) {
4736       Memory::Address_at(cursor) = kFromSpaceZapValue;
4737     }
4738   }
4739 }
4740 
4741 class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
4742  public:
4743   IterateAndScavengePromotedObjectsVisitor(Heap* heap, HeapObject* target,
4744                                            bool record_slots)
4745       : heap_(heap), target_(target), record_slots_(record_slots) {}
4746 
4747   inline void VisitPointers(Object** start, Object** end) override {
4748     Address slot_address = reinterpret_cast<Address>(start);
4749     Page* page = Page::FromAddress(slot_address);
4750 
4751     while (slot_address < reinterpret_cast<Address>(end)) {
4752       Object** slot = reinterpret_cast<Object**>(slot_address);
4753       Object* target = *slot;
4754 
4755       if (target->IsHeapObject()) {
4756         if (heap_->InFromSpace(target)) {
4757           Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(slot),
4758                                     HeapObject::cast(target));
4759           target = *slot;
4760           if (heap_->InNewSpace(target)) {
4761             SLOW_DCHECK(heap_->InToSpace(target));
4762             SLOW_DCHECK(target->IsHeapObject());
4763             RememberedSet<OLD_TO_NEW>::Insert(page, slot_address);
4764           }
4765           SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
4766               HeapObject::cast(target)));
4767         } else if (record_slots_ &&
4768                    MarkCompactCollector::IsOnEvacuationCandidate(
4769                        HeapObject::cast(target))) {
4770           heap_->mark_compact_collector()->RecordSlot(target_, slot, target);
4771         }
4772       }
4773 
4774       slot_address += kPointerSize;
4775     }
4776   }
4777 
4778   inline void VisitCodeEntry(Address code_entry_slot) override {
4779     // Black allocation requires us to process objects referenced by
4780     // promoted objects.
4781     if (heap_->incremental_marking()->black_allocation()) {
4782       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
4783       IncrementalMarking::MarkGrey(heap_, code);
4784     }
4785   }
4786 
4787  private:
4788   Heap* heap_;
4789   HeapObject* target_;
4790   bool record_slots_;
4791 };
4792 
4793 void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
4794                                             bool was_marked_black) {
4795   // We are not collecting slots on new space objects during mutation, thus
4796   // we have to scan for pointers to evacuation candidates when we promote
4797   // objects. But we should not record any slots in non-black objects. Grey
4798   // objects' slots would be rescanned anyway. A white object might not
4799   // survive until the end of the collection, so it would be a violation of
4800   // the invariant to record its slots.
4801   bool record_slots = false;
4802   if (incremental_marking()->IsCompacting()) {
4803     MarkBit mark_bit = ObjectMarking::MarkBitFrom(target);
4804     record_slots = Marking::IsBlack(mark_bit);
4805   }
4806 
4807   IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
4808   if (target->IsJSFunction()) {
4809     // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots
4810     // for these links are recorded during the processing of weak lists.
4811     JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor);
4812   } else {
4813     target->IterateBody(target->map()->instance_type(), size, &visitor);
4814   }
4815 
4816   // When black allocation is on, we have to visit objects promoted from new
4817   // space to black pages that are not yet marked black, in order to keep
4818   // their references alive.
4819   // TODO(hpayer): Implement a special promotion visitor that incorporates
4820   // regular visiting and IteratePromotedObjectPointers.
4821   if (!was_marked_black) {
4822     if (incremental_marking()->black_allocation()) {
4823       IncrementalMarking::MarkGrey(this, target->map());
4824       incremental_marking()->IterateBlackObject(target);
4825     }
4826   }
4827 }
4828 
4829 
4830 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4831   IterateStrongRoots(v, mode);
4832   IterateWeakRoots(v, mode);
4833 }
4834 
4835 
4836 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4837   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4838   v->Synchronize(VisitorSynchronization::kStringTable);
4839   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4840     // Scavenge collections have special processing for this.
4841     external_string_table_.Iterate(v);
4842   }
4843   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4844 }
4845 
4846 
4847 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4848   // Acquire execution access since we are going to read stack limit values.
4849   ExecutionAccess access(isolate());
4850   v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4851   v->Synchronize(VisitorSynchronization::kSmiRootList);
4852 }
4853 
4854 // We cannot avoid stale handles to left-trimmed objects, but we can make
4855 // sure that all handles that are still needed get updated. Filter out a
4856 // stale pointer and clear the slot to allow post-processing of handles
4857 // (needed because the sweeper might actually free the underlying page).
4858 class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
4859  public:
4860   explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
4861     USE(heap_);
4862   }
4863 
4864   void VisitPointer(Object** p) override { FixHandle(p); }
4865 
4866   void VisitPointers(Object** start, Object** end) override {
4867     for (Object** p = start; p < end; p++) FixHandle(p);
4868   }
4869 
4870  private:
4871   inline void FixHandle(Object** p) {
4872     HeapObject* current = reinterpret_cast<HeapObject*>(*p);
4873     if (!current->IsHeapObject()) return;
4874     const MapWord map_word = current->map_word();
4875     if (!map_word.IsForwardingAddress() && current->IsFiller()) {
4876 #ifdef DEBUG
4877       // We need to find a FixedArrayBase map after walking the fillers.
4878       while (current->IsFiller()) {
4879         Address next = reinterpret_cast<Address>(current);
4880         if (current->map() == heap_->one_pointer_filler_map()) {
4881           next += kPointerSize;
4882         } else if (current->map() == heap_->two_pointer_filler_map()) {
4883           next += 2 * kPointerSize;
4884         } else {
4885           next += current->Size();
4886         }
4887         current = reinterpret_cast<HeapObject*>(next);
4888       }
4889       DCHECK(current->IsFixedArrayBase());
4890 #endif  // DEBUG
4891       *p = nullptr;
4892     }
4893   }
4894 
4895   Heap* heap_;
4896 };
4897 
4898 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4899   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4900   v->Synchronize(VisitorSynchronization::kStrongRootList);
4901   // The serializer/deserializer iterates the root list twice, first to pick
4902   // off immortal immovable roots to make sure they end up on the first page,
4903   // and then again for the rest.
4904   if (mode == VISIT_ONLY_STRONG_ROOT_LIST) return;
4905 
4906   isolate_->bootstrapper()->Iterate(v);
4907   v->Synchronize(VisitorSynchronization::kBootstrapper);
4908   isolate_->Iterate(v);
4909   v->Synchronize(VisitorSynchronization::kTop);
4910   Relocatable::Iterate(isolate_, v);
4911   v->Synchronize(VisitorSynchronization::kRelocatable);
4912   isolate_->debug()->Iterate(v);
4913   v->Synchronize(VisitorSynchronization::kDebug);
4914 
4915   isolate_->compilation_cache()->Iterate(v);
4916   v->Synchronize(VisitorSynchronization::kCompilationCache);
4917 
4918   // Iterate over local handles in handle scopes.
4919   FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
4920   isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
4921   isolate_->handle_scope_implementer()->Iterate(v);
4922   isolate_->IterateDeferredHandles(v);
4923   v->Synchronize(VisitorSynchronization::kHandleScope);
4924 
4925   // Iterate over the builtin code objects and code stubs in the
4926   // heap. Note that it is not necessary to iterate over code objects
4927   // on scavenge collections.
4928   if (mode != VISIT_ALL_IN_SCAVENGE) {
4929     isolate_->builtins()->IterateBuiltins(v);
4930     v->Synchronize(VisitorSynchronization::kBuiltins);
4931     isolate_->interpreter()->IterateDispatchTable(v);
4932     v->Synchronize(VisitorSynchronization::kDispatchTable);
4933   }
4934 
4935   // Iterate over global handles.
4936   switch (mode) {
4937     case VISIT_ONLY_STRONG_ROOT_LIST:
4938       UNREACHABLE();
4939       break;
4940     case VISIT_ONLY_STRONG:
4941     case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
4942       isolate_->global_handles()->IterateStrongRoots(v);
4943       break;
4944     case VISIT_ALL_IN_SCAVENGE:
4945       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4946       break;
4947     case VISIT_ALL_IN_SWEEP_NEWSPACE:
4948     case VISIT_ALL:
4949       isolate_->global_handles()->IterateAllRoots(v);
4950       break;
4951   }
4952   v->Synchronize(VisitorSynchronization::kGlobalHandles);
4953 
4954   // Iterate over eternal handles.
4955   if (mode == VISIT_ALL_IN_SCAVENGE) {
4956     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4957   } else {
4958     isolate_->eternal_handles()->IterateAllRoots(v);
4959   }
4960   v->Synchronize(VisitorSynchronization::kEternalHandles);
4961 
4962   // Iterate over pointers being held by inactive threads.
4963   isolate_->thread_manager()->Iterate(v);
4964   v->Synchronize(VisitorSynchronization::kThreadManager);
4965 
4966   // Iterate over other strong roots (currently only identity maps).
4967   for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
4968     v->VisitPointers(list->start, list->end);
4969   }
4970   v->Synchronize(VisitorSynchronization::kStrongRoots);
4971 
4972   // Iterate over the partial snapshot cache unless serializing.
4973   if (mode != VISIT_ONLY_STRONG_FOR_SERIALIZATION) {
4974     SerializerDeserializer::Iterate(isolate_, v);
4975   }
4976   // We don't do a v->Synchronize call here, because in debug mode that will
4977   // output a flag to the snapshot.  However at this point the serializer and
4978   // deserializer are deliberately a little unsynchronized (see above) so the
4979   // checking of the sync flag in the snapshot would fail.
4980 }
4981 
4982 
4983 // TODO(1236194): Since the heap size is configurable on the command line
4984 // and through the API, we should gracefully handle the case that the heap
4985 // size is not big enough to fit all the initial objects.
4986 bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
4987                          size_t max_executable_size, size_t code_range_size) {
4988   if (HasBeenSetUp()) return false;
4989 
4990   // Overwrite default configuration.
4991   if (max_semi_space_size != 0) {
4992     max_semi_space_size_ = max_semi_space_size * MB;
4993   }
4994   if (max_old_space_size != 0) {
4995     max_old_generation_size_ = max_old_space_size * MB;
4996   }
4997   if (max_executable_size != 0) {
4998     max_executable_size_ = max_executable_size * MB;
4999   }
5000 
5001   // If max space size flags are specified, overwrite the configuration.
5002   if (FLAG_max_semi_space_size > 0) {
5003     max_semi_space_size_ = static_cast<size_t>(FLAG_max_semi_space_size) * MB;
5004   }
5005   if (FLAG_max_old_space_size > 0) {
5006     max_old_generation_size_ =
5007         static_cast<size_t>(FLAG_max_old_space_size) * MB;
5008   }
5009   if (FLAG_max_executable_size > 0) {
5010     max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
5011   }
5012 
5013   if (Page::kPageSize > MB) {
5014     max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
5015     max_old_generation_size_ =
5016         ROUND_UP(max_old_generation_size_, Page::kPageSize);
5017     max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
5018   }
5019 
5020   if (FLAG_stress_compaction) {
5021     // This will cause more frequent GCs when stressing.
5022     max_semi_space_size_ = MB;
5023   }
5024 
5025   // The new space size must be a power of two to support single-bit testing
5026   // for containment.
5027   max_semi_space_size_ = base::bits::RoundUpToPowerOfTwo32(
5028       static_cast<uint32_t>(max_semi_space_size_));
5029 
5030   if (FLAG_min_semi_space_size > 0) {
5031     size_t initial_semispace_size =
5032         static_cast<size_t>(FLAG_min_semi_space_size) * MB;
5033     if (initial_semispace_size > max_semi_space_size_) {
5034       initial_semispace_size_ = max_semi_space_size_;
5035       if (FLAG_trace_gc) {
5036         PrintIsolate(isolate_,
5037                      "Min semi-space size cannot be more than the maximum "
5038                      "semi-space size of %" PRIuS " MB\n",
5039                      max_semi_space_size_ / MB);
5040       }
5041     } else {
5042       initial_semispace_size_ =
5043           ROUND_UP(initial_semispace_size, Page::kPageSize);
5044     }
5045   }
5046 
5047   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5048 
5049   if (FLAG_semi_space_growth_factor < 2) {
5050     FLAG_semi_space_growth_factor = 2;
5051   }
5052 
5053   // The old generation is paged and needs at least one page for each space.
5054   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5055   max_old_generation_size_ =
5056       Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
5057           max_old_generation_size_);
5058 
5059   // The max executable size must be less than or equal to the max old
5060   // generation size.
5061   if (max_executable_size_ > max_old_generation_size_) {
5062     max_executable_size_ = max_old_generation_size_;
5063   }
5064 
5065   if (FLAG_initial_old_space_size > 0) {
5066     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
5067   } else {
5068     initial_old_generation_size_ =
5069         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
5070   }
5071   old_generation_allocation_limit_ = initial_old_generation_size_;
5072 
5073   // We rely on being able to allocate new arrays in paged spaces.
5074   DCHECK(kMaxRegularHeapObjectSize >=
5075          (JSArray::kSize +
5076           FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
5077           AllocationMemento::kSize));
5078 
5079   code_range_size_ = code_range_size * MB;
5080 
5081   configured_ = true;
5082   return true;
5083 }
5084 
5085 
5086 void Heap::AddToRingBuffer(const char* string) {
5087   size_t first_part =
5088       Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
5089   memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
5090   ring_buffer_end_ += first_part;
5091   if (first_part < strlen(string)) {
5092     ring_buffer_full_ = true;
5093     size_t second_part = strlen(string) - first_part;
5094     memcpy(trace_ring_buffer_, string + first_part, second_part);
5095     ring_buffer_end_ = second_part;
5096   }
5097 }
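// Example of the wrap-around behaviour (hypothetical sizes): with
// kTraceRingBufferSize = 512 and ring_buffer_end_ = 508, appending a 10-byte
// string copies the first 4 bytes to positions 508..511, marks the buffer as
// full, copies the remaining 6 bytes to positions 0..5, and leaves
// ring_buffer_end_ = 6. GetFromRingBuffer() below then reassembles the two
// parts in chronological order.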
5098 
5099 
5100 void Heap::GetFromRingBuffer(char* buffer) {
5101   size_t copied = 0;
5102   if (ring_buffer_full_) {
5103     copied = kTraceRingBufferSize - ring_buffer_end_;
5104     memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
5105   }
5106   memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
5107 }
5108 
5109 
5110 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
5111 
5112 
5113 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5114   *stats->start_marker = HeapStats::kStartMarker;
5115   *stats->end_marker = HeapStats::kEndMarker;
5116   *stats->new_space_size = new_space_->Size();
5117   *stats->new_space_capacity = new_space_->Capacity();
5118   *stats->old_space_size = old_space_->SizeOfObjects();
5119   *stats->old_space_capacity = old_space_->Capacity();
5120   *stats->code_space_size = code_space_->SizeOfObjects();
5121   *stats->code_space_capacity = code_space_->Capacity();
5122   *stats->map_space_size = map_space_->SizeOfObjects();
5123   *stats->map_space_capacity = map_space_->Capacity();
5124   *stats->lo_space_size = lo_space_->Size();
5125   isolate_->global_handles()->RecordStats(stats);
5126   *stats->memory_allocator_size = memory_allocator()->Size();
5127   *stats->memory_allocator_capacity =
5128       memory_allocator()->Size() + memory_allocator()->Available();
5129   *stats->os_error = base::OS::GetLastError();
5130   *stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
5131   *stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
5132   if (take_snapshot) {
5133     HeapIterator iterator(this);
5134     for (HeapObject* obj = iterator.next(); obj != NULL;
5135          obj = iterator.next()) {
5136       InstanceType type = obj->map()->instance_type();
5137       DCHECK(0 <= type && type <= LAST_TYPE);
5138       stats->objects_per_type[type]++;
5139       stats->size_per_type[type] += obj->Size();
5140     }
5141   }
5142   if (stats->last_few_messages != NULL)
5143     GetFromRingBuffer(stats->last_few_messages);
5144   if (stats->js_stacktrace != NULL) {
5145     FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
5146     StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
5147     if (gc_state() == Heap::NOT_IN_GC) {
5148       isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
5149     } else {
5150       accumulator.Add("Cannot get stack trace in GC.");
5151     }
5152   }
5153 }
5154 
5155 size_t Heap::PromotedSpaceSizeOfObjects() {
5156   return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5157          map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5158 }
5159 
5160 uint64_t Heap::PromotedExternalMemorySize() {
5161   if (external_memory_ <= external_memory_at_last_mark_compact_) return 0;
5162   return static_cast<uint64_t>(external_memory_ -
5163                                external_memory_at_last_mark_compact_);
5164 }
5165 
5166 
5167 const double Heap::kMinHeapGrowingFactor = 1.1;
5168 const double Heap::kMaxHeapGrowingFactor = 4.0;
5169 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
5170 const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
5171 const double Heap::kConservativeHeapGrowingFactor = 1.3;
5172 const double Heap::kTargetMutatorUtilization = 0.97;
5173 
5174 
5175 // Given the GC speed in bytes per ms and the allocation throughput in bytes
5176 // per ms (mutator speed), this function returns the heap growing factor that
5177 // will achieve kTargetMutatorUtilization if the GC speed and the mutator speed
5178 // remain the same until the next GC.
5179 //
5180 // For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
5181 // TM / (TM + TG), where TM is the time spent in the mutator and TG is the
5182 // time spent in the garbage collector.
5183 //
5184 // Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
5185 // time-frame from the end of the current GC to the end of the next GC. Based
5186 // on the MU we can compute the heap growing factor F as
5187 //
5188 // F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
5189 //
5190 // This formula can be derived as follows.
5191 //
5192 // F = Limit / Live by definition, where Limit is the allocation limit,
5193 // and Live is the size of live objects.
5194 // Let’s assume that we already know the Limit. Then:
5195 //   TG = Limit / gc_speed
5196 //   TM = (TM + TG) * MU, by definition of MU.
5197 //   TM = TG * MU / (1 - MU)
5198 //   TM = Limit *  MU / (gc_speed * (1 - MU))
5199 // On the other hand, if the allocation throughput remains constant:
5200 //   Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
5201 // Solving it for TM, we get
5202 //   TM = (Limit - Live) / mutator_speed
5203 // Combining the two equations for TM:
5204 //   (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
5205 //   (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
5206 // substitute R = gc_speed / mutator_speed
5207 //   (Limit - Live) = Limit * MU  / (R * (1 - MU))
5208 // substitute F = Limit / Live
5209 //   F - 1 = F * MU  / (R * (1 - MU))
5210 //   F - F * MU / (R * (1 - MU)) = 1
5211 //   F * (1 - MU / (R * (1 - MU))) = 1
5212 //   F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
5213 //   F = R * (1 - MU) / (R * (1 - MU) - MU)
5214 double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
5215   if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
5216 
5217   const double speed_ratio = gc_speed / mutator_speed;
5218   const double mu = kTargetMutatorUtilization;
5219 
5220   const double a = speed_ratio * (1 - mu);
5221   const double b = speed_ratio * (1 - mu) - mu;
5222 
5223   // The factor is a / b, but we need to check for small b first.
5224   double factor =
5225       (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
5226   factor = Min(factor, kMaxHeapGrowingFactor);
5227   factor = Max(factor, kMinHeapGrowingFactor);
5228   return factor;
5229 }
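// Plugging hypothetical speeds into the derivation above: with gc_speed =
// 100000 bytes/ms, mutator_speed = 1000 bytes/ms and MU = 0.97, R = 100,
// a = 100 * 0.03 = 3.0 and b = 3.0 - 0.97 = 2.03, giving a factor of about
// 3.0 / 2.03 ~= 1.48, well inside the [kMinHeapGrowingFactor,
// kMaxHeapGrowingFactor] clamp. With a much faster mutator (mutator_speed =
// 10000 bytes/ms), R = 10 makes b negative, and the guard above falls back
// to kMaxHeapGrowingFactor.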
5230 
5231 size_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5232                                                    size_t old_gen_size) {
5233   CHECK(factor > 1.0);
5234   CHECK(old_gen_size > 0);
5235   uint64_t limit = static_cast<uint64_t>(old_gen_size * factor);
5236   limit = Max(limit, static_cast<uint64_t>(old_gen_size) +
5237                          MinimumAllocationLimitGrowingStep());
5238   limit += new_space_->Capacity();
5239   uint64_t halfway_to_the_max =
5240       (static_cast<uint64_t>(old_gen_size) + max_old_generation_size_) / 2;
5241   return static_cast<size_t>(Min(limit, halfway_to_the_max));
5242 }
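// Example with hypothetical values: old_gen_size = 64 MB, factor = 1.5,
// new space capacity = 8 MB and max_old_generation_size_ = 256 MB. The raw
// limit is 96 MB, which already exceeds old_gen_size plus the minimum
// growing step, so the limit becomes 96 MB + 8 MB = 104 MB; that is below
// the halfway-to-the-max value of (64 MB + 256 MB) / 2 = 160 MB, so 104 MB
// is returned.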
5243 
5244 size_t Heap::MinimumAllocationLimitGrowingStep() {
5245   const size_t kRegularAllocationLimitGrowingStep = 8;
5246   const size_t kLowMemoryAllocationLimitGrowingStep = 2;
5247   size_t limit = (Page::kPageSize > MB ? Page::kPageSize : MB);
5248   return limit * (ShouldOptimizeForMemoryUsage()
5249                       ? kLowMemoryAllocationLimitGrowingStep
5250                       : kRegularAllocationLimitGrowingStep);
5251 }
5252 
5253 void Heap::SetOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
5254                                            double mutator_speed) {
5255   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5256 
5257   if (FLAG_trace_gc_verbose) {
5258     isolate_->PrintWithTimestamp(
5259         "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
5260         "(gc=%.f, mutator=%.f)\n",
5261         factor, kTargetMutatorUtilization, gc_speed / mutator_speed, gc_speed,
5262         mutator_speed);
5263   }
5264 
5265   if (IsMemoryConstrainedDevice()) {
5266     factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
5267   }
5268 
5269   if (memory_reducer_->ShouldGrowHeapSlowly() ||
5270       ShouldOptimizeForMemoryUsage()) {
5271     factor = Min(factor, kConservativeHeapGrowingFactor);
5272   }
5273 
5274   if (FLAG_stress_compaction || ShouldReduceMemory()) {
5275     factor = kMinHeapGrowingFactor;
5276   }
5277 
5278   if (FLAG_heap_growing_percent > 0) {
5279     factor = 1.0 + FLAG_heap_growing_percent / 100.0;
5280   }
5281 
5282   old_generation_allocation_limit_ =
5283       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5284 
5285   if (FLAG_trace_gc_verbose) {
5286     isolate_->PrintWithTimestamp(
5287         "Grow: old size: %" PRIuS " KB, new limit: %" PRIuS " KB (%.1f)\n",
5288         old_gen_size / KB, old_generation_allocation_limit_ / KB, factor);
5289   }
5290 }
5291 
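// Recomputes the limit with the current speeds and lowers
// old_generation_allocation_limit_ if the recomputed value is smaller; the
// limit is never raised here.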
5292 void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
5293                                               double gc_speed,
5294                                               double mutator_speed) {
5295   double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5296   size_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5297   if (limit < old_generation_allocation_limit_) {
5298     if (FLAG_trace_gc_verbose) {
5299       isolate_->PrintWithTimestamp(
5300           "Dampen: old size: %" PRIuS " KB, old limit: %" PRIuS
5301           " KB, "
5302           "new limit: %" PRIuS " KB (%.1f)\n",
5303           old_gen_size / KB, old_generation_allocation_limit_ / KB, limit / KB,
5304           factor);
5305     }
5306     old_generation_allocation_limit_ = limit;
5307   }
5308 }
5309 
5310 // This predicate is called when an old generation space cannot allocate from
5311 // the free list and is about to add a new page. Returning false will cause a
5312 // major GC. It happens when the old generation allocation limit is reached and
5313 // - either we need to optimize for memory usage,
5314 // - or the incremental marking is not in progress and we cannot start it.
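// For example, if incremental marking only needs finalization, the space is
// still allowed to expand unless the limit has been overshot by a large
// margin, so the ongoing cycle can finish without forcing a full GC.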
5315 bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
5316   if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
5317   // We reached the old generation allocation limit.
5318 
5319   if (ShouldOptimizeForMemoryUsage()) return false;
5320 
5321   if (incremental_marking()->NeedsFinalization()) {
5322     return !AllocationLimitOvershotByLargeMargin();
5323   }
5324 
5325   if (incremental_marking()->IsStopped() &&
5326       IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
5327     // We cannot start incremental marking.
5328     return false;
5329   }
5330   return true;
5331 }
5332 
5333 // This function returns either kNoLimit, kSoftLimit, or kHardLimit.
5334 // The kNoLimit means that either incremental marking is disabled or it is too
5335 // early to start incremental marking.
5336 // The kSoftLimit means that incremental marking should be started soon.
5337 // The kHardLimit means that incremental marking should be started immediately.
5338 Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
5339   if (!incremental_marking()->CanBeActivated() ||
5340       PromotedSpaceSizeOfObjects() <=
5341           IncrementalMarking::kActivationThreshold) {
5342     // Incremental marking is disabled or it is too early to start.
5343     return IncrementalMarkingLimit::kNoLimit;
5344   }
5345   if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
5346       HighMemoryPressure()) {
5347     // If there is high memory pressure or stress testing is enabled, then
5348     // start marking immediately.
5349     return IncrementalMarkingLimit::kHardLimit;
5350   }
5351   size_t old_generation_space_available = OldGenerationSpaceAvailable();
5352   if (old_generation_space_available > new_space_->Capacity()) {
5353     return IncrementalMarkingLimit::kNoLimit;
5354   }
5355   // We are close to the allocation limit.
5356   // Choose between the hard and the soft limits.
5357   if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
5358     return IncrementalMarkingLimit::kHardLimit;
5359   }
5360   return IncrementalMarkingLimit::kSoftLimit;
5361 }
5362 
5363 void Heap::EnableInlineAllocation() {
5364   if (!inline_allocation_disabled_) return;
5365   inline_allocation_disabled_ = false;
5366 
5367   // Update inline allocation limit for new space.
5368   new_space()->UpdateInlineAllocationLimit(0);
5369 }
5370 
5371 
5372 void Heap::DisableInlineAllocation() {
5373   if (inline_allocation_disabled_) return;
5374   inline_allocation_disabled_ = true;
5375 
5376   // Update inline allocation limit for new space.
5377   new_space()->UpdateInlineAllocationLimit(0);
5378 
5379   // Update inline allocation limit for old spaces.
5380   PagedSpaces spaces(this);
5381   for (PagedSpace* space = spaces.next(); space != NULL;
5382        space = spaces.next()) {
5383     space->EmptyAllocationInfo();
5384   }
5385 }
5386 
5387 
5388 V8_DECLARE_ONCE(initialize_gc_once);
5389 
5390 static void InitializeGCOnce() {
5391   Scavenger::Initialize();
5392   StaticScavengeVisitor::Initialize();
5393   MarkCompactCollector::Initialize();
5394 }
5395 
5396 
5397 bool Heap::SetUp() {
5398 #ifdef DEBUG
5399   allocation_timeout_ = FLAG_gc_interval;
5400 #endif
5401 
5402   // Initialize heap spaces and initial maps and objects. Whenever something
5403   // goes wrong, just return false. The caller should check the results and
5404   // call Heap::TearDown() to release allocated memory.
5405   //
5406   // If the heap is not yet configured (e.g. through the API), configure it.
5407   // Configuration is based on the flags new-space-size (really the semispace
5408   // size) and old-space-size if set or the initial values of semispace_size_
5409   // and old_generation_size_ otherwise.
5410   if (!configured_) {
5411     if (!ConfigureHeapDefault()) return false;
5412   }
5413 
5414   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5415 
5416   // Set up memory allocator.
5417   memory_allocator_ = new MemoryAllocator(isolate_);
5418   if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
5419                                 code_range_size_))
5420     return false;
5421 
5422   // Initialize store buffer.
5423   store_buffer_ = new StoreBuffer(this);
5424 
5425   // Initialize incremental marking.
5426   incremental_marking_ = new IncrementalMarking(this);
5427 
5428   for (int i = 0; i <= LAST_SPACE; i++) {
5429     space_[i] = nullptr;
5430   }
5431 
5432   space_[NEW_SPACE] = new_space_ = new NewSpace(this);
5433   if (!new_space_->SetUp(initial_semispace_size_, max_semi_space_size_)) {
5434     return false;
5435   }
5436   new_space_top_after_last_gc_ = new_space()->top();
5437 
5438   space_[OLD_SPACE] = old_space_ =
5439       new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
5440   if (!old_space_->SetUp()) return false;
5441 
5442   space_[CODE_SPACE] = code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
5443   if (!code_space_->SetUp()) return false;
5444 
5445   space_[MAP_SPACE] = map_space_ = new MapSpace(this, MAP_SPACE);
5446   if (!map_space_->SetUp()) return false;
5447 
5448   // The large object space may contain code or data.  We set the memory
5449   // to be non-executable here for safety, but this means we need to enable it
5450   // explicitly when allocating large code objects.
5451   space_[LO_SPACE] = lo_space_ = new LargeObjectSpace(this, LO_SPACE);
5452   if (!lo_space_->SetUp()) return false;
5453 
5454   // Set up the seed that is used to randomize the string hash function.
5455   DCHECK(hash_seed() == 0);
5456   if (FLAG_randomize_hashes) {
5457     if (FLAG_hash_seed == 0) {
5458       int rnd = isolate()->random_number_generator()->NextInt();
5459       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5460     } else {
5461       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5462     }
5463   }
5464 
5465   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5466        i++) {
5467     deferred_counters_[i] = 0;
5468   }
5469 
5470   tracer_ = new GCTracer(this);
5471   scavenge_collector_ = new Scavenger(this);
5472   mark_compact_collector_ = new MarkCompactCollector(this);
5473   gc_idle_time_handler_ = new GCIdleTimeHandler();
5474   memory_reducer_ = new MemoryReducer(this);
5475   if (V8_UNLIKELY(FLAG_gc_stats)) {
5476     live_object_stats_ = new ObjectStats(this);
5477     dead_object_stats_ = new ObjectStats(this);
5478   }
5479   scavenge_job_ = new ScavengeJob();
5480 
5481   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5482   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5483 
5484   store_buffer()->SetUp();
5485 
5486   mark_compact_collector()->SetUp();
5487 
5488   idle_scavenge_observer_ = new IdleScavengeObserver(
5489       *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
5490   new_space()->AddAllocationObserver(idle_scavenge_observer_);
5491 
5492   return true;
5493 }
5494 
5495 
5496 bool Heap::CreateHeapObjects() {
5497   // Create initial maps.
5498   if (!CreateInitialMaps()) return false;
5499   CreateApiObjects();
5500 
5501   // Create initial objects
5502   CreateInitialObjects();
5503   CHECK_EQ(0u, gc_count_);
5504 
5505   set_native_contexts_list(undefined_value());
5506   set_allocation_sites_list(undefined_value());
5507 
5508   return true;
5509 }
5510 
5511 
5512 void Heap::SetStackLimits() {
5513   DCHECK(isolate_ != NULL);
5514   DCHECK(isolate_ == isolate());
5515   // On 64 bit machines, pointers are generally out of range of Smis.  We write
5516   // something that looks like an out of range Smi to the GC.
5517 
5518   // Set up the special root array entries containing the stack limits.
5519   // These are actually addresses, but the tag makes the GC ignore them.
5520   roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5521       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5522   roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5523       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5524 }
5525 
5526 void Heap::ClearStackLimits() {
5527   roots_[kStackLimitRootIndex] = Smi::kZero;
5528   roots_[kRealStackLimitRootIndex] = Smi::kZero;
5529 }
5530 
5531 void Heap::PrintAlloctionsHash() {
5532   uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5533   PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5534 }
5535 
5536 
5537 void Heap::NotifyDeserializationComplete() {
5538   DCHECK_EQ(0, gc_count());
5539   PagedSpaces spaces(this);
5540   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5541     if (isolate()->snapshot_available()) s->ShrinkImmortalImmovablePages();
5542 #ifdef DEBUG
5543     // All pages right after bootstrapping must be marked as never-evacuate.
5544     for (Page* p : *s) {
5545       CHECK(p->NeverEvacuate());
5546     }
5547 #endif  // DEBUG
5548   }
5549 
5550   deserialization_complete_ = true;
5551 }
5552 
5553 void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
5554   DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
5555   embedder_heap_tracer_ = tracer;
5556 }
5557 
5558 void Heap::RegisterWrappersWithEmbedderHeapTracer() {
5559   DCHECK(UsingEmbedderHeapTracer());
5560   if (wrappers_to_trace_.empty()) {
5561     return;
5562   }
5563   embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
5564   wrappers_to_trace_.clear();
5565 }
5566 
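// A JS object is reported as a wrapper candidate when it carries at least two
// internal fields and both are set to something other than undefined; field 0
// must hold an aligned (embedder) pointer, as checked by the DCHECK below.
// The collected pairs are later handed to the embedder's tracer via
// RegisterWrappersWithEmbedderHeapTracer().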
5567 void Heap::TracePossibleWrapper(JSObject* js_object) {
5568   DCHECK(js_object->WasConstructedFromApiFunction());
5569   if (js_object->GetInternalFieldCount() >= 2 &&
5570       js_object->GetInternalField(0) &&
5571       js_object->GetInternalField(0) != undefined_value() &&
5572       js_object->GetInternalField(1) != undefined_value()) {
5573     DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
5574     wrappers_to_trace_.push_back(std::pair<void*, void*>(
5575         reinterpret_cast<void*>(js_object->GetInternalField(0)),
5576         reinterpret_cast<void*>(js_object->GetInternalField(1))));
5577   }
5578 }
5579 
5580 bool Heap::RequiresImmediateWrapperProcessing() {
5581   const size_t kTooManyWrappers = 16000;
5582   return wrappers_to_trace_.size() > kTooManyWrappers;
5583 }
5584 
5585 void Heap::RegisterExternallyReferencedObject(Object** object) {
5586   HeapObject* heap_object = HeapObject::cast(*object);
5587   DCHECK(Contains(heap_object));
5588   if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
5589     IncrementalMarking::MarkGrey(this, heap_object);
5590   } else {
5591     DCHECK(mark_compact_collector()->in_use());
5592     MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
5593     mark_compact_collector()->MarkObject(heap_object, mark_bit);
5594   }
5595 }
5596 
5597 void Heap::TearDown() {
5598 #ifdef VERIFY_HEAP
5599   if (FLAG_verify_heap) {
5600     Verify();
5601   }
5602 #endif
5603 
5604   UpdateMaximumCommitted();
5605 
5606   if (FLAG_print_max_heap_committed) {
5607     PrintF("\n");
5608     PrintF("maximum_committed_by_heap=%" PRIuS " ", MaximumCommittedMemory());
5609     PrintF("maximum_committed_by_new_space=%" PRIuS " ",
5610            new_space_->MaximumCommittedMemory());
5611     PrintF("maximum_committed_by_old_space=%" PRIuS " ",
5612            old_space_->MaximumCommittedMemory());
5613     PrintF("maximum_committed_by_code_space=%" PRIuS " ",
5614            code_space_->MaximumCommittedMemory());
5615     PrintF("maximum_committed_by_map_space=%" PRIuS " ",
5616            map_space_->MaximumCommittedMemory());
5617     PrintF("maximum_committed_by_lo_space=%" PRIuS " ",
5618            lo_space_->MaximumCommittedMemory());
5619     PrintF("\n\n");
5620   }
5621 
5622   if (FLAG_verify_predictable) {
5623     PrintAlloctionsHash();
5624   }
5625 
5626   new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
5627   delete idle_scavenge_observer_;
5628   idle_scavenge_observer_ = nullptr;
5629 
5630   delete scavenge_collector_;
5631   scavenge_collector_ = nullptr;
5632 
5633   if (mark_compact_collector_ != nullptr) {
5634     mark_compact_collector_->TearDown();
5635     delete mark_compact_collector_;
5636     mark_compact_collector_ = nullptr;
5637   }
5638 
5639   delete incremental_marking_;
5640   incremental_marking_ = nullptr;
5641 
5642   delete gc_idle_time_handler_;
5643   gc_idle_time_handler_ = nullptr;
5644 
5645   if (memory_reducer_ != nullptr) {
5646     memory_reducer_->TearDown();
5647     delete memory_reducer_;
5648     memory_reducer_ = nullptr;
5649   }
5650 
5651   if (live_object_stats_ != nullptr) {
5652     delete live_object_stats_;
5653     live_object_stats_ = nullptr;
5654   }
5655 
5656   if (dead_object_stats_ != nullptr) {
5657     delete dead_object_stats_;
5658     dead_object_stats_ = nullptr;
5659   }
5660 
5661   delete scavenge_job_;
5662   scavenge_job_ = nullptr;
5663 
5664   isolate_->global_handles()->TearDown();
5665 
5666   external_string_table_.TearDown();
5667 
5668   delete tracer_;
5669   tracer_ = nullptr;
5670 
5671   new_space_->TearDown();
5672   delete new_space_;
5673   new_space_ = nullptr;
5674 
5675   if (old_space_ != NULL) {
5676     delete old_space_;
5677     old_space_ = NULL;
5678   }
5679 
5680   if (code_space_ != NULL) {
5681     delete code_space_;
5682     code_space_ = NULL;
5683   }
5684 
5685   if (map_space_ != NULL) {
5686     delete map_space_;
5687     map_space_ = NULL;
5688   }
5689 
5690   if (lo_space_ != NULL) {
5691     lo_space_->TearDown();
5692     delete lo_space_;
5693     lo_space_ = NULL;
5694   }
5695 
5696   store_buffer()->TearDown();
5697 
5698   memory_allocator()->TearDown();
5699 
5700   StrongRootsList* next = NULL;
5701   for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5702     next = list->next;
5703     delete list;
5704   }
5705   strong_roots_list_ = NULL;
5706 
5707   delete store_buffer_;
5708   store_buffer_ = nullptr;
5709 
5710   delete memory_allocator_;
5711   memory_allocator_ = nullptr;
5712 }
5713 
5714 
5715 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
5716                                  GCType gc_type, bool pass_isolate) {
5717   DCHECK(callback != NULL);
5718   GCCallbackPair pair(callback, gc_type, pass_isolate);
5719   DCHECK(!gc_prologue_callbacks_.Contains(pair));
5720   return gc_prologue_callbacks_.Add(pair);
5721 }
5722 
5723 
5724 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
5725   DCHECK(callback != NULL);
5726   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5727     if (gc_prologue_callbacks_[i].callback == callback) {
5728       gc_prologue_callbacks_.Remove(i);
5729       return;
5730     }
5731   }
5732   UNREACHABLE();
5733 }
5734 
5735 
5736 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
5737                                  GCType gc_type, bool pass_isolate) {
5738   DCHECK(callback != NULL);
5739   GCCallbackPair pair(callback, gc_type, pass_isolate);
5740   DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5741   return gc_epilogue_callbacks_.Add(pair);
5742 }
5743 
5744 
5745 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
5746   DCHECK(callback != NULL);
5747   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5748     if (gc_epilogue_callbacks_[i].callback == callback) {
5749       gc_epilogue_callbacks_.Remove(i);
5750       return;
5751     }
5752   }
5753   UNREACHABLE();
5754 }
5755 
5756 // TODO(ishell): Find a better place for this.
5757 void Heap::AddWeakNewSpaceObjectToCodeDependency(Handle<HeapObject> obj,
5758                                                  Handle<WeakCell> code) {
5759   DCHECK(InNewSpace(*obj));
5760   DCHECK(!InNewSpace(*code));
5761   Handle<ArrayList> list(weak_new_space_object_to_code_list(), isolate());
5762   list = ArrayList::Add(list, isolate()->factory()->NewWeakCell(obj), code);
5763   if (*list != weak_new_space_object_to_code_list()) {
5764     set_weak_new_space_object_to_code_list(*list);
5765   }
5766 }
5767 
5768 // TODO(ishell): Find a better place for this.
5769 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5770                                          Handle<DependentCode> dep) {
5771   DCHECK(!InNewSpace(*obj));
5772   DCHECK(!InNewSpace(*dep));
5773   Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5774   table = WeakHashTable::Put(table, obj, dep);
5775   if (*table != weak_object_to_code_table())
5776     set_weak_object_to_code_table(*table);
5777   DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5778 }
5779 
5780 
5781 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5782   Object* dep = weak_object_to_code_table()->Lookup(obj);
5783   if (dep->IsDependentCode()) return DependentCode::cast(dep);
5784   return DependentCode::cast(empty_fixed_array());
5785 }
5786 
5787 namespace {
5788 void CompactWeakFixedArray(Object* object) {
5789   if (object->IsWeakFixedArray()) {
5790     WeakFixedArray* array = WeakFixedArray::cast(object);
5791     array->Compact<WeakFixedArray::NullCallback>();
5792   }
5793 }
5794 }  // anonymous namespace
5795 
5796 void Heap::CompactWeakFixedArrays() {
5797   // Find known WeakFixedArrays and compact them.
5798   HeapIterator iterator(this);
5799   for (HeapObject* o = iterator.next(); o != NULL; o = iterator.next()) {
5800     if (o->IsPrototypeInfo()) {
5801       Object* prototype_users = PrototypeInfo::cast(o)->prototype_users();
5802       if (prototype_users->IsWeakFixedArray()) {
5803         WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
5804         array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
5805       }
5806     } else if (o->IsScript()) {
5807       CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
5808     }
5809   }
5810   CompactWeakFixedArray(noscript_shared_function_infos());
5811   CompactWeakFixedArray(script_list());
5812   CompactWeakFixedArray(weak_stack_trace_list());
5813 }
5814 
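// Retained maps are stored in pairs: a WeakCell pointing to the map followed
// by a Smi "age" that starts at FLAG_retain_maps_for_n_gc. When the backing
// ArrayList is full it is compacted first so cleared cells do not accumulate.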
5815 void Heap::AddRetainedMap(Handle<Map> map) {
5816   Handle<WeakCell> cell = Map::WeakCellForMap(map);
5817   Handle<ArrayList> array(retained_maps(), isolate());
5818   if (array->IsFull()) {
5819     CompactRetainedMaps(*array);
5820   }
5821   array = ArrayList::Add(
5822       array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
5823       ArrayList::kReloadLengthAfterAllocation);
5824   if (*array != retained_maps()) {
5825     set_retained_maps(*array);
5826   }
5827 }
5828 
5829 
5830 void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
5831   DCHECK_EQ(retained_maps, this->retained_maps());
5832   int length = retained_maps->Length();
5833   int new_length = 0;
5834   int new_number_of_disposed_maps = 0;
5835   // This loop compacts the array by removing cleared weak cells.
5836   for (int i = 0; i < length; i += 2) {
5837     DCHECK(retained_maps->Get(i)->IsWeakCell());
5838     WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
5839     Object* age = retained_maps->Get(i + 1);
5840     if (cell->cleared()) continue;
5841     if (i != new_length) {
5842       retained_maps->Set(new_length, cell);
5843       retained_maps->Set(new_length + 1, age);
5844     }
5845     if (i < number_of_disposed_maps_) {
5846       new_number_of_disposed_maps += 2;
5847     }
5848     new_length += 2;
5849   }
5850   number_of_disposed_maps_ = new_number_of_disposed_maps;
5851   Object* undefined = undefined_value();
5852   for (int i = new_length; i < length; i++) {
5853     retained_maps->Clear(i, undefined);
5854   }
5855   if (new_length != length) retained_maps->SetLength(new_length);
5856 }
5857 
5858 void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
5859   v8::internal::V8::FatalProcessOutOfMemory(location, is_heap_oom);
5860 }
5861 
5862 #ifdef DEBUG
5863 
5864 class PrintHandleVisitor : public ObjectVisitor {
5865  public:
5866   void VisitPointers(Object** start, Object** end) override {
5867     for (Object** p = start; p < end; p++)
5868       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
5869              reinterpret_cast<void*>(*p));
5870   }
5871 };
5872 
5873 
5874 void Heap::PrintHandles() {
5875   PrintF("Handles:\n");
5876   PrintHandleVisitor v;
5877   isolate_->handle_scope_implementer()->Iterate(&v);
5878 }
5879 
5880 #endif
5881 
5882 class CheckHandleCountVisitor : public ObjectVisitor {
5883  public:
5884   CheckHandleCountVisitor() : handle_count_(0) {}
5885   ~CheckHandleCountVisitor() override {
5886     CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
5887   }
5888   void VisitPointers(Object** start, Object** end) override {
5889     handle_count_ += end - start;
5890   }
5891 
5892  private:
5893   ptrdiff_t handle_count_;
5894 };
5895 
5896 
5897 void Heap::CheckHandleCount() {
5898   CheckHandleCountVisitor v;
5899   isolate_->handle_scope_implementer()->Iterate(&v);
5900 }
5901 
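// Slots are only recorded for objects outside the new space (the DCHECKs
// below expect OLD_SPACE pages). Old-to-new entries live in the store buffer
// and old-to-old entries in the remembered set, so clearing a slot or a range
// of slots purges both.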
5902 void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
5903   if (!InNewSpace(object)) {
5904     Address slot_addr = reinterpret_cast<Address>(slot);
5905     Page* page = Page::FromAddress(slot_addr);
5906     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5907     store_buffer()->DeleteEntry(slot_addr);
5908     RememberedSet<OLD_TO_OLD>::Remove(page, slot_addr);
5909   }
5910 }
5911 
5912 void Heap::ClearRecordedSlotRange(Address start, Address end) {
5913   Page* page = Page::FromAddress(start);
5914   if (!page->InNewSpace()) {
5915     DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
5916     store_buffer()->DeleteEntry(start, end);
5917     RememberedSet<OLD_TO_OLD>::RemoveRange(page, start, end,
5918                                            SlotSet::FREE_EMPTY_BUCKETS);
5919   }
5920 }
5921 
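// Records a write of a new space value into code. The slot address is the pc
// of the relocation entry or, for targets that live in a constant pool, the
// address of the pool entry; it is added to the OLD_TO_NEW typed remembered
// set of the page hosting the Code object.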
5922 void Heap::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
5923                                    Object* value) {
5924   DCHECK(InNewSpace(value));
5925   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
5926   RelocInfo::Mode rmode = rinfo->rmode();
5927   Address addr = rinfo->pc();
5928   SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
5929   if (rinfo->IsInConstantPool()) {
5930     addr = rinfo->constant_pool_entry_address();
5931     if (RelocInfo::IsCodeTarget(rmode)) {
5932       slot_type = CODE_ENTRY_SLOT;
5933     } else {
5934       DCHECK(RelocInfo::IsEmbeddedObject(rmode));
5935       slot_type = OBJECT_SLOT;
5936     }
5937   }
5938   RememberedSet<OLD_TO_NEW>::InsertTyped(
5939       source_page, reinterpret_cast<Address>(host), slot_type, addr);
5940 }
5941 
5942 void Heap::RecordWritesIntoCode(Code* code) {
5943   for (RelocIterator it(code, RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT));
5944        !it.done(); it.next()) {
5945     RecordWriteIntoCode(code, it.rinfo(), it.rinfo()->target_object());
5946   }
5947 }
5948 
5949 Space* AllSpaces::next() {
5950   switch (counter_++) {
5951     case NEW_SPACE:
5952       return heap_->new_space();
5953     case OLD_SPACE:
5954       return heap_->old_space();
5955     case CODE_SPACE:
5956       return heap_->code_space();
5957     case MAP_SPACE:
5958       return heap_->map_space();
5959     case LO_SPACE:
5960       return heap_->lo_space();
5961     default:
5962       return NULL;
5963   }
5964 }
5965 
5966 PagedSpace* PagedSpaces::next() {
5967   switch (counter_++) {
5968     case OLD_SPACE:
5969       return heap_->old_space();
5970     case CODE_SPACE:
5971       return heap_->code_space();
5972     case MAP_SPACE:
5973       return heap_->map_space();
5974     default:
5975       return NULL;
5976   }
5977 }
5978 
5979 
5980 OldSpace* OldSpaces::next() {
5981   switch (counter_++) {
5982     case OLD_SPACE:
5983       return heap_->old_space();
5984     case CODE_SPACE:
5985       return heap_->code_space();
5986     default:
5987       return NULL;
5988   }
5989 }
5990 
5991 SpaceIterator::SpaceIterator(Heap* heap)
5992     : heap_(heap), current_space_(FIRST_SPACE - 1) {}
5993 
5994 SpaceIterator::~SpaceIterator() {
5995 }
5996 
5997 
5998 bool SpaceIterator::has_next() {
5999   // Iterate until no more spaces.
6000   return current_space_ != LAST_SPACE;
6001 }
6002 
6003 Space* SpaceIterator::next() {
6004   DCHECK(has_next());
6005   return heap_->space(++current_space_);
6006 }
6007 
6008 
6009 class HeapObjectsFilter {
6010  public:
6011   virtual ~HeapObjectsFilter() {}
6012   virtual bool SkipObject(HeapObject* object) = 0;
6013 };
6014 
6015 
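// Filters out objects that are not reachable from the roots. The constructor
// marks every reachable object using the regular mark bits, SkipObject()
// then reports white (unmarked) objects and fillers as unreachable, and the
// destructor clears the mark bits again.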
6016 class UnreachableObjectsFilter : public HeapObjectsFilter {
6017  public:
6018   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6019     MarkReachableObjects();
6020   }
6021 
6022   ~UnreachableObjectsFilter() {
6023     heap_->mark_compact_collector()->ClearMarkbits();
6024   }
6025 
6026   bool SkipObject(HeapObject* object) {
6027     if (object->IsFiller()) return true;
6028     MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
6029     return Marking::IsWhite(mark_bit);
6030   }
6031 
6032  private:
6033   class MarkingVisitor : public ObjectVisitor {
6034    public:
6035     MarkingVisitor() : marking_stack_(10) {}
6036 
6037     void VisitPointers(Object** start, Object** end) override {
6038       for (Object** p = start; p < end; p++) {
6039         if (!(*p)->IsHeapObject()) continue;
6040         HeapObject* obj = HeapObject::cast(*p);
6041         MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
6042         if (Marking::IsWhite(mark_bit)) {
6043           Marking::WhiteToBlack(mark_bit);
6044           marking_stack_.Add(obj);
6045         }
6046       }
6047     }
6048 
6049     void TransitiveClosure() {
6050       while (!marking_stack_.is_empty()) {
6051         HeapObject* obj = marking_stack_.RemoveLast();
6052         obj->Iterate(this);
6053       }
6054     }
6055 
6056    private:
6057     List<HeapObject*> marking_stack_;
6058   };
6059 
6060   void MarkReachableObjects() {
6061     MarkingVisitor visitor;
6062     heap_->IterateRoots(&visitor, VISIT_ALL);
6063     visitor.TransitiveClosure();
6064   }
6065 
6066   Heap* heap_;
6067   DisallowHeapAllocation no_allocation_;
6068 };
6069 
6070 
6071 HeapIterator::HeapIterator(Heap* heap,
6072                            HeapIterator::HeapObjectsFiltering filtering)
6073     : make_heap_iterable_helper_(heap),
6074       no_heap_allocation_(),
6075       heap_(heap),
6076       filtering_(filtering),
6077       filter_(nullptr),
6078       space_iterator_(nullptr),
6079       object_iterator_(nullptr) {
6080   heap_->heap_iterator_start();
6081   // Start the iteration.
6082   space_iterator_ = new SpaceIterator(heap_);
6083   switch (filtering_) {
6084     case kFilterUnreachable:
6085       filter_ = new UnreachableObjectsFilter(heap_);
6086       break;
6087     default:
6088       break;
6089   }
6090   object_iterator_ = space_iterator_->next()->GetObjectIterator();
6091 }
6092 
6093 
6094 HeapIterator::~HeapIterator() {
6095   heap_->heap_iterator_end();
6096 #ifdef DEBUG
6097   // Assert that in filtering mode we have iterated through all
6098   // objects. Otherwise, the heap will be left in an inconsistent state.
6099   if (filtering_ != kNoFiltering) {
6100     DCHECK(object_iterator_ == nullptr);
6101   }
6102 #endif
6103   delete space_iterator_;
6104   delete filter_;
6105 }
6106 
6107 
6108 HeapObject* HeapIterator::next() {
6109   if (filter_ == nullptr) return NextObject();
6110 
6111   HeapObject* obj = NextObject();
6112   while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
6113   return obj;
6114 }
6115 
6116 
6117 HeapObject* HeapIterator::NextObject() {
6118   // No iterator means we are done.
6119   if (object_iterator_.get() == nullptr) return nullptr;
6120 
6121   if (HeapObject* obj = object_iterator_.get()->Next()) {
6122     // If the current iterator has more objects we are fine.
6123     return obj;
6124   } else {
6125     // Go through the spaces looking for one that has objects.
6126     while (space_iterator_->has_next()) {
6127       object_iterator_ = space_iterator_->next()->GetObjectIterator();
6128       if (HeapObject* obj = object_iterator_.get()->Next()) {
6129         return obj;
6130       }
6131     }
6132   }
6133   // Done with the last space.
6134   object_iterator_.reset(nullptr);
6135   return nullptr;
6136 }
6137 
6138 
6139 #ifdef DEBUG
6140 
6141 Object* const PathTracer::kAnyGlobalObject = NULL;
6142 
6143 class PathTracer::MarkVisitor : public ObjectVisitor {
6144  public:
6145   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6146 
6147   void VisitPointers(Object** start, Object** end) override {
6148     // Scan all HeapObject pointers in [start, end)
6149     for (Object** p = start; !tracer_->found() && (p < end); p++) {
6150       if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
6151     }
6152   }
6153 
6154  private:
6155   PathTracer* tracer_;
6156 };
6157 
6158 
6159 class PathTracer::UnmarkVisitor : public ObjectVisitor {
6160  public:
6161   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6162 
6163   void VisitPointers(Object** start, Object** end) override {
6164     // Scan all HeapObject pointers in [start, end)
6165     for (Object** p = start; p < end; p++) {
6166       if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
6167     }
6168   }
6169 
6170  private:
6171   PathTracer* tracer_;
6172 };
6173 
6174 
6175 void PathTracer::VisitPointers(Object** start, Object** end) {
6176   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6177   // Visit all HeapObject pointers in [start, end)
6178   for (Object** p = start; !done && (p < end); p++) {
6179     if ((*p)->IsHeapObject()) {
6180       TracePathFrom(p);
6181       done = ((what_to_find_ == FIND_FIRST) && found_target_);
6182     }
6183   }
6184 }
6185 
6186 
6187 void PathTracer::Reset() {
6188   found_target_ = false;
6189   object_stack_.Clear();
6190 }
6191 
6192 
6193 void PathTracer::TracePathFrom(Object** root) {
6194   DCHECK((search_target_ == kAnyGlobalObject) ||
6195          search_target_->IsHeapObject());
6196   found_target_in_trace_ = false;
6197   Reset();
6198 
6199   MarkVisitor mark_visitor(this);
6200   MarkRecursively(root, &mark_visitor);
6201 
6202   UnmarkVisitor unmark_visitor(this);
6203   UnmarkRecursively(root, &unmark_visitor);
6204 
6205   ProcessResults();
6206 }
6207 
6208 
6209 static bool SafeIsNativeContext(HeapObject* obj) {
6210   return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
6211 }
6212 
6213 
6214 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6215   if (!(*p)->IsHeapObject()) return;
6216 
6217   HeapObject* obj = HeapObject::cast(*p);
6218 
6219   MapWord map_word = obj->map_word();
6220   if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
6221 
6222   if (found_target_in_trace_) return;  // stop if target found
6223   object_stack_.Add(obj);
6224   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6225       (obj == search_target_)) {
6226     found_target_in_trace_ = true;
6227     found_target_ = true;
6228     return;
6229   }
6230 
6231   bool is_native_context = SafeIsNativeContext(obj);
6232 
6233   // not visited yet
6234   Map* map = Map::cast(map_word.ToMap());
6235 
6236   MapWord marked_map_word =
6237       MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
6238   obj->set_map_word(marked_map_word);
6239 
6240   // Scan the object body.
6241   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6242     // This is specialized to scan Contexts properly.
6243     Object** start =
6244         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
6245     Object** end =
6246         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
6247                                    Context::FIRST_WEAK_SLOT * kPointerSize);
6248     mark_visitor->VisitPointers(start, end);
6249   } else {
6250     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6251   }
6252 
6253   // Scan the map after the body because the body is a lot more interesting
6254   // when doing leak detection.
6255   MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6256 
6257   if (!found_target_in_trace_) {  // don't pop if found the target
6258     object_stack_.RemoveLast();
6259   }
6260 }
6261 
6262 
6263 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6264   if (!(*p)->IsHeapObject()) return;
6265 
6266   HeapObject* obj = HeapObject::cast(*p);
6267 
6268   MapWord map_word = obj->map_word();
6269   if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
6270 
6271   MapWord unmarked_map_word =
6272       MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
6273   obj->set_map_word(unmarked_map_word);
6274 
6275   Map* map = Map::cast(unmarked_map_word.ToMap());
6276 
6277   UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6278 
6279   obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6280 }
6281 
6282 
6283 void PathTracer::ProcessResults() {
6284   if (found_target_) {
6285     OFStream os(stdout);
6286     os << "=====================================\n"
6287        << "====        Path to object       ====\n"
6288        << "=====================================\n\n";
6289 
6290     DCHECK(!object_stack_.is_empty());
6291     for (int i = 0; i < object_stack_.length(); i++) {
6292       if (i > 0) os << "\n     |\n     |\n     V\n\n";
6293       object_stack_[i]->Print(os);
6294     }
6295     os << "=====================================\n";
6296   }
6297 }
6298 
6299 
6300 // Triggers a depth-first traversal of reachable objects from one
6301 // given root object and finds a path to a specific heap object and
6302 // prints it.
6303 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6304   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6305   tracer.VisitPointer(&root);
6306 }
6307 
6308 
6309 // Triggers a depth-first traversal of reachable objects from roots
6310 // and finds a path to a specific heap object and prints it.
6311 void Heap::TracePathToObject(Object* target) {
6312   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6313   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6314 }
6315 
6316 
6317 // Triggers a depth-first traversal of reachable objects from roots
6318 // and finds a path to any global object and prints it. Useful for
6319 // determining the source for leaks of global objects.
6320 void Heap::TracePathToGlobal() {
6321   PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6322                     VISIT_ALL);
6323   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6324 }
6325 #endif
6326 
6327 void Heap::UpdateTotalGCTime(double duration) {
6328   if (FLAG_trace_gc_verbose) {
6329     total_gc_time_ms_ += duration;
6330   }
6331 }
6332 
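// Drops table entries that were replaced by the hole during GC and moves
// external strings that have been promoted out of the new space over to the
// old space list.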
6333 void Heap::ExternalStringTable::CleanUp() {
6334   int last = 0;
6335   Isolate* isolate = heap_->isolate();
6336   for (int i = 0; i < new_space_strings_.length(); ++i) {
6337     if (new_space_strings_[i]->IsTheHole(isolate)) {
6338       continue;
6339     }
6340     DCHECK(new_space_strings_[i]->IsExternalString());
6341     if (heap_->InNewSpace(new_space_strings_[i])) {
6342       new_space_strings_[last++] = new_space_strings_[i];
6343     } else {
6344       old_space_strings_.Add(new_space_strings_[i]);
6345     }
6346   }
6347   new_space_strings_.Rewind(last);
6348   new_space_strings_.Trim();
6349 
6350   last = 0;
6351   for (int i = 0; i < old_space_strings_.length(); ++i) {
6352     if (old_space_strings_[i]->IsTheHole(isolate)) {
6353       continue;
6354     }
6355     DCHECK(old_space_strings_[i]->IsExternalString());
6356     DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6357     old_space_strings_[last++] = old_space_strings_[i];
6358   }
6359   old_space_strings_.Rewind(last);
6360   old_space_strings_.Trim();
6361 #ifdef VERIFY_HEAP
6362   if (FLAG_verify_heap) {
6363     Verify();
6364   }
6365 #endif
6366 }
6367 
6368 void Heap::ExternalStringTable::TearDown() {
6369   for (int i = 0; i < new_space_strings_.length(); ++i) {
6370     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6371   }
6372   new_space_strings_.Free();
6373   for (int i = 0; i < old_space_strings_.length(); ++i) {
6374     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6375   }
6376   old_space_strings_.Free();
6377 }
6378 
6379 
6380 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6381   uintptr_t p = reinterpret_cast<uintptr_t>(page);
6382   // Tag the page pointer to make it findable in the dump file.
6383   if (compacted) {
6384     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
6385   } else {
6386     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
6387   }
6388   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6389       reinterpret_cast<Address>(p);
6390   remembered_unmapped_pages_index_++;
6391   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6392 }
6393 
6394 
6395 void Heap::RegisterStrongRoots(Object** start, Object** end) {
6396   StrongRootsList* list = new StrongRootsList();
6397   list->next = strong_roots_list_;
6398   list->start = start;
6399   list->end = end;
6400   strong_roots_list_ = list;
6401 }
6402 
6403 
6404 void Heap::UnregisterStrongRoots(Object** start) {
6405   StrongRootsList* prev = NULL;
6406   StrongRootsList* list = strong_roots_list_;
6407   while (list != nullptr) {
6408     StrongRootsList* next = list->next;
6409     if (list->start == start) {
6410       if (prev) {
6411         prev->next = next;
6412       } else {
6413         strong_roots_list_ = next;
6414       }
6415       delete list;
6416     } else {
6417       prev = list;
6418     }
6419     list = next;
6420   }
6421 }
6422 
6423 
6424 size_t Heap::NumberOfTrackedHeapObjectTypes() {
6425   return ObjectStats::OBJECT_STATS_COUNT;
6426 }
6427 
6428 
6429 size_t Heap::ObjectCountAtLastGC(size_t index) {
6430   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6431     return 0;
6432   return live_object_stats_->object_count_last_gc(index);
6433 }
6434 
6435 
6436 size_t Heap::ObjectSizeAtLastGC(size_t index) {
6437   if (live_object_stats_ == nullptr || index >= ObjectStats::OBJECT_STATS_COUNT)
6438     return 0;
6439   return live_object_stats_->object_size_last_gc(index);
6440 }
6441 
6442 
6443 bool Heap::GetObjectTypeName(size_t index, const char** object_type,
6444                              const char** object_sub_type) {
6445   if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
6446 
6447   switch (static_cast<int>(index)) {
6448 #define COMPARE_AND_RETURN_NAME(name) \
6449   case name:                          \
6450     *object_type = #name;             \
6451     *object_sub_type = "";            \
6452     return true;
6453     INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6454 #undef COMPARE_AND_RETURN_NAME
6455 #define COMPARE_AND_RETURN_NAME(name)                      \
6456   case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
6457     *object_type = "CODE_TYPE";                            \
6458     *object_sub_type = "CODE_KIND/" #name;                 \
6459     return true;
6460     CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
6461 #undef COMPARE_AND_RETURN_NAME
6462 #define COMPARE_AND_RETURN_NAME(name)                  \
6463   case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
6464     *object_type = "FIXED_ARRAY_TYPE";                 \
6465     *object_sub_type = #name;                          \
6466     return true;
6467     FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
6468 #undef COMPARE_AND_RETURN_NAME
6469 #define COMPARE_AND_RETURN_NAME(name)                                  \
6470   case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
6471       Code::kFirstCodeAge:                                             \
6472     *object_type = "CODE_TYPE";                                        \
6473     *object_sub_type = "CODE_AGE/" #name;                              \
6474     return true;
6475     CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
6476 #undef COMPARE_AND_RETURN_NAME
6477   }
6478   return false;
6479 }
6480 
6481 
6482 // static
6483 int Heap::GetStaticVisitorIdForMap(Map* map) {
6484   return StaticVisitorBase::GetVisitorId(map);
6485 }
6486 
6487 }  // namespace internal
6488 }  // namespace v8
6489