// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/heap.h"

#include "src/accessors.h"
#include "src/api.h"
#include "src/ast/scopeinfo.h"
#include "src/base/bits.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/scavenge-job.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
#include "src/interpreter/interpreter.h"
#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/runtime-profiler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
#include "src/type-feedback-vector.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"

namespace v8 {
namespace internal {


struct Heap::StrongRootsList {
  Object** start;
  Object** end;
  StrongRootsList* next;
};

class IdleScavengeObserver : public InlineAllocationObserver {
 public:
  IdleScavengeObserver(Heap& heap, intptr_t step_size)
      : InlineAllocationObserver(step_size), heap_(heap) {}

  void Step(int bytes_allocated, Address, size_t) override {
    heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated);
  }

 private:
  Heap& heap_;
};
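
// A sketch of how this observer is expected to be wired up (the registration
// below is an assumption based on the InlineAllocationObserver interface; it
// is not shown in this excerpt). During heap setup, something along these
// lines registers the observer on new space:
//
//   idle_scavenge_observer_ = new IdleScavengeObserver(
//       *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
//   new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
//
// After roughly step_size bytes of inline new-space allocation, Step() fires
// and may schedule an idle-time scavenge task via scavenge_job_.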


Heap::Heap()
    : amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      isolate_(NULL),
      code_range_size_(0),
      // semispace_size_ should be a power of 2 and old_generation_size_
      // should be a multiple of Page::kPageSize. The (kPointerSize / 4)
      // factor scales the defaults by pointer width: 8 MB semispaces on
      // 32-bit targets, 16 MB on 64-bit targets.
      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
      initial_semispace_size_(Page::kPageSize),
      target_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      initial_old_generation_size_(max_old_generation_size_ /
                                   kInitalOldGenerationLimitFactor),
      old_generation_size_configured_(false),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
      // Variables set based on semispace_size_ and old_generation_size_ in
      // ConfigureHeap.
      // Will be 4 * reserved_semispace_size_ to ensure that young
      // generation can be aligned to its size.
      maximum_committed_(0),
      survived_since_last_expansion_(0),
      survived_last_scavenge_(0),
      always_allocate_scope_count_(0),
      contexts_disposed_(0),
      number_of_disposed_maps_(0),
      global_ic_age_(0),
      scan_on_scavenge_pages_(0),
      new_space_(this),
      old_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      allocations_count_(0),
      raw_allocations_hash_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
#ifdef DEBUG
      allocation_timeout_(0),
#endif  // DEBUG
      old_generation_allocation_limit_(initial_old_generation_size_),
      old_gen_exhausted_(false),
      optimize_for_memory_usage_(false),
      inline_allocation_disabled_(false),
      store_buffer_rebuilder_(store_buffer()),
      total_regexp_code_generated_(0),
      tracer_(nullptr),
      high_survival_rate_period_length_(0),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
      previous_semi_space_copied_object_size_(0),
      semi_space_copied_rate_(0),
      nodes_died_in_new_space_(0),
      nodes_copied_in_new_space_(0),
      nodes_promoted_(0),
      maximum_size_scavenges_(0),
      max_gc_pause_(0.0),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      marking_time_(0.0),
      sweeping_time_(0.0),
      last_idle_notification_time_(0.0),
      last_gc_time_(0.0),
      scavenge_collector_(nullptr),
      mark_compact_collector_(nullptr),
      store_buffer_(this),
      incremental_marking_(nullptr),
      gc_idle_time_handler_(nullptr),
      memory_reducer_(nullptr),
      object_stats_(nullptr),
      scavenge_job_(nullptr),
      idle_scavenge_observer_(nullptr),
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_(0),
      old_generation_size_at_last_gc_(0),
      gcs_since_last_deopt_(0),
      global_pretenuring_feedback_(nullptr),
      ring_buffer_full_(false),
      ring_buffer_end_(0),
      promotion_queue_(this),
      configured_(false),
      current_gc_flags_(Heap::kNoGCFlags),
      current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
      external_string_table_(this),
      chunks_queued_for_free_(NULL),
      concurrent_unmapping_tasks_active_(0),
      pending_unmapping_tasks_semaphore_(0),
      gc_callbacks_depth_(0),
      deserialization_complete_(false),
      strong_roots_list_(NULL),
      array_buffer_tracker_(NULL),
      heap_iterator_depth_(0),
      force_oom_(false) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  DCHECK((max_old_generation_size_ & (Page::kPageSize - 1)) == 0);

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  set_native_contexts_list(NULL);
  set_allocation_sites_list(Smi::FromInt(0));
  set_encountered_weak_collections(Smi::FromInt(0));
  set_encountered_weak_cells(Smi::FromInt(0));
  set_encountered_transition_arrays(Smi::FromInt(0));
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() + old_space_->Capacity() +
         code_space_->Capacity() + map_space_->Capacity();
}


intptr_t Heap::CommittedOldGenerationMemory() {
  if (!HasBeenSetUp()) return 0;

  return old_space_->CommittedMemory() + code_space_->CommittedMemory() +
         map_space_->CommittedMemory() + lo_space_->Size();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
         old_space_->CommittedPhysicalMemory() +
         code_space_->CommittedPhysicalMemory() +
         map_space_->CommittedPhysicalMemory() +
         lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


void Heap::UpdateMaximumCommitted() {
  if (!HasBeenSetUp()) return;

  intptr_t current_committed_memory = CommittedMemory();
  if (current_committed_memory > maximum_committed_) {
    maximum_committed_ = current_committed_memory;
  }
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Available();
  }
  return total;
}


bool Heap::HasBeenSetUp() {
  return old_space_ != NULL && code_space_ != NULL && map_space_ != NULL &&
         lo_space_ != NULL;
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()
        ->gc_compactor_caused_by_oldspace_exhaustion()
        ->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}
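
// In summary, the ladder above returns MARK_COMPACTOR when (a) the request
// targets an old-generation space, (b) flags force it, (c) the promotion
// limit has been reached, (d) old/large-object allocation has already
// failed, or (e) there may not be enough old-generation headroom for a
// scavenge to promote into; only otherwise does the cheaper SCAVENGER run.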


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// --log-gc is set. The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintIsolate(isolate_, "Memory allocator, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX "d KB\n",
               isolate_->memory_allocator()->Size() / KB,
               isolate_->memory_allocator()->Available() / KB);
  PrintIsolate(isolate_, "New space, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               new_space_.Size() / KB, new_space_.Available() / KB,
               new_space_.CommittedMemory() / KB);
  PrintIsolate(isolate_, "Old space, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               old_space_->SizeOfObjects() / KB, old_space_->Available() / KB,
               old_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Code space, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
               code_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Map space, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
               map_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "Large object space, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
               lo_space_->CommittedMemory() / KB);
  PrintIsolate(isolate_, "All spaces, used: %6" V8_PTR_PREFIX
                         "d KB"
                         ", available: %6" V8_PTR_PREFIX
                         "d KB"
                         ", committed: %6" V8_PTR_PREFIX "d KB\n",
               this->SizeOfObjects() / KB, this->Available() / KB,
               this->CommittedMemory() / KB);
  PrintIsolate(
      isolate_, "External memory reported: %6" V8_PTR_PREFIX "d KB\n",
      static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
  PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n",
               total_gc_time_ms_);
}
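
// Example of the output shape when --trace-gc-verbose is set (the values and
// the isolate prefix are illustrative; the exact prefix comes from
// PrintIsolate):
//
//   [isolate] Memory allocator, used:   2048 KB, available:  63488 KB
//   [isolate] New space, used:    512 KB, available:    512 KB, committed: ...
//   ...
//   [isolate] Total time spent in GC : 12.3 ms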


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
// As in the before-GC case, we use some complicated logic to ensure that
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
  for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
       ++i) {
    int count = deferred_counters_[i];
    deferred_counters_[i] = 0;
    while (count > 0) {
      count--;
      isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i));
    }
  }
}


void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) {
  deferred_counters_[feature]++;
}


void Heap::GarbageCollectionPrologue() {
  {
    AllowHeapAllocation for_the_first_part_of_prologue;
    gc_count_++;

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

  // Reset GC statistics.
  promoted_objects_size_ = 0;
  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
  semi_space_copied_object_size_ = 0;
  nodes_died_in_new_space_ = 0;
  nodes_copied_in_new_space_ = 0;
  nodes_promoted_ = 0;

  UpdateMaximumCommitted();

#ifdef DEBUG
  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();

  if (isolate()->concurrent_osr_enabled()) {
    isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
  }

  if (new_space_.IsAtMaximumCapacity()) {
    maximum_size_scavenges_++;
  } else {
    maximum_size_scavenges_ = 0;
  }
  CheckNewSpaceExpansionCriteria();
  UpdateNewSpaceAllocationCounter();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


const char* Heap::GetSpaceName(int idx) {
  switch (idx) {
    case NEW_SPACE:
      return "new_space";
    case OLD_SPACE:
      return "old_space";
    case MAP_SPACE:
      return "map_space";
    case CODE_SPACE:
      return "code_space";
    case LO_SPACE:
      return "large_object_space";
    default:
      UNREACHABLE();
  }
  return nullptr;
}


void Heap::RepairFreeListsAfterDeserialization() {
  PagedSpaces spaces(this);
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->RepairFreeListsAfterDeserialization();
  }
}


void Heap::MergeAllocationSitePretenuringFeedback(
    const HashMap& local_pretenuring_feedback) {
  AllocationSite* site = nullptr;
  for (HashMap::Entry* local_entry = local_pretenuring_feedback.Start();
       local_entry != nullptr;
       local_entry = local_pretenuring_feedback.Next(local_entry)) {
    site = reinterpret_cast<AllocationSite*>(local_entry->key);
    MapWord map_word = site->map_word();
    if (map_word.IsForwardingAddress()) {
      site = AllocationSite::cast(map_word.ToForwardingAddress());
    }
    DCHECK(site->IsAllocationSite());
    int value =
        static_cast<int>(reinterpret_cast<intptr_t>(local_entry->value));
    DCHECK_GT(value, 0);

    {
      // TODO(mlippautz): For parallel processing we need synchronization here.
      if (site->IncrementMementoFoundCount(value)) {
        global_pretenuring_feedback_->LookupOrInsert(
            site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
      }
    }
  }
}


class Heap::PretenuringScope {
 public:
  explicit PretenuringScope(Heap* heap) : heap_(heap) {
    heap_->global_pretenuring_feedback_ =
        new HashMap(HashMap::PointersMatch, kInitialFeedbackCapacity);
  }

  ~PretenuringScope() {
    delete heap_->global_pretenuring_feedback_;
    heap_->global_pretenuring_feedback_ = nullptr;
  }

 private:
  Heap* heap_;
};


void Heap::ProcessPretenuringFeedback() {
  bool trigger_deoptimization = false;
  if (FLAG_allocation_site_pretenuring) {
    int tenure_decisions = 0;
    int dont_tenure_decisions = 0;
    int allocation_mementos_found = 0;
    int allocation_sites = 0;
    int active_allocation_sites = 0;

    AllocationSite* site = nullptr;

    // Step 1: Digest feedback for recorded allocation sites.
    bool maximum_size_scavenge = MaximumSizeScavenge();
    for (HashMap::Entry* e = global_pretenuring_feedback_->Start();
         e != nullptr; e = global_pretenuring_feedback_->Next(e)) {
      site = reinterpret_cast<AllocationSite*>(e->key);
      int found_count = site->memento_found_count();
      // The fact that we have an entry in the storage means that we've found
      // the site at least once.
      DCHECK_GT(found_count, 0);
      DCHECK(site->IsAllocationSite());
      allocation_sites++;
      active_allocation_sites++;
      allocation_mementos_found += found_count;
      if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
        trigger_deoptimization = true;
      }
      if (site->GetPretenureMode() == TENURED) {
        tenure_decisions++;
      } else {
        dont_tenure_decisions++;
      }
    }

    // Step 2: Deopt maybe tenured allocation sites if necessary.
    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
    if (deopt_maybe_tenured) {
      Object* list_element = allocation_sites_list();
      while (list_element->IsAllocationSite()) {
        site = AllocationSite::cast(list_element);
        DCHECK(site->IsAllocationSite());
        allocation_sites++;
        if (site->IsMaybeTenure()) {
          site->set_deopt_dependent_code(true);
          trigger_deoptimization = true;
        }
        list_element = site->weak_next();
      }
    }

    if (trigger_deoptimization) {
      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
    }

    if (FLAG_trace_pretenuring_statistics &&
        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
         dont_tenure_decisions > 0)) {
      PrintIsolate(isolate(),
                   "pretenuring: deopt_maybe_tenured=%d visited_sites=%d "
                   "active_sites=%d "
                   "mementos=%d tenured=%d not_tenured=%d\n",
                   deopt_maybe_tenured ? 1 : 0, allocation_sites,
                   active_allocation_sites, allocation_mementos_found,
                   tenure_decisions, dont_tenure_decisions);
    }
  }
}


void Heap::DeoptMarkedAllocationSites() {
  // TODO(hpayer): If iterating over the allocation sites list becomes a
  // performance issue, use a cache data structure in heap instead.
  Object* list_element = allocation_sites_list();
  while (list_element->IsAllocationSite()) {
    AllocationSite* site = AllocationSite::cast(list_element);
    if (site->deopt_dependent_code()) {
      site->dependent_code()->MarkCodeForDeoptimization(
          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
      site->set_deopt_dependent_code(false);
    }
    list_element = site->weak_next();
  }
  Deoptimizer::DeoptimizeMarkedCode(isolate_);
}


void Heap::GarbageCollectionEpilogue() {
  store_buffer()->GCEpilogue();

  // In release mode, we only zap the from space under heap verification.
  if (Heap::ShouldZapGarbage()) {
    ZapFromSpace();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif

  AllowHeapAllocation for_the_rest_of_the_epilogue;

#ifdef DEBUG
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
  if (FLAG_check_handle_count) CheckHandleCount();
#endif
  if (FLAG_deopt_every_n_garbage_collections > 0) {
    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
    // the topmost optimized frame can be deoptimized safely, because it
    // might not have a lazy bailout point right after its current PC.
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
      Deoptimizer::DeoptimizeAll(isolate());
      gcs_since_last_deopt_ = 0;
    }
  }

  UpdateMaximumCommitted();

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->string_table_capacity()->Set(
      string_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      string_table()->NumberOfElements());

  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ >
      0) {
    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
                         (crankshaft_codegen_bytes_generated_ +
                          full_codegen_bytes_generated_)));
  }

  if (CommittedMemory() > 0) {
    isolate_->counters()->external_fragmentation_total()->AddSample(
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));

    isolate_->counters()->heap_fraction_new_space()->AddSample(
        static_cast<int>((new_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_old_space()->AddSample(
        static_cast<int>((old_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_code_space()->AddSample(
        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_map_space()->AddSample(
        static_cast<int>((map_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));
    isolate_->counters()->heap_fraction_lo_space()->AddSample(
        static_cast<int>((lo_space()->CommittedMemory() * 100.0) /
                         CommittedMemory()));

    isolate_->counters()->heap_sample_total_committed()->AddSample(
        static_cast<int>(CommittedMemory() / KB));
    isolate_->counters()->heap_sample_total_used()->AddSample(
        static_cast<int>(SizeOfObjects() / KB));
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
        static_cast<int>(map_space()->CommittedMemory() / KB));
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
        static_cast<int>(code_space()->CommittedMemory() / KB));

    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
        static_cast<int>(MaximumCommittedMemory() / KB));
  }

#define UPDATE_COUNTERS_FOR_SPACE(space)                \
  isolate_->counters()->space##_bytes_available()->Set( \
      static_cast<int>(space()->Available()));          \
  isolate_->counters()->space##_bytes_committed()->Set( \
      static_cast<int>(space()->CommittedMemory()));    \
  isolate_->counters()->space##_bytes_used()->Set(      \
      static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
  if (space()->CommittedMemory() > 0) {                                \
    isolate_->counters()->external_fragmentation_##space()->AddSample( \
        static_cast<int>(100 -                                         \
                         (space()->SizeOfObjects() * 100.0) /          \
                             space()->CommittedMemory()));             \
  }
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
  UPDATE_COUNTERS_FOR_SPACE(space)                         \
  UPDATE_FRAGMENTATION_FOR_SPACE(space)

  UPDATE_COUNTERS_FOR_SPACE(new_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

#ifdef DEBUG
  ReportStatisticsAfterGC();
#endif  // DEBUG

  // Remember the last top pointer so that we can later find out
  // whether we allocated in new space since the last GC.
  new_space_top_after_last_gc_ = new_space()->top();
  last_gc_time_ = MonotonicallyIncreasingTimeInMs();

  ReduceNewSpaceSize();
}


void Heap::PreprocessStackTraces() {
  WeakFixedArray::Iterator iterator(weak_stack_trace_list());
  FixedArray* elements;
  while ((elements = iterator.Next<FixedArray>())) {
    for (int j = 1; j < elements->length(); j += 4) {
      Object* maybe_code = elements->get(j + 2);
      // If GC happens while adding a stack trace to the weak fixed array,
      // which has been copied into a larger backing store, we may run into
      // a stack trace that has already been preprocessed. Guard against this.
      if (!maybe_code->IsCode()) break;
      Code* code = Code::cast(maybe_code);
      int offset = Smi::cast(elements->get(j + 3))->value();
      Address pc = code->address() + offset;
      int pos = code->SourcePosition(pc);
      elements->set(j + 2, Smi::FromInt(pos));
    }
  }
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it when the GC was triggered. Instead, we reset the root
  // value.
  set_weak_stack_trace_list(Smi::FromInt(0));
}


class GCCallbacksScope {
 public:
  explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
    heap_->gc_callbacks_depth_++;
  }
  ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }

  bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }

 private:
  Heap* heap_;
};


void Heap::HandleGCRequest() {
  if (incremental_marking()->request_type() ==
      IncrementalMarking::COMPLETE_MARKING) {
    CollectAllGarbage(current_gc_flags_, "GC interrupt",
                      current_gc_callback_flags_);
  } else if (incremental_marking()->IsMarking() &&
             !incremental_marking()->finalize_marking_completed()) {
    FinalizeIncrementalMarking("GC interrupt: finalize incremental marking");
  }
}


void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
  scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
}


void Heap::FinalizeIncrementalMarking(const char* gc_reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] (%s).\n", gc_reason);
  }

  GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE);
  HistogramTimerScope incremental_marking_scope(
      isolate()->counters()->gc_incremental_marking_finalize());

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
  incremental_marking()->FinalizeIncrementally();
  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
    }
  }
}


HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) {
  if (collector == SCAVENGER) {
    return isolate_->counters()->gc_scavenger();
  } else {
    if (!incremental_marking()->IsStopped()) {
      if (ShouldReduceMemory()) {
        return isolate_->counters()->gc_finalize_reduce_memory();
      } else {
        return isolate_->counters()->gc_finalize();
      }
    } else {
      return isolate_->counters()->gc_compactor();
    }
  }
}


void Heap::CollectAllGarbage(int flags, const char* gc_reason,
                             const v8::GCCallbackFlags gc_callback_flags) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  set_current_gc_flags(flags);
  CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags);
  set_current_gc_flags(kNoGCFlags);
}


void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  if (isolate()->concurrent_recompilation_enabled()) {
    // The optimizing compiler may be unnecessarily holding on to memory.
    DisallowHeapAllocation no_recursive_gc;
    isolate()->optimizing_compile_dispatcher()->Flush();
  }
  isolate()->ClearSerializerData();
  set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask);
  isolate_->compilation_cache()->Clear();
  const int kMaxNumberOfAttempts = 7;
  const int kMinNumberOfAttempts = 2;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL,
                        v8::kGCCallbackFlagForced) &&
        attempt + 1 >= kMinNumberOfAttempts) {
      break;
    }
  }
  set_current_gc_flags(kNoGCFlags);
  new_space_.Shrink();
  UncommitFromSpace();
}


void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
  if (incremental_marking()->IsStopped()) {
    if (incremental_marking()->CanBeActivated()) {
      StartIncrementalMarking(
          i::Heap::kNoGCFlags,
          kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
    } else {
      CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
                        kGCCallbackFlagSynchronousPhantomCallbackProcessing);
    }
  } else {
    // Incremental marking is turned on and has already been started.

    // TODO(mlippautz): Compute the time slice for incremental marking based on
    // memory pressure.
    double deadline = MonotonicallyIncreasingTimeInMs() +
                      FLAG_external_allocation_limit_incremental_time;
    incremental_marking()->AdvanceIncrementalMarking(
        0, deadline,
        IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
                                        IncrementalMarking::FORCE_MARKING,
                                        IncrementalMarking::FORCE_COMPLETION));
  }
}


void Heap::EnsureFillerObjectAtTop() {
  // There may be an allocation memento behind every object in new space.
  // If we evacuate a new space that is not full, or if we are on the last
  // page of the new space, then there may be uninitialized memory behind
  // the top pointer of the new space page. We store a filler object there
  // to identify the unused space.
  Address from_top = new_space_.top();
  // Check that from_top is inside its page (i.e., not at the end).
  Address space_end = new_space_.ToSpaceEnd();
  if (from_top < space_end) {
    Page* page = Page::FromAddress(from_top);
    if (page->Contains(from_top)) {
      int remaining_in_page = static_cast<int>(page->area_end() - from_top);
      CreateFillerObjectAt(from_top, remaining_in_page);
    }
  }
}


bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
                          const char* collector_reason,
                          const v8::GCCallbackFlags gc_callback_flags) {
  // The VM is in the GC state until exiting this function.
  VMState<GC> state(isolate_);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  EnsureFillerObjectAtTop();

  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
    }
  }

  if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
      !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
      !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
      OldGenerationAllocationLimitReached()) {
    // Make progress in incremental marking.
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
    if (!incremental_marking()->IsComplete() &&
        !mark_compact_collector()->marking_deque_.IsEmpty() &&
        !FLAG_gc_global) {
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
      }
      collector = SCAVENGER;
      collector_reason = "incremental marking delaying mark-sweep";
    }
  }

  bool next_gc_likely_to_collect_more = false;
  intptr_t committed_memory_before = 0;

  if (collector == MARK_COMPACTOR) {
    committed_memory_before = CommittedOldGenerationMemory();
  }

  {
    tracer()->Start(collector, gc_reason, collector_reason);
    DCHECK(AllowHeapAllocation::IsAllowed());
    DisallowHeapAllocation no_allocation_during_gc;
    GarbageCollectionPrologue();

    {
      HistogramTimerScope histogram_timer_scope(GCTypeTimer(collector));

      next_gc_likely_to_collect_more =
          PerformGarbageCollection(collector, gc_callback_flags);
    }

    GarbageCollectionEpilogue();
    if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
      isolate()->CheckDetachedContextsAfterGC();
    }

    if (collector == MARK_COMPACTOR) {
      intptr_t committed_memory_after = CommittedOldGenerationMemory();
      intptr_t used_memory_after = PromotedSpaceSizeOfObjects();
      MemoryReducer::Event event;
      event.type = MemoryReducer::kMarkCompact;
      event.time_ms = MonotonicallyIncreasingTimeInMs();
      // Trigger one more GC if
      // - this GC decreased committed memory,
      // - there is high fragmentation,
      // - there are live detached contexts.
      event.next_gc_likely_to_collect_more =
          (committed_memory_before - committed_memory_after) > MB ||
          HasHighFragmentation(used_memory_after, committed_memory_after) ||
          (detached_contexts()->length() > 0);
      if (deserialization_complete_) {
        memory_reducer_->NotifyMarkCompact(event);
      }
    }

    tracer()->Stop(collector);
  }

  if (collector == MARK_COMPACTOR &&
      (gc_callback_flags & kGCCallbackFlagForced) != 0) {
    isolate()->CountUsage(v8::Isolate::kForcedGC);
  }

  // Start incremental marking for the next cycle. The heap snapshot
  // generator needs incremental marking to stay off after it aborted.
  if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
    StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
  }

  return next_gc_likely_to_collect_more;
}


int Heap::NotifyContextDisposed(bool dependant_context) {
  if (!dependant_context) {
    tracer()->ResetSurvivalEvents();
    old_generation_size_configured_ = false;
    MemoryReducer::Event event;
    event.type = MemoryReducer::kContextDisposed;
    event.time_ms = MonotonicallyIncreasingTimeInMs();
    memory_reducer_->NotifyContextDisposed(event);
  }
  if (isolate()->concurrent_recompilation_enabled()) {
    // Flush the queued recompilation tasks.
    isolate()->optimizing_compile_dispatcher()->Flush();
  }
  AgeInlineCaches();
  number_of_disposed_maps_ = retained_maps()->Length();
  tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs());
  return ++contexts_disposed_;
}


void Heap::StartIncrementalMarking(int gc_flags,
                                   const GCCallbackFlags gc_callback_flags,
                                   const char* reason) {
  DCHECK(incremental_marking()->IsStopped());
  set_current_gc_flags(gc_flags);
  current_gc_callback_flags_ = gc_callback_flags;
  incremental_marking()->Start(reason);
}


void Heap::StartIdleIncrementalMarking() {
  gc_idle_time_handler_->ResetNoProgressCounter();
  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
                          "idle");
}


void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
                        int len) {
  if (len == 0) return;

  DCHECK(array->map() != fixed_cow_array_map());
  Object** dst_objects = array->data_start() + dst_index;
  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
  if (!InNewSpace(array)) {
    for (int i = 0; i < len; i++) {
      // TODO(hpayer): check store buffer for entries
      if (InNewSpace(dst_objects[i])) {
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
      }
    }
  }
  incremental_marking()->RecordWrites(array);
}
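
// A short note on the barriers above: MemMove copies raw words and bypasses
// the write barrier, so the loop manually records any old-to-new references
// (an old-space |array| now holding pointers into new space) in the store
// buffer; otherwise the next scavenge could miss those slots. Arrays that
// themselves live in new space need no store-buffer entries, which is why
// the InNewSpace(array) check skips the loop entirely. The final
// RecordWrites call performs the corresponding incremental-marking barrier.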


#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the string is actually internalized.
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
              (*p)->IsInternalizedString());
      }
    }
  }
};


static void VerifyStringTable(Heap* heap) {
  StringTableVerifier verifier;
  heap->string_table()->IterateElements(&verifier);
}
#endif  // VERIFY_HEAP


bool Heap::ReserveSpace(Reservation* reservations) {
  bool gc_performed = true;
  int counter = 0;
  static const int kThreshold = 20;
  while (gc_performed && counter++ < kThreshold) {
    gc_performed = false;
    for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
      Reservation* reservation = &reservations[space];
      DCHECK_LE(1, reservation->length());
      if (reservation->at(0).size == 0) continue;
      bool perform_gc = false;
      if (space == LO_SPACE) {
        DCHECK_EQ(1, reservation->length());
        perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
      } else {
        for (auto& chunk : *reservation) {
          AllocationResult allocation;
          int size = chunk.size;
          DCHECK_LE(size, MemoryAllocator::PageAreaSize(
                              static_cast<AllocationSpace>(space)));
          if (space == NEW_SPACE) {
            allocation = new_space()->AllocateRawUnaligned(size);
          } else {
            allocation = paged_space(space)->AllocateRawUnaligned(size);
          }
          HeapObject* free_space = nullptr;
          if (allocation.To(&free_space)) {
            // Mark with a free list node, in case we have a GC before
            // deserializing.
            Address free_space_address = free_space->address();
            CreateFillerObjectAt(free_space_address, size);
            DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
            chunk.start = free_space_address;
            chunk.end = free_space_address + size;
          } else {
            perform_gc = true;
            break;
          }
        }
      }
      if (perform_gc) {
        if (space == NEW_SPACE) {
          CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
        } else {
          if (counter > 1) {
            CollectAllGarbage(
                kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask,
                "failed to reserve space in paged or large "
                "object space, trying to reduce memory footprint");
          } else {
            CollectAllGarbage(
                kAbortIncrementalMarkingMask,
                "failed to reserve space in paged or large object space");
          }
        }
        gc_performed = true;
        break;  // Abort for-loop over spaces and retry.
      }
    }
  }

  return !gc_performed;
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
  if (start_new_space_size == 0) return;

  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
                      static_cast<double>(start_new_space_size) * 100);

  if (previous_semi_space_copied_object_size_ > 0) {
    promotion_rate_ =
        (static_cast<double>(promoted_objects_size_) /
         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
  } else {
    promotion_rate_ = 0;
  }

  semi_space_copied_rate_ =
      (static_cast<double>(semi_space_copied_object_size_) /
       static_cast<double>(start_new_space_size) * 100);

  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
  tracer()->AddSurvivalRatio(survival_rate);
  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }
}
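
// A worked example of the ratios above (illustrative numbers): with
// start_new_space_size = 1000 KB, promoted_objects_size_ = 200 KB, and
// semi_space_copied_object_size_ = 300 KB, we get promotion_ratio_ = 20,
// semi_space_copied_rate_ = 30, and a survival rate of 50; that is, half of
// the young generation outlived this scavenge, either by being promoted to
// old space or by being copied within new space.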

bool Heap::PerformGarbageCollection(
    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
  int freed_global_handles = 0;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  {
    Heap::PretenuringScope pretenuring_scope(this);

    if (collector == MARK_COMPACTOR) {
      UpdateOldGenerationAllocationCounter();
      // Perform mark-sweep with optional compaction.
      MarkCompact();
      old_gen_exhausted_ = false;
      old_generation_size_configured_ = true;
      // This should be updated before PostGarbageCollectionProcessing, which
      // can cause another GC. Take into account the objects promoted during
      // GC.
      old_generation_allocation_counter_ +=
          static_cast<size_t>(promoted_objects_size_);
      old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
    } else {
      Scavenge();
    }

    ProcessPretenuringFeedback();
  }

  UpdateSurvivalStatistics(start_new_space_size);
  ConfigureInitialOldGenerationSize();

  isolate_->counters()->objs_since_last_young()->Set(0);

  if (collector != SCAVENGER) {
    // Callbacks that fire after this point might trigger nested GCs and
    // restart incremental marking, so the assertion can't be moved down.
    DCHECK(incremental_marking()->IsStopped());

    // We finished a marking cycle. We can uncommit the marking deque until
    // we start marking again.
    mark_compact_collector()->marking_deque()->Uninitialize();
    mark_compact_collector()->EnsureMarkingDequeIsCommitted(
        MarkCompactCollector::kMinMarkingDequeSize);
  }

  gc_post_processing_depth_++;
  {
    AllowHeapAllocation allow_allocation;
    GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
    freed_global_handles =
        isolate_->global_handles()->PostGarbageCollectionProcessing(
            collector, gc_callback_flags);
  }
  gc_post_processing_depth_--;

  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing(isolate_);

  double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond();
  double mutator_speed = static_cast<double>(
      tracer()
          ->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond());
  intptr_t old_gen_size = PromotedSpaceSizeOfObjects();
  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
    SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
  } else if (HasLowYoungGenerationAllocationRate() &&
             old_generation_size_configured_) {
    DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
  }

  {
    GCCallbacksScope scope(this);
    if (scope.CheckReenter()) {
      AllowHeapAllocation allow_allocation;
      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
      VMState<EXTERNAL> state(isolate_);
      HandleScope handle_scope(isolate_);
      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
    }
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyStringTable(this);
  }
#endif

  return freed_global_handles > 0;
}


void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      if (!gc_prologue_callbacks_[i].pass_isolate) {
        v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
            gc_prologue_callbacks_[i].callback);
        callback(gc_type, flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
      }
    }
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type,
                                   GCCallbackFlags gc_callback_flags) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      if (!gc_epilogue_callbacks_[i].pass_isolate) {
        v8::GCCallback callback = reinterpret_cast<v8::GCCallback>(
            gc_epilogue_callbacks_[i].callback);
        callback(gc_type, gc_callback_flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
      }
    }
  }
}


void Heap::MarkCompact() {
  PauseInlineAllocationObserversScope pause_observers(new_space());

  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  uint64_t size_of_objects_before_gc = SizeOfObjects();

  mark_compact_collector()->Prepare();

  ms_count_++;

  MarkCompactPrologue();

  mark_compact_collector()->CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  MarkCompactEpilogue();

  if (FLAG_allocation_site_pretenuring) {
    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
  }
}


void Heap::MarkCompactEpilogue() {
  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  incremental_marking()->Epilogue();

  PreprocessStackTraces();
}


void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }

 private:
  Heap* heap_;
};


static void VerifyNonPointerSpacePointers(Heap* heap) {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v(heap);
  HeapObjectIterator code_it(heap->code_space());
  for (HeapObject* object = code_it.Next(); object != NULL;
       object = code_it.Next())
    object->Iterate(&v);
}
#endif  // VERIFY_HEAP


void Heap::CheckNewSpaceExpansionCriteria() {
  if (FLAG_experimental_new_space_growth_heuristic) {
    if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
      // Grow the size of new space if there is room to grow, and more than 10%
      // have survived the last scavenge.
      new_space_.Grow();
      survived_since_last_expansion_ = 0;
    }
  } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
             survived_since_last_expansion_ > new_space_.TotalCapacity()) {
    // Grow the size of new space if there is room to grow, and enough data
    // has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


static bool IsUnmodifiedHeapObject(Object** p) {
  Object* object = *p;
  if (object->IsSmi()) return false;
  HeapObject* heap_object = HeapObject::cast(object);
  if (!object->IsJSObject()) return false;
  Object* obj_constructor = (JSObject::cast(object))->map()->GetConstructor();
  if (!obj_constructor->IsJSFunction()) return false;
  JSFunction* constructor = JSFunction::cast(obj_constructor);
  if (!constructor->shared()->IsApiFunction()) return false;
  if (constructor != nullptr &&
      constructor->initial_map() == heap_object->map()) {
    return true;
  }
  return false;
}


void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
                                       StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}


void PromotionQueue::Initialize() {
  // The last to-space page may be used for the promotion queue. On promotion
  // conflict, we use the emergency stack.
  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
         0);
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  limit_ = reinterpret_cast<intptr_t*>(
      Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
  emergency_stack_ = NULL;
}
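
// A sketch of the resulting layout on the last to-space page (illustrative,
// not from the original source). Queue entries are two words each (size,
// then object) and grow downward from the end of to-space until they would
// hit the page's area_start():
//
//   area_start()                                      ToSpaceEnd()
//        |                                                 |
//        v                                                 v
//        +---- objects grow up ---> ... <--- queue grows down ----+
//        ^                                                        ^
//      limit_                                            front_ == rear_
//
// On a collision between object allocation and the queue, entries are moved
// to the emergency stack (see RelocateQueueHead below).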
1572
1573
RelocateQueueHead()1574 void PromotionQueue::RelocateQueueHead() {
1575 DCHECK(emergency_stack_ == NULL);
1576
1577 Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1578 intptr_t* head_start = rear_;
1579 intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1580
1581 int entries_count =
1582 static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1583
1584 emergency_stack_ = new List<Entry>(2 * entries_count);
1585
1586 while (head_start != head_end) {
1587 int size = static_cast<int>(*(head_start++));
1588 HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1589     // New space allocation in SemiSpaceCopyObject marked the region
1590     // overlapping with the promotion queue as uninitialized.
1591 MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
1592 MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
1593 emergency_stack_->Add(Entry(obj, size));
1594 }
1595 rear_ = head_end;
1596 }
1597
1598
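// Retainer used for weak lists after a scavenge: objects outside from-space
// are kept as-is, objects that were evacuated are replaced by their
// forwarding address, and objects that died are dropped (NULL).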
1599 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1600 public:
1601   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1602
1603   virtual Object* RetainAs(Object* object) {
1604 if (!heap_->InFromSpace(object)) {
1605 return object;
1606 }
1607
1608 MapWord map_word = HeapObject::cast(object)->map_word();
1609 if (map_word.IsForwardingAddress()) {
1610 return map_word.ToForwardingAddress();
1611 }
1612 return NULL;
1613 }
1614
1615 private:
1616 Heap* heap_;
1617 };
1618
1619
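// Scavenge collects the young generation with a Cheney-style semispace copy:
// flip the semispaces, copy everything reachable from the strong roots, the
// old-to-new store buffer and the weak lists into to-space (or promote it),
// then drain the copied-object and promotion queues until a fixed point is
// reached. Weak handles and the external string table are processed at the
// end, and the age mark is advanced so the next scavenge can tell old
// survivors from fresh allocations.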
1620 void Heap::Scavenge() {
1621 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
1622 RelocationLock relocation_lock(this);
1623 // There are soft limits in the allocation code, designed to trigger a mark
1624 // sweep collection by failing allocations. There is no sense in trying to
1625   // trigger one during scavenge: scavenge allocations should always succeed.
1626 AlwaysAllocateScope scope(isolate());
1627
1628 // Bump-pointer allocations done during scavenge are not real allocations.
1629 // Pause the inline allocation steps.
1630 PauseInlineAllocationObserversScope pause_observers(new_space());
1631
1632 #ifdef VERIFY_HEAP
1633 if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1634 #endif
1635
1636 gc_state_ = SCAVENGE;
1637
1638 // Implements Cheney's copying algorithm
1639 LOG(isolate_, ResourceEvent("scavenge", "begin"));
1640
1641 // Clear descriptor cache.
1642 isolate_->descriptor_lookup_cache()->Clear();
1643
1644 // Used for updating survived_since_last_expansion_ at function end.
1645 intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1646
1647 scavenge_collector_->SelectScavengingVisitorsTable();
1648
1649 array_buffer_tracker()->PrepareDiscoveryInNewSpace();
1650
1651   // Flip the semispaces. After flipping, to-space is empty and from-space
1652   // holds the live objects.
1653 new_space_.Flip();
1654 new_space_.ResetAllocationInfo();
1655
1656 // We need to sweep newly copied objects which can be either in the
1657 // to space or promoted to the old generation. For to-space
1658 // objects, we treat the bottom of the to space as a queue. Newly
1659 // copied and unswept objects lie between a 'front' mark and the
1660 // allocation pointer.
1661 //
1662 // Promoted objects can go into various old-generation spaces, and
1663 // can be allocated internally in the spaces (from the free list).
1664 // We treat the top of the to space as a queue of addresses of
1665 // promoted objects. The addresses of newly promoted and unswept
1666 // objects lie between a 'front' mark and a 'rear' mark that is
1667 // updated as a side effect of promoting an object.
1668 //
1669 // There is guaranteed to be enough room at the top of the to space
1670 // for the addresses of promoted objects: every object promoted
1671 // frees up its size in bytes from the top of the new space, and
1672 // objects are at least one pointer in size.
1673 Address new_space_front = new_space_.ToSpaceStart();
1674 promotion_queue_.Initialize();
1675
1676 ScavengeVisitor scavenge_visitor(this);
1677
1678 if (FLAG_scavenge_reclaim_unmodified_objects) {
1679 isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
1680 &IsUnmodifiedHeapObject);
1681 }
1682
1683 {
1684 // Copy roots.
1685 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
1686 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1687 }
1688
1689 {
1690 // Copy objects reachable from the old generation.
1691 GCTracer::Scope gc_scope(tracer(),
1692 GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
1693 StoreBufferRebuildScope scope(this, store_buffer(),
1694 &ScavengeStoreBufferCallback);
1695 store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
1696 }
1697
1698 {
1699 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
1700 // Copy objects reachable from the encountered weak collections list.
1701 scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1702 // Copy objects reachable from the encountered weak cells.
1703 scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1704 }
1705
1706 {
1707 // Copy objects reachable from the code flushing candidates list.
1708 GCTracer::Scope gc_scope(tracer(),
1709 GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
1710 MarkCompactCollector* collector = mark_compact_collector();
1711 if (collector->is_code_flushing_enabled()) {
1712 collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1713 }
1714 }
1715
1716 {
1717 GCTracer::Scope gc_scope(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
1718 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1719 }
1720
1721 if (FLAG_scavenge_reclaim_unmodified_objects) {
1722 isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
1723 &IsUnscavengedHeapObject);
1724
1725 isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
1726 &scavenge_visitor);
1727 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1728 } else {
1729 GCTracer::Scope gc_scope(tracer(),
1730 GCTracer::Scope::SCAVENGER_OBJECT_GROUPS);
1731 while (isolate()->global_handles()->IterateObjectGroups(
1732 &scavenge_visitor, &IsUnscavengedHeapObject)) {
1733 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1734 }
1735 isolate()->global_handles()->RemoveObjectGroups();
1736 isolate()->global_handles()->RemoveImplicitRefGroups();
1737
1738 isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1739 &IsUnscavengedHeapObject);
1740
1741 isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
1742 &scavenge_visitor);
1743 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1744 }
1745
1746 UpdateNewSpaceReferencesInExternalStringTable(
1747 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1748
1749 promotion_queue_.Destroy();
1750
1751 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1752
1753 ScavengeWeakObjectRetainer weak_object_retainer(this);
1754 ProcessYoungWeakReferences(&weak_object_retainer);
1755
1756 DCHECK(new_space_front == new_space_.top());
1757
1758 // Set age mark.
1759 new_space_.set_age_mark(new_space_.top());
1760
1761 array_buffer_tracker()->FreeDead(true);
1762
1763 // Update how much has survived scavenge.
1764 IncrementYoungSurvivorsCounter(static_cast<int>(
1765 (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1766
1767 LOG(isolate_, ResourceEvent("scavenge", "end"));
1768
1769 gc_state_ = NOT_IN_GC;
1770 }
1771
1772
1773 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1774 Object** p) {
1775 MapWord first_word = HeapObject::cast(*p)->map_word();
1776
1777 if (!first_word.IsForwardingAddress()) {
1778 // Unreachable external string can be finalized.
1779 heap->FinalizeExternalString(String::cast(*p));
1780 return NULL;
1781 }
1782
1783 // String is still reachable.
1784 return String::cast(first_word.ToForwardingAddress());
1785 }
1786
1787
1788 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1789 ExternalStringTableUpdaterCallback updater_func) {
1790 #ifdef VERIFY_HEAP
1791 if (FLAG_verify_heap) {
1792 external_string_table_.Verify();
1793 }
1794 #endif
1795
1796 if (external_string_table_.new_space_strings_.is_empty()) return;
1797
1798 Object** start = &external_string_table_.new_space_strings_[0];
1799 Object** end = start + external_string_table_.new_space_strings_.length();
1800 Object** last = start;
1801
1802 for (Object** p = start; p < end; ++p) {
1803 DCHECK(InFromSpace(*p));
1804 String* target = updater_func(this, p);
1805
1806 if (target == NULL) continue;
1807
1808 DCHECK(target->IsExternalString());
1809
1810 if (InNewSpace(target)) {
1811 // String is still in new space. Update the table entry.
1812 *last = target;
1813 ++last;
1814 } else {
1815 // String got promoted. Move it to the old string list.
1816 external_string_table_.AddOldString(target);
1817 }
1818 }
1819
1820 DCHECK(last <= end);
1821 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1822 }
1823
1824
1825 void Heap::UpdateReferencesInExternalStringTable(
1826 ExternalStringTableUpdaterCallback updater_func) {
1827 // Update old space string references.
1828 if (external_string_table_.old_space_strings_.length() > 0) {
1829 Object** start = &external_string_table_.old_space_strings_[0];
1830 Object** end = start + external_string_table_.old_space_strings_.length();
1831 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1832 }
1833
1834 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1835 }
1836
1837
1838 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
1839 ProcessNativeContexts(retainer);
1840 ProcessAllocationSites(retainer);
1841 }
1842
1843
1844 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1845 ProcessNativeContexts(retainer);
1846 }
1847
1848
1849 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1850 Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1851 // Update the head of the list of contexts.
1852 set_native_contexts_list(head);
1853 }
1854
1855
1856 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1857 Object* allocation_site_obj =
1858 VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1859 set_allocation_sites_list(allocation_site_obj);
1860 }
1861
1862
1863 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1864 DisallowHeapAllocation no_allocation_scope;
1865 Object* cur = allocation_sites_list();
1866 bool marked = false;
1867 while (cur->IsAllocationSite()) {
1868 AllocationSite* casted = AllocationSite::cast(cur);
1869 if (casted->GetPretenureMode() == flag) {
1870 casted->ResetPretenureDecision();
1871 casted->set_deopt_dependent_code(true);
1872 marked = true;
1873 RemoveAllocationSitePretenuringFeedback(casted);
1874 }
1875 cur = casted->weak_next();
1876 }
1877 if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1878 }
1879
1880
1881 void Heap::EvaluateOldSpaceLocalPretenuring(
1882 uint64_t size_of_objects_before_gc) {
1883 uint64_t size_of_objects_after_gc = SizeOfObjects();
1884 double old_generation_survival_rate =
1885 (static_cast<double>(size_of_objects_after_gc) * 100) /
1886 static_cast<double>(size_of_objects_before_gc);
1887
1888 if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1889     // Too many objects died in the old generation; wrongly pretenured
1890     // allocation sites may be the cause. We have to deopt all dependent
1891     // code registered in the allocation sites to re-evaluate our
1892     // pretenuring decisions.
1893 ResetAllAllocationSitesDependentCode(TENURED);
1894 if (FLAG_trace_pretenuring) {
1895 PrintF(
1896 "Deopt all allocation sites dependent code due to low survival "
1897 "rate in the old generation %f\n",
1898 old_generation_survival_rate);
1899 }
1900 }
1901 }
1902
1903
1904 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1905 DisallowHeapAllocation no_allocation;
1906 // All external strings are listed in the external string table.
1907
1908 class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1909 public:
1910 explicit ExternalStringTableVisitorAdapter(
1911 v8::ExternalResourceVisitor* visitor)
1912 : visitor_(visitor) {}
1913 virtual void VisitPointers(Object** start, Object** end) {
1914 for (Object** p = start; p < end; p++) {
1915 DCHECK((*p)->IsExternalString());
1916 visitor_->VisitExternalString(
1917 Utils::ToLocal(Handle<String>(String::cast(*p))));
1918 }
1919 }
1920
1921 private:
1922 v8::ExternalResourceVisitor* visitor_;
1923 } external_string_table_visitor(visitor);
1924
1925 external_string_table_.Iterate(&external_string_table_visitor);
1926 }
1927
1928
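// Drains the two scavenging work lists until both are empty: the Cheney scan
// pointer (new_space_front up to new_space_.top()) walks objects already
// copied into to-space, while the promotion queue holds objects moved to the
// old generation whose fields still need scavenging. Processing one list can
// refill the other, hence the outer do/while loop.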
1929 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1930 Address new_space_front) {
1931 do {
1932 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1933 // The addresses new_space_front and new_space_.top() define a
1934 // queue of unprocessed copied objects. Process them until the
1935 // queue is empty.
1936 while (new_space_front != new_space_.top()) {
1937 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1938 HeapObject* object = HeapObject::FromAddress(new_space_front);
1939 new_space_front +=
1940 StaticScavengeVisitor::IterateBody(object->map(), object);
1941 } else {
1942 new_space_front =
1943 NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1944 }
1945 }
1946
1947 // Promote and process all the to-be-promoted objects.
1948 {
1949 StoreBufferRebuildScope scope(this, store_buffer(),
1950 &ScavengeStoreBufferCallback);
1951 while (!promotion_queue()->is_empty()) {
1952 HeapObject* target;
1953 int size;
1954 promotion_queue()->remove(&target, &size);
1955
1956         // A promoted object might already have been partially visited
1957         // during old-space pointer iteration. Thus we search specifically
1958         // for pointers into the from-semispace instead of looking for
1959         // pointers into new space.
1960 DCHECK(!target->IsMap());
1961
1962 IteratePointersToFromSpace(target, size, &Scavenger::ScavengeObject);
1963 }
1964 }
1965
1966 // Take another spin if there are now unswept objects in new space
1967 // (there are currently no more unswept promoted objects).
1968 } while (new_space_front != new_space_.top());
1969
1970 return new_space_front;
1971 }
1972
1973
1974 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1975 0); // NOLINT
1976 STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) ==
1977 0); // NOLINT
1978 #ifdef V8_HOST_ARCH_32_BIT
1979 STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) !=
1980 0); // NOLINT
1981 #endif
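// The 32-bit assertion above encodes a deliberate layout choice: with a
// 4-byte map word, HeapNumber::kValueOffset is misaligned relative to the
// object start, so allocating heap numbers kDoubleUnaligned places the
// double value field itself on an 8-byte boundary.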
1982
1983
1984 int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) {
1985 switch (alignment) {
1986 case kWordAligned:
1987 return 0;
1988 case kDoubleAligned:
1989 case kDoubleUnaligned:
1990 return kDoubleSize - kPointerSize;
1991 case kSimd128Unaligned:
1992 return kSimd128Size - kPointerSize;
1993 default:
1994 UNREACHABLE();
1995 }
1996 return 0;
1997 }
1998
1999
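// Computes the filler needed in front of an object at |address| to satisfy
// |alignment|. Worked example on a 32-bit host (kPointerSize == 4,
// kDoubleAlignmentMask == 7): kDoubleAligned at an address with offset 4
// needs kPointerSize == 4 bytes of fill, and kDoubleUnaligned at offset 0
// needs kDoubleSize - kPointerSize == 4 bytes. On 64-bit hosts every
// pointer-aligned address is already double-aligned, so both cases yield 0.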
2000 int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) {
2001 intptr_t offset = OffsetFrom(address);
2002 if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0)
2003 return kPointerSize;
2004 if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0)
2005 return kDoubleSize - kPointerSize; // No fill if double is always aligned.
2006 if (alignment == kSimd128Unaligned) {
2007 return (kSimd128Size - (static_cast<int>(offset) + kPointerSize)) &
2008 kSimd128AlignmentMask;
2009 }
2010 return 0;
2011 }
2012
2013
2014 HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) {
2015 CreateFillerObjectAt(object->address(), filler_size);
2016 return HeapObject::FromAddress(object->address() + filler_size);
2017 }
2018
2019
2020 HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size,
2021 int allocation_size,
2022 AllocationAlignment alignment) {
2023 int filler_size = allocation_size - object_size;
2024 DCHECK(filler_size > 0);
2025 int pre_filler = GetFillToAlign(object->address(), alignment);
2026 if (pre_filler) {
2027 object = PrecedeWithFiller(object, pre_filler);
2028 filler_size -= pre_filler;
2029 }
2030 if (filler_size)
2031 CreateFillerObjectAt(object->address() + object_size, filler_size);
2032 return object;
2033 }
2034
2035
2036 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
2037 return AlignWithFiller(object, size - kPointerSize, size, kDoubleAligned);
2038 }
2039
2040
2041 void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
2042 return array_buffer_tracker()->RegisterNew(buffer);
2043 }
2044
2045
2046 void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
2047 return array_buffer_tracker()->Unregister(buffer);
2048 }
2049
2050
2051 void Heap::ConfigureInitialOldGenerationSize() {
2052 if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2053 old_generation_allocation_limit_ =
2054 Max(kMinimumOldGenerationAllocationLimit,
2055 static_cast<intptr_t>(
2056 static_cast<double>(old_generation_allocation_limit_) *
2057 (tracer()->AverageSurvivalRatio() / 100)));
2058 }
2059 }
2060
2061
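// Allocates a map before the root set exists. Only the fields that do not
// depend on other roots (instance type, instance size, visitor id, bit
// fields) can be initialized here; prototype, constructor, descriptors and
// the code cache are patched in later by CreateInitialMaps() once
// empty_fixed_array() and null_value() have been allocated.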
2062 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2063 int instance_size) {
2064 Object* result = nullptr;
2065 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2066 if (!allocation.To(&result)) return allocation;
2067
2068 // Map::cast cannot be used due to uninitialized map field.
2069 reinterpret_cast<Map*>(result)->set_map(
2070 reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
2071 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2072 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2073 // Initialize to only containing tagged fields.
2074 reinterpret_cast<Map*>(result)->set_visitor_id(
2075 StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2076 if (FLAG_unbox_double_fields) {
2077 reinterpret_cast<Map*>(result)
2078 ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2079 }
2080 reinterpret_cast<Map*>(result)->clear_unused();
2081 reinterpret_cast<Map*>(result)
2082 ->set_inobject_properties_or_constructor_function_index(0);
2083 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2084 reinterpret_cast<Map*>(result)->set_bit_field(0);
2085 reinterpret_cast<Map*>(result)->set_bit_field2(0);
2086 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2087 Map::OwnsDescriptors::encode(true) |
2088 Map::ConstructionCounter::encode(Map::kNoSlackTracking);
2089 reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2090 reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
2091 return result;
2092 }
2093
2094
2095 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2096 int instance_size,
2097 ElementsKind elements_kind) {
2098 HeapObject* result = nullptr;
2099 AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
2100 if (!allocation.To(&result)) return allocation;
2101
2102 result->set_map_no_write_barrier(meta_map());
2103 Map* map = Map::cast(result);
2104 map->set_instance_type(instance_type);
2105 map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2106 map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
2107 map->set_instance_size(instance_size);
2108 map->clear_unused();
2109 map->set_inobject_properties_or_constructor_function_index(0);
2110 map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2111 map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2112 SKIP_WRITE_BARRIER);
2113 map->set_weak_cell_cache(Smi::FromInt(0));
2114 map->set_raw_transitions(Smi::FromInt(0));
2115 map->set_unused_property_fields(0);
2116 map->set_instance_descriptors(empty_descriptor_array());
2117 if (FLAG_unbox_double_fields) {
2118 map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2119 }
2120 // Must be called only after |instance_type|, |instance_size| and
2121 // |layout_descriptor| are set.
2122 map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
2123 map->set_bit_field(0);
2124 map->set_bit_field2(1 << Map::kIsExtensible);
2125 int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2126 Map::OwnsDescriptors::encode(true) |
2127 Map::ConstructionCounter::encode(Map::kNoSlackTracking);
2128 map->set_bit_field3(bit_field3);
2129 map->set_elements_kind(elements_kind);
2130 map->set_new_target_is_base(true);
2131
2132 return map;
2133 }
2134
2135
2136 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2137 AllocationSpace space) {
2138 HeapObject* obj = nullptr;
2139 {
2140 AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
2141 AllocationResult allocation = AllocateRaw(size, space, align);
2142 if (!allocation.To(&obj)) return allocation;
2143 }
2144 #ifdef DEBUG
2145 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2146 DCHECK(chunk->owner()->identity() == space);
2147 #endif
2148 CreateFillerObjectAt(obj->address(), size);
2149 return obj;
2150 }
2151
2152
2153 const Heap::StringTypeTable Heap::string_type_table[] = {
2154 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2155 { type, size, k##camel_name##MapRootIndex } \
2156 ,
2157 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2158 #undef STRING_TYPE_ELEMENT
2159 };
2160
2161
2162 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2163 {"", kempty_stringRootIndex},
2164 #define CONSTANT_STRING_ELEMENT(name, contents) \
2165 { contents, k##name##RootIndex } \
2166 ,
2167 INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2168 #undef CONSTANT_STRING_ELEMENT
2169 };
2170
2171
2172 const Heap::StructTable Heap::struct_table[] = {
2173 #define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
2174 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2175 ,
2176 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2177 #undef STRUCT_TABLE_ELEMENT
2178 };
2179
2180
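// Bootstraps the map hierarchy. The meta map is allocated first and made its
// own map (every map's map is the meta map, including the meta map itself);
// then partial maps for fixed arrays, undefined and null are created, the
// empty fixed array and descriptor array follow, and the partial maps are
// back-patched with the now-existing empty arrays and null value before the
// remaining maps are allocated normally.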
2181 bool Heap::CreateInitialMaps() {
2182 HeapObject* obj = nullptr;
2183 {
2184 AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2185 if (!allocation.To(&obj)) return false;
2186 }
2187 // Map::cast cannot be used due to uninitialized map field.
2188 Map* new_meta_map = reinterpret_cast<Map*>(obj);
2189 set_meta_map(new_meta_map);
2190 new_meta_map->set_map(new_meta_map);
2191
2192 { // Partial map allocation
2193 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
2194 { \
2195 Map* map; \
2196 if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2197 set_##field_name##_map(map); \
2198 }
2199
2200 ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2201 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2202 ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2203
2204 #undef ALLOCATE_PARTIAL_MAP
2205 }
2206
2207 // Allocate the empty array.
2208 {
2209 AllocationResult allocation = AllocateEmptyFixedArray();
2210 if (!allocation.To(&obj)) return false;
2211 }
2212 set_empty_fixed_array(FixedArray::cast(obj));
2213
2214 {
2215 AllocationResult allocation = Allocate(null_map(), OLD_SPACE);
2216 if (!allocation.To(&obj)) return false;
2217 }
2218 set_null_value(Oddball::cast(obj));
2219 Oddball::cast(obj)->set_kind(Oddball::kNull);
2220
2221 {
2222 AllocationResult allocation = Allocate(undefined_map(), OLD_SPACE);
2223 if (!allocation.To(&obj)) return false;
2224 }
2225 set_undefined_value(Oddball::cast(obj));
2226 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2227 DCHECK(!InNewSpace(undefined_value()));
2228
2229 // Set preliminary exception sentinel value before actually initializing it.
2230 set_exception(null_value());
2231
2232 // Allocate the empty descriptor array.
2233 {
2234 AllocationResult allocation = AllocateEmptyFixedArray();
2235 if (!allocation.To(&obj)) return false;
2236 }
2237 set_empty_descriptor_array(DescriptorArray::cast(obj));
2238
2239 // Fix the instance_descriptors for the existing maps.
2240 meta_map()->set_code_cache(empty_fixed_array());
2241 meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2242 meta_map()->set_raw_transitions(Smi::FromInt(0));
2243 meta_map()->set_instance_descriptors(empty_descriptor_array());
2244 if (FLAG_unbox_double_fields) {
2245 meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2246 }
2247
2248 fixed_array_map()->set_code_cache(empty_fixed_array());
2249 fixed_array_map()->set_dependent_code(
2250 DependentCode::cast(empty_fixed_array()));
2251 fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
2252 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2253 if (FLAG_unbox_double_fields) {
2254 fixed_array_map()->set_layout_descriptor(
2255 LayoutDescriptor::FastPointerLayout());
2256 }
2257
2258 undefined_map()->set_code_cache(empty_fixed_array());
2259 undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2260 undefined_map()->set_raw_transitions(Smi::FromInt(0));
2261 undefined_map()->set_instance_descriptors(empty_descriptor_array());
2262 if (FLAG_unbox_double_fields) {
2263 undefined_map()->set_layout_descriptor(
2264 LayoutDescriptor::FastPointerLayout());
2265 }
2266
2267 null_map()->set_code_cache(empty_fixed_array());
2268 null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2269 null_map()->set_raw_transitions(Smi::FromInt(0));
2270 null_map()->set_instance_descriptors(empty_descriptor_array());
2271 if (FLAG_unbox_double_fields) {
2272 null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2273 }
2274
2275 // Fix prototype object for existing maps.
2276 meta_map()->set_prototype(null_value());
2277 meta_map()->set_constructor_or_backpointer(null_value());
2278
2279 fixed_array_map()->set_prototype(null_value());
2280 fixed_array_map()->set_constructor_or_backpointer(null_value());
2281
2282 undefined_map()->set_prototype(null_value());
2283 undefined_map()->set_constructor_or_backpointer(null_value());
2284
2285 null_map()->set_prototype(null_value());
2286 null_map()->set_constructor_or_backpointer(null_value());
2287
2288 { // Map allocation
2289 #define ALLOCATE_MAP(instance_type, size, field_name) \
2290 { \
2291 Map* map; \
2292 if (!AllocateMap((instance_type), size).To(&map)) return false; \
2293 set_##field_name##_map(map); \
2294 }
2295
2296 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2297 ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2298
2299 #define ALLOCATE_PRIMITIVE_MAP(instance_type, size, field_name, \
2300 constructor_function_index) \
2301 { \
2302 ALLOCATE_MAP((instance_type), (size), field_name); \
2303 field_name##_map()->SetConstructorFunctionIndex( \
2304 (constructor_function_index)); \
2305 }
2306
2307 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2308 DCHECK(fixed_array_map() != fixed_cow_array_map());
2309
2310 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2311 ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
2312 Context::NUMBER_FUNCTION_INDEX)
2313 ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2314 mutable_heap_number)
2315 ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
2316 Context::SYMBOL_FUNCTION_INDEX)
2317 #define ALLOCATE_SIMD128_MAP(TYPE, Type, type, lane_count, lane_type) \
2318 ALLOCATE_PRIMITIVE_MAP(SIMD128_VALUE_TYPE, Type::kSize, type, \
2319 Context::TYPE##_FUNCTION_INDEX)
2320 SIMD128_TYPES(ALLOCATE_SIMD128_MAP)
2321 #undef ALLOCATE_SIMD128_MAP
2322 ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2323
2324 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2325 ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
2326 Context::BOOLEAN_FUNCTION_INDEX);
2327 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2328 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2329 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2330 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2331 ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2332
2333 for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2334 const StringTypeTable& entry = string_type_table[i];
2335 {
2336 AllocationResult allocation = AllocateMap(entry.type, entry.size);
2337 if (!allocation.To(&obj)) return false;
2338 }
2339 Map* map = Map::cast(obj);
2340 map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2341 // Mark cons string maps as unstable, because their objects can change
2342 // maps during GC.
2343 if (StringShape(entry.type).IsCons()) map->mark_unstable();
2344 roots_[entry.index] = map;
2345 }
2346
2347 { // Create a separate external one byte string map for native sources.
2348 AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
2349 ExternalOneByteString::kSize);
2350 if (!allocation.To(&obj)) return false;
2351 Map* map = Map::cast(obj);
2352 map->SetConstructorFunctionIndex(Context::STRING_FUNCTION_INDEX);
2353 set_native_source_string_map(map);
2354 }
2355
2356 ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2357 ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2358 ALLOCATE_VARSIZE_MAP(BYTECODE_ARRAY_TYPE, bytecode_array)
2359 ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2360
2361 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2362 ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2363
2364 TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2365 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2366
2367 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2368
2369 ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2370
2371 ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2372 ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2373 ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
2374 ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2375 ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2376
2377 ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
2378
2379 for (unsigned i = 0; i < arraysize(struct_table); i++) {
2380 const StructTable& entry = struct_table[i];
2381 Map* map;
2382 if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2383 roots_[entry.index] = map;
2384 }
2385
2386 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2387 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2388
2389 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2390 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2391 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2392 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2393 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2394 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2395 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
2396
2397 ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2398 native_context_map()->set_dictionary_map(true);
2399 native_context_map()->set_visitor_id(
2400 StaticVisitorBase::kVisitNativeContext);
2401
2402 ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2403 shared_function_info)
2404
2405 ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2406 ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2407 external_map()->set_is_extensible(false);
2408 #undef ALLOCATE_PRIMITIVE_MAP
2409 #undef ALLOCATE_VARSIZE_MAP
2410 #undef ALLOCATE_MAP
2411 }
2412
2413 { // Empty arrays
2414 {
2415 ByteArray* byte_array;
2416 if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2417 set_empty_byte_array(byte_array);
2418
2419 BytecodeArray* bytecode_array = nullptr;
2420 AllocationResult allocation =
2421 AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());
2422 if (!allocation.To(&bytecode_array)) {
2423 return false;
2424 }
2425 set_empty_bytecode_array(bytecode_array);
2426 }
2427
2428 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2429 { \
2430 FixedTypedArrayBase* obj; \
2431 if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2432 return false; \
2433 set_empty_fixed_##type##_array(obj); \
2434 }
2435
2436 TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2437 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2438 }
2439 DCHECK(!InNewSpace(empty_fixed_array()));
2440 return true;
2441 }
2442
2443
2444 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2445 PretenureFlag pretenure) {
2446 // Statically ensure that it is safe to allocate heap numbers in paged
2447 // spaces.
2448 int size = HeapNumber::kSize;
2449 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2450
2451 AllocationSpace space = SelectSpace(pretenure);
2452
2453 HeapObject* result = nullptr;
2454 {
2455 AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
2456 if (!allocation.To(&result)) return allocation;
2457 }
2458
2459 Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2460 HeapObject::cast(result)->set_map_no_write_barrier(map);
2461 HeapNumber::cast(result)->set_value(value);
2462 return result;
2463 }
2464
2465 #define SIMD_ALLOCATE_DEFINITION(TYPE, Type, type, lane_count, lane_type) \
2466 AllocationResult Heap::Allocate##Type(lane_type lanes[lane_count], \
2467 PretenureFlag pretenure) { \
2468 int size = Type::kSize; \
2469 STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize); \
2470 \
2471 AllocationSpace space = SelectSpace(pretenure); \
2472 \
2473 HeapObject* result = nullptr; \
2474 { \
2475 AllocationResult allocation = \
2476 AllocateRaw(size, space, kSimd128Unaligned); \
2477 if (!allocation.To(&result)) return allocation; \
2478 } \
2479 \
2480 result->set_map_no_write_barrier(type##_map()); \
2481 Type* instance = Type::cast(result); \
2482 for (int i = 0; i < lane_count; i++) { \
2483 instance->set_lane(i, lanes[i]); \
2484 } \
2485 return result; \
2486 }
2487 SIMD128_TYPES(SIMD_ALLOCATE_DEFINITION)
2488 #undef SIMD_ALLOCATE_DEFINITION
2489
2490
2491 AllocationResult Heap::AllocateCell(Object* value) {
2492 int size = Cell::kSize;
2493 STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2494
2495 HeapObject* result = nullptr;
2496 {
2497 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2498 if (!allocation.To(&result)) return allocation;
2499 }
2500 result->set_map_no_write_barrier(cell_map());
2501 Cell::cast(result)->set_value(value);
2502 return result;
2503 }
2504
2505
2506 AllocationResult Heap::AllocatePropertyCell() {
2507 int size = PropertyCell::kSize;
2508 STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2509
2510 HeapObject* result = nullptr;
2511 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2512 if (!allocation.To(&result)) return allocation;
2513
2514 result->set_map_no_write_barrier(global_property_cell_map());
2515 PropertyCell* cell = PropertyCell::cast(result);
2516 cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2517 SKIP_WRITE_BARRIER);
2518 cell->set_property_details(PropertyDetails(Smi::FromInt(0)));
2519 cell->set_value(the_hole_value());
2520 return result;
2521 }
2522
2523
2524 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2525 int size = WeakCell::kSize;
2526 STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
2527 HeapObject* result = nullptr;
2528 {
2529 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
2530 if (!allocation.To(&result)) return allocation;
2531 }
2532 result->set_map_no_write_barrier(weak_cell_map());
2533 WeakCell::cast(result)->initialize(value);
2534 WeakCell::cast(result)->clear_next(the_hole_value());
2535 return result;
2536 }
2537
2538
2539 AllocationResult Heap::AllocateTransitionArray(int capacity) {
2540 DCHECK(capacity > 0);
2541 HeapObject* raw_array = nullptr;
2542 {
2543 AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
2544 if (!allocation.To(&raw_array)) return allocation;
2545 }
2546 raw_array->set_map_no_write_barrier(transition_array_map());
2547 TransitionArray* array = TransitionArray::cast(raw_array);
2548 array->set_length(capacity);
2549 MemsetPointer(array->data_start(), undefined_value(), capacity);
2550 return array;
2551 }
2552
2553
2554 void Heap::CreateApiObjects() {
2555 HandleScope scope(isolate());
2556 Factory* factory = isolate()->factory();
2557 Handle<Map> new_neander_map =
2558 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2559
2560 // Don't use Smi-only elements optimizations for objects with the neander
2561 // map. There are too many cases where element values are set directly with a
2562 // bottleneck to trap the Smi-only -> fast elements transition, and there
2563   // appears to be no benefit in optimizing this case.
2564 new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2565 set_neander_map(*new_neander_map);
2566
2567 Handle<JSObject> listeners = factory->NewNeanderObject();
2568 Handle<FixedArray> elements = factory->NewFixedArray(2);
2569 elements->set(0, Smi::FromInt(0));
2570 listeners->set_elements(*elements);
2571 set_message_listeners(*listeners);
2572 }
2573
2574
2575 void Heap::CreateJSEntryStub() {
2576 JSEntryStub stub(isolate(), StackFrame::ENTRY);
2577 set_js_entry_code(*stub.GetCode());
2578 }
2579
2580
2581 void Heap::CreateJSConstructEntryStub() {
2582 JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2583 set_js_construct_entry_code(*stub.GetCode());
2584 }
2585
2586
2587 void Heap::CreateFixedStubs() {
2588 // Here we create roots for fixed stubs. They are needed at GC
2589 // for cooking and uncooking (check out frames.cc).
2590   // This eliminates the need for doing a dictionary lookup in the
2591 // stub cache for these stubs.
2592 HandleScope scope(isolate());
2593
2594 // Create stubs that should be there, so we don't unexpectedly have to
2595 // create them if we need them during the creation of another stub.
2596 // Stub creation mixes raw pointers and handles in an unsafe manner so
2597 // we cannot create stubs while we are creating stubs.
2598 CodeStub::GenerateStubsAheadOfTime(isolate());
2599
2600 // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2601 // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2602 // is created.
2603
2604   // gcc-4.4 has a problem generating correct code for the following snippet:
2605 // { JSEntryStub stub;
2606 // js_entry_code_ = *stub.GetCode();
2607 // }
2608 // { JSConstructEntryStub stub;
2609 // js_construct_entry_code_ = *stub.GetCode();
2610 // }
2611 // To workaround the problem, make separate functions without inlining.
2612 Heap::CreateJSEntryStub();
2613 Heap::CreateJSConstructEntryStub();
2614 }
2615
2616
2617 void Heap::CreateInitialObjects() {
2618 HandleScope scope(isolate());
2619 Factory* factory = isolate()->factory();
2620
2621 // The -0 value must be set before NewNumber works.
2622 set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2623 DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2624
2625 set_nan_value(*factory->NewHeapNumber(
2626 std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
2627 set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2628 set_minus_infinity_value(
2629 *factory->NewHeapNumber(-V8_INFINITY, IMMUTABLE, TENURED));
2630
2631 // The hole has not been created yet, but we want to put something
2632   // predictable in the gaps in the string table, so let's make that Smi zero.
2633 set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2634
2635 // Allocate initial string table.
2636 set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2637
2638 // Finish initializing oddballs after creating the string table.
2639 Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2640 factory->nan_value(), "undefined", Oddball::kUndefined);
2641
2642 // Initialize the null_value.
2643 Oddball::Initialize(isolate(), factory->null_value(), "null",
2644 handle(Smi::FromInt(0), isolate()), "object",
2645 Oddball::kNull);
2646
2647 set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2648 handle(Smi::FromInt(1), isolate()),
2649 "boolean", Oddball::kTrue));
2650
2651 set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2652 handle(Smi::FromInt(0), isolate()),
2653 "boolean", Oddball::kFalse));
2654
2655 set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2656 handle(Smi::FromInt(-1), isolate()),
2657 "undefined", Oddball::kTheHole));
2658
2659 set_uninitialized_value(
2660 *factory->NewOddball(factory->uninitialized_map(), "uninitialized",
2661 handle(Smi::FromInt(-1), isolate()), "undefined",
2662 Oddball::kUninitialized));
2663
2664 set_arguments_marker(
2665 *factory->NewOddball(factory->arguments_marker_map(), "arguments_marker",
2666 handle(Smi::FromInt(-4), isolate()), "undefined",
2667 Oddball::kArgumentMarker));
2668
2669 set_no_interceptor_result_sentinel(*factory->NewOddball(
2670 factory->no_interceptor_result_sentinel_map(),
2671 "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2672 "undefined", Oddball::kOther));
2673
2674 set_termination_exception(*factory->NewOddball(
2675 factory->termination_exception_map(), "termination_exception",
2676 handle(Smi::FromInt(-3), isolate()), "undefined", Oddball::kOther));
2677
2678 set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2679 handle(Smi::FromInt(-5), isolate()),
2680 "undefined", Oddball::kException));
2681
2682 for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2683 Handle<String> str =
2684 factory->InternalizeUtf8String(constant_string_table[i].contents);
2685 roots_[constant_string_table[i].index] = *str;
2686 }
2687
2688 // The {hidden_string} is special because it is an empty string, but does not
2689 // match any string (even the {empty_string}) when looked up in properties.
2690 // Allocate the hidden string which is used to identify the hidden properties
2691 // in JSObjects. The hash code has a special value so that it will not match
2692 // the empty string when searching for the property. It cannot be part of the
2693 // loop above because it needs to be allocated manually with the special
2694 // hash code in place. The hash code for the hidden_string is zero to ensure
2695 // that it will always be at the first entry in property descriptors.
2696 set_hidden_string(*factory->NewOneByteInternalizedString(
2697 OneByteVector("", 0), String::kEmptyStringHash));
2698
2699 // Create the code_stubs dictionary. The initial size is set to avoid
2700 // expanding the dictionary during bootstrapping.
2701 set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2702
2703 // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2704 // is set to avoid expanding the dictionary during bootstrapping.
2705 set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2706
2707 set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2708 *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2709
2710 set_instanceof_cache_function(Smi::FromInt(0));
2711 set_instanceof_cache_map(Smi::FromInt(0));
2712 set_instanceof_cache_answer(Smi::FromInt(0));
2713
2714 {
2715 HandleScope scope(isolate());
2716 #define SYMBOL_INIT(name) \
2717 { \
2718 Handle<String> name##d = factory->NewStringFromStaticChars(#name); \
2719 Handle<Symbol> symbol(isolate()->factory()->NewPrivateSymbol()); \
2720 symbol->set_name(*name##d); \
2721 roots_[k##name##RootIndex] = *symbol; \
2722 }
2723 PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
2724 #undef SYMBOL_INIT
2725 }
2726
2727 {
2728 HandleScope scope(isolate());
2729 #define SYMBOL_INIT(name, description) \
2730 Handle<Symbol> name = factory->NewSymbol(); \
2731 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2732 name->set_name(*name##d); \
2733 roots_[k##name##RootIndex] = *name;
2734 PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
2735 #undef SYMBOL_INIT
2736
2737 #define SYMBOL_INIT(name, description) \
2738 Handle<Symbol> name = factory->NewSymbol(); \
2739 Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
2740 name->set_is_well_known_symbol(true); \
2741 name->set_name(*name##d); \
2742 roots_[k##name##RootIndex] = *name;
2743 WELL_KNOWN_SYMBOL_LIST(SYMBOL_INIT)
2744 #undef SYMBOL_INIT
2745 }
2746
2747 CreateFixedStubs();
2748
2749 // Allocate the dictionary of intrinsic function names.
2750 Handle<NameDictionary> intrinsic_names =
2751 NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2752 Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2753 set_intrinsic_function_names(*intrinsic_names);
2754
2755 Handle<NameDictionary> empty_properties_dictionary =
2756 NameDictionary::New(isolate(), 0, TENURED);
2757 empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
2758 set_empty_properties_dictionary(*empty_properties_dictionary);
2759
2760 set_number_string_cache(
2761 *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2762
2763 // Allocate cache for single character one byte strings.
2764 set_single_character_string_cache(
2765 *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2766
2767 // Allocate cache for string split and regexp-multiple.
2768 set_string_split_cache(*factory->NewFixedArray(
2769 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2770 set_regexp_multiple_cache(*factory->NewFixedArray(
2771 RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2772
2773 // Allocate cache for external strings pointing to native source code.
2774 set_natives_source_cache(
2775 *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2776
2777 set_experimental_natives_source_cache(
2778 *factory->NewFixedArray(ExperimentalNatives::GetBuiltinsCount()));
2779
2780 set_extra_natives_source_cache(
2781 *factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
2782
2783 set_experimental_extra_natives_source_cache(
2784 *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
2785
2786 set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2787
2788 // The symbol registry is initialized lazily.
2789 set_symbol_registry(Smi::FromInt(0));
2790
2791 // Allocate object to hold object observation state.
2792 set_observation_state(*factory->NewJSObjectFromMap(
2793 factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2794
2795 // Microtask queue uses the empty fixed array as a sentinel for "empty".
2796   // The number of queued microtasks is stored in
2797   // Isolate::pending_microtask_count().
2797 set_microtask_queue(empty_fixed_array());
2798
2799 {
2800 StaticFeedbackVectorSpec spec;
2801 FeedbackVectorSlot load_ic_slot = spec.AddLoadICSlot();
2802 FeedbackVectorSlot keyed_load_ic_slot = spec.AddKeyedLoadICSlot();
2803 FeedbackVectorSlot store_ic_slot = spec.AddStoreICSlot();
2804 FeedbackVectorSlot keyed_store_ic_slot = spec.AddKeyedStoreICSlot();
2805
2806 DCHECK_EQ(load_ic_slot,
2807 FeedbackVectorSlot(TypeFeedbackVector::kDummyLoadICSlot));
2808 DCHECK_EQ(keyed_load_ic_slot,
2809 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
2810 DCHECK_EQ(store_ic_slot,
2811 FeedbackVectorSlot(TypeFeedbackVector::kDummyStoreICSlot));
2812 DCHECK_EQ(keyed_store_ic_slot,
2813 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
2814
2815 Handle<TypeFeedbackMetadata> dummy_metadata =
2816 TypeFeedbackMetadata::New(isolate(), &spec);
2817 Handle<TypeFeedbackVector> dummy_vector =
2818 TypeFeedbackVector::New(isolate(), dummy_metadata);
2819
2820 Object* megamorphic = *TypeFeedbackVector::MegamorphicSentinel(isolate());
2821 dummy_vector->Set(load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2822 dummy_vector->Set(keyed_load_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2823 dummy_vector->Set(store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2824 dummy_vector->Set(keyed_store_ic_slot, megamorphic, SKIP_WRITE_BARRIER);
2825
2826 set_dummy_vector(*dummy_vector);
2827 }
2828
2829 {
2830 Handle<WeakCell> cell = factory->NewWeakCell(factory->undefined_value());
2831 set_empty_weak_cell(*cell);
2832 cell->clear();
2833
2834 Handle<FixedArray> cleared_optimized_code_map =
2835 factory->NewFixedArray(SharedFunctionInfo::kEntriesStart, TENURED);
2836 cleared_optimized_code_map->set(SharedFunctionInfo::kSharedCodeIndex,
2837 *cell);
2838 STATIC_ASSERT(SharedFunctionInfo::kEntriesStart == 1 &&
2839 SharedFunctionInfo::kSharedCodeIndex == 0);
2840 set_cleared_optimized_code_map(*cleared_optimized_code_map);
2841 }
2842
2843 set_detached_contexts(empty_fixed_array());
2844 set_retained_maps(ArrayList::cast(empty_fixed_array()));
2845
2846 set_weak_object_to_code_table(
2847 *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
2848 TENURED));
2849
2850 set_script_list(Smi::FromInt(0));
2851
2852 Handle<SeededNumberDictionary> slow_element_dictionary =
2853 SeededNumberDictionary::New(isolate(), 0, TENURED);
2854 slow_element_dictionary->set_requires_slow_elements();
2855 set_empty_slow_element_dictionary(*slow_element_dictionary);
2856
2857 set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2858
2859 // Handling of script id generation is in Heap::NextScriptId().
2860 set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2861
2862 // Allocate the empty script.
2863 Handle<Script> script = factory->NewScript(factory->empty_string());
2864 script->set_type(Script::TYPE_NATIVE);
2865 set_empty_script(*script);
2866
2867 Handle<PropertyCell> cell = factory->NewPropertyCell();
2868 cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
2869 set_array_protector(*cell);
2870
2871 cell = factory->NewPropertyCell();
2872 cell->set_value(the_hole_value());
2873 set_empty_property_cell(*cell);
2874
2875 set_weak_stack_trace_list(Smi::FromInt(0));
2876
2877 set_noscript_shared_function_infos(Smi::FromInt(0));
2878
2879 // Will be filled in by Interpreter::Initialize().
2880 set_interpreter_table(
2881 *interpreter::Interpreter::CreateUninitializedInterpreterTable(
2882 isolate()));
2883
2884 // Initialize keyed lookup cache.
2885 isolate_->keyed_lookup_cache()->Clear();
2886
2887 // Initialize context slot cache.
2888 isolate_->context_slot_cache()->Clear();
2889
2890 // Initialize descriptor cache.
2891 isolate_->descriptor_lookup_cache()->Clear();
2892
2893 // Initialize compilation cache.
2894 isolate_->compilation_cache()->Clear();
2895 }
2896
2897
2898 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2899 switch (root_index) {
2900 case kStoreBufferTopRootIndex:
2901 case kNumberStringCacheRootIndex:
2902 case kInstanceofCacheFunctionRootIndex:
2903 case kInstanceofCacheMapRootIndex:
2904 case kInstanceofCacheAnswerRootIndex:
2905 case kCodeStubsRootIndex:
2906 case kNonMonomorphicCacheRootIndex:
2907 case kPolymorphicCodeCacheRootIndex:
2908 case kEmptyScriptRootIndex:
2909 case kSymbolRegistryRootIndex:
2910 case kScriptListRootIndex:
2911 case kMaterializedObjectsRootIndex:
2912 case kMicrotaskQueueRootIndex:
2913 case kDetachedContextsRootIndex:
2914 case kWeakObjectToCodeTableRootIndex:
2915 case kRetainedMapsRootIndex:
2916 case kNoScriptSharedFunctionInfosRootIndex:
2917 case kWeakStackTraceListRootIndex:
2918 // Smi values
2919 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
2920 SMI_ROOT_LIST(SMI_ENTRY)
2921 #undef SMI_ENTRY
2922 // String table
2923 case kStringTableRootIndex:
2924 return true;
2925
2926 default:
2927 return false;
2928 }
2929 }
2930
2931
2932 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2933 return !RootCanBeWrittenAfterInitialization(root_index) &&
2934 !InNewSpace(root(root_index));
2935 }
2936
2937
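// Worked example: with an 8 MB max semispace, 8 MB / 512 = 16384 (0x4000)
// entries, which is exactly the cap, so the returned length is
// 16384 * 2 = 32768 slots (each entry uses one slot for the number and one
// for its string).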
2938 int Heap::FullSizeNumberStringCacheLength() {
2939 // Compute the size of the number string cache based on the max newspace size.
2940 // The number string cache has a minimum size based on twice the initial cache
2941 // size to ensure that it is bigger after being made 'full size'.
2942 int number_string_cache_size = max_semi_space_size_ / 512;
2943 number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
2944 Min(0x4000, number_string_cache_size));
2945 // There is a string and a number per entry so the length is twice the number
2946 // of entries.
2947 return number_string_cache_size * 2;
2948 }
2949
2950
2951 void Heap::FlushNumberStringCache() {
2952 // Flush the number to string cache.
2953 int len = number_string_cache()->length();
2954 for (int i = 0; i < len; i++) {
2955 number_string_cache()->set_undefined(i);
2956 }
2957 }
2958
2959
2960 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
2961 return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
2962 }
2963
2964
2965 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
2966 ExternalArrayType array_type) {
2967 switch (array_type) {
2968 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2969 case kExternal##Type##Array: \
2970 return kFixed##Type##ArrayMapRootIndex;
2971
2972 TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
2973 #undef ARRAY_TYPE_TO_ROOT_INDEX
2974
2975 default:
2976 UNREACHABLE();
2977 return kUndefinedValueRootIndex;
2978 }
2979 }
2980
2981
2982 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
2983 ElementsKind elementsKind) {
2984 switch (elementsKind) {
2985 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
2986 case TYPE##_ELEMENTS: \
2987 return kEmptyFixed##Type##ArrayRootIndex;
2988
2989 TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
2990 #undef ELEMENT_KIND_TO_ROOT_INDEX
2991 default:
2992 UNREACHABLE();
2993 return kUndefinedValueRootIndex;
2994 }
2995 }
2996
2997
2998 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
2999 return FixedTypedArrayBase::cast(
3000 roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3001 }
3002
3003
3004 AllocationResult Heap::AllocateForeign(Address address,
3005 PretenureFlag pretenure) {
3006 // Statically ensure that it is safe to allocate foreigns in paged spaces.
3007 STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3008 AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
3009 Foreign* result = nullptr;
3010 AllocationResult allocation = Allocate(foreign_map(), space);
3011 if (!allocation.To(&result)) return allocation;
3012 result->set_foreign_address(address);
3013 return result;
3014 }
3015
3016
3017 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3018 if (length < 0 || length > ByteArray::kMaxLength) {
3019 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3020 }
3021 int size = ByteArray::SizeFor(length);
3022 AllocationSpace space = SelectSpace(pretenure);
3023 HeapObject* result = nullptr;
3024 {
3025 AllocationResult allocation = AllocateRaw(size, space);
3026 if (!allocation.To(&result)) return allocation;
3027 }
3028
3029 result->set_map_no_write_barrier(byte_array_map());
3030 ByteArray::cast(result)->set_length(length);
3031 return result;
3032 }
3033
3034
3035 AllocationResult Heap::AllocateBytecodeArray(int length,
3036 const byte* const raw_bytecodes,
3037 int frame_size,
3038 int parameter_count,
3039 FixedArray* constant_pool) {
3040 if (length < 0 || length > BytecodeArray::kMaxLength) {
3041 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3042 }
3043 // The bytecode array is pretenured, so the constant pool should be too.
3044 DCHECK(!InNewSpace(constant_pool));
3045
3046 int size = BytecodeArray::SizeFor(length);
3047 HeapObject* result = nullptr;
3048 {
3049 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3050 if (!allocation.To(&result)) return allocation;
3051 }
3052
3053 result->set_map_no_write_barrier(bytecode_array_map());
3054 BytecodeArray* instance = BytecodeArray::cast(result);
3055 instance->set_length(length);
3056 instance->set_frame_size(frame_size);
3057 instance->set_parameter_count(parameter_count);
3058 instance->set_constant_pool(constant_pool);
3059 CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
3060
3061 return result;
3062 }
3063
3064
3065 void Heap::CreateFillerObjectAt(Address addr, int size) {
3066 if (size == 0) return;
3067 HeapObject* filler = HeapObject::FromAddress(addr);
3068 if (size == kPointerSize) {
3069 filler->set_map_no_write_barrier(
3070 reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
3071 } else if (size == 2 * kPointerSize) {
3072 filler->set_map_no_write_barrier(
3073 reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
3074 } else {
3075 DCHECK_GT(size, 2 * kPointerSize);
3076 filler->set_map_no_write_barrier(
3077 reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
3078 FreeSpace::cast(filler)->nobarrier_set_size(size);
3079 }
3080 // At this point, we may be deserializing the heap from a snapshot, in
3081 // which case none of the maps have been created yet and are still NULL.
3082 DCHECK((filler->map() == NULL && !deserialization_complete_) ||
3083 filler->map()->IsMap());
3084 }
3085
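// Sketch of the three filler shapes chosen above (layout implied by the
// maps, shown for illustration only):
//   size == kPointerSize     -> [one_pointer_filler_map]
//   size == 2 * kPointerSize -> [two_pointer_filler_map][unused word]
//   size >  2 * kPointerSize -> [free_space_map][size][...unused...]
// Only the FreeSpace variant records an explicit size field.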
3086
3087 bool Heap::CanMoveObjectStart(HeapObject* object) {
3088 if (!FLAG_move_object_start) return false;
3089
3090 Address address = object->address();
3091
3092 if (lo_space()->Contains(object)) return false;
3093
3094 Page* page = Page::FromAddress(address);
3095 // We can move the object start if any of the following holds:
3096 // (1) the object is not in old space,
3097 // (2) the page of the object was already swept,
3098 // (3) the page was already concurrently swept. This case is an optimization
3099 // for concurrent sweeping. The WasSwept predicate for concurrently swept
3100 // pages is set after sweeping all pages.
3101 return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
3102 }
3103
3104
3105 void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
3106 // As long as the inspected object is black and we are currently not iterating
3107 // the heap using HeapIterator, we can update the live byte count. We cannot
3108 // update while using HeapIterator because the iterator is temporarily
3109 // marking the whole object graph, without updating live bytes.
3110 if (!in_heap_iterator() &&
3111 !mark_compact_collector()->sweeping_in_progress() &&
3112 Marking::IsBlack(Marking::MarkBitFrom(object->address()))) {
3113 if (mode == SEQUENTIAL_TO_SWEEPER) {
3114 MemoryChunk::IncrementLiveBytesFromGC(object, by);
3115 } else {
3116 MemoryChunk::IncrementLiveBytesFromMutator(object, by);
3117 }
3118 }
3119 }
3120
3121
3122 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3123 int elements_to_trim) {
3124 DCHECK(!object->IsFixedTypedArrayBase());
3125 DCHECK(!object->IsByteArray());
3126 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3127 const int bytes_to_trim = elements_to_trim * element_size;
3128 Map* map = object->map();
3129
3130 // For now this trick is only applied to objects in new and paged space.
3131 // In large object space the object's start must coincide with the chunk
3132 // start, and thus the trick is not applicable.
3133 DCHECK(!lo_space()->Contains(object));
3134 DCHECK(object->map() != fixed_cow_array_map());
3135
3136 STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3137 STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3138 STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3139
3140 const int len = object->length();
3141 DCHECK(elements_to_trim <= len);
3142
3143 // Calculate location of new array start.
3144 Address new_start = object->address() + bytes_to_trim;
3145
3146 // Technically in new space this write might be omitted (except for
3147 // debug mode, which iterates through the heap), but to play it safe
3148 // we still do it.
3149 CreateFillerObjectAt(object->address(), bytes_to_trim);
3150
3151 // Initialize header of the trimmed array. Since left trimming is only
3152 // performed on pages which are not concurrently swept, creating a filler
3153 // object does not require synchronization.
3154 DCHECK(CanMoveObjectStart(object));
3155 Object** former_start = HeapObject::RawField(object, 0);
3156 int new_start_index = elements_to_trim * (element_size / kPointerSize);
3157 former_start[new_start_index] = map;
3158 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3159 FixedArrayBase* new_object =
3160 FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3161
3162 // Maintain consistency of live bytes during incremental marking
3163 Marking::TransferMark(this, object->address(), new_start);
3164 AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
3165
3166 // Notify the heap profiler of change in object layout.
3167 OnMoveEvent(new_object, object, new_object->Size());
3168 return new_object;
3169 }
3170
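// Illustrative before/after picture for a FixedArray with len == 5 and
// elements_to_trim == 2 (one word per cell, addresses grow rightwards):
//   before: [map][len=5][e0][e1][e2][e3][e4]
//   after:  [ filler  ][map][len=3][e2][e3][e4]
// The returned object starts bytes_to_trim past the old address, and the
// vacated prefix is covered by a filler so heap iteration stays valid.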
3171
3172 // Force instantiation of templatized method.
3173 template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
3174 FixedArrayBase*, int);
3175 template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
3176 FixedArrayBase*, int);
3177
3178
3179 template<Heap::InvocationMode mode>
3180 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3181 const int len = object->length();
3182 DCHECK_LE(elements_to_trim, len);
3183 DCHECK_GE(elements_to_trim, 0);
3184
3185 int bytes_to_trim;
3186 if (object->IsFixedTypedArrayBase()) {
3187 InstanceType type = object->map()->instance_type();
3188 bytes_to_trim =
3189 FixedTypedArrayBase::TypedArraySize(type, len) -
3190 FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
3191 } else if (object->IsByteArray()) {
3192 int new_size = ByteArray::SizeFor(len - elements_to_trim);
3193 bytes_to_trim = ByteArray::SizeFor(len) - new_size;
3194 DCHECK_GE(bytes_to_trim, 0);
3195 } else {
3196 const int element_size =
3197 object->IsFixedArray() ? kPointerSize : kDoubleSize;
3198 bytes_to_trim = elements_to_trim * element_size;
3199 }
3200
3202 // For now this trick is only applied to objects in new and paged space.
3203 DCHECK(object->map() != fixed_cow_array_map());
3204
3205 if (bytes_to_trim == 0) {
3206 // No need to create filler and update live bytes counters, just initialize
3207 // header of the trimmed array.
3208 object->synchronized_set_length(len - elements_to_trim);
3209 return;
3210 }
3211
3212 // Calculate location of new array end.
3213 Address new_end = object->address() + object->Size() - bytes_to_trim;
3214
3215 // Technically in new space this write might be omitted (except for
3216 // debug mode, which iterates through the heap), but to play it safe
3217 // we still do it.
3218 // We do not create a filler for objects in large object space.
3219 // TODO(hpayer): We should shrink the large object page if the size
3220 // of the object changed significantly.
3221 if (!lo_space()->Contains(object)) {
3222 CreateFillerObjectAt(new_end, bytes_to_trim);
3223 }
3224
3225 // Initialize header of the trimmed array. We are storing the new length
3226 // using release store after creating a filler for the left-over space to
3227 // avoid races with the sweeper thread.
3228 object->synchronized_set_length(len - elements_to_trim);
3229
3230 // Maintain consistency of live bytes during incremental marking
3231 AdjustLiveBytes(object, -bytes_to_trim, mode);
3232
3233 // Notify the heap profiler of change in object layout. The array may not be
3234 // moved during GC, and size has to be adjusted nevertheless.
3235 HeapProfiler* profiler = isolate()->heap_profiler();
3236 if (profiler->is_tracking_allocations()) {
3237 profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3238 }
3239 }
3240
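// Example (illustrative): right-trimming a FixedArray of length 8 by 3
// elements writes a filler over the trailing 3 * kPointerSize bytes and
// only then release-stores length 5, so a concurrent sweeper never sees a
// length that extends into the filler.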
3241
3242 AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
3243 int length, ExternalArrayType array_type, void* external_pointer,
3244 PretenureFlag pretenure) {
3245 int size = FixedTypedArrayBase::kHeaderSize;
3246 AllocationSpace space = SelectSpace(pretenure);
3247 HeapObject* result = nullptr;
3248 {
3249 AllocationResult allocation = AllocateRaw(size, space);
3250 if (!allocation.To(&result)) return allocation;
3251 }
3252
3253 result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3254 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
3255 elements->set_base_pointer(Smi::FromInt(0), SKIP_WRITE_BARRIER);
3256 elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
3257 elements->set_length(length);
3258 return elements;
3259 }
3260
3261 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3262 ElementsKind* element_kind) {
3263 switch (array_type) {
3264 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3265 case kExternal##Type##Array: \
3266 *element_size = size; \
3267 *element_kind = TYPE##_ELEMENTS; \
3268 return;
3269
3270 TYPED_ARRAYS(TYPED_ARRAY_CASE)
3271 #undef TYPED_ARRAY_CASE
3272
3273 default:
3274 *element_size = 0; // Bogus
3275 *element_kind = UINT8_ELEMENTS; // Bogus
3276 UNREACHABLE();
3277 }
3278 }
3279
3280
3281 AllocationResult Heap::AllocateFixedTypedArray(int length,
3282 ExternalArrayType array_type,
3283 bool initialize,
3284 PretenureFlag pretenure) {
3285 int element_size;
3286 ElementsKind elements_kind;
3287 ForFixedTypedArray(array_type, &element_size, &elements_kind);
3288 int size = OBJECT_POINTER_ALIGN(length * element_size +
3289 FixedTypedArrayBase::kDataOffset);
3290 AllocationSpace space = SelectSpace(pretenure);
3291
3292 HeapObject* object = nullptr;
3293 AllocationResult allocation = AllocateRaw(
3294 size, space,
3295 array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
3296 if (!allocation.To(&object)) return allocation;
3297
3298 object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
3299 FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3300 elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
3301 elements->set_external_pointer(
3302 ExternalReference::fixed_typed_array_base_data_offset().address(),
3303 SKIP_WRITE_BARRIER);
3304 elements->set_length(length);
3305 if (initialize) memset(elements->DataPtr(), 0, elements->DataSize());
3306 return elements;
3307 }
3308
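// Size example (illustrative): for kExternalFloat64Array with length == 3,
// size is OBJECT_POINTER_ALIGN(3 * 8 + kDataOffset), and the allocation is
// additionally kDoubleAligned so the float64 data itself stays 8-byte
// aligned even on 32-bit targets.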
3309
3310 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3311 DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3312 AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
3313
3314 HeapObject* result = nullptr;
3315 if (!allocation.To(&result)) return allocation;
3316
3317 if (immovable) {
3318 Address address = result->address();
3319 // Code objects which should stay at a fixed address are allocated either
3320 // in the first page of code space (objects on the first page of each space
3321 // are never moved) or in large object space.
3322 if (!code_space_->FirstPage()->Contains(address) &&
3323 MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3324 // Discard the first code allocation, which was on a page where it could
3325 // be moved.
3326 CreateFillerObjectAt(result->address(), object_size);
3327 allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3328 if (!allocation.To(&result)) return allocation;
3329 OnAllocationEvent(result, object_size);
3330 }
3331 }
3332
3333 result->set_map_no_write_barrier(code_map());
3334 Code* code = Code::cast(result);
3335 DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
3336 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3337 isolate_->code_range()->contains(code->address()) ||
3338 object_size <= code_space()->AreaSize());
3339 code->set_gc_metadata(Smi::FromInt(0));
3340 code->set_ic_age(global_ic_age_);
3341 return code;
3342 }
3343
3344
3345 AllocationResult Heap::CopyCode(Code* code) {
3346 AllocationResult allocation;
3347
3348 HeapObject* result = nullptr;
3349 // Allocate an object the same size as the code object.
3350 int obj_size = code->Size();
3351 allocation = AllocateRaw(obj_size, CODE_SPACE);
3352 if (!allocation.To(&result)) return allocation;
3353
3354 // Copy code object.
3355 Address old_addr = code->address();
3356 Address new_addr = result->address();
3357 CopyBlock(new_addr, old_addr, obj_size);
3358 Code* new_code = Code::cast(result);
3359
3360 // Relocate the copy.
3361 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3362 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3363 isolate_->code_range()->contains(code->address()) ||
3364 obj_size <= code_space()->AreaSize());
3365 new_code->Relocate(new_addr - old_addr);
3366 return new_code;
3367 }
3368
3369
3370 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3371 // Allocate the ByteArray before the Code object, so that we do not risk
3372 // leaving an uninitialized Code object (and breaking the heap).
3373 ByteArray* reloc_info_array = nullptr;
3374 {
3375 AllocationResult allocation =
3376 AllocateByteArray(reloc_info.length(), TENURED);
3377 if (!allocation.To(&reloc_info_array)) return allocation;
3378 }
3379
3380 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3381
3382 int new_obj_size = Code::SizeFor(new_body_size);
3383
3384 Address old_addr = code->address();
3385
3386 size_t relocation_offset =
3387 static_cast<size_t>(code->instruction_end() - old_addr);
3388
3389 HeapObject* result = nullptr;
3390 AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
3391 if (!allocation.To(&result)) return allocation;
3392
3393 // Copy code object.
3394 Address new_addr = result->address();
3395
3396 // Copy header and instructions.
3397 CopyBytes(new_addr, old_addr, relocation_offset);
3398
3399 Code* new_code = Code::cast(result);
3400 new_code->set_relocation_info(reloc_info_array);
3401
3402 // Copy patched rinfo.
3403 CopyBytes(new_code->relocation_start(), reloc_info.start(),
3404 static_cast<size_t>(reloc_info.length()));
3405
3406 // Relocate the copy.
3407 DCHECK(IsAligned(bit_cast<intptr_t>(new_code->address()), kCodeAlignment));
3408 DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3409 isolate_->code_range()->contains(code->address()) ||
3410 new_obj_size <= code_space()->AreaSize());
3411
3412 new_code->Relocate(new_addr - old_addr);
3413
3414 #ifdef VERIFY_HEAP
3415 if (FLAG_verify_heap) code->ObjectVerify();
3416 #endif
3417 return new_code;
3418 }
3419
3420
3421 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3422 AllocationSite* allocation_site) {
3423 memento->set_map_no_write_barrier(allocation_memento_map());
3424 DCHECK(allocation_site->map() == allocation_site_map());
3425 memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3426 if (FLAG_allocation_site_pretenuring) {
3427 allocation_site->IncrementMementoCreateCount();
3428 }
3429 }
3430
3431
3432 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3433 AllocationSite* allocation_site) {
3434 DCHECK(gc_state_ == NOT_IN_GC);
3435 DCHECK(map->instance_type() != MAP_TYPE);
3436 int size = map->instance_size();
3437 if (allocation_site != NULL) {
3438 size += AllocationMemento::kSize;
3439 }
3440 HeapObject* result = nullptr;
3441 AllocationResult allocation = AllocateRaw(size, space);
3442 if (!allocation.To(&result)) return allocation;
3443 // No need for write barrier since object is white and map is in old space.
3444 result->set_map_no_write_barrier(map);
3445 if (allocation_site != NULL) {
3446 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3447 reinterpret_cast<Address>(result) + map->instance_size());
3448 InitializeAllocationMemento(alloc_memento, allocation_site);
3449 }
3450 return result;
3451 }
3452
3453
3454 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3455 Map* map) {
3456 obj->set_properties(properties);
3457 obj->initialize_elements();
3458 // TODO(1240798): Initialize the object's body using valid initial values
3459 // according to the object's initial map. For example, if the map's
3460 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3461 // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3462 // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
3463 // verification code has to cope with (temporarily) invalid objects; see,
3464 // for example, JSArray::JSArrayVerify.
3465 InitializeJSObjectBody(obj, map, JSObject::kHeaderSize);
3466 }
3467
3468
3469 void Heap::InitializeJSObjectBody(JSObject* obj, Map* map, int start_offset) {
3470 if (start_offset == map->instance_size()) return;
3471 DCHECK_LT(start_offset, map->instance_size());
3472
3473 Object* filler;
3474 // We cannot always fill with one_pointer_filler_map because objects
3475 // created from API functions expect their internal fields to be initialized
3476 // with undefined_value.
3477 // Pre-allocated fields need to be initialized with undefined_value as well
3478 // so that object accesses before the constructor completes (e.g. in the
3479 // debugger) will not cause a crash.
3480
3481 // In case of Array subclassing the |map| could already be transitioned
3482 // to different elements kind from the initial map on which we track slack.
3483 Map* initial_map = map->FindRootMap();
3484 if (initial_map->IsInobjectSlackTrackingInProgress()) {
3485 // We might want to shrink the object later.
3486 filler = Heap::one_pointer_filler_map();
3487 } else {
3488 filler = Heap::undefined_value();
3489 }
3490 obj->InitializeBody(map, start_offset, Heap::undefined_value(), filler);
3491 initial_map->InobjectSlackTrackingStep();
3492 }
3493
3494
3495 AllocationResult Heap::AllocateJSObjectFromMap(
3496 Map* map, PretenureFlag pretenure, AllocationSite* allocation_site) {
3497 // JSFunctions should be allocated using AllocateFunction to be
3498 // properly initialized.
3499 DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3500
3501 // Both types of global objects should be allocated using
3502 // AllocateGlobalObject to be properly initialized.
3503 DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3504
3505 // Allocate the backing storage for the properties.
3506 FixedArray* properties = empty_fixed_array();
3507
3508 // Allocate the JSObject.
3509 AllocationSpace space = SelectSpace(pretenure);
3510 JSObject* js_obj = nullptr;
3511 AllocationResult allocation = Allocate(map, space, allocation_site);
3512 if (!allocation.To(&js_obj)) return allocation;
3513
3514 // Initialize the JSObject.
3515 InitializeJSObjectFromMap(js_obj, properties, map);
3516 DCHECK(js_obj->HasFastElements() || js_obj->HasFixedTypedArrayElements());
3517 return js_obj;
3518 }
3519
3520
3521 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3522 PretenureFlag pretenure,
3523 AllocationSite* allocation_site) {
3524 DCHECK(constructor->has_initial_map());
3525
3526 // Allocate the object based on the constructor's initial map.
3527 AllocationResult allocation = AllocateJSObjectFromMap(
3528 constructor->initial_map(), pretenure, allocation_site);
3529 #ifdef DEBUG
3530 // Make sure result is NOT a global object if valid.
3531 HeapObject* obj = nullptr;
3532 DCHECK(!allocation.To(&obj) || !obj->IsJSGlobalObject());
3533 #endif
3534 return allocation;
3535 }
3536
3537
3538 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3539 // Make the clone.
3540 Map* map = source->map();
3541
3542 // We can only clone regexps, normal objects or arrays. Copying anything else
3543 // will break invariants.
3544 CHECK(map->instance_type() == JS_REGEXP_TYPE ||
3545 map->instance_type() == JS_OBJECT_TYPE ||
3546 map->instance_type() == JS_ARRAY_TYPE);
3547
3548 int object_size = map->instance_size();
3549 HeapObject* clone = nullptr;
3550
3551 DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3552
3553 int adjusted_object_size =
3554 site != NULL ? object_size + AllocationMemento::kSize : object_size;
3555 AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
3556 if (!allocation.To(&clone)) return allocation;
3557
3558 SLOW_DCHECK(InNewSpace(clone));
3559 // Since we know the clone is allocated in new space, we can copy
3560 // the contents without worrying about updating the write barrier.
3561 CopyBlock(clone->address(), source->address(), object_size);
3562
3563 if (site != NULL) {
3564 AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3565 reinterpret_cast<Address>(clone) + object_size);
3566 InitializeAllocationMemento(alloc_memento, site);
3567 }
3568
3569 SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3570 source->GetElementsKind());
3571 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3572 FixedArray* properties = FixedArray::cast(source->properties());
3573 // Update elements if necessary.
3574 if (elements->length() > 0) {
3575 FixedArrayBase* elem = nullptr;
3576 {
3577 AllocationResult allocation;
3578 if (elements->map() == fixed_cow_array_map()) {
3579 allocation = FixedArray::cast(elements);
3580 } else if (source->HasFastDoubleElements()) {
3581 allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3582 } else {
3583 allocation = CopyFixedArray(FixedArray::cast(elements));
3584 }
3585 if (!allocation.To(&elem)) return allocation;
3586 }
3587 JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
3588 }
3589 // Update properties if necessary.
3590 if (properties->length() > 0) {
3591 FixedArray* prop = nullptr;
3592 {
3593 AllocationResult allocation = CopyFixedArray(properties);
3594 if (!allocation.To(&prop)) return allocation;
3595 }
3596 JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
3597 }
3598 // Return the new clone.
3599 return clone;
3600 }
3601
3602
3603 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3604 int len) {
3605 // Only works for one-byte strings.
3606 DCHECK(vector.length() == len);
3607 MemCopy(chars, vector.start(), len);
3608 }
3609
3610 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3611 int len) {
3612 const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3613 size_t stream_length = vector.length();
3614 while (stream_length != 0) {
3615 size_t consumed = 0;
3616 uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3617 DCHECK(c != unibrow::Utf8::kBadChar);
3618 DCHECK(consumed <= stream_length);
3619 stream_length -= consumed;
3620 stream += consumed;
3621 if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3622 len -= 2;
3623 if (len < 0) break;
3624 *chars++ = unibrow::Utf16::LeadSurrogate(c);
3625 *chars++ = unibrow::Utf16::TrailSurrogate(c);
3626 } else {
3627 len -= 1;
3628 if (len < 0) break;
3629 *chars++ = c;
3630 }
3631 }
3632 DCHECK(stream_length == 0);
3633 DCHECK(len == 0);
3634 }
3635
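// Decoding example (standard UTF-8/UTF-16 behaviour): the 4-byte sequence
// F0 9F 98 80 decodes to U+1F600, which is above kMaxNonSurrogateCharCode
// and is therefore written as the surrogate pair 0xD83D 0xDE00, consuming
// two output slots; BMP characters consume one slot each.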
3636
3637 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3638 DCHECK(s->length() == len);
3639 String::WriteToFlat(s, chars, 0, len);
3640 }
3641
3642
3643 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3644 DCHECK(s->length() == len);
3645 String::WriteToFlat(s, chars, 0, len);
3646 }
3647
3648
3649 template <bool is_one_byte, typename T>
3650 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3651 uint32_t hash_field) {
3652 DCHECK(chars >= 0);
3653 // Compute map and object size.
3654 int size;
3655 Map* map;
3656
3657 DCHECK_LE(0, chars);
3658 DCHECK_GE(String::kMaxLength, chars);
3659 if (is_one_byte) {
3660 map = one_byte_internalized_string_map();
3661 size = SeqOneByteString::SizeFor(chars);
3662 } else {
3663 map = internalized_string_map();
3664 size = SeqTwoByteString::SizeFor(chars);
3665 }
3666
3667 // Allocate string.
3668 HeapObject* result = nullptr;
3669 {
3670 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3671 if (!allocation.To(&result)) return allocation;
3672 }
3673
3674 result->set_map_no_write_barrier(map);
3675 // Set length and hash fields of the allocated string.
3676 String* answer = String::cast(result);
3677 answer->set_length(chars);
3678 answer->set_hash_field(hash_field);
3679
3680 DCHECK_EQ(size, answer->Size());
3681
3682 if (is_one_byte) {
3683 WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3684 } else {
3685 WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3686 }
3687 return answer;
3688 }
3689
3690
3691 // Need explicit instantiations.
3692 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3693 int,
3694 uint32_t);
3695 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3696 int,
3697 uint32_t);
3698 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3699 Vector<const char>, int, uint32_t);
3700
3701
3702 AllocationResult Heap::AllocateRawOneByteString(int length,
3703 PretenureFlag pretenure) {
3704 DCHECK_LE(0, length);
3705 DCHECK_GE(String::kMaxLength, length);
3706 int size = SeqOneByteString::SizeFor(length);
3707 DCHECK(size <= SeqOneByteString::kMaxSize);
3708 AllocationSpace space = SelectSpace(pretenure);
3709
3710 HeapObject* result = nullptr;
3711 {
3712 AllocationResult allocation = AllocateRaw(size, space);
3713 if (!allocation.To(&result)) return allocation;
3714 }
3715
3716 // Partially initialize the object.
3717 result->set_map_no_write_barrier(one_byte_string_map());
3718 String::cast(result)->set_length(length);
3719 String::cast(result)->set_hash_field(String::kEmptyHashField);
3720 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3721
3722 return result;
3723 }
3724
3725
3726 AllocationResult Heap::AllocateRawTwoByteString(int length,
3727 PretenureFlag pretenure) {
3728 DCHECK_LE(0, length);
3729 DCHECK_GE(String::kMaxLength, length);
3730 int size = SeqTwoByteString::SizeFor(length);
3731 DCHECK(size <= SeqTwoByteString::kMaxSize);
3732 AllocationSpace space = SelectSpace(pretenure);
3733
3734 HeapObject* result = nullptr;
3735 {
3736 AllocationResult allocation = AllocateRaw(size, space);
3737 if (!allocation.To(&result)) return allocation;
3738 }
3739
3740 // Partially initialize the object.
3741 result->set_map_no_write_barrier(string_map());
3742 String::cast(result)->set_length(length);
3743 String::cast(result)->set_hash_field(String::kEmptyHashField);
3744 DCHECK_EQ(size, HeapObject::cast(result)->Size());
3745 return result;
3746 }
3747
3748
3749 AllocationResult Heap::AllocateEmptyFixedArray() {
3750 int size = FixedArray::SizeFor(0);
3751 HeapObject* result = nullptr;
3752 {
3753 AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
3754 if (!allocation.To(&result)) return allocation;
3755 }
3756 // Initialize the object.
3757 result->set_map_no_write_barrier(fixed_array_map());
3758 FixedArray::cast(result)->set_length(0);
3759 return result;
3760 }
3761
3762
3763 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3764 if (!InNewSpace(src)) {
3765 return src;
3766 }
3767
3768 int len = src->length();
3769 HeapObject* obj = nullptr;
3770 {
3771 AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3772 if (!allocation.To(&obj)) return allocation;
3773 }
3774 obj->set_map_no_write_barrier(fixed_array_map());
3775 FixedArray* result = FixedArray::cast(obj);
3776 result->set_length(len);
3777
3778 // Copy the content.
3779 DisallowHeapAllocation no_gc;
3780 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3781 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3782
3783 // TODO(mvstanton): The map is set twice because of protection against calling
3784 // set() on a COW FixedArray. Issue v8:3221 was created to track this; once
3785 // it is resolved we might be able to remove this whole method.
3786 HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3787 return result;
3788 }
3789
3790
3791 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3792 ExternalArrayType array_type) {
3793 return AllocateFixedTypedArray(0, array_type, false, TENURED);
3794 }
3795
3796
3797 AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
3798 PretenureFlag pretenure) {
3799 int old_len = src->length();
3800 int new_len = old_len + grow_by;
3801 DCHECK(new_len >= old_len);
3802 HeapObject* obj = nullptr;
3803 {
3804 AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
3805 if (!allocation.To(&obj)) return allocation;
3806 }
3807 obj->set_map_no_write_barrier(fixed_array_map());
3808 FixedArray* result = FixedArray::cast(obj);
3809 result->set_length(new_len);
3810
3811 // Copy the content.
3812 DisallowHeapAllocation no_gc;
3813 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3814 for (int i = 0; i < old_len; i++) result->set(i, src->get(i), mode);
3815 MemsetPointer(result->data_start() + old_len, undefined_value(), grow_by);
3816 return result;
3817 }
3818
3819
3820 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3821 int len = src->length();
3822 HeapObject* obj = nullptr;
3823 {
3824 AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
3825 if (!allocation.To(&obj)) return allocation;
3826 }
3827 if (InNewSpace(obj)) {
3828 obj->set_map_no_write_barrier(map);
3829 CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
3830 FixedArray::SizeFor(len) - kPointerSize);
3831 return obj;
3832 }
3833 obj->set_map_no_write_barrier(map);
3834 FixedArray* result = FixedArray::cast(obj);
3835 result->set_length(len);
3836
3837 // Copy the content.
3838 DisallowHeapAllocation no_gc;
3839 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3840 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3841 return result;
3842 }
3843
3844
3845 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
3846 Map* map) {
3847 int len = src->length();
3848 HeapObject* obj = nullptr;
3849 {
3850 AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
3851 if (!allocation.To(&obj)) return allocation;
3852 }
3853 obj->set_map_no_write_barrier(map);
3854 CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
3855 src->address() + FixedDoubleArray::kLengthOffset,
3856 FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
3857 return obj;
3858 }
3859
3860
3861 AllocationResult Heap::AllocateRawFixedArray(int length,
3862 PretenureFlag pretenure) {
3863 if (length < 0 || length > FixedArray::kMaxLength) {
3864 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3865 }
3866 int size = FixedArray::SizeFor(length);
3867 AllocationSpace space = SelectSpace(pretenure);
3868
3869 return AllocateRaw(size, space);
3870 }
3871
3872
3873 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
3874 PretenureFlag pretenure,
3875 Object* filler) {
3876 DCHECK(length >= 0);
3877 DCHECK(empty_fixed_array()->IsFixedArray());
3878 if (length == 0) return empty_fixed_array();
3879
3880 DCHECK(!InNewSpace(filler));
3881 HeapObject* result = nullptr;
3882 {
3883 AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
3884 if (!allocation.To(&result)) return allocation;
3885 }
3886
3887 result->set_map_no_write_barrier(fixed_array_map());
3888 FixedArray* array = FixedArray::cast(result);
3889 array->set_length(length);
3890 MemsetPointer(array->data_start(), filler, length);
3891 return array;
3892 }
3893
3894
3895 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
3896 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3897 }
3898
3899
3900 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
3901 if (length == 0) return empty_fixed_array();
3902
3903 HeapObject* obj = nullptr;
3904 {
3905 AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
3906 if (!allocation.To(&obj)) return allocation;
3907 }
3908
3909 obj->set_map_no_write_barrier(fixed_array_map());
3910 FixedArray::cast(obj)->set_length(length);
3911 return obj;
3912 }
3913
3914
3915 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
3916 int length, PretenureFlag pretenure) {
3917 if (length == 0) return empty_fixed_array();
3918
3919 HeapObject* elements = nullptr;
3920 AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
3921 if (!allocation.To(&elements)) return allocation;
3922
3923 elements->set_map_no_write_barrier(fixed_double_array_map());
3924 FixedDoubleArray::cast(elements)->set_length(length);
3925 return elements;
3926 }
3927
3928
3929 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
3930 PretenureFlag pretenure) {
3931 if (length < 0 || length > FixedDoubleArray::kMaxLength) {
3932 v8::internal::Heap::FatalProcessOutOfMemory("invalid array length",
3933 true);
3934 }
3935 int size = FixedDoubleArray::SizeFor(length);
3936 AllocationSpace space = SelectSpace(pretenure);
3937
3938 HeapObject* object = nullptr;
3939 {
3940 AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
3941 if (!allocation.To(&object)) return allocation;
3942 }
3943
3944 return object;
3945 }
3946
3947
3948 AllocationResult Heap::AllocateSymbol() {
3949 // Statically ensure that it is safe to allocate symbols in paged spaces.
3950 STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
3951
3952 HeapObject* result = nullptr;
3953 AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
3954 if (!allocation.To(&result)) return allocation;
3955
3956 result->set_map_no_write_barrier(symbol_map());
3957
3958 // Generate a random hash value.
3959 int hash;
3960 int attempts = 0;
3961 do {
3962 hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
3963 attempts++;
3964 } while (hash == 0 && attempts < 30);
3965 if (hash == 0) hash = 1; // never return 0
3966
3967 Symbol::cast(result)
3968 ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
3969 Symbol::cast(result)->set_name(undefined_value());
3970 Symbol::cast(result)->set_flags(0);
3971
3972 DCHECK(!Symbol::cast(result)->is_private());
3973 return result;
3974 }
3975
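// Example (illustrative): a random hash of 0x1234 is stored as
// kIsNotArrayIndexMask | (0x1234 << Name::kHashShift), so a symbol can
// never be confused with a cached array index, and the retry loop above
// guarantees the stored hash is never 0.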
3976
3977 AllocationResult Heap::AllocateStruct(InstanceType type) {
3978 Map* map;
3979 switch (type) {
3980 #define MAKE_CASE(NAME, Name, name) \
3981 case NAME##_TYPE: \
3982 map = name##_map(); \
3983 break;
3984 STRUCT_LIST(MAKE_CASE)
3985 #undef MAKE_CASE
3986 default:
3987 UNREACHABLE();
3988 return exception();
3989 }
3990 int size = map->instance_size();
3991 Struct* result = nullptr;
3992 {
3993 AllocationResult allocation = Allocate(map, OLD_SPACE);
3994 if (!allocation.To(&result)) return allocation;
3995 }
3996 result->InitializeBody(size);
3997 return result;
3998 }
3999
4000
4001 bool Heap::IsHeapIterable() {
4002 // TODO(hpayer): This function is not correct. Allocation folding in old
4003 // space breaks the iterability.
4004 return new_space_top_after_last_gc_ == new_space()->top();
4005 }
4006
4007
4008 void Heap::MakeHeapIterable() {
4009 DCHECK(AllowHeapAllocation::IsAllowed());
4010 if (!IsHeapIterable()) {
4011 CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4012 }
4013 if (mark_compact_collector()->sweeping_in_progress()) {
4014 mark_compact_collector()->EnsureSweepingCompleted();
4015 }
4016 DCHECK(IsHeapIterable());
4017 }
4018
4019
4020 static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
4021 const double kMinMutatorUtilization = 0.0;
4022 const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
4023 if (mutator_speed == 0) return kMinMutatorUtilization;
4024 if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
4025 // Derivation:
4026 // mutator_utilization = mutator_time / (mutator_time + gc_time)
4027 // mutator_time = 1 / mutator_speed
4028 // gc_time = 1 / gc_speed
4029 // mutator_utilization = (1 / mutator_speed) /
4030 // (1 / mutator_speed + 1 / gc_speed)
4031 // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
4032 return gc_speed / (mutator_speed + gc_speed);
4033 }
4034
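// Worked example (illustrative numbers): with mutator_speed = 1000000
// bytes/ms and gc_speed = 200000 bytes/ms, utilization is
// 200000 / (1000000 + 200000) ~= 0.17, i.e. the mutator would get roughly
// 17% of the time if every allocated byte also had to be collected.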
4035
4036 double Heap::YoungGenerationMutatorUtilization() {
4037 double mutator_speed = static_cast<double>(
4038 tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4039 double gc_speed = static_cast<double>(
4040 tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
4041 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4042 if (FLAG_trace_mutator_utilization) {
4043 PrintIsolate(isolate(),
4044 "Young generation mutator utilization = %.3f ("
4045 "mutator_speed=%.f, gc_speed=%.f)\n",
4046 result, mutator_speed, gc_speed);
4047 }
4048 return result;
4049 }
4050
4051
4052 double Heap::OldGenerationMutatorUtilization() {
4053 double mutator_speed = static_cast<double>(
4054 tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
4055 double gc_speed = static_cast<double>(
4056 tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
4057 double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
4058 if (FLAG_trace_mutator_utilization) {
4059 PrintIsolate(isolate(),
4060 "Old generation mutator utilization = %.3f ("
4061 "mutator_speed=%.f, gc_speed=%.f)\n",
4062 result, mutator_speed, gc_speed);
4063 }
4064 return result;
4065 }
4066
4067
4068 bool Heap::HasLowYoungGenerationAllocationRate() {
4069 const double high_mutator_utilization = 0.993;
4070 return YoungGenerationMutatorUtilization() > high_mutator_utilization;
4071 }
4072
4073
4074 bool Heap::HasLowOldGenerationAllocationRate() {
4075 const double high_mutator_utilization = 0.993;
4076 return OldGenerationMutatorUtilization() > high_mutator_utilization;
4077 }
4078
4079
4080 bool Heap::HasLowAllocationRate() {
4081 return HasLowYoungGenerationAllocationRate() &&
4082 HasLowOldGenerationAllocationRate();
4083 }
4084
4085
4086 bool Heap::HasHighFragmentation() {
4087 intptr_t used = PromotedSpaceSizeOfObjects();
4088 intptr_t committed = CommittedOldGenerationMemory();
4089 return HasHighFragmentation(used, committed);
4090 }
4091
4092
4093 bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
4094 const intptr_t kSlack = 16 * MB;
4095 // Fragmentation is high if committed > 2 * used + kSlack.
4096 // Rewrite the expression to avoid overflow.
4097 return committed - used > used + kSlack;
4098 }
4099
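// Worked example (illustrative numbers): used = 100 MB, committed = 220 MB
// gives committed - used = 120 MB > used + kSlack = 116 MB, so
// fragmentation is considered high; with committed = 210 MB it would not be.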
4100
4101 void Heap::ReduceNewSpaceSize() {
4102 // TODO(ulan): Unify this constant with the similar constant in
4103 // GCIdleTimeHandler once the change is merged to 4.5.
4104 static const size_t kLowAllocationThroughput = 1000;
4105 const size_t allocation_throughput =
4106 tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
4107
4108 if (FLAG_predictable) return;
4109
4110 if (ShouldReduceMemory() ||
4111 ((allocation_throughput != 0) &&
4112 (allocation_throughput < kLowAllocationThroughput))) {
4113 new_space_.Shrink();
4114 UncommitFromSpace();
4115 }
4116 }
4117
4118
4119 void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
4120 if (incremental_marking()->IsMarking() &&
4121 (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4122 (!incremental_marking()->finalize_marking_completed() &&
4123 mark_compact_collector()->marking_deque()->IsEmpty()))) {
4124 FinalizeIncrementalMarking(comment);
4125 } else if (incremental_marking()->IsComplete() ||
4126 (mark_compact_collector()->marking_deque()->IsEmpty())) {
4127 CollectAllGarbage(current_gc_flags_, comment);
4128 }
4129 }
4130
4131
4132 bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
4133 size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
4134 size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
4135 static_cast<size_t>(
4136 tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
4137 if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4138 (!incremental_marking()->finalize_marking_completed() &&
4139 mark_compact_collector()->marking_deque()->IsEmpty() &&
4140 gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
4141 static_cast<size_t>(idle_time_in_ms)))) {
4142 FinalizeIncrementalMarking(
4143 "Idle notification: finalize incremental marking");
4144 return true;
4145 } else if (incremental_marking()->IsComplete() ||
4146 (mark_compact_collector()->marking_deque()->IsEmpty() &&
4147 gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
4148 static_cast<size_t>(idle_time_in_ms), size_of_objects,
4149 final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4150 CollectAllGarbage(current_gc_flags_,
4151 "idle notification: finalize incremental marking");
4152 return true;
4153 }
4154 return false;
4155 }
4156
4157
4158 GCIdleTimeHeapState Heap::ComputeHeapState() {
4159 GCIdleTimeHeapState heap_state;
4160 heap_state.contexts_disposed = contexts_disposed_;
4161 heap_state.contexts_disposal_rate =
4162 tracer()->ContextDisposalRateInMilliseconds();
4163 heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4164 heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4165 return heap_state;
4166 }
4167
4168
4169 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
4170 GCIdleTimeHeapState heap_state,
4171 double deadline_in_ms) {
4172 bool result = false;
4173 switch (action.type) {
4174 case DONE:
4175 result = true;
4176 break;
4177 case DO_INCREMENTAL_STEP: {
4178 if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
4179 result = true;
4180 } else {
4181 incremental_marking()
4182 ->incremental_marking_job()
4183 ->NotifyIdleTaskProgress();
4184 result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
4185 IncrementalMarkingJob::IdleTask::kDone;
4186 }
4187 break;
4188 }
4189 case DO_FULL_GC: {
4190 DCHECK(contexts_disposed_ > 0);
4191 HistogramTimerScope scope(isolate_->counters()->gc_context());
4192 CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
4193 break;
4194 }
4195 case DO_NOTHING:
4196 break;
4197 }
4198
4199 return result;
4200 }
4201
4202
4203 void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
4204 GCIdleTimeHeapState heap_state,
4205 double start_ms, double deadline_in_ms) {
4206 double idle_time_in_ms = deadline_in_ms - start_ms;
4207 double current_time = MonotonicallyIncreasingTimeInMs();
4208 last_idle_notification_time_ = current_time;
4209 double deadline_difference = deadline_in_ms - current_time;
4210
4211 contexts_disposed_ = 0;
4212
4213 isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4214 static_cast<int>(idle_time_in_ms));
4215
4216 if (deadline_in_ms - start_ms >
4217 GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4218 int committed_memory = static_cast<int>(CommittedMemory() / KB);
4219 int used_memory = static_cast<int>(heap_state.size_of_objects / KB);
4220 isolate()->counters()->aggregated_memory_heap_committed()->AddSample(
4221 start_ms, committed_memory);
4222 isolate()->counters()->aggregated_memory_heap_used()->AddSample(
4223 start_ms, used_memory);
4224 }
4225
4226 if (deadline_difference >= 0) {
4227 if (action.type != DONE && action.type != DO_NOTHING) {
4228 isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4229 static_cast<int>(deadline_difference));
4230 }
4231 } else {
4232 isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4233 static_cast<int>(-deadline_difference));
4234 }
4235
4236 if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4237 FLAG_trace_idle_notification_verbose) {
4238 PrintIsolate(isolate_, "%8.0f ms: ", isolate()->time_millis_since_init());
4239 PrintF(
4240 "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4241 "ms, deadline usage %.2f ms [",
4242 idle_time_in_ms, idle_time_in_ms - deadline_difference,
4243 deadline_difference);
4244 action.Print();
4245 PrintF("]");
4246 if (FLAG_trace_idle_notification_verbose) {
4247 PrintF("[");
4248 heap_state.Print();
4249 PrintF("]");
4250 }
4251 PrintF("\n");
4252 }
4253 }
4254
4255
4256 double Heap::MonotonicallyIncreasingTimeInMs() {
4257 return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4258 static_cast<double>(base::Time::kMillisecondsPerSecond);
4259 }
4260
4261
4262 bool Heap::IdleNotification(int idle_time_in_ms) {
4263 return IdleNotification(
4264 V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4265 (static_cast<double>(idle_time_in_ms) /
4266 static_cast<double>(base::Time::kMillisecondsPerSecond)));
4267 }
4268
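// Conversion example (illustrative): IdleNotification(16) called at
// monotonic time t seconds forwards to IdleNotification(t + 0.016), the
// deadline-based overload below.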
4269
4270 bool Heap::IdleNotification(double deadline_in_seconds) {
4271 CHECK(HasBeenSetUp());
4272 double deadline_in_ms =
4273 deadline_in_seconds *
4274 static_cast<double>(base::Time::kMillisecondsPerSecond);
4275 HistogramTimerScope idle_notification_scope(
4276 isolate_->counters()->gc_idle_notification());
4277 double start_ms = MonotonicallyIncreasingTimeInMs();
4278 double idle_time_in_ms = deadline_in_ms - start_ms;
4279
4280 tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
4281 OldGenerationAllocationCounter());
4282
4283 GCIdleTimeHeapState heap_state = ComputeHeapState();
4284
4285 GCIdleTimeAction action =
4286 gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
4287
4288 bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
4289
4290 IdleNotificationEpilogue(action, heap_state, start_ms, deadline_in_ms);
4291 return result;
4292 }
4293
4294
4295 bool Heap::RecentIdleNotificationHappened() {
4296 return (last_idle_notification_time_ +
4297 GCIdleTimeHandler::kMaxScheduledIdleTime) >
4298 MonotonicallyIncreasingTimeInMs();
4299 }
4300
4301
4302 #ifdef DEBUG
4303
4304 void Heap::Print() {
4305 if (!HasBeenSetUp()) return;
4306 isolate()->PrintStack(stdout);
4307 AllSpaces spaces(this);
4308 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4309 space->Print();
4310 }
4311 }
4312
4313
4314 void Heap::ReportCodeStatistics(const char* title) {
4315 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4316 PagedSpace::ResetCodeStatistics(isolate());
4317 // We do not look for code in new space, map space, or old space. If code
4318 // somehow ends up in those spaces, we would miss it here.
4319 code_space_->CollectCodeStatistics();
4320 lo_space_->CollectCodeStatistics();
4321 PagedSpace::ReportCodeStatistics(isolate());
4322 }
4323
4324
4325 // This function expects that NewSpace's allocated objects histogram is
4326 // populated (via a call to CollectStatistics or else as a side effect of a
4327 // just-completed scavenge collection).
4328 void Heap::ReportHeapStatistics(const char* title) {
4329 USE(title);
4330 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4331 gc_count_);
4332 PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4333 old_generation_allocation_limit_);
4334
4335 PrintF("\n");
4336 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4337 isolate_->global_handles()->PrintStats();
4338 PrintF("\n");
4339
4340 PrintF("Heap statistics : ");
4341 isolate_->memory_allocator()->ReportStatistics();
4342 PrintF("To space : ");
4343 new_space_.ReportStatistics();
4344 PrintF("Old space : ");
4345 old_space_->ReportStatistics();
4346 PrintF("Code space : ");
4347 code_space_->ReportStatistics();
4348 PrintF("Map space : ");
4349 map_space_->ReportStatistics();
4350 PrintF("Large object space : ");
4351 lo_space_->ReportStatistics();
4352 PrintF(">>>>>> ========================================= >>>>>>\n");
4353 }
4354
4355 #endif // DEBUG
4356
4357 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4358
4359
4360 bool Heap::Contains(Address addr) {
4361 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4362 return HasBeenSetUp() &&
4363 (new_space_.ToSpaceContains(addr) || old_space_->Contains(addr) ||
4364 code_space_->Contains(addr) || map_space_->Contains(addr) ||
4365 lo_space_->SlowContains(addr));
4366 }
4367
4368
4369 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4370 return InSpace(value->address(), space);
4371 }
4372
4373
4374 bool Heap::InSpace(Address addr, AllocationSpace space) {
4375 if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4376 if (!HasBeenSetUp()) return false;
4377
4378 switch (space) {
4379 case NEW_SPACE:
4380 return new_space_.ToSpaceContains(addr);
4381 case OLD_SPACE:
4382 return old_space_->Contains(addr);
4383 case CODE_SPACE:
4384 return code_space_->Contains(addr);
4385 case MAP_SPACE:
4386 return map_space_->Contains(addr);
4387 case LO_SPACE:
4388 return lo_space_->SlowContains(addr);
4389 }
4390 UNREACHABLE();
4391 return false;
4392 }
4393
4394
4395 bool Heap::IsValidAllocationSpace(AllocationSpace space) {
4396 switch (space) {
4397 case NEW_SPACE:
4398 case OLD_SPACE:
4399 case CODE_SPACE:
4400 case MAP_SPACE:
4401 case LO_SPACE:
4402 return true;
4403 default:
4404 return false;
4405 }
4406 }
4407
4408
4409 bool Heap::RootIsImmortalImmovable(int root_index) {
4410 switch (root_index) {
4411 #define IMMORTAL_IMMOVABLE_ROOT(name) case Heap::k##name##RootIndex:
4412 IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
4413 #undef IMMORTAL_IMMOVABLE_ROOT
4414 #define INTERNALIZED_STRING(name, value) case Heap::k##name##RootIndex:
4415 INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
4416 #undef INTERNALIZED_STRING
4417 #define STRING_TYPE(NAME, size, name, Name) case Heap::k##Name##MapRootIndex:
4418 STRING_TYPE_LIST(STRING_TYPE)
4419 #undef STRING_TYPE
4420 return true;
4421 default:
4422 return false;
4423 }
4424 }
4425
4426
4427 #ifdef VERIFY_HEAP
4428 void Heap::Verify() {
4429 CHECK(HasBeenSetUp());
4430 HandleScope scope(isolate());
4431
4432 store_buffer()->Verify();
4433
4434 if (mark_compact_collector()->sweeping_in_progress()) {
4435 // We have to wait here for the sweeper threads to have an iterable heap.
4436 mark_compact_collector()->EnsureSweepingCompleted();
4437 }
4438
4439 VerifyPointersVisitor visitor;
4440 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4441
4442 VerifySmisVisitor smis_visitor;
4443 IterateSmiRoots(&smis_visitor);
4444
4445 new_space_.Verify();
4446
4447 old_space_->Verify(&visitor);
4448 map_space_->Verify(&visitor);
4449
4450 VerifyPointersVisitor no_dirty_regions_visitor;
4451 code_space_->Verify(&no_dirty_regions_visitor);
4452
4453 lo_space_->Verify();
4454
4455 mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
4456 if (FLAG_omit_map_checks_for_leaf_maps) {
4457 mark_compact_collector()->VerifyOmittedMapChecks();
4458 }
4459 }
4460 #endif
4461
4462
4463 void Heap::ZapFromSpace() {
4464 if (!new_space_.IsFromSpaceCommitted()) return;
4465 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4466 new_space_.FromSpaceEnd());
4467 while (it.has_next()) {
4468 NewSpacePage* page = it.next();
4469 for (Address cursor = page->area_start(), limit = page->area_end();
4470 cursor < limit; cursor += kPointerSize) {
4471 Memory::Address_at(cursor) = kFromSpaceZapValue;
4472 }
4473 }
4474 }
4475
4476
4477 void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
4478 Address end, bool record_slots,
4479 ObjectSlotCallback callback) {
4480 Address slot_address = start;
4481
4482 while (slot_address < end) {
4483 Object** slot = reinterpret_cast<Object**>(slot_address);
4484 Object* target = *slot;
4485 // If the store buffer becomes overfull we mark pages as being exempt from
4486 // the store buffer. These pages are scanned to find pointers that point
4487 // to the new space. In that case we may hit newly promoted objects and
4488 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4489 if (target->IsHeapObject()) {
4490 if (Heap::InFromSpace(target)) {
4491 callback(reinterpret_cast<HeapObject**>(slot),
4492 HeapObject::cast(target));
4493 Object* new_target = *slot;
4494 if (InNewSpace(new_target)) {
4495 SLOW_DCHECK(Heap::InToSpace(new_target));
4496 SLOW_DCHECK(new_target->IsHeapObject());
4497 store_buffer_.EnterDirectlyIntoStoreBuffer(
4498 reinterpret_cast<Address>(slot));
4499 }
4500 SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
4501 } else if (record_slots &&
4502 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
4503 mark_compact_collector()->RecordSlot(object, slot, target);
4504 }
4505 }
4506 slot_address += kPointerSize;
4507 }
4508 }
4509
4510
4511 class IteratePointersToFromSpaceVisitor final : public ObjectVisitor {
4512 public:
4513   IteratePointersToFromSpaceVisitor(Heap* heap, HeapObject* target,
4514 bool record_slots,
4515 ObjectSlotCallback callback)
4516 : heap_(heap),
4517 target_(target),
4518 record_slots_(record_slots),
4519 callback_(callback) {}
4520
4521   V8_INLINE void VisitPointers(Object** start, Object** end) override {
4522 heap_->IterateAndMarkPointersToFromSpace(
4523 target_, reinterpret_cast<Address>(start),
4524 reinterpret_cast<Address>(end), record_slots_, callback_);
4525 }
4526
4527   V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {}
4528
4529 private:
4530 Heap* heap_;
4531 HeapObject* target_;
4532 bool record_slots_;
4533 ObjectSlotCallback callback_;
4534 };
4535
4536
4537 void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
4538 ObjectSlotCallback callback) {
4539   // We are not collecting slots on new space objects during mutation,
4540   // thus we have to scan for pointers to evacuation candidates when we
4541   // promote objects. But we should not record any slots in non-black
4542   // objects. A grey object's slots would be rescanned anyway.
4543   // A white object might not survive until the end of the collection, so
4544   // it would be a violation of the invariant to record its slots.
4545 bool record_slots = false;
4546 if (incremental_marking()->IsCompacting()) {
4547 MarkBit mark_bit = Marking::MarkBitFrom(target);
4548 record_slots = Marking::IsBlack(mark_bit);
4549 }
4550
4551 IteratePointersToFromSpaceVisitor visitor(this, target, record_slots,
4552 callback);
4553 target->IterateBody(target->map()->instance_type(), size, &visitor);
4554 }
4555
4556
4557 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4558 IterateStrongRoots(v, mode);
4559 IterateWeakRoots(v, mode);
4560 }
4561
4562
4563 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4564 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4565 v->Synchronize(VisitorSynchronization::kStringTable);
4566 if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4567 // Scavenge collections have special processing for this.
4568 external_string_table_.Iterate(v);
4569 }
4570 v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4571 }
4572
4573
4574 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4575 // Acquire execution access since we are going to read stack limit values.
4576 ExecutionAccess access(isolate());
4577 v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4578 v->Synchronize(VisitorSynchronization::kSmiRootList);
4579 }
4580
4581
4582 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4583 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4584 v->Synchronize(VisitorSynchronization::kStrongRootList);
4585
4586 isolate_->bootstrapper()->Iterate(v);
4587 v->Synchronize(VisitorSynchronization::kBootstrapper);
4588 isolate_->Iterate(v);
4589 v->Synchronize(VisitorSynchronization::kTop);
4590 Relocatable::Iterate(isolate_, v);
4591 v->Synchronize(VisitorSynchronization::kRelocatable);
4592
4593 if (isolate_->deoptimizer_data() != NULL) {
4594 isolate_->deoptimizer_data()->Iterate(v);
4595 }
4596 v->Synchronize(VisitorSynchronization::kDebug);
4597 isolate_->compilation_cache()->Iterate(v);
4598 v->Synchronize(VisitorSynchronization::kCompilationCache);
4599
4600 // Iterate over local handles in handle scopes.
4601 isolate_->handle_scope_implementer()->Iterate(v);
4602 isolate_->IterateDeferredHandles(v);
4603 v->Synchronize(VisitorSynchronization::kHandleScope);
4604
4605 // Iterate over the builtin code objects and code stubs in the
4606 // heap. Note that it is not necessary to iterate over code objects
4607 // on scavenge collections.
4608 if (mode != VISIT_ALL_IN_SCAVENGE) {
4609 isolate_->builtins()->IterateBuiltins(v);
4610 }
4611 v->Synchronize(VisitorSynchronization::kBuiltins);
4612
4613 // Iterate over global handles.
4614 switch (mode) {
4615 case VISIT_ONLY_STRONG:
4616 isolate_->global_handles()->IterateStrongRoots(v);
4617 break;
4618 case VISIT_ALL_IN_SCAVENGE:
4619 isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4620 break;
4621 case VISIT_ALL_IN_SWEEP_NEWSPACE:
4622 case VISIT_ALL:
4623 isolate_->global_handles()->IterateAllRoots(v);
4624 break;
4625 }
4626 v->Synchronize(VisitorSynchronization::kGlobalHandles);
4627
4628 // Iterate over eternal handles.
4629 if (mode == VISIT_ALL_IN_SCAVENGE) {
4630 isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4631 } else {
4632 isolate_->eternal_handles()->IterateAllRoots(v);
4633 }
4634 v->Synchronize(VisitorSynchronization::kEternalHandles);
4635
4636 // Iterate over pointers being held by inactive threads.
4637 isolate_->thread_manager()->Iterate(v);
4638 v->Synchronize(VisitorSynchronization::kThreadManager);
4639
4640 // Iterate over other strong roots (currently only identity maps).
4641 for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
4642 v->VisitPointers(list->start, list->end);
4643 }
4644 v->Synchronize(VisitorSynchronization::kStrongRoots);
4645
4646 // Iterate over the pointers the Serialization/Deserialization code is
4647 // holding.
4648 // During garbage collection this keeps the partial snapshot cache alive.
4649 // During deserialization of the startup snapshot this creates the partial
4650 // snapshot cache and deserializes the objects it refers to. During
4651 // serialization this does nothing, since the partial snapshot cache is
4652 // empty. However the next thing we do is create the partial snapshot,
4653 // filling up the partial snapshot cache with objects it needs as we go.
4654 SerializerDeserializer::Iterate(isolate_, v);
4655 // We don't do a v->Synchronize call here, because in debug mode that will
4656 // output a flag to the snapshot. However at this point the serializer and
4657 // deserializer are deliberately a little unsynchronized (see above) so the
4658 // checking of the sync flag in the snapshot would fail.
4659 }
4660
4661
4662 // TODO(1236194): Since the heap size is configurable on the command line
4663 // and through the API, we should gracefully handle the case that the heap
4664 // size is not big enough to fit all the initial objects.
4665 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4666 int max_executable_size, size_t code_range_size) {
4667 if (HasBeenSetUp()) return false;
4668
4669 // Overwrite default configuration.
4670 if (max_semi_space_size > 0) {
4671 max_semi_space_size_ = max_semi_space_size * MB;
4672 }
4673 if (max_old_space_size > 0) {
4674 max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
4675 }
4676 if (max_executable_size > 0) {
4677 max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
4678 }
4679
4680 // If max space size flags are specified overwrite the configuration.
4681 if (FLAG_max_semi_space_size > 0) {
4682 max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4683 }
4684 if (FLAG_max_old_space_size > 0) {
4685 max_old_generation_size_ =
4686 static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
4687 }
4688 if (FLAG_max_executable_size > 0) {
4689 max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
4690 }
4691
4692 if (Page::kPageSize > MB) {
4693 max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
4694 max_old_generation_size_ =
4695 ROUND_UP(max_old_generation_size_, Page::kPageSize);
4696 max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
4697 }
4698
4699 if (FLAG_stress_compaction) {
4700 // This will cause more frequent GCs when stressing.
4701 max_semi_space_size_ = Page::kPageSize;
4702 }
4703
4704 if (isolate()->snapshot_available()) {
4705 // If we are using a snapshot we always reserve the default amount
4706 // of memory for each semispace because code in the snapshot has
4707 // write-barrier code that relies on the size and alignment of new
4708 // space. We therefore cannot use a larger max semispace size
4709 // than the default reserved semispace size.
4710 if (max_semi_space_size_ > reserved_semispace_size_) {
4711 max_semi_space_size_ = reserved_semispace_size_;
4712 if (FLAG_trace_gc) {
4713 PrintIsolate(isolate_,
4714 "Max semi-space size cannot be more than %d kbytes\n",
4715 reserved_semispace_size_ >> 10);
4716 }
4717 }
4718 } else {
4719 // If we are not using snapshots we reserve space for the actual
4720 // max semispace size.
4721 reserved_semispace_size_ = max_semi_space_size_;
4722 }
4723
4724 // The new space size must be a power of two to support single-bit testing
4725 // for containment.
4726 max_semi_space_size_ =
4727 base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4728 reserved_semispace_size_ =
4729 base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
4730
4731 if (FLAG_min_semi_space_size > 0) {
4732 int initial_semispace_size = FLAG_min_semi_space_size * MB;
4733 if (initial_semispace_size > max_semi_space_size_) {
4734 initial_semispace_size_ = max_semi_space_size_;
4735 if (FLAG_trace_gc) {
4736 PrintIsolate(isolate_,
4737 "Min semi-space size cannot be more than the maximum "
4738 "semi-space size of %d MB\n",
4739 max_semi_space_size_ / MB);
4740 }
4741 } else {
4742 initial_semispace_size_ =
4743 ROUND_UP(initial_semispace_size, Page::kPageSize);
4744 }
4745 }
4746
4747 initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4748
4749 if (FLAG_target_semi_space_size > 0) {
4750 int target_semispace_size = FLAG_target_semi_space_size * MB;
4751 if (target_semispace_size < initial_semispace_size_) {
4752 target_semispace_size_ = initial_semispace_size_;
4753 if (FLAG_trace_gc) {
4754 PrintIsolate(isolate_,
4755 "Target semi-space size cannot be less than the minimum "
4756 "semi-space size of %d MB\n",
4757 initial_semispace_size_ / MB);
4758 }
4759 } else if (target_semispace_size > max_semi_space_size_) {
4760 target_semispace_size_ = max_semi_space_size_;
4761 if (FLAG_trace_gc) {
4762 PrintIsolate(isolate_,
4763 "Target semi-space size cannot be less than the maximum "
4764 "semi-space size of %d MB\n",
4765 max_semi_space_size_ / MB);
4766 }
4767 } else {
4768 target_semispace_size_ = ROUND_UP(target_semispace_size, Page::kPageSize);
4769 }
4770 }
4771
4772 target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
4773
4774 if (FLAG_semi_space_growth_factor < 2) {
4775 FLAG_semi_space_growth_factor = 2;
4776 }
4777
4778 // The old generation is paged and needs at least one page for each space.
4779 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4780 max_old_generation_size_ =
4781 Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4782 max_old_generation_size_);
4783
4784 // The max executable size must be less than or equal to the max old
4785 // generation size.
4786 if (max_executable_size_ > max_old_generation_size_) {
4787 max_executable_size_ = max_old_generation_size_;
4788 }
4789
4790 if (FLAG_initial_old_space_size > 0) {
4791 initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
4792 } else {
4793 initial_old_generation_size_ =
4794 max_old_generation_size_ / kInitalOldGenerationLimitFactor;
4795 }
4796 old_generation_allocation_limit_ = initial_old_generation_size_;
4797
4798 // We rely on being able to allocate new arrays in paged spaces.
4799 DCHECK(Page::kMaxRegularHeapObjectSize >=
4800 (JSArray::kSize +
4801 FixedArray::SizeFor(JSArray::kInitialMaxFastElementArray) +
4802 AllocationMemento::kSize));
4803
4804 code_range_size_ = code_range_size * MB;
4805
4806 configured_ = true;
4807 return true;
4808 }
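
// Illustrative example (hypothetical values): a direct call such as
//
//   heap->ConfigureHeap(16, 700, 256, 512);  // all sizes in MB
//
// would request 16 MB semispaces (already a power of two, so unchanged by the
// rounding above), a 700 MB old generation, a 256 MB executable limit and a
// 512 MB code range. Embedders normally reach this path through
// v8::ResourceConstraints rather than by calling it directly.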
4809
4810
4811 void Heap::AddToRingBuffer(const char* string) {
4812 size_t first_part =
4813 Min(strlen(string), kTraceRingBufferSize - ring_buffer_end_);
4814 memcpy(trace_ring_buffer_ + ring_buffer_end_, string, first_part);
4815 ring_buffer_end_ += first_part;
4816 if (first_part < strlen(string)) {
4817 ring_buffer_full_ = true;
4818 size_t second_part = strlen(string) - first_part;
4819 memcpy(trace_ring_buffer_, string + first_part, second_part);
4820 ring_buffer_end_ = second_part;
4821 }
4822 }
4823
4824
4825 void Heap::GetFromRingBuffer(char* buffer) {
4826 size_t copied = 0;
4827 if (ring_buffer_full_) {
4828 copied = kTraceRingBufferSize - ring_buffer_end_;
4829 memcpy(buffer, trace_ring_buffer_ + ring_buffer_end_, copied);
4830 }
4831 memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
4832 }
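
// Worked example (hypothetical buffer size): with kTraceRingBufferSize == 8,
// writing "abcdef" and then "ghij" stores 'g','h' at the end, wraps, and
// stores 'i','j' at the front, leaving the buffer as "ijcdefgh" with
// ring_buffer_end_ == 2 and ring_buffer_full_ == true. GetFromRingBuffer then
// reconstructs the last eight characters in order, "cdefghij".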
4833
4834
4835 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4836
4837
4838 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4839 *stats->start_marker = HeapStats::kStartMarker;
4840 *stats->end_marker = HeapStats::kEndMarker;
4841 *stats->new_space_size = new_space_.SizeAsInt();
4842 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4843 *stats->old_space_size = old_space_->SizeOfObjects();
4844 *stats->old_space_capacity = old_space_->Capacity();
4845 *stats->code_space_size = code_space_->SizeOfObjects();
4846 *stats->code_space_capacity = code_space_->Capacity();
4847 *stats->map_space_size = map_space_->SizeOfObjects();
4848 *stats->map_space_capacity = map_space_->Capacity();
4849 *stats->lo_space_size = lo_space_->Size();
4850 isolate_->global_handles()->RecordStats(stats);
4851 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4852 *stats->memory_allocator_capacity =
4853 isolate()->memory_allocator()->Size() +
4854 isolate()->memory_allocator()->Available();
4855 *stats->os_error = base::OS::GetLastError();
4857 if (take_snapshot) {
4858 HeapIterator iterator(this);
4859 for (HeapObject* obj = iterator.next(); obj != NULL;
4860 obj = iterator.next()) {
4861 InstanceType type = obj->map()->instance_type();
4862 DCHECK(0 <= type && type <= LAST_TYPE);
4863 stats->objects_per_type[type]++;
4864 stats->size_per_type[type] += obj->Size();
4865 }
4866 }
4867 if (stats->last_few_messages != NULL)
4868 GetFromRingBuffer(stats->last_few_messages);
4869 if (stats->js_stacktrace != NULL) {
4870 FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
4871 StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
4872 if (gc_state() == Heap::NOT_IN_GC) {
4873 isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
4874 } else {
4875 accumulator.Add("Cannot get stack trace in GC.");
4876 }
4877 }
4878 }
4879
4880
4881 intptr_t Heap::PromotedSpaceSizeOfObjects() {
4882 return old_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
4883 map_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
4884 }
4885
4886
4887 int64_t Heap::PromotedExternalMemorySize() {
4888 if (amount_of_external_allocated_memory_ <=
4889 amount_of_external_allocated_memory_at_last_global_gc_)
4890 return 0;
4891 return amount_of_external_allocated_memory_ -
4892 amount_of_external_allocated_memory_at_last_global_gc_;
4893 }
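
// For example, with 30 MB of external memory registered now and 20 MB
// registered at the time of the last global GC, this returns 10 MB; if the
// current amount has dropped below the old snapshot, it returns 0.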
4894
4895
4896 const double Heap::kMinHeapGrowingFactor = 1.1;
4897 const double Heap::kMaxHeapGrowingFactor = 4.0;
4898 const double Heap::kMaxHeapGrowingFactorMemoryConstrained = 2.0;
4899 const double Heap::kMaxHeapGrowingFactorIdle = 1.5;
4900 const double Heap::kTargetMutatorUtilization = 0.97;
4901
4902
4903 // Given the GC speed in bytes per ms and the allocation throughput in bytes
4904 // per ms (mutator speed), this function returns the heap growing factor that
4905 // will achieve kTargetMutatorUtilization if the GC speed and the mutator
4906 // speed remain the same until the next GC.
4907 //
4908 // For a fixed time-frame T = TM + TG, the mutator utilization is the ratio
4909 // TM / (TM + TG), where TM is the time spent in the mutator and TG is the
4910 // time spent in the garbage collector.
4911 //
4912 // Let MU be kTargetMutatorUtilization, the desired mutator utilization for the
4913 // time-frame from the end of the current GC to the end of the next GC. Based
4914 // on the MU we can compute the heap growing factor F as
4915 //
4916 // F = R * (1 - MU) / (R * (1 - MU) - MU), where R = gc_speed / mutator_speed.
4917 //
4918 // This formula can be derived as follows.
4919 //
4920 // F = Limit / Live by definition, where Limit is the allocation limit
4921 // and Live is the size of live objects.
4922 // Let’s assume that we already know the Limit. Then:
4923 // TG = Limit / gc_speed
4924 // TM = (TM + TG) * MU, by definition of MU.
4925 // TM = TG * MU / (1 - MU)
4926 // TM = Limit * MU / (gc_speed * (1 - MU))
4927 // On the other hand, if the allocation throughput remains constant:
4928 // Limit = Live + TM * allocation_throughput = Live + TM * mutator_speed
4929 // Solving it for TM, we get
4930 // TM = (Limit - Live) / mutator_speed
4931 // Combining the two equations for TM:
4932 // (Limit - Live) / mutator_speed = Limit * MU / (gc_speed * (1 - MU))
4933 // (Limit - Live) = Limit * MU * mutator_speed / (gc_speed * (1 - MU))
4934 // substitute R = gc_speed / mutator_speed
4935 // (Limit - Live) = Limit * MU / (R * (1 - MU))
4936 // substitute F = Limit / Live
4937 // F - 1 = F * MU / (R * (1 - MU))
4938 // F - F * MU / (R * (1 - MU)) = 1
4939 // F * (1 - MU / (R * (1 - MU))) = 1
4940 // F * (R * (1 - MU) - MU) / (R * (1 - MU)) = 1
4941 // F = R * (1 - MU) / (R * (1 - MU) - MU)
4942 double Heap::HeapGrowingFactor(double gc_speed, double mutator_speed) {
4943 if (gc_speed == 0 || mutator_speed == 0) return kMaxHeapGrowingFactor;
4944
4945 const double speed_ratio = gc_speed / mutator_speed;
4946 const double mu = kTargetMutatorUtilization;
4947
4948 const double a = speed_ratio * (1 - mu);
4949 const double b = speed_ratio * (1 - mu) - mu;
4950
4951 // The factor is a / b, but we need to check for small b first.
4952 double factor =
4953 (a < b * kMaxHeapGrowingFactor) ? a / b : kMaxHeapGrowingFactor;
4954 factor = Min(factor, kMaxHeapGrowingFactor);
4955 factor = Max(factor, kMinHeapGrowingFactor);
4956 return factor;
4957 }
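
// Worked example (illustrative numbers): with MU == 0.97 and a GC that is
// 100x faster than allocation (R == 100), F = 100 * 0.03 / (100 * 0.03 -
// 0.97) = 3.0 / 2.03, i.e. about 1.48. With a slower GC, R == 50,
// F = 1.5 / 0.53, i.e. about 2.83. As R approaches MU / (1 - MU), roughly
// 32.3, the denominator goes to zero, which is why the code above clamps the
// result to kMaxHeapGrowingFactor instead of dividing blindly.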
4958
4959
4960 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
4961 intptr_t old_gen_size) {
4962 CHECK(factor > 1.0);
4963 CHECK(old_gen_size > 0);
4964 intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
4965 limit = Max(limit, old_gen_size + kMinimumOldGenerationAllocationLimit);
4966 limit += new_space_.Capacity();
4967 intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
4968 return Min(limit, halfway_to_the_max);
4969 }
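
// Worked example (illustrative numbers): with old_gen_size == 100 MB, factor
// == 1.5, a 16 MB new space capacity and max_old_generation_size_ == 700 MB,
// the raw limit is 150 MB; assuming kMinimumOldGenerationAllocationLimit is
// below 50 MB, the Max() keeps 150 MB, adding the new space capacity gives
// 166 MB, and since halfway to the max is (100 + 700) / 2 == 400 MB, the
// final limit is 166 MB.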
4970
4971
4972 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
4973 double gc_speed,
4974 double mutator_speed) {
4975 const double kConservativeHeapGrowingFactor = 1.3;
4976
4977 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
4978
4979 if (FLAG_trace_gc_verbose) {
4980 PrintIsolate(isolate_,
4981 "Heap growing factor %.1f based on mu=%.3f, speed_ratio=%.f "
4982 "(gc=%.f, mutator=%.f)\n",
4983 factor, kTargetMutatorUtilization, gc_speed / mutator_speed,
4984 gc_speed, mutator_speed);
4985 }
4986
4987   // We cap the old generation growing factor at 2 to grow the heap more
4988   // slowly on memory-constrained devices.
4989 if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice ||
4990 FLAG_optimize_for_size) {
4991 factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
4992 }
4993
4994 if (memory_reducer_->ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
4995 factor = Min(factor, kConservativeHeapGrowingFactor);
4996 }
4997
4998 if (FLAG_stress_compaction || ShouldReduceMemory()) {
4999 factor = kMinHeapGrowingFactor;
5000 }
5001
5002 if (FLAG_heap_growing_percent > 0) {
5003 factor = 1.0 + FLAG_heap_growing_percent / 100.0;
5004 }
5005
5006 old_generation_allocation_limit_ =
5007 CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5008
5009 if (FLAG_trace_gc_verbose) {
5010 PrintIsolate(isolate_, "Grow: old size: %" V8_PTR_PREFIX
5011 "d KB, new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
5012 old_gen_size / KB, old_generation_allocation_limit_ / KB,
5013 factor);
5014 }
5015 }
5016
5017
5018 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
5019 double gc_speed,
5020 double mutator_speed) {
5021 double factor = HeapGrowingFactor(gc_speed, mutator_speed);
5022 intptr_t limit = CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5023 if (limit < old_generation_allocation_limit_) {
5024 if (FLAG_trace_gc_verbose) {
5025 PrintIsolate(isolate_, "Dampen: old size: %" V8_PTR_PREFIX
5026 "d KB, old limit: %" V8_PTR_PREFIX
5027 "d KB, "
5028 "new limit: %" V8_PTR_PREFIX "d KB (%.1f)\n",
5029 old_gen_size / KB, old_generation_allocation_limit_ / KB,
5030 limit / KB, factor);
5031 }
5032 old_generation_allocation_limit_ = limit;
5033 }
5034 }
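
// Note that dampening is one-directional: the limit is only ever lowered
// here, never raised.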
5035
5036
5037 void Heap::EnableInlineAllocation() {
5038 if (!inline_allocation_disabled_) return;
5039 inline_allocation_disabled_ = false;
5040
5041 // Update inline allocation limit for new space.
5042 new_space()->UpdateInlineAllocationLimit(0);
5043 }
5044
5045
5046 void Heap::DisableInlineAllocation() {
5047 if (inline_allocation_disabled_) return;
5048 inline_allocation_disabled_ = true;
5049
5050 // Update inline allocation limit for new space.
5051 new_space()->UpdateInlineAllocationLimit(0);
5052
5053 // Update inline allocation limit for old spaces.
5054 PagedSpaces spaces(this);
5055 for (PagedSpace* space = spaces.next(); space != NULL;
5056 space = spaces.next()) {
5057 space->EmptyAllocationInfo();
5058 }
5059 }
5060
5061
5062 V8_DECLARE_ONCE(initialize_gc_once);
5063
5064 static void InitializeGCOnce() {
5065 Scavenger::Initialize();
5066 StaticScavengeVisitor::Initialize();
5067 MarkCompactCollector::Initialize();
5068 }
5069
5070
5071 bool Heap::SetUp() {
5072 #ifdef DEBUG
5073 allocation_timeout_ = FLAG_gc_interval;
5074 #endif
5075
5076 // Initialize heap spaces and initial maps and objects. Whenever something
5077 // goes wrong, just return false. The caller should check the results and
5078 // call Heap::TearDown() to release allocated memory.
5079 //
5080 // If the heap is not yet configured (e.g. through the API), configure it.
5081 // Configuration is based on the flags new-space-size (really the semispace
5082 // size) and old-space-size if set or the initial values of semispace_size_
5083 // and old_generation_size_ otherwise.
5084 if (!configured_) {
5085 if (!ConfigureHeapDefault()) return false;
5086 }
5087
5088 base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5089
5090 // Set up memory allocator.
5091 if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5092 return false;
5093
5094 // Initialize incremental marking.
5095 incremental_marking_ = new IncrementalMarking(this);
5096
5097 // Set up new space.
5098 if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5099 return false;
5100 }
5101 new_space_top_after_last_gc_ = new_space()->top();
5102
5103 // Initialize old space.
5104 old_space_ = new OldSpace(this, OLD_SPACE, NOT_EXECUTABLE);
5105 if (old_space_ == NULL) return false;
5106 if (!old_space_->SetUp()) return false;
5107
5108 if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5109
5110 // Initialize the code space, set its maximum capacity to the old
5111 // generation size. It needs executable memory.
5112 code_space_ = new OldSpace(this, CODE_SPACE, EXECUTABLE);
5113 if (code_space_ == NULL) return false;
5114 if (!code_space_->SetUp()) return false;
5115
5116 // Initialize map space.
5117 map_space_ = new MapSpace(this, MAP_SPACE);
5118 if (map_space_ == NULL) return false;
5119 if (!map_space_->SetUp()) return false;
5120
5121   // The large object space may contain code or data. We set the memory
5122 // to be non-executable here for safety, but this means we need to enable it
5123 // explicitly when allocating large code objects.
5124 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
5125 if (lo_space_ == NULL) return false;
5126 if (!lo_space_->SetUp()) return false;
5127
5128 // Set up the seed that is used to randomize the string hash function.
5129 DCHECK(hash_seed() == 0);
5130 if (FLAG_randomize_hashes) {
5131 if (FLAG_hash_seed == 0) {
5132 int rnd = isolate()->random_number_generator()->NextInt();
5133 set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5134 } else {
5135 set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5136 }
5137 }
5138
5139 for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
5140 i++) {
5141 deferred_counters_[i] = 0;
5142 }
5143
5144 tracer_ = new GCTracer(this);
5145
5146 scavenge_collector_ = new Scavenger(this);
5147
5148 mark_compact_collector_ = new MarkCompactCollector(this);
5149
5150 gc_idle_time_handler_ = new GCIdleTimeHandler();
5151
5152 memory_reducer_ = new MemoryReducer(this);
5153
5154 object_stats_ = new ObjectStats(this);
5155 object_stats_->ClearObjectStats(true);
5156
5157 scavenge_job_ = new ScavengeJob();
5158
5159 array_buffer_tracker_ = new ArrayBufferTracker(this);
5160
5161 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5162 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5163
5164 store_buffer()->SetUp();
5165
5166 mark_compact_collector()->SetUp();
5167
5168 idle_scavenge_observer_ = new IdleScavengeObserver(
5169 *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
5170 new_space()->AddInlineAllocationObserver(idle_scavenge_observer_);
5171
5172 return true;
5173 }
5174
5175
5176 bool Heap::CreateHeapObjects() {
5177 // Create initial maps.
5178 if (!CreateInitialMaps()) return false;
5179 CreateApiObjects();
5180
5181 // Create initial objects
5182 CreateInitialObjects();
5183 CHECK_EQ(0u, gc_count_);
5184
5185 set_native_contexts_list(undefined_value());
5186 set_allocation_sites_list(undefined_value());
5187
5188 return true;
5189 }
5190
5191
5192 void Heap::SetStackLimits() {
5193 DCHECK(isolate_ != NULL);
5194 DCHECK(isolate_ == isolate());
5195 // On 64 bit machines, pointers are generally out of range of Smis. We write
5196 // something that looks like an out of range Smi to the GC.
5197
5198 // Set up the special root array entries containing the stack limits.
5199 // These are actually addresses, but the tag makes the GC ignore it.
5200 roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5201 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5202 roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5203 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5204 }
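
// A sketch of the tagging trick above (hypothetical address, assuming the
// usual kSmiTag == 0 and kSmiTagMask == 1): for a jslimit of 0x7fff5fbff7a8,
// (0x7fff5fbff7a8 & ~1) | 0 leaves the value unchanged since it is already
// even, and an even word is taken for a Smi by the GC, so these two root
// entries are never treated as movable heap pointers.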
5205
5206
5207 void Heap::PrintAlloctionsHash() {
5208 uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
5209 PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
5210 }
5211
5212
5213 void Heap::NotifyDeserializationComplete() {
5214 deserialization_complete_ = true;
5215 #ifdef DEBUG
5216 // All pages right after bootstrapping must be marked as never-evacuate.
5217 PagedSpaces spaces(this);
5218 for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5219 PageIterator it(s);
5220 while (it.has_next()) CHECK(it.next()->NeverEvacuate());
5221 }
5222 #endif // DEBUG
5223 }
5224
5225
5226 void Heap::TearDown() {
5227 #ifdef VERIFY_HEAP
5228 if (FLAG_verify_heap) {
5229 Verify();
5230 }
5231 #endif
5232
5233 UpdateMaximumCommitted();
5234
5235 if (FLAG_print_cumulative_gc_stat) {
5236 PrintF("\n");
5237 PrintF("gc_count=%d ", gc_count_);
5238 PrintF("mark_sweep_count=%d ", ms_count_);
5239 PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5240 PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5241 PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5242 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5243 PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
5244 PrintF("total_sweeping_time=%.1f ",
5245 tracer()->cumulative_sweeping_duration());
5246 PrintF("\n\n");
5247 }
5248
5249 if (FLAG_print_max_heap_committed) {
5250 PrintF("\n");
5251 PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5252 MaximumCommittedMemory());
5253 PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5254 new_space_.MaximumCommittedMemory());
5255 PrintF("maximum_committed_by_old_space=%" V8_PTR_PREFIX "d ",
5256 old_space_->MaximumCommittedMemory());
5257 PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5258 code_space_->MaximumCommittedMemory());
5259 PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5260 map_space_->MaximumCommittedMemory());
5261 PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5262 lo_space_->MaximumCommittedMemory());
5263 PrintF("\n\n");
5264 }
5265
5266 if (FLAG_verify_predictable) {
5267 PrintAlloctionsHash();
5268 }
5269
5270 new_space()->RemoveInlineAllocationObserver(idle_scavenge_observer_);
5271 delete idle_scavenge_observer_;
5272 idle_scavenge_observer_ = nullptr;
5273
5274 delete scavenge_collector_;
5275 scavenge_collector_ = nullptr;
5276
5277 if (mark_compact_collector_ != nullptr) {
5278 mark_compact_collector_->TearDown();
5279 delete mark_compact_collector_;
5280 mark_compact_collector_ = nullptr;
5281 }
5282
5283 delete incremental_marking_;
5284 incremental_marking_ = nullptr;
5285
5286 delete gc_idle_time_handler_;
5287 gc_idle_time_handler_ = nullptr;
5288
5289 if (memory_reducer_ != nullptr) {
5290 memory_reducer_->TearDown();
5291 delete memory_reducer_;
5292 memory_reducer_ = nullptr;
5293 }
5294
5295 delete object_stats_;
5296 object_stats_ = nullptr;
5297
5298 delete scavenge_job_;
5299 scavenge_job_ = nullptr;
5300
5301 WaitUntilUnmappingOfFreeChunksCompleted();
5302
5303 delete array_buffer_tracker_;
5304 array_buffer_tracker_ = nullptr;
5305
5306 isolate_->global_handles()->TearDown();
5307
5308 external_string_table_.TearDown();
5309
5310 delete tracer_;
5311 tracer_ = nullptr;
5312
5313 new_space_.TearDown();
5314
5315 if (old_space_ != NULL) {
5316 delete old_space_;
5317 old_space_ = NULL;
5318 }
5319
5320 if (code_space_ != NULL) {
5321 delete code_space_;
5322 code_space_ = NULL;
5323 }
5324
5325 if (map_space_ != NULL) {
5326 delete map_space_;
5327 map_space_ = NULL;
5328 }
5329
5330 if (lo_space_ != NULL) {
5331 lo_space_->TearDown();
5332 delete lo_space_;
5333 lo_space_ = NULL;
5334 }
5335
5336 store_buffer()->TearDown();
5337
5338 isolate_->memory_allocator()->TearDown();
5339
5340 StrongRootsList* next = NULL;
5341 for (StrongRootsList* list = strong_roots_list_; list; list = next) {
5342 next = list->next;
5343 delete list;
5344 }
5345 strong_roots_list_ = NULL;
5346 }
5347
5348
5349 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallback callback,
5350 GCType gc_type, bool pass_isolate) {
5351 DCHECK(callback != NULL);
5352 GCCallbackPair pair(callback, gc_type, pass_isolate);
5353 DCHECK(!gc_prologue_callbacks_.Contains(pair));
5354 return gc_prologue_callbacks_.Add(pair);
5355 }
5356
5357
5358 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCCallback callback) {
5359 DCHECK(callback != NULL);
5360 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5361 if (gc_prologue_callbacks_[i].callback == callback) {
5362 gc_prologue_callbacks_.Remove(i);
5363 return;
5364 }
5365 }
5366 UNREACHABLE();
5367 }
5368
5369
5370 void Heap::AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
5371 GCType gc_type, bool pass_isolate) {
5372 DCHECK(callback != NULL);
5373 GCCallbackPair pair(callback, gc_type, pass_isolate);
5374 DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5375 return gc_epilogue_callbacks_.Add(pair);
5376 }
5377
5378
5379 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback) {
5380 DCHECK(callback != NULL);
5381 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5382 if (gc_epilogue_callbacks_[i].callback == callback) {
5383 gc_epilogue_callbacks_.Remove(i);
5384 return;
5385 }
5386 }
5387 UNREACHABLE();
5388 }
5389
5390
5391 // TODO(ishell): Find a better place for this.
5392 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5393 Handle<DependentCode> dep) {
5394 DCHECK(!InNewSpace(*obj));
5395 DCHECK(!InNewSpace(*dep));
5396 Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5397 table = WeakHashTable::Put(table, obj, dep);
5398 if (*table != weak_object_to_code_table())
5399 set_weak_object_to_code_table(*table);
5400 DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5401 }
5402
5403
5404 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5405 Object* dep = weak_object_to_code_table()->Lookup(obj);
5406 if (dep->IsDependentCode()) return DependentCode::cast(dep);
5407 return DependentCode::cast(empty_fixed_array());
5408 }
5409
5410
5411 void Heap::AddRetainedMap(Handle<Map> map) {
5412 Handle<WeakCell> cell = Map::WeakCellForMap(map);
5413 Handle<ArrayList> array(retained_maps(), isolate());
5414 if (array->IsFull()) {
5415 CompactRetainedMaps(*array);
5416 }
5417 array = ArrayList::Add(
5418 array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
5419 ArrayList::kReloadLengthAfterAllocation);
5420 if (*array != retained_maps()) {
5421 set_retained_maps(*array);
5422 }
5423 }
5424
5425
5426 void Heap::CompactRetainedMaps(ArrayList* retained_maps) {
5427 DCHECK_EQ(retained_maps, this->retained_maps());
5428 int length = retained_maps->Length();
5429 int new_length = 0;
5430 int new_number_of_disposed_maps = 0;
5431 // This loop compacts the array by removing cleared weak cells.
5432 for (int i = 0; i < length; i += 2) {
5433 DCHECK(retained_maps->Get(i)->IsWeakCell());
5434 WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
5435 Object* age = retained_maps->Get(i + 1);
5436 if (cell->cleared()) continue;
5437 if (i != new_length) {
5438 retained_maps->Set(new_length, cell);
5439 retained_maps->Set(new_length + 1, age);
5440 }
5441 if (i < number_of_disposed_maps_) {
5442 new_number_of_disposed_maps += 2;
5443 }
5444 new_length += 2;
5445 }
5446 number_of_disposed_maps_ = new_number_of_disposed_maps;
5447 Object* undefined = undefined_value();
5448 for (int i = new_length; i < length; i++) {
5449 retained_maps->Clear(i, undefined);
5450 }
5451 if (new_length != length) retained_maps->SetLength(new_length);
5452 }
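
// The retained_maps() array stores pairs, so an array of length 6 holding
// maps A, B and C looks like [WeakCell(A), age_A, WeakCell(B), age_B,
// WeakCell(C), age_C]. If B's cell has been cleared, the compaction above
// rewrites it to [WeakCell(A), age_A, WeakCell(C), age_C] and trims the
// length to 4.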
5453
5454
5455 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5456 v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5457 }
5458
5459 #ifdef DEBUG
5460
5461 class PrintHandleVisitor : public ObjectVisitor {
5462 public:
5463   void VisitPointers(Object** start, Object** end) override {
5464 for (Object** p = start; p < end; p++)
5465 PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
5466 reinterpret_cast<void*>(*p));
5467 }
5468 };
5469
5470
5471 void Heap::PrintHandles() {
5472 PrintF("Handles:\n");
5473 PrintHandleVisitor v;
5474 isolate_->handle_scope_implementer()->Iterate(&v);
5475 }
5476
5477 #endif
5478
5479 class CheckHandleCountVisitor : public ObjectVisitor {
5480 public:
5481   CheckHandleCountVisitor() : handle_count_(0) {}
5482   ~CheckHandleCountVisitor() override {
5483 CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
5484 }
5485   void VisitPointers(Object** start, Object** end) override {
5486 handle_count_ += end - start;
5487 }
5488
5489 private:
5490 ptrdiff_t handle_count_;
5491 };
5492
5493
5494 void Heap::CheckHandleCount() {
5495 CheckHandleCountVisitor v;
5496 isolate_->handle_scope_implementer()->Iterate(&v);
5497 }
5498
5499
5500 Space* AllSpaces::next() {
5501 switch (counter_++) {
5502 case NEW_SPACE:
5503 return heap_->new_space();
5504 case OLD_SPACE:
5505 return heap_->old_space();
5506 case CODE_SPACE:
5507 return heap_->code_space();
5508 case MAP_SPACE:
5509 return heap_->map_space();
5510 case LO_SPACE:
5511 return heap_->lo_space();
5512 default:
5513 return NULL;
5514 }
5515 }
5516
5517
5518 PagedSpace* PagedSpaces::next() {
5519 switch (counter_++) {
5520 case OLD_SPACE:
5521 return heap_->old_space();
5522 case CODE_SPACE:
5523 return heap_->code_space();
5524 case MAP_SPACE:
5525 return heap_->map_space();
5526 default:
5527 return NULL;
5528 }
5529 }
5530
5531
5532 OldSpace* OldSpaces::next() {
5533 switch (counter_++) {
5534 case OLD_SPACE:
5535 return heap_->old_space();
5536 case CODE_SPACE:
5537 return heap_->code_space();
5538 default:
5539 return NULL;
5540 }
5541 }
5542
5543
5544 SpaceIterator::SpaceIterator(Heap* heap)
5545 : heap_(heap), current_space_(FIRST_SPACE), iterator_(NULL) {}
5546
5547
5548 SpaceIterator::~SpaceIterator() {
5549 // Delete active iterator if any.
5550 delete iterator_;
5551 }
5552
5553
5554 bool SpaceIterator::has_next() {
5555 // Iterate until no more spaces.
5556 return current_space_ != LAST_SPACE;
5557 }
5558
5559
5560 ObjectIterator* SpaceIterator::next() {
5561 if (iterator_ != NULL) {
5562 delete iterator_;
5563 iterator_ = NULL;
5564 // Move to the next space
5565 current_space_++;
5566 if (current_space_ > LAST_SPACE) {
5567 return NULL;
5568 }
5569 }
5570
5571 // Return iterator for the new current space.
5572 return CreateIterator();
5573 }
5574
5575
5576 // Create an iterator for the space to iterate.
5577 ObjectIterator* SpaceIterator::CreateIterator() {
5578 DCHECK(iterator_ == NULL);
5579
5580 switch (current_space_) {
5581 case NEW_SPACE:
5582 iterator_ = new SemiSpaceIterator(heap_->new_space());
5583 break;
5584 case OLD_SPACE:
5585 iterator_ = new HeapObjectIterator(heap_->old_space());
5586 break;
5587 case CODE_SPACE:
5588 iterator_ = new HeapObjectIterator(heap_->code_space());
5589 break;
5590 case MAP_SPACE:
5591 iterator_ = new HeapObjectIterator(heap_->map_space());
5592 break;
5593 case LO_SPACE:
5594 iterator_ = new LargeObjectIterator(heap_->lo_space());
5595 break;
5596 }
5597
5598   // Return the newly allocated iterator.
5599 DCHECK(iterator_ != NULL);
5600 return iterator_;
5601 }
5602
5603
5604 class HeapObjectsFilter {
5605 public:
5606   virtual ~HeapObjectsFilter() {}
5607 virtual bool SkipObject(HeapObject* object) = 0;
5608 };
5609
5610
5611 class UnreachableObjectsFilter : public HeapObjectsFilter {
5612 public:
5613   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5614 MarkReachableObjects();
5615 }
5616
5617   ~UnreachableObjectsFilter() {
5618 heap_->mark_compact_collector()->ClearMarkbits();
5619 }
5620
5621   bool SkipObject(HeapObject* object) {
5622 if (object->IsFiller()) return true;
5623 MarkBit mark_bit = Marking::MarkBitFrom(object);
5624 return Marking::IsWhite(mark_bit);
5625 }
5626
5627 private:
5628 class MarkingVisitor : public ObjectVisitor {
5629 public:
5630     MarkingVisitor() : marking_stack_(10) {}
5631 
5632     void VisitPointers(Object** start, Object** end) override {
5633 for (Object** p = start; p < end; p++) {
5634 if (!(*p)->IsHeapObject()) continue;
5635 HeapObject* obj = HeapObject::cast(*p);
5636 MarkBit mark_bit = Marking::MarkBitFrom(obj);
5637 if (Marking::IsWhite(mark_bit)) {
5638 Marking::WhiteToBlack(mark_bit);
5639 marking_stack_.Add(obj);
5640 }
5641 }
5642 }
5643
5644     void TransitiveClosure() {
5645 while (!marking_stack_.is_empty()) {
5646 HeapObject* obj = marking_stack_.RemoveLast();
5647 obj->Iterate(this);
5648 }
5649 }
5650
5651 private:
5652 List<HeapObject*> marking_stack_;
5653 };
5654
5655   void MarkReachableObjects() {
5656 MarkingVisitor visitor;
5657 heap_->IterateRoots(&visitor, VISIT_ALL);
5658 visitor.TransitiveClosure();
5659 }
5660
5661 Heap* heap_;
5662 DisallowHeapAllocation no_allocation_;
5663 };
5664
5665
5666 HeapIterator::HeapIterator(Heap* heap,
5667 HeapIterator::HeapObjectsFiltering filtering)
5668 : make_heap_iterable_helper_(heap),
5669 no_heap_allocation_(),
5670 heap_(heap),
5671 filtering_(filtering),
5672 filter_(nullptr),
5673 space_iterator_(nullptr),
5674 object_iterator_(nullptr) {
5675 heap_->heap_iterator_start();
5676 // Start the iteration.
5677 space_iterator_ = new SpaceIterator(heap_);
5678 switch (filtering_) {
5679 case kFilterUnreachable:
5680 filter_ = new UnreachableObjectsFilter(heap_);
5681 break;
5682 default:
5683 break;
5684 }
5685 object_iterator_ = space_iterator_->next();
5686 }
5687
5688
5689 HeapIterator::~HeapIterator() {
5690 heap_->heap_iterator_end();
5691 #ifdef DEBUG
5692   // Assert that in filtering mode we have iterated through all
5693   // objects. Otherwise, the heap will be left in an inconsistent state.
5694 if (filtering_ != kNoFiltering) {
5695 DCHECK(object_iterator_ == nullptr);
5696 }
5697 #endif
5698 // Make sure the last iterator is deallocated.
5699 delete object_iterator_;
5700 delete space_iterator_;
5701 delete filter_;
5702 }
5703
5704
5705 HeapObject* HeapIterator::next() {
5706 if (filter_ == nullptr) return NextObject();
5707
5708 HeapObject* obj = NextObject();
5709 while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
5710 return obj;
5711 }
5712
5713
5714 HeapObject* HeapIterator::NextObject() {
5715 // No iterator means we are done.
5716 if (object_iterator_ == nullptr) return nullptr;
5717
5718 if (HeapObject* obj = object_iterator_->next_object()) {
5719 // If the current iterator has more objects we are fine.
5720 return obj;
5721 } else {
5722     // Go through the spaces looking for one that has objects.
5723 while (space_iterator_->has_next()) {
5724 object_iterator_ = space_iterator_->next();
5725 if (HeapObject* obj = object_iterator_->next_object()) {
5726 return obj;
5727 }
5728 }
5729 }
5730 // Done with the last space.
5731 object_iterator_ = nullptr;
5732 return nullptr;
5733 }
5734
5735
5736 #ifdef DEBUG
5737
5738 Object* const PathTracer::kAnyGlobalObject = NULL;
5739
5740 class PathTracer::MarkVisitor : public ObjectVisitor {
5741 public:
5742   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5743 
5744   void VisitPointers(Object** start, Object** end) override {
5745 // Scan all HeapObject pointers in [start, end)
5746 for (Object** p = start; !tracer_->found() && (p < end); p++) {
5747 if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5748 }
5749 }
5750
5751 private:
5752 PathTracer* tracer_;
5753 };
5754
5755
5756 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5757 public:
5758   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5759 
5760   void VisitPointers(Object** start, Object** end) override {
5761 // Scan all HeapObject pointers in [start, end)
5762 for (Object** p = start; p < end; p++) {
5763 if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5764 }
5765 }
5766
5767 private:
5768 PathTracer* tracer_;
5769 };
5770
5771
5772 void PathTracer::VisitPointers(Object** start, Object** end) {
5773 bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5774 // Visit all HeapObject pointers in [start, end)
5775 for (Object** p = start; !done && (p < end); p++) {
5776 if ((*p)->IsHeapObject()) {
5777 TracePathFrom(p);
5778 done = ((what_to_find_ == FIND_FIRST) && found_target_);
5779 }
5780 }
5781 }
5782
5783
5784 void PathTracer::Reset() {
5785 found_target_ = false;
5786 object_stack_.Clear();
5787 }
5788
5789
5790 void PathTracer::TracePathFrom(Object** root) {
5791 DCHECK((search_target_ == kAnyGlobalObject) ||
5792 search_target_->IsHeapObject());
5793 found_target_in_trace_ = false;
5794 Reset();
5795
5796 MarkVisitor mark_visitor(this);
5797 MarkRecursively(root, &mark_visitor);
5798
5799 UnmarkVisitor unmark_visitor(this);
5800 UnmarkRecursively(root, &unmark_visitor);
5801
5802 ProcessResults();
5803 }
5804
5805
5806 static bool SafeIsNativeContext(HeapObject* obj) {
5807 return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
5808 }
5809
5810
5811 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5812 if (!(*p)->IsHeapObject()) return;
5813
5814 HeapObject* obj = HeapObject::cast(*p);
5815
5816 MapWord map_word = obj->map_word();
5817 if (!map_word.ToMap()->IsHeapObject()) return; // visited before
5818
5819 if (found_target_in_trace_) return; // stop if target found
5820 object_stack_.Add(obj);
5821 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5822 (obj == search_target_)) {
5823 found_target_in_trace_ = true;
5824 found_target_ = true;
5825 return;
5826 }
5827
5828 bool is_native_context = SafeIsNativeContext(obj);
5829
5830 // not visited yet
5831 Map* map = Map::cast(map_word.ToMap());
5832
5833 MapWord marked_map_word =
5834 MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5835 obj->set_map_word(marked_map_word);
5836
5837 // Scan the object body.
5838 if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5839     // This is specialized to scan Contexts properly.
5840 Object** start =
5841 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5842 Object** end =
5843 reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5844 Context::FIRST_WEAK_SLOT * kPointerSize);
5845 mark_visitor->VisitPointers(start, end);
5846 } else {
5847 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5848 }
5849
5850 // Scan the map after the body because the body is a lot more interesting
5851 // when doing leak detection.
5852 MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5853
5854 if (!found_target_in_trace_) { // don't pop if found the target
5855 object_stack_.RemoveLast();
5856 }
5857 }
5858
5859
5860 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5861 if (!(*p)->IsHeapObject()) return;
5862
5863 HeapObject* obj = HeapObject::cast(*p);
5864
5865 MapWord map_word = obj->map_word();
5866 if (map_word.ToMap()->IsHeapObject()) return; // unmarked already
5867
5868 MapWord unmarked_map_word =
5869 MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5870 obj->set_map_word(unmarked_map_word);
5871
5872 Map* map = Map::cast(unmarked_map_word.ToMap());
5873
5874 UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5875
5876 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5877 }
5878
5879
5880 void PathTracer::ProcessResults() {
5881 if (found_target_) {
5882 OFStream os(stdout);
5883 os << "=====================================\n"
5884 << "==== Path to object ====\n"
5885 << "=====================================\n\n";
5886
5887 DCHECK(!object_stack_.is_empty());
5888 for (int i = 0; i < object_stack_.length(); i++) {
5889 if (i > 0) os << "\n |\n |\n V\n\n";
5890 object_stack_[i]->Print(os);
5891 }
5892 os << "=====================================\n";
5893 }
5894 }
5895
5896
5897 // Triggers a depth-first traversal of reachable objects from one
5898 // given root object and finds a path to a specific heap object and
5899 // prints it.
5900 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5901 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5902 tracer.VisitPointer(&root);
5903 }
5904
5905
5906 // Triggers a depth-first traversal of reachable objects from roots
5907 // and finds a path to a specific heap object and prints it.
5908 void Heap::TracePathToObject(Object* target) {
5909 PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5910 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5911 }
5912
5913
5914 // Triggers a depth-first traversal of reachable objects from roots
5915 // and finds a path to any global object and prints it. Useful for
5916 // determining the source for leaks of global objects.
5917 void Heap::TracePathToGlobal() {
5918 PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5919 VISIT_ALL);
5920 IterateRoots(&tracer, VISIT_ONLY_STRONG);
5921 }
5922 #endif
5923
5924
5925 void Heap::UpdateCumulativeGCStatistics(double duration,
5926 double spent_in_mutator,
5927 double marking_time) {
5928 if (FLAG_print_cumulative_gc_stat) {
5929 total_gc_time_ms_ += duration;
5930 max_gc_pause_ = Max(max_gc_pause_, duration);
5931 max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5932 min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5933 } else if (FLAG_trace_gc_verbose) {
5934 total_gc_time_ms_ += duration;
5935 }
5936
5937 marking_time_ += marking_time;
5938 }
5939
5940
5941 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5942 DisallowHeapAllocation no_gc;
5943 // Uses only lower 32 bits if pointers are larger.
5944 uintptr_t addr_hash =
5945 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5946 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5947 }
5948
5949
5950 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5951 DisallowHeapAllocation no_gc;
5952 int index = (Hash(map, name) & kHashMask);
5953 for (int i = 0; i < kEntriesPerBucket; i++) {
5954 Key& key = keys_[index + i];
5955 if ((key.map == *map) && key.name->Equals(*name)) {
5956 return field_offsets_[index + i];
5957 }
5958 }
5959 return kNotFound;
5960 }
5961
5962
5963 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5964 int field_offset) {
5965 DisallowHeapAllocation no_gc;
5966 if (!name->IsUniqueName()) {
5967 if (!StringTable::InternalizeStringIfExists(
5968 name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5969 return;
5970 }
5971 }
5972 // This cache is cleared only between mark compact passes, so we expect the
5973 // cache to only contain old space names.
5974 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5975
5976 int index = (Hash(map, name) & kHashMask);
5977 // After a GC there will be free slots, so we use them in order (this may
5978 // help to get the most frequently used one in position 0).
5979 for (int i = 0; i < kEntriesPerBucket; i++) {
5980     Key& key = keys_[index + i];  // Probe each slot in the bucket in order.
5981 Object* free_entry_indicator = NULL;
5982 if (key.map == free_entry_indicator) {
5983 key.map = *map;
5984 key.name = *name;
5985 field_offsets_[index + i] = field_offset;
5986 return;
5987 }
5988 }
5989 // No free entry found in this bucket, so we move them all down one and
5990 // put the new entry at position zero.
5991 for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5992 Key& key = keys_[index + i];
5993 Key& key2 = keys_[index + i - 1];
5994 key = key2;
5995 field_offsets_[index + i] = field_offsets_[index + i - 1];
5996 }
5997
5998 // Write the new first entry.
5999 Key& key = keys_[index];
6000 key.map = *map;
6001 key.name = *name;
6002 field_offsets_[index] = field_offset;
6003 }
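
// Bucket behavior in brief: each bucket holds kEntriesPerBucket entries. A
// free slot (map == NULL) is taken in order; when the bucket is full, every
// entry shifts down one position and the new entry takes slot 0, evicting
// the oldest entry at the end of the bucket.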
6004
6005
6006 void KeyedLookupCache::Clear() {
6007 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6008 }
6009
6010
6011 void DescriptorLookupCache::Clear() {
6012 for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6013 }
6014
6015
6016 void Heap::ExternalStringTable::CleanUp() {
6017 int last = 0;
6018 for (int i = 0; i < new_space_strings_.length(); ++i) {
6019 if (new_space_strings_[i] == heap_->the_hole_value()) {
6020 continue;
6021 }
6022 DCHECK(new_space_strings_[i]->IsExternalString());
6023 if (heap_->InNewSpace(new_space_strings_[i])) {
6024 new_space_strings_[last++] = new_space_strings_[i];
6025 } else {
6026 old_space_strings_.Add(new_space_strings_[i]);
6027 }
6028 }
6029 new_space_strings_.Rewind(last);
6030 new_space_strings_.Trim();
6031
6032 last = 0;
6033 for (int i = 0; i < old_space_strings_.length(); ++i) {
6034 if (old_space_strings_[i] == heap_->the_hole_value()) {
6035 continue;
6036 }
6037 DCHECK(old_space_strings_[i]->IsExternalString());
6038 DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6039 old_space_strings_[last++] = old_space_strings_[i];
6040 }
6041 old_space_strings_.Rewind(last);
6042 old_space_strings_.Trim();
6043 #ifdef VERIFY_HEAP
6044 if (FLAG_verify_heap) {
6045 Verify();
6046 }
6047 #endif
6048 }
6049
6050
6051 void Heap::ExternalStringTable::TearDown() {
6052 for (int i = 0; i < new_space_strings_.length(); ++i) {
6053 heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6054 }
6055 new_space_strings_.Free();
6056 for (int i = 0; i < old_space_strings_.length(); ++i) {
6057 heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6058 }
6059 old_space_strings_.Free();
6060 }
6061
6062
6063 class Heap::UnmapFreeMemoryTask : public v8::Task {
6064 public:
6065   UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
6066       : heap_(heap), head_(head) {}
6067   virtual ~UnmapFreeMemoryTask() {}
6068
6069 private:
6070 // v8::Task overrides.
6071   void Run() override {
6072 heap_->FreeQueuedChunks(head_);
6073 heap_->pending_unmapping_tasks_semaphore_.Signal();
6074 }
6075
6076 Heap* heap_;
6077 MemoryChunk* head_;
6078
6079 DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
6080 };
6081
6082
6083 void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
6084 while (concurrent_unmapping_tasks_active_ > 0) {
6085 pending_unmapping_tasks_semaphore_.Wait();
6086 concurrent_unmapping_tasks_active_--;
6087 }
6088 }
6089
6090
6091 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6092 // PreFree logically frees the memory chunk. However, the actual freeing
6093 // will happen on a separate thread sometime later.
6094 isolate_->memory_allocator()->PreFreeMemory(chunk);
6095
6096 // The chunks added to this queue will be freed by a concurrent thread.
6097 chunk->set_next_chunk(chunks_queued_for_free_);
6098 chunks_queued_for_free_ = chunk;
6099 }
6100
6101
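// The store buffer may still hold addresses pointing into pages that are
// queued for freeing. Before such a page is actually unmapped, mark it
// ABOUT_TO_BE_FREED and drop every store buffer entry that refers to it, so
// the buffer is never left with dangling pointers.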
void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
  }
  store_buffer()->Compact();
  store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
}


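// Hands the queued chunks either to a background UnmapFreeMemoryTask (when
// concurrent sweeping is on) or frees them synchronously on this thread.
// The semaphore is signalled exactly once on every path, matching the
// counter increment below; see WaitUntilUnmappingOfFreeChunksCompleted().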
void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ != NULL) {
    if (FLAG_concurrent_sweeping) {
      V8::GetCurrentPlatform()->CallOnBackgroundThread(
          new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
          v8::Platform::kShortRunningTask);
    } else {
      FreeQueuedChunks(chunks_queued_for_free_);
      pending_unmapping_tasks_semaphore_.Signal();
    }
    chunks_queued_for_free_ = NULL;
  } else {
    // If we do not have anything to unmap, we just signal the semaphore
    // that we are done.
    pending_unmapping_tasks_semaphore_.Signal();
  }
  concurrent_unmapping_tasks_active_++;
}


void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = list_head; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->PerformFreeMemory(chunk);
  }
}


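// Keeps a small ring buffer of recently unmapped page addresses, XOR-tagged
// so they can be recognized in crash dumps. Illustrative example (assuming
// the default 1 MB page size, i.e. an offset mask of 0xfffff): a compacted
// page at 0x41000000 is remembered as 0x41000000 ^ 0xc1ead == 0x410c1ead,
// which spells "cleared" in the low bits.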
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


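// Strong root ranges registered here are kept alive across GCs in addition
// to the regular root set. The ranges form a singly linked list headed by
// strong_roots_list_; registration prepends in O(1), and unregistration
// below walks the whole list, unlinking and deleting every node whose range
// starts at the given address.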
void Heap::RegisterStrongRoots(Object** start, Object** end) {
  StrongRootsList* list = new StrongRootsList();
  list->next = strong_roots_list_;
  list->start = start;
  list->end = end;
  strong_roots_list_ = list;
}


void Heap::UnregisterStrongRoots(Object** start) {
  StrongRootsList* prev = NULL;
  StrongRootsList* list = strong_roots_list_;
  while (list != nullptr) {
    StrongRootsList* next = list->next;
    if (list->start == start) {
      if (prev) {
        prev->next = next;
      } else {
        strong_roots_list_ = next;
      }
      delete list;
    } else {
      prev = list;
    }
    list = next;
  }
}


size_t Heap::NumberOfTrackedHeapObjectTypes() {
  return ObjectStats::OBJECT_STATS_COUNT;
}


size_t Heap::ObjectCountAtLastGC(size_t index) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
  return object_stats_->object_count_last_gc(index);
}


size_t Heap::ObjectSizeAtLastGC(size_t index) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
  return object_stats_->object_size_last_gc(index);
}


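// Object-stats indices form one flat space: plain instance types come first,
// followed by the ObjectStats::FIRST_CODE_KIND_SUB_TYPE,
// FIRST_FIXED_ARRAY_SUB_TYPE and FIRST_CODE_AGE_SUB_TYPE ranges. This
// function decodes an index back into a (type, sub-type) name pair and
// returns false for indices outside the tracked range.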
bool Heap::GetObjectTypeName(size_t index, const char** object_type,
                             const char** object_sub_type) {
  if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;

  switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
  case name:                          \
    *object_type = #name;             \
    *object_sub_type = "";            \
    return true;
    INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                      \
  case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
    *object_type = "CODE_TYPE";                            \
    *object_sub_type = "CODE_KIND/" #name;                 \
    return true;
    CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                  \
  case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
    *object_type = "FIXED_ARRAY_TYPE";                 \
    *object_sub_type = #name;                          \
    return true;
    FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
#define COMPARE_AND_RETURN_NAME(name)                                  \
  case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
      Code::kFirstCodeAge:                                             \
    *object_type = "CODE_TYPE";                                        \
    *object_sub_type = "CODE_AGE/" #name;                              \
    return true;
    CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
  }
  return false;
}


// static
int Heap::GetStaticVisitorIdForMap(Map* map) {
  return StaticVisitorBase::GetVisitorId(map);
}

}  // namespace internal
}  // namespace v8