// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_HEAP_INL_H_
#define V8_HEAP_HEAP_INL_H_

#include <cmath>

#include "src/base/platform/platform.h"
#include "src/counters-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/object-stats.h"
#include "src/heap/remembered-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/type-feedback-vector-inl.h"

namespace v8 {
namespace internal {

AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}

HeapObject* AllocationResult::ToObjectChecked() {
  CHECK(!IsRetry());
  return HeapObject::cast(object_);
}

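// The promotion queue is stored above the new-space allocation top, with
// rear_ growing downward toward it as entries are pushed (see the DEBUG
// range assert below). When the head would cross limit_, RelocateQueueHead()
// spills all queued entries to a heap-allocated emergency stack, which then
// takes all further inserts.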
void PromotionQueue::insert(HeapObject* target, int32_t size,
                            bool was_marked_black) {
  if (emergency_stack_ != NULL) {
    emergency_stack_->Add(Entry(target, size, was_marked_black));
    return;
  }

  if ((rear_ - 1) < limit_) {
    RelocateQueueHead();
    emergency_stack_->Add(Entry(target, size, was_marked_black));
    return;
  }

  struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
  entry->obj_ = target;
  entry->size_ = size;
  entry->was_marked_black_ = was_marked_black;

// Assert no overflow into live objects.
#ifdef DEBUG
  SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
                              reinterpret_cast<Address>(rear_));
#endif
}

void PromotionQueue::remove(HeapObject** target, int32_t* size,
                            bool* was_marked_black) {
  DCHECK(!is_empty());
  if (front_ == rear_) {
    Entry e = emergency_stack_->RemoveLast();
    *target = e.obj_;
    *size = e.size_;
    *was_marked_black = e.was_marked_black_;
    return;
  }

  struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
  *target = entry->obj_;
  *size = entry->size_;
  *was_marked_black = entry->was_marked_black_;

  // Assert no underflow.
  SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                              reinterpret_cast<Address>(front_));
}

Page* PromotionQueue::GetHeadPage() {
  return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
}

void PromotionQueue::SetNewLimit(Address limit) {
  // If we are already using an emergency stack, we can ignore it.
  if (emergency_stack_) return;

  // If the limit is not on the same page, we can ignore it.
  if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;

  limit_ = reinterpret_cast<struct Entry*>(limit);

  if (limit_ <= rear_) {
    return;
  }

  RelocateQueueHead();
}

bool PromotionQueue::IsBelowPromotionQueue(Address to_space_top) {
  // If an emergency stack is used, the to-space address cannot interfere
  // with the promotion queue.
  if (emergency_stack_) return true;

  // If the given to-space top pointer and the head of the promotion queue
  // are not on the same page, then the to-space objects are below the
  // promotion queue.
  if (GetHeadPage() != Page::FromAddress(to_space_top)) {
    return true;
  }
  // If the to-space top pointer is less than or equal to the promotion
  // queue head, then the to-space objects are below the promotion queue.
  return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
}

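// The blocks below expand V8's root-list macros into trivial accessors over
// the roots_ array. As an illustrative, hand-expanded example (assuming the
// ROOT_LIST entry V(String, empty_string, EmptyString)), the first macro
// yields roughly:
//
//   String* Heap::empty_string() {
//     return String::cast(roots_[kEmptyStringRootIndex]);
//   }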
#define ROOT_ACCESSOR(type, name, camel_name) \
  type* Heap::name() { return type::cast(roots_[k##camel_name##RootIndex]); }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
  Map* Heap::name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR

#define STRING_ACCESSOR(name, str) \
  String* Heap::name() { return String::cast(roots_[k##name##RootIndex]); }
INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
#undef STRING_ACCESSOR

#define SYMBOL_ACCESSOR(name) \
  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define SYMBOL_ACCESSOR(name, description) \
  Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
WELL_KNOWN_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR

#define ROOT_ACCESSOR(type, name, camel_name)                                 \
  void Heap::set_##name(type* value) {                                        \
    /* The deserializer makes use of the fact that these common roots are */  \
    /* never in new space and never on a page that is being compacted. */     \
    DCHECK(!deserialization_complete() ||                                     \
           RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex));    \
    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
    roots_[k##camel_name##RootIndex] = value;                                 \
  }
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR

PagedSpace* Heap::paged_space(int idx) {
  DCHECK_NE(idx, LO_SPACE);
  DCHECK_NE(idx, NEW_SPACE);
  return static_cast<PagedSpace*>(space_[idx]);
}

Space* Heap::space(int idx) { return space_[idx]; }

Address* Heap::NewSpaceAllocationTopAddress() {
  return new_space_->allocation_top_address();
}

Address* Heap::NewSpaceAllocationLimitAddress() {
  return new_space_->allocation_limit_address();
}

Address* Heap::OldSpaceAllocationTopAddress() {
  return old_space_->allocation_top_address();
}

Address* Heap::OldSpaceAllocationLimitAddress() {
  return old_space_->allocation_limit_address();
}

void Heap::UpdateNewSpaceAllocationCounter() {
  new_space_allocation_counter_ = NewSpaceAllocationCounter();
}

size_t Heap::NewSpaceAllocationCounter() {
  return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
}

template <>
bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
  return chars == str.length();
}


template <>
bool inline Heap::IsOneByte(String* str, int chars) {
  return str->IsOneByteRepresentation();
}


AllocationResult Heap::AllocateInternalizedStringFromUtf8(
    Vector<const char> str, int chars, uint32_t hash_field) {
  if (IsOneByte(str, chars)) {
    return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
                                             hash_field);
  }
  return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
}


template <typename T>
AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
                                                      uint32_t hash_field) {
  if (IsOneByte(t, chars)) {
    return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
  }
  return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
}


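// The two allocators below create sequential internalized strings directly in
// old space: the map, length, and hash fields are written first, and the raw
// character payload is then copied in right after the fixed-size header.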
AllocationResult Heap::AllocateOneByteInternalizedString(
    Vector<const uint8_t> str, uint32_t hash_field) {
  CHECK_GE(String::kMaxLength, str.length());
  // Compute map and object size.
  Map* map = one_byte_internalized_string_map();
  int size = SeqOneByteString::SizeFor(str.length());

  // Allocate string.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  // String maps are all immortal immovable objects.
  result->set_map_no_write_barrier(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  // Fill in the characters.
  MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
          str.length());

  return answer;
}


AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
                                                         uint32_t hash_field) {
  CHECK_GE(String::kMaxLength, str.length());
  // Compute map and object size.
  Map* map = internalized_string_map();
  int size = SeqTwoByteString::SizeFor(str.length());

  // Allocate string.
  HeapObject* result = nullptr;
  {
    AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
    if (!allocation.To(&result)) return allocation;
  }

  result->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(str.length());
  answer->set_hash_field(hash_field);

  DCHECK_EQ(size, answer->Size());

  // Fill in the characters.
  MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
          str.length() * kUC16Size);

  return answer;
}

AllocationResult Heap::CopyFixedArray(FixedArray* src) {
  if (src->length() == 0) return src;
  return CopyFixedArrayWithMap(src, src->map());
}


AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
  if (src->length() == 0) return src;
  return CopyFixedDoubleArrayWithMap(src, src->map());
}


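// AllocateRaw is the bottleneck through which all heap allocation goes. A
// typical call site (an illustrative sketch, mirroring the string allocators
// above) propagates failure as a retry in the requested space:
//
//   HeapObject* result = nullptr;
//   AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
//   if (!allocation.To(&result)) return allocation;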
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
                                   AllocationAlignment alignment) {
  DCHECK(AllowHandleAllocation::IsAllowed());
  DCHECK(AllowHeapAllocation::IsAllowed());
  DCHECK(gc_state_ == NOT_IN_GC);
#ifdef DEBUG
  if (FLAG_gc_interval >= 0 && !always_allocate() &&
      Heap::allocation_timeout_-- <= 0) {
    return AllocationResult::Retry(space);
  }
  isolate_->counters()->objs_since_last_full()->Increment();
  isolate_->counters()->objs_since_last_young()->Increment();
#endif

  bool large_object = size_in_bytes > kMaxRegularHeapObjectSize;
  HeapObject* object = nullptr;
  AllocationResult allocation;
  if (NEW_SPACE == space) {
    if (large_object) {
      space = LO_SPACE;
    } else {
      allocation = new_space_->AllocateRaw(size_in_bytes, alignment);
      if (allocation.To(&object)) {
        OnAllocationEvent(object, size_in_bytes);
      }
      return allocation;
    }
  }

  // Here we only allocate in the old generation.
  if (OLD_SPACE == space) {
    if (large_object) {
      allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
    } else {
      allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
    }
  } else if (CODE_SPACE == space) {
    if (size_in_bytes <= code_space()->AreaSize()) {
      allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
    } else {
      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
    }
  } else if (LO_SPACE == space) {
    DCHECK(large_object);
    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
  } else if (MAP_SPACE == space) {
    allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
  } else {
    // NEW_SPACE is not allowed here.
    UNREACHABLE();
  }
  if (allocation.To(&object)) {
    OnAllocationEvent(object, size_in_bytes);
  }

  return allocation;
}


void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
  HeapProfiler* profiler = isolate_->heap_profiler();
  if (profiler->is_tracking_allocations()) {
    profiler->AllocationEvent(object->address(), size_in_bytes);
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(object);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAlloctionsHash();
    }
  }

  if (FLAG_trace_allocation_stack_interval > 0) {
    if (!FLAG_verify_predictable) ++allocations_count_;
    if (allocations_count_ % FLAG_trace_allocation_stack_interval == 0) {
      isolate()->PrintStack(stdout, Isolate::kPrintStackConcise);
    }
  }
}


void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
                       int size_in_bytes) {
  HeapProfiler* heap_profiler = isolate_->heap_profiler();
  if (heap_profiler->is_tracking_object_moves()) {
    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                   size_in_bytes);
  }
  if (target->IsSharedFunctionInfo()) {
    LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
                                                         target->address()));
  }

  if (FLAG_verify_predictable) {
    ++allocations_count_;
    // Advance synthetic time by making a time request.
    MonotonicallyIncreasingTimeInMs();

    UpdateAllocationsHash(source);
    UpdateAllocationsHash(target);
    UpdateAllocationsHash(size_in_bytes);

    if (allocations_count_ % FLAG_dump_allocations_digest_at_alloc == 0) {
      PrintAlloctionsHash();
    }
  }
}


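// For --verify-predictable runs, each allocation is folded into a running
// hash. Objects are hashed by their page offset combined with the owning
// space's id, so the digest is stable across runs with the same allocation
// sequence even though absolute addresses differ.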
void Heap::UpdateAllocationsHash(HeapObject* object) {
  Address object_address = object->address();
  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
  AllocationSpace allocation_space = memory_chunk->owner()->identity();

  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
  uint32_t value =
      static_cast<uint32_t>(object_address - memory_chunk->address()) |
      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);

  UpdateAllocationsHash(value);
}


void Heap::UpdateAllocationsHash(uint32_t value) {
  uint16_t c1 = static_cast<uint16_t>(value);
  uint16_t c2 = static_cast<uint16_t>(value >> 16);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
  raw_allocations_hash_ =
      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
}


void Heap::RegisterExternalString(String* string) {
  external_string_table_.AddString(string);
}


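// The external resource pointer is a raw field inside the string object; the
// address computation below untags the HeapObject pointer (-kHeapObjectTag)
// and adds the field offset to reach it.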
void Heap::FinalizeExternalString(String* string) {
  DCHECK(string->IsExternalString());
  v8::String::ExternalStringResourceBase** resource_addr =
      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
          kHeapObjectTag);

  // Dispose of the C++ object if it has not already been disposed.
  if (*resource_addr != NULL) {
    (*resource_addr)->Dispose();
    *resource_addr = NULL;
  }
}

Address Heap::NewSpaceTop() { return new_space_->top(); }

bool Heap::DeoptMaybeTenuredAllocationSites() {
  return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
}

bool Heap::InNewSpace(Object* object) {
  // Inlined check from NewSpace::Contains.
  bool result =
      object->IsHeapObject() &&
      Page::FromAddress(HeapObject::cast(object)->address())->InNewSpace();
  DCHECK(!result ||                 // Either not in new space
         gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
         InToSpace(object));        // ... or in to-space (where we allocate).
  return result;
}

bool Heap::InFromSpace(Object* object) {
  return object->IsHeapObject() &&
         MemoryChunk::FromAddress(HeapObject::cast(object)->address())
             ->IsFlagSet(Page::IN_FROM_SPACE);
}


bool Heap::InToSpace(Object* object) {
  return object->IsHeapObject() &&
         MemoryChunk::FromAddress(HeapObject::cast(object)->address())
             ->IsFlagSet(Page::IN_TO_SPACE);
}

bool Heap::InOldSpace(Object* object) { return old_space_->Contains(object); }

bool Heap::InNewSpaceSlow(Address address) {
  return new_space_->ContainsSlow(address);
}

bool Heap::InOldSpaceSlow(Address address) {
  return old_space_->ContainsSlow(address);
}

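// An object should be promoted if it lies below the new-space age mark,
// i.e. it has already survived a scavenge.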
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
  Page* page = Page::FromAddress(old_address);
  Address age_mark = new_space_->age_mark();
  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         (!page->ContainsLimit(age_mark) || old_address < age_mark);
}

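// The write-barrier helpers below record locations of old-to-new pointers in
// the store buffer, so the scavenger can find and update them without
// scanning the whole old generation.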
void Heap::RecordWrite(Object* object, int offset, Object* o) {
  if (!InNewSpace(o) || !object->IsHeapObject() || InNewSpace(object)) {
    return;
  }
  store_buffer()->InsertEntry(HeapObject::cast(object)->address() + offset);
}

void Heap::RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value) {
  if (InNewSpace(value)) {
    RecordWriteIntoCodeSlow(host, rinfo, value);
  }
}

void Heap::RecordFixedArrayElements(FixedArray* array, int offset, int length) {
  if (InNewSpace(array)) return;
  for (int i = 0; i < length; i++) {
    if (!InNewSpace(array->get(offset + i))) continue;
    store_buffer()->InsertEntry(
        reinterpret_cast<Address>(array->RawFieldOfElementAt(offset + i)));
  }
}

Address* Heap::store_buffer_top_address() {
  return store_buffer()->top_address();
}

bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
  // Object migration is governed by the following rules:
  //
  // 1) Objects in new-space can be migrated to the old space
  //    that matches their target space or they stay in new-space.
  // 2) Objects in old-space stay in the same space when migrating.
  // 3) Fillers (two or more words) can migrate due to left-trimming of
  //    fixed arrays in new-space or old space.
  // 4) Fillers (one word) can never migrate; they are skipped by
  //    incremental marking explicitly to prevent invalid patterns.
  //
  // Since this function is used for debugging only, we do not place
  // asserts here, but check everything explicitly.
  if (obj->map() == one_pointer_filler_map()) return false;
  InstanceType type = obj->map()->instance_type();
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  AllocationSpace src = chunk->owner()->identity();
  switch (src) {
    case NEW_SPACE:
      return dst == src || dst == OLD_SPACE;
    case OLD_SPACE:
      return dst == src &&
             (dst == OLD_SPACE || obj->IsFiller() || obj->IsExternalString());
    case CODE_SPACE:
      return dst == src && type == CODE_TYPE;
    case MAP_SPACE:
    case LO_SPACE:
      return false;
  }
  UNREACHABLE();
  return false;
}

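// Copies byte_size bytes word by word; byte_size is assumed to be a multiple
// of kPointerSize (any remainder would be silently dropped by the division).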
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
            static_cast<size_t>(byte_size / kPointerSize));
}

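// Checks whether an AllocationMemento directly follows |object| in new space.
// Only the candidate's map word is inspected at first, which is why a single
// word past the object (last_memento_word_address) must still be on the same
// page.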
template <Heap::FindMementoMode mode>
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
  Address object_address = object->address();
  Address memento_address = object_address + object->Size();
  Address last_memento_word_address = memento_address + kPointerSize;
  // If the memento would be on another page, bail out immediately.
  if (!Page::OnSamePage(object_address, last_memento_word_address)) {
    return nullptr;
  }
  HeapObject* candidate = HeapObject::FromAddress(memento_address);
  Map* candidate_map = candidate->map();
  // This fast check may peek at an uninitialized word. However, the slow check
  // below (memento_address == top) ensures that this is safe. Mark the word as
  // initialized to silence MemorySanitizer warnings.
  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
  if (candidate_map != allocation_memento_map()) {
    return nullptr;
  }

  // Bail out if the memento is below the age mark, which can happen when
  // mementos survived because a page got moved within new space.
  Page* object_page = Page::FromAddress(object_address);
  if (object_page->IsFlagSet(Page::NEW_SPACE_BELOW_AGE_MARK)) {
    Address age_mark =
        reinterpret_cast<SemiSpace*>(object_page->owner())->age_mark();
    if (!object_page->Contains(age_mark)) {
      return nullptr;
    }
    // Do an exact check in the case where the age mark is on the same page.
    if (object_address < age_mark) {
      return nullptr;
    }
  }

  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);

  // Depending on what the memento is used for, we might need to perform
  // additional checks.
  Address top;
  switch (mode) {
    case Heap::kForGC:
      return memento_candidate;
    case Heap::kForRuntime:
      if (memento_candidate == nullptr) return nullptr;
      // Either the object is the last object in the new space, or there is
      // another object of at least word size (the header map word) following
      // it, so it suffices to compare ptr and top here.
      top = NewSpaceTop();
      DCHECK(memento_address == top ||
             memento_address + HeapObject::kHeaderSize <= top ||
             !Page::OnSamePage(memento_address, top - 1));
      if ((memento_address != top) && memento_candidate->IsValid()) {
        return memento_candidate;
      }
      return nullptr;
    default:
      UNREACHABLE();
  }
  UNREACHABLE();
  return nullptr;
}

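// Records pretenuring feedback for |object|. In kGlobal mode (used by the
// scavenger) the allocation site is dereferenced and updated directly; in
// kCached mode (parallel evacuation) only the site's address is counted in a
// local table, to be merged later, since the site may not be touched
// concurrently.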
template <Heap::UpdateAllocationSiteMode mode>
void Heap::UpdateAllocationSite(HeapObject* object,
                                base::HashMap* pretenuring_feedback) {
  DCHECK(InFromSpace(object) ||
         (InToSpace(object) &&
          Page::FromAddress(object->address())
              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) ||
         (!InNewSpace(object) &&
          Page::FromAddress(object->address())
              ->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)));
  if (!FLAG_allocation_site_pretenuring ||
      !AllocationSite::CanTrack(object->map()->instance_type()))
    return;
  AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
  if (memento_candidate == nullptr) return;

  if (mode == kGlobal) {
    DCHECK_EQ(pretenuring_feedback, global_pretenuring_feedback_);
    // Entering global pretenuring feedback is only done in the scavenger,
    // where we are allowed to actually touch the allocation site.
    if (!memento_candidate->IsValid()) return;
    AllocationSite* site = memento_candidate->GetAllocationSite();
    DCHECK(!site->IsZombie());
    // For inserting in the global pretenuring storage we need to first
    // increment the memento found count on the allocation site.
    if (site->IncrementMementoFoundCount()) {
      global_pretenuring_feedback_->LookupOrInsert(site,
                                                   ObjectHash(site->address()));
    }
  } else {
    DCHECK_EQ(mode, kCached);
    DCHECK_NE(pretenuring_feedback, global_pretenuring_feedback_);
    // Entering cached feedback is used in the parallel case. We are not
    // allowed to dereference the allocation site and instead have to postpone
    // all checks until the data is actually merged.
    Address key = memento_candidate->GetAllocationSiteUnchecked();
    base::HashMap::Entry* e =
        pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
    DCHECK(e != nullptr);
    (*bit_cast<intptr_t*>(&e->value))++;
  }
}


void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
  global_pretenuring_feedback_->Remove(
      site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
}

bool Heap::CollectGarbage(AllocationSpace space,
                          GarbageCollectionReason gc_reason,
                          const v8::GCCallbackFlags callbackFlags) {
  const char* collector_reason = NULL;
  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
}


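// Recovers the owning Isolate without storing a back pointer: a dummy
// Isolate* at address 16 is used only to compute the compile-time offset of
// the heap_ field inside Isolate, which is then subtracted from |this|.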
Isolate* Heap::isolate() {
  return reinterpret_cast<Isolate*>(
      reinterpret_cast<intptr_t>(this) -
      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
}


void Heap::ExternalStringTable::AddString(String* string) {
  DCHECK(string->IsExternalString());
  if (heap_->InNewSpace(string)) {
    new_space_strings_.Add(string);
  } else {
    old_space_strings_.Add(string);
  }
}


void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
  if (!new_space_strings_.is_empty()) {
    Object** start = &new_space_strings_[0];
    v->VisitPointers(start, start + new_space_strings_.length());
  }
  if (!old_space_strings_.is_empty()) {
    Object** start = &old_space_strings_[0];
    v->VisitPointers(start, start + old_space_strings_.length());
  }
}


// Verify() is inline to avoid ifdef-s around its calls in release
// mode.
void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    Object* obj = Object::cast(new_space_strings_[i]);
    DCHECK(heap_->InNewSpace(obj));
    DCHECK(!obj->IsTheHole(heap_->isolate()));
  }
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    Object* obj = Object::cast(old_space_strings_[i]);
    DCHECK(!heap_->InNewSpace(obj));
    DCHECK(!obj->IsTheHole(heap_->isolate()));
  }
#endif
}


void Heap::ExternalStringTable::AddOldString(String* string) {
  DCHECK(string->IsExternalString());
  DCHECK(!heap_->InNewSpace(string));
  old_space_strings_.Add(string);
}


void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
  new_space_strings_.Rewind(position);
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}

void Heap::ClearInstanceofCache() { set_instanceof_cache_function(Smi::kZero); }

Oddball* Heap::ToBoolean(bool condition) {
  return condition ? true_value() : false_value();
}


void Heap::CompletelyClearInstanceofCache() {
  set_instanceof_cache_map(Smi::kZero);
  set_instanceof_cache_function(Smi::kZero);
}


uint32_t Heap::HashSeed() {
  uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
  DCHECK(FLAG_randomize_hashes || seed == 0);
  return seed;
}


int Heap::NextScriptId() {
  int last_id = last_script_id()->value();
  if (last_id == Smi::kMaxValue) {
    last_id = 1;
  } else {
    last_id++;
  }
  set_last_script_id(Smi::FromInt(last_id));
  return last_id;
}

void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
  DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::kZero);
  set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetConstructStubDeoptPCOffset(int pc_offset) {
  DCHECK(construct_stub_deopt_pc_offset() == Smi::kZero);
  set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetGetterStubDeoptPCOffset(int pc_offset) {
  DCHECK(getter_stub_deopt_pc_offset() == Smi::kZero);
  set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
  DCHECK(setter_stub_deopt_pc_offset() == Smi::kZero);
  set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
}

void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) {
  DCHECK(interpreter_entry_return_pc_offset() == Smi::kZero);
  set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset));
}

int Heap::GetNextTemplateSerialNumber() {
  int next_serial_number = next_template_serial_number()->value() + 1;
  set_next_template_serial_number(Smi::FromInt(next_serial_number));
  return next_serial_number;
}

void Heap::SetSerializedTemplates(FixedArray* templates) {
  DCHECK_EQ(empty_fixed_array(), serialized_templates());
  set_serialized_templates(templates);
}

void Heap::CreateObjectStats() {
  if (V8_LIKELY(FLAG_gc_stats == 0)) return;
  if (!live_object_stats_) {
    live_object_stats_ = new ObjectStats(this);
  }
  if (!dead_object_stats_) {
    dead_object_stats_ = new ObjectStats(this);
  }
}

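// RAII scope: while at least one AlwaysAllocateScope is live, the counter is
// non-zero and always_allocate() holds, e.g. disabling the DEBUG-only
// allocation timeout in AllocateRaw above.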
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
    : heap_(isolate->heap()) {
  heap_->always_allocate_scope_count_.Increment(1);
}


AlwaysAllocateScope::~AlwaysAllocateScope() {
  heap_->always_allocate_scope_count_.Increment(-1);
}


void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsHeapObject()) {
      HeapObject* object = HeapObject::cast(*current);
      CHECK(object->GetIsolate()->heap()->Contains(object));
      CHECK(object->map()->IsMap());
    }
  }
}


void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    CHECK((*current)->IsSmi());
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_HEAP_INL_H_