// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
  p_ = p_->next_page();
  return *this;
}

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
  PageIteratorImpl<PAGE_TYPE> tmp(*this);
  operator++();
  return tmp;
}

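// A page range covering every page from `start` up to and including the page
// containing `limit` (an allocation-area address).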
NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
    : range_(Page::FromAddress(start),
             Page::FromAllocationAreaAddress(limit)->next_page()) {
  SemiSpace::AssertValidRange(start, limit);
}

// -----------------------------------------------------------------------------
// SemiSpaceIterator

HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}
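
// Illustrative use (a sketch, not part of this header): walk the live objects
// of new space with the iterator.
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // ... visit obj ...
//   }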

// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != nullptr) return next_obj;
  } while (AdvanceToNextPage());
  return nullptr;
}

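// Returns the next non-filler object on the current page, or nullptr once the
// page is exhausted. The unused gap [top, limit) of the linear allocation
// area is skipped, since it contains no objects.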
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    const int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK_LE(cur_addr_, cur_end_);
    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject* o) {
  return id_ == kToSpace
             ? MemoryChunk::FromAddress(o->address())->InToSpace()
             : MemoryChunk::FromAddress(o->address())->InFromSpace();
}

bool SemiSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  for (Page* p : *this) {
    if (p == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(HeapObject* o) {
  return MemoryChunk::FromAddress(o->address())->InNewSpace();
}

bool NewSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::FromSpaceContainsSlow(Address a) {
  return from_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       SemiSpace* owner) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (owner->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  page->AllocateLocalTracker();
  return page;
}

// --------------------------------------------------------------------------
// PagedSpace

template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);

  owner->IncreaseCapacity(page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  // If the memory is not freed here, the whole page is effectively accounted
  // for as allocated memory that cannot be used for further allocations.
  if (mode == kFreeMemory) {
    owner->Free(page->area_start(), page->area_size());
  }

  return page;
}

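// Converts a new-space page in place into an old-space page: ownership and
// flags are reset, the committed memory is accounted to old space, and the
// page is re-initialized (without freeing its area) and linked into old space.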
Page* Page::ConvertNewToOld(Page* old_page) {
  OldSpace* old_space = old_page->heap()->old_space();
  DCHECK(old_page->InNewSpace());
  old_page->set_owner(old_space);
  old_page->SetFlags(0, ~0);
  old_space->AccountCommitted(old_page->size());
  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
      old_page->heap(), old_page, NOT_EXECUTABLE, old_space);
  new_page->InsertAfter(old_space->anchor()->prev_page());
  return new_page;
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
  }
}

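// Live-byte accounting. Each chunk counts the bytes known to be live on it;
// the counter is updated both from the GC and from the mutator.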
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}

void MemoryChunk::ResetLiveBytes() {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
                 static_cast<void*>(this), live_byte_count_);
  }
  live_byte_count_ = 0;
}

void MemoryChunk::IncrementLiveBytes(int by) {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(
        heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
        static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
  }
  live_byte_count_ += by;
  DCHECK_GE(live_byte_count_, 0);
  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}

void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

bool PagedSpace::Contains(Object* o) {
  if (!o->IsHeapObject()) return false;
  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    free_list()->RemoveCategory(category);
  });
}

intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  intptr_t added = 0;
  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
    added += category->available();
    category->Relink();
  });
  DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
  return added;
}

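// Maps an arbitrary heap address to its chunk. Addresses that do not point
// past the header of a regular chunk may be interior pointers into a large
// object, so they are resolved through the large-object space instead.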
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  uintptr_t offset = addr - chunk->address();
  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
    chunk = heap->lo_space()->FindPage(addr);
  }
  return chunk;
}

Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(this->owner()->identity() != NEW_SPACE);
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(old_to_old_slots_);
    DCHECK_NULL(typed_old_to_old_slots_);
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

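// Iterates over the chunks of the old, map, code, and large-object spaces, in
// that order.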
MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
      old_iterator_(heap->old_space()->begin()),
      code_iterator_(heap->code_space()->begin()),
      map_iterator_(heap->map_space()->begin()),
      lo_iterator_(heap->lo_space()->begin()) {}

MemoryChunk* MemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
      state_ = kCodeState;
      // Fall through.
    }
    case kCodeState: {
      if (code_iterator_ != heap_->code_space()->end())
        return *(code_iterator_++);
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
      state_ = kFinishedState;
      // Fall through.
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
  return nullptr;
}

Page* FreeListCategory::page() {
  return Page::FromAddress(reinterpret_cast<Address>(this));
}

FreeList* FreeListCategory::owner() {
  return reinterpret_cast<PagedSpace*>(
             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
      ->free_list();
}

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}

// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return nullptr;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return nullptr;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation: try the linear allocation area first, then the free list,
// and finally the slow path (which may complete sweeping or expand the space).
AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == nullptr) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == nullptr) {
      object = SlowAllocateRaw(size_in_bytes);
    }
    if (object != nullptr) {
      if (heap()->incremental_marking()->black_allocation()) {
        Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
        MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
      }
    }
  }

  if (object != nullptr) {
    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw allocation with alignment: try the aligned linear fast path first; on
// failure, fall back to the free list and then the slow path, padding with a
// filler object where needed to reach the requested alignment.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == nullptr) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == nullptr) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != nullptr && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != nullptr) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


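// On 32-bit hosts the word-aligned bump pointer cannot guarantee the 8-byte
// alignment doubles need, so kDoubleAligned requests take the aligned path;
// on 64-bit hosts every allocation is already sufficiently aligned.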
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment == kDoubleAligned
          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
          : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject* heap_obj = nullptr;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    AllocationStep(heap_obj->address(), size_in_bytes);
  }
  return result;
}


// -----------------------------------------------------------------------------
// NewSpace


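// Allocates size_in_bytes plus whatever filler is needed to reach `alignment`;
// if the linear area is too small, EnsureAllocation tries to make room (which
// may move the top) before the allocation is retried.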
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}

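// Large code pages must stay within TypedSlotSet::kMaxOffset so that typed
// slots recorded on them remain addressable; anything bigger is fatal.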
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, Space* owner) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}

size_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->memory_allocator()->Available());
}


LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}


LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}


bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}
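
// Illustrative LAB lifecycle (a sketch; kLabSize and the surrounding code are
// assumptions, not part of this header): carve a buffer out of new space,
// then bump-allocate from it without further synchronization.
//
//   AllocationResult result =
//       heap->new_space()->AllocateRawSynchronized(kLabSize, kWordAligned);
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
//   if (lab.IsValid()) {
//     AllocationResult object = lab.AllocateRawAligned(size, kWordAligned);
//   }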

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_