1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7 
8 #include <list>
9 #include <memory>
10 #include <unordered_set>
11 
12 #include "src/allocation.h"
13 #include "src/base/atomic-utils.h"
14 #include "src/base/atomicops.h"
15 #include "src/base/bits.h"
16 #include "src/base/hashmap.h"
17 #include "src/base/platform/mutex.h"
18 #include "src/flags.h"
19 #include "src/globals.h"
20 #include "src/heap/heap.h"
21 #include "src/heap/marking.h"
22 #include "src/list.h"
23 #include "src/objects.h"
24 #include "src/utils.h"
25 
26 namespace v8 {
27 namespace internal {
28 
29 class AllocationInfo;
30 class AllocationObserver;
31 class CompactionSpace;
32 class CompactionSpaceCollection;
33 class FreeList;
34 class Isolate;
35 class LocalArrayBufferTracker;
36 class MemoryAllocator;
37 class MemoryChunk;
38 class Page;
39 class PagedSpace;
40 class SemiSpace;
41 class SkipList;
42 class SlotsBuffer;
43 class SlotSet;
44 class TypedSlotSet;
45 class Space;
46 
47 // -----------------------------------------------------------------------------
48 // Heap structures:
49 //
50 // A JS heap consists of a young generation, an old generation, and a large
51 // object space. The young generation is divided into two semispaces. A
52 // scavenger implements Cheney's copying algorithm. The old generation is
53 // separated into a map space and an old object space. The map space contains
54 // all (and only) map objects; the rest of the old objects go into the old space.
55 // The old generation is collected by a mark-sweep-compact collector.
56 //
57 // The semispaces of the young generation are contiguous.  The old and map
58 // spaces consist of a list of pages. A page has a page header and an object
59 // area.
60 //
61 // There is a separate large object space for objects larger than
62 // kMaxRegularHeapObjectSize, so that they do not have to move during
63 // collection. The large object space is paged. Pages in large object space
64 // may be larger than the page size.
65 //
66 // A store-buffer based write barrier is used to keep track of intergenerational
67 // references.  See heap/store-buffer.h.
68 //
69 // During scavenges and mark-sweep collections we sometimes (after a store
70 // buffer overflow) iterate intergenerational pointers without decoding heap
71 // object maps, so if the page belongs to old space or large object space
72 // it is essential to guarantee that the page does not contain any
73 // garbage pointers to new space: every pointer aligned word which satisfies
74 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
75 // new space. Thus objects in old space and large object spaces should have a
76 // special layout (e.g. no bare integer fields). This requirement does not
77 // apply to map space which is iterated in a special fashion. However we still
78 // require pointer fields of dead maps to be cleaned.
79 //
80 // To enable lazy cleaning of old space pages we can mark chunks of the page
81 // as being garbage.  Garbage sections are marked with a special map.  These
82 // sections are skipped when scanning the page, even if we are otherwise
83 // scanning without regard for object boundaries.  Garbage sections are chained
84 // together to form a free list after a GC.  Garbage sections created outside
85 // of GCs by object truncation etc. may not be in the free list chain.  Very
86 // small free spaces are ignored, they need only be cleaned of bogus pointers
87 // into new space.
88 //
89 // Each page may have up to one special garbage section.  The start of this
90 // section is denoted by the top field in the space.  The end of the section
91 // is denoted by the limit field in the space.  This special garbage section
92 // is not marked with a free space map in the data.  The point of this section
93 // is to enable linear allocation without having to constantly update the byte
94 // array every time the top field is updated and a new object is created.  The
95 // special garbage section is not in the chain of garbage sections.
96 //
97 // Since the top and limit fields are in the space, not the page, only one page
98 // has a special garbage section, and if the top and limit are equal then there
99 // is no special garbage section.
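//
// A rough sketch of the bump-pointer allocation that the top/limit fields
// enable (illustrative only; |top|, |limit| and |size_in_bytes| are stand-in
// local names, not fields declared in this file):
//
//   Address top = ...;    // start of the special garbage section
//   Address limit = ...;  // end of the special garbage section
//   if (top + size_in_bytes <= limit) {
//     HeapObject* result = HeapObject::FromAddress(top);
//     top += size_in_bytes;  // bump the top; no free-space map is written
//   }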
100 
101 // Some assertion macros used in the debugging mode.
102 
103 #define DCHECK_PAGE_ALIGNED(address) \
104   DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
105 
106 #define DCHECK_OBJECT_ALIGNED(address) \
107   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
108 
109 #define DCHECK_OBJECT_SIZE(size) \
110   DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
111 
112 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
113   DCHECK((0 < size) && (size <= code_space->AreaSize()))
114 
115 #define DCHECK_PAGE_OFFSET(offset) \
116   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
117 
118 enum FreeListCategoryType {
119   kTiniest,
120   kTiny,
121   kSmall,
122   kMedium,
123   kLarge,
124   kHuge,
125 
126   kFirstCategory = kTiniest,
127   kLastCategory = kHuge,
128   kNumberOfCategories = kLastCategory + 1,
129   kInvalidCategory
130 };
131 
132 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
133 
134 // A free list category maintains a linked list of free memory blocks.
135 class FreeListCategory {
136  public:
137   static const int kSize = kIntSize +      // FreeListCategoryType type_
138                            kIntSize +      // padding for type_
139                            kSizetSize +    // size_t available_
140                            kPointerSize +  // FreeSpace* top_
141                            kPointerSize +  // FreeListCategory* prev_
142                            kPointerSize;   // FreeListCategory* next_
143 
144   FreeListCategory()
145       : type_(kInvalidCategory),
146         available_(0),
147         top_(nullptr),
148         prev_(nullptr),
149         next_(nullptr) {}
150 
151   void Initialize(FreeListCategoryType type) {
152     type_ = type;
153     available_ = 0;
154     top_ = nullptr;
155     prev_ = nullptr;
156     next_ = nullptr;
157   }
158 
159   void Invalidate();
160 
161   void Reset();
162 
163   void ResetStats() { Reset(); }
164 
165   void RepairFreeList(Heap* heap);
166 
167   // Relinks the category into the currently owning free list. Requires that the
168   // category is currently unlinked.
169   void Relink();
170 
171   bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
172 
173   // Picks a node from the list and stores its size in |node_size|. Returns
174   // nullptr if the category is empty.
175   FreeSpace* PickNodeFromList(size_t* node_size);
176 
177   // Performs a single try to pick a node of at least |minimum_size| from the
178   // category. Stores the actual size in |node_size|. Returns nullptr if no
179   // node is found.
180   FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
181 
182   // Picks a node of at least |minimum_size| from the category. Stores the
183   // actual size in |node_size|. Returns nullptr if no node is found.
184   FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
185 
186   inline FreeList* owner();
187   inline bool is_linked();
188   bool is_empty() { return top() == nullptr; }
189   size_t available() const { return available_; }
190 
191 #ifdef DEBUG
192   size_t SumFreeList();
193   int FreeListLength();
194 #endif
195 
196  private:
197   // For debug builds we accurately compute free list lengths up until
198   // {kVeryLongFreeList} by manually walking the list.
199   static const int kVeryLongFreeList = 500;
200 
201   inline Page* page();
202 
203   FreeSpace* top() { return top_; }
204   void set_top(FreeSpace* top) { top_ = top; }
205   FreeListCategory* prev() { return prev_; }
206   void set_prev(FreeListCategory* prev) { prev_ = prev; }
207   FreeListCategory* next() { return next_; }
208   void set_next(FreeListCategory* next) { next_ = next; }
209 
210   // |type_|: The type of this free list category.
211   FreeListCategoryType type_;
212 
213   // |available_|: Total available bytes in all blocks of this free list
214   // category.
215   size_t available_;
216 
217   // |top_|: Points to the top FreeSpace* in the free list category.
218   FreeSpace* top_;
219 
220   FreeListCategory* prev_;
221   FreeListCategory* next_;
222 
223   friend class FreeList;
224   friend class PagedSpace;
225 };
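
// A sketch of how a single category is typically queried by its owning free
// list (illustrative only; |category|, |minimum_size| and |node_size| are
// stand-in locals):
//
//   size_t node_size = 0;
//   FreeSpace* node = category->TryPickNodeFromList(minimum_size, &node_size);
//   if (node == nullptr) {
//     node = category->SearchForNodeInList(minimum_size, &node_size);
//   }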
226 
227 // MemoryChunk represents a memory region owned by a specific space.
228 // It is divided into the header and the body. Chunk start is always
229 // 1MB aligned. Start of the body is aligned so it can accommodate
230 // any heap object.
231 class MemoryChunk {
232  public:
233   enum Flag {
234     NO_FLAGS = 0u,
235     IS_EXECUTABLE = 1u << 0,
236     POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
237     POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
238     // A page in new space has one of the next two flags set.
239     IN_FROM_SPACE = 1u << 3,
240     IN_TO_SPACE = 1u << 4,
241     NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
242     EVACUATION_CANDIDATE = 1u << 6,
243     NEVER_EVACUATE = 1u << 7,
244 
245     // Large objects can have a progress bar in their page header. These objects
246     // are scanned in increments and will be kept black while being scanned.
247     // Even if the mutator writes to them they will be kept black and a white
248     // to grey transition is performed in the value.
249     HAS_PROGRESS_BAR = 1u << 8,
250 
251     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
252     // from new to old space during evacuation.
253     PAGE_NEW_OLD_PROMOTION = 1u << 9,
254 
255     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
256     // within the new space during evacuation.
257     PAGE_NEW_NEW_PROMOTION = 1u << 10,
258 
259     // This flag is intended to be used for testing. Works only when both
260     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
261     // are set. It forces the page to become an evacuation candidate at next
262     // candidates selection cycle.
263     FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
264 
265     // This flag is intended to be used for testing.
266     NEVER_ALLOCATE_ON_PAGE = 1u << 12,
267 
268     // The memory chunk is already logically freed, however the actual freeing
269     // still has to be performed.
270     PRE_FREED = 1u << 13,
271 
272     // |POOLED|: When actually freeing this chunk, only uncommit and do not
273     // give up the reservation as we still reuse the chunk at some point.
274     POOLED = 1u << 14,
275 
276     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
277     //   has been aborted and needs special handling by the sweeper.
278     COMPACTION_WAS_ABORTED = 1u << 15,
279 
280     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
281     // on pages is sometimes aborted. The flag is used to avoid repeatedly
282     // triggering on the same page.
283     COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
284 
285     // |ANCHOR|: Flag is set if page is an anchor.
286     ANCHOR = 1u << 17,
287   };
288   typedef base::Flags<Flag, uintptr_t> Flags;
289 
290   static const int kPointersToHereAreInterestingMask =
291       POINTERS_TO_HERE_ARE_INTERESTING;
292 
293   static const int kPointersFromHereAreInterestingMask =
294       POINTERS_FROM_HERE_ARE_INTERESTING;
295 
296   static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
297 
298   static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
299 
300   static const int kSkipEvacuationSlotsRecordingMask =
301       kEvacuationCandidateMask | kIsInNewSpaceMask;
302 
303   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
304   //   not be performed on that page. Sweeper threads that are done with their
305   //   work will set this value and not touch the page anymore.
306   // |kSweepingPending|: This page is ready for parallel sweeping.
307   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
308   enum ConcurrentSweepingState {
309     kSweepingDone,
310     kSweepingPending,
311     kSweepingInProgress,
312   };
313 
314   static const intptr_t kAlignment =
315       (static_cast<uintptr_t>(1) << kPageSizeBits);
316 
317   static const intptr_t kAlignmentMask = kAlignment - 1;
318 
319   static const intptr_t kSizeOffset = 0;
320 
321   static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
322 
323   static const size_t kMinHeaderSize =
324       kSizeOffset + kSizetSize  // size_t size
325       + kIntptrSize             // Flags flags_
326       + kPointerSize            // Address area_start_
327       + kPointerSize            // Address area_end_
328       + 2 * kPointerSize        // base::VirtualMemory reservation_
329       + kPointerSize            // Address owner_
330       + kPointerSize            // Heap* heap_
331       + kIntSize                // int progress_bar_
332       + kIntSize                // int live_bytes_count_
333       + kPointerSize            // SlotSet* old_to_new_slots_
334       + kPointerSize            // SlotSet* old_to_old_slots_
335       + kPointerSize            // TypedSlotSet* typed_old_to_new_slots_
336       + kPointerSize            // TypedSlotSet* typed_old_to_old_slots_
337       + kPointerSize            // SkipList* skip_list_
338       + kPointerSize            // AtomicValue high_water_mark_
339       + kPointerSize            // base::Mutex* mutex_
340       + kPointerSize            // base::AtomicWord concurrent_sweeping_
341       + 2 * kSizetSize          // AtomicNumber free-list statistics
342       + kPointerSize            // AtomicValue next_chunk_
343       + kPointerSize            // AtomicValue prev_chunk_
344       // FreeListCategory categories_[kNumberOfCategories]
345       + FreeListCategory::kSize * kNumberOfCategories +
346       kPointerSize;  // LocalArrayBufferTracker* local_tracker_
347 
348   // We add some more space to the computed header size to account for missing
349   // alignment requirements in our computation.
350   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
351   static const size_t kHeaderSize = kMinHeaderSize;
352 
353   static const int kBodyOffset =
354       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
355 
356   // The start offset of the object area in a page. Aligned to both maps and
357   // code alignment to be suitable for both.  Also aligned to 32 words because
358   // the marking bitmap is arranged in 32 bit chunks.
359   static const int kObjectStartAlignment = 32 * kPointerSize;
360   static const int kObjectStartOffset =
361       kBodyOffset - 1 +
362       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
363 
364   // Page size in bytes.  This must be a multiple of the OS page size.
365   static const int kPageSize = 1 << kPageSizeBits;
366   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
367 
368   static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
369 
370   static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
371   static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
372 
373   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
374   static MemoryChunk* FromAddress(Address a) {
375     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
376   }
377 
378   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
379 
380   static inline void UpdateHighWaterMark(Address mark) {
381     if (mark == nullptr) return;
382     // Need to subtract one from the mark because when a chunk is full the
383     // top points to the next address after the chunk, which effectively belongs
384     // to another chunk. See the comment to Page::FromAllocationAreaAddress.
385     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
386     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
387     intptr_t old_mark = 0;
388     do {
389       old_mark = chunk->high_water_mark_.Value();
390     } while ((new_mark > old_mark) &&
391              !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
392   }
393 
394   static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
395
396   Address address() { return reinterpret_cast<Address>(this); }
397
398   base::Mutex* mutex() { return mutex_; }
399
400   bool Contains(Address addr) {
401     return addr >= area_start() && addr < area_end();
402   }
403 
404   // Checks whether |addr| can be a limit of addresses in this page. It's a
405   // limit if it's in the page, or if it's just after the last byte of the page.
406   bool ContainsLimit(Address addr) {
407     return addr >= area_start() && addr <= area_end();
408   }
409 
410   base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
411     return concurrent_sweeping_;
412   }
413 
414   bool SweepingDone() {
415     return concurrent_sweeping_state().Value() == kSweepingDone;
416   }
417 
418   // Manage live byte count, i.e., count of bytes in black objects.
419   inline void ResetLiveBytes();
420   inline void IncrementLiveBytes(int by);
421 
422   int LiveBytes() {
423     DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
424     return live_byte_count_;
425   }
426 
427   void SetLiveBytes(int live_bytes) {
428     DCHECK_GE(live_bytes, 0);
429     DCHECK_LE(static_cast<size_t>(live_bytes), size_);
430     live_byte_count_ = live_bytes;
431   }
432 
433   size_t size() const { return size_; }
434   void set_size(size_t size) { size_ = size; }
435
436   inline Heap* heap() const { return heap_; }
437
438   inline SkipList* skip_list() { return skip_list_; }
439
440   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
441
442   inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
443   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
444   inline TypedSlotSet* typed_old_to_new_slots() {
445     return typed_old_to_new_slots_.Value();
446   }
447   inline TypedSlotSet* typed_old_to_old_slots() {
448     return typed_old_to_old_slots_;
449   }
450   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
451 
452   V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
453   void ReleaseOldToNewSlots();
454   V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
455   void ReleaseOldToOldSlots();
456   void AllocateTypedOldToNewSlots();
457   void ReleaseTypedOldToNewSlots();
458   void AllocateTypedOldToOldSlots();
459   void ReleaseTypedOldToOldSlots();
460   void AllocateLocalTracker();
461   void ReleaseLocalTracker();
462 
463   Address area_start() { return area_start_; }
464   Address area_end() { return area_end_; }
465   size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
466 
467   bool CommitArea(size_t requested);
468 
469   // Approximate amount of physical memory committed for this chunk.
470   size_t CommittedPhysicalMemory();
471 
472   Address HighWaterMark() { return address() + high_water_mark_.Value(); }
473
474   int progress_bar() {
475     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
476     return progress_bar_;
477   }
478 
479   void set_progress_bar(int progress_bar) {
480     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
481     progress_bar_ = progress_bar;
482   }
483 
484   void ResetProgressBar() {
485     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
486       set_progress_bar(0);
487     }
488   }
489 
490   inline Bitmap* markbits() {
491     return Bitmap::FromAddress(address() + kHeaderSize);
492   }
493 
494   inline uint32_t AddressToMarkbitIndex(Address addr) {
495     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
496   }
497 
498   inline Address MarkbitIndexToAddress(uint32_t index) {
499     return this->address() + (index << kPointerSizeLog2);
500   }
501 
502   void ClearLiveness();
503 
504   void PrintMarkbits() { markbits()->Print(); }
505
506   void SetFlag(Flag flag) { flags_ |= flag; }
507   void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
508   bool IsFlagSet(Flag flag) { return flags_ & flag; }
509 
510   // Set or clear multiple flags at a time. The flags in the mask are set to
511   // the value in "flags", the rest retain the current value in |flags_|.
512   void SetFlags(uintptr_t flags, uintptr_t mask) {
513     flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
514   }
515 
516   // Return all current flags.
517   uintptr_t GetFlags() { return flags_; }
518
519   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
520
521   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
522
523   bool IsEvacuationCandidate() {
524     DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
525     return IsFlagSet(EVACUATION_CANDIDATE);
526   }
527 
528   bool CanAllocate() {
529     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
530   }
531 
532   bool ShouldSkipEvacuationSlotRecording() {
533     return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
534            !IsFlagSet(COMPACTION_WAS_ABORTED);
535   }
536 
537   Executability executable() {
538     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
539   }
540 
541   bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
542
543   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
544
545   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
546
547   MemoryChunk* next_chunk() { return next_chunk_.Value(); }
548
549   MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
550
551   void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
552
553   void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
554
555   Space* owner() const {
556     if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
557         kPageHeaderTag) {
558       return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
559                                       kPageHeaderTag);
560     } else {
561       return nullptr;
562     }
563   }
564 
565   void set_owner(Space* space) {
566     DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
567     owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
568     DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
569            kPageHeaderTag);
570   }
571 
572   bool HasPageHeader() { return owner() != nullptr; }
573 
574   void InsertAfter(MemoryChunk* other);
575   void Unlink();
576 
577  protected:
578   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
579                                  Address area_start, Address area_end,
580                                  Executability executable, Space* owner,
581                                  base::VirtualMemory* reservation);
582 
583   // Should be called when memory chunk is about to be freed.
584   void ReleaseAllocatedMemory();
585 
586   base::VirtualMemory* reserved_memory() { return &reservation_; }
587 
588   size_t size_;
589   Flags flags_;
590 
591   // Start and end of allocatable memory on this chunk.
592   Address area_start_;
593   Address area_end_;
594 
595   // If the chunk needs to remember its memory reservation, it is stored here.
596   base::VirtualMemory reservation_;
597 
598   // The identity of the owning space.  This is tagged as a failure pointer, but
599   // no failure can be in an object, so this can be distinguished from any entry
600   // in a fixed array.
601   Address owner_;
602 
603   Heap* heap_;
604 
605   // Used by the incremental marker to keep track of the scanning progress in
606   // large objects that have a progress bar and are scanned in increments.
607   int progress_bar_;
608 
609   // Count of bytes marked black on page.
610   int live_byte_count_;
611 
612   // A single slot set for small pages (of size kPageSize) or an array of slot
613   // sets for large pages. In the latter case the number of entries in the array
614   // is ceil(size() / kPageSize).
615   base::AtomicValue<SlotSet*> old_to_new_slots_;
616   SlotSet* old_to_old_slots_;
617   base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
618   TypedSlotSet* typed_old_to_old_slots_;
619 
620   SkipList* skip_list_;
621 
622   // Assuming the initial allocation on a page is sequential,
623   // count highest number of bytes ever allocated on the page.
624   base::AtomicValue<intptr_t> high_water_mark_;
625 
626   base::Mutex* mutex_;
627 
628   base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
629 
630   // PagedSpace free-list statistics.
631   base::AtomicNumber<intptr_t> available_in_free_list_;
632   base::AtomicNumber<intptr_t> wasted_memory_;
633 
634   // next_chunk_ holds a pointer of type MemoryChunk
635   base::AtomicValue<MemoryChunk*> next_chunk_;
636   // prev_chunk_ holds a pointer of type MemoryChunk
637   base::AtomicValue<MemoryChunk*> prev_chunk_;
638 
639   FreeListCategory categories_[kNumberOfCategories];
640 
641   LocalArrayBufferTracker* local_tracker_;
642 
643  private:
644   void InitializeReservedMemory() { reservation_.Reset(); }
645 
646   friend class MemoryAllocator;
647   friend class MemoryChunkValidator;
648 };
649 
650 DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
651 
652 static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
653               "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
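
// Illustrative chunk lookup and flag queries (a sketch; |addr| stands for any
// address inside a heap-allocated chunk):
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
//   if (chunk->InNewSpace()) {
//     DCHECK(chunk->InToSpace() || chunk->InFromSpace());
//   }
//   if (!chunk->CanAllocate()) {
//     // Either an evacuation candidate or marked NEVER_ALLOCATE_ON_PAGE.
//   }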
654 
655 // -----------------------------------------------------------------------------
656 // A page is a memory chunk of size 1MB. Large object pages may be larger.
657 //
658 // The only way to get a page pointer is by calling factory methods:
659 //   Page* p = Page::FromAddress(addr); or
660 //   Page* p = Page::FromTopOrLimit(top);
661 class Page : public MemoryChunk {
662  public:
663   static const intptr_t kCopyAllFlags = ~0;
664 
665   // Page flags copied from from-space to to-space when flipping semispaces.
666   static const intptr_t kCopyOnFlipFlagsMask =
667       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
668       static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
669 
670   static inline Page* ConvertNewToOld(Page* old_page);
671 
672   // Returns the page containing a given address. The address ranges
673   // from [page_addr .. page_addr + kPageSize[. This only works if the object
674   // is in fact in a page.
675   static Page* FromAddress(Address addr) {
676     return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
677   }
678 
679   // Returns the page containing the address provided. The address can
680   // potentially point right after the page. To also be safe for tagged values
681   // we subtract a hole word. The valid address ranges from
682   // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
683   static Page* FromAllocationAreaAddress(Address address) {
684     return Page::FromAddress(address - kPointerSize);
685   }
686 
687   // Checks if address1 and address2 are on the same new space page.
688   static bool OnSamePage(Address address1, Address address2) {
689     return Page::FromAddress(address1) == Page::FromAddress(address2);
690   }
691 
692   // Checks whether an address is page aligned.
693   static bool IsAlignedToPageSize(Address addr) {
694     return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
695   }
696 
697   static bool IsAtObjectStart(Address addr) {
698     return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
699            kObjectStartOffset;
700   }
701 
702   inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
703 
704   // Create a Page object that is only used as anchor for the doubly-linked
705   // list of real pages.
706   explicit Page(Space* owner) { InitializeAsAnchor(owner); }
707 
708   inline void MarkNeverAllocateForTesting();
709   inline void MarkEvacuationCandidate();
710   inline void ClearEvacuationCandidate();
711 
712   Page* next_page() { return static_cast<Page*>(next_chunk()); }
713   Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
714   void set_next_page(Page* page) { set_next_chunk(page); }
715   void set_prev_page(Page* page) { set_prev_chunk(page); }
716
717   template <typename Callback>
718   inline void ForAllFreeListCategories(Callback callback) {
719     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
720       callback(&categories_[i]);
721     }
722   }
723 
724   // Returns the offset of a given address to this page.
725   inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
726
727   // Returns the address for a given offset into this page.
728   Address OffsetToAddress(size_t offset) {
729     DCHECK_PAGE_OFFSET(offset);
730     return address() + offset;
731   }
732 
733   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
734   // progress. In particular, when we know that right before this call a
735   // sweeper thread was sweeping this page.
736   void WaitUntilSweepingCompleted() {
737     mutex_->Lock();
738     mutex_->Unlock();
739     DCHECK(SweepingDone());
740   }
741 
742   void ResetFreeListStatistics();
743 
744   size_t AvailableInFreeList();
745 
746   size_t LiveBytesFromFreeList() {
747     DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
748     return area_size() - wasted_memory() - available_in_free_list();
749   }
750 
751   FreeListCategory* free_list_category(FreeListCategoryType type) {
752     return &categories_[type];
753   }
754 
755   bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
756
757   size_t wasted_memory() { return wasted_memory_.Value(); }
758   void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
759   size_t available_in_free_list() { return available_in_free_list_.Value(); }
760   void add_available_in_free_list(size_t available) {
761     DCHECK_LE(available, area_size());
762     available_in_free_list_.Increment(available);
763   }
764   void remove_available_in_free_list(size_t available) {
765     DCHECK_LE(available, area_size());
766     DCHECK_GE(available_in_free_list(), available);
767     available_in_free_list_.Decrement(available);
768   }
769 
770   size_t ShrinkToHighWaterMark();
771 
772 #ifdef DEBUG
773   void Print();
774 #endif  // DEBUG
775 
776  private:
777   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
778 
779   template <InitializationMode mode = kFreeMemory>
780   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
781                                  Executability executable, PagedSpace* owner);
782   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
783                                  Executability executable, SemiSpace* owner);
784 
785   inline void InitializeFreeListCategories();
786 
787   void InitializeAsAnchor(Space* owner);
788 
789   friend class MemoryAllocator;
790 };
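
// Summing per-page free-list bytes is an illustrative use of the category
// accessors above (a sketch; |page| stands for any non-anchor page owned by a
// paged space):
//
//   size_t free_bytes = 0;
//   page->ForAllFreeListCategories([&free_bytes](FreeListCategory* category) {
//     free_bytes += category->available();
//   });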
791 
792 class LargePage : public MemoryChunk {
793  public:
794   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
795
796   inline LargePage* next_page() {
797     return static_cast<LargePage*>(next_chunk());
798   }
799 
800   inline void set_next_page(LargePage* page) { set_next_chunk(page); }
801
802   // Uncommit memory that is not in use anymore by the object. If the object
803   // cannot be shrunk, 0 is returned.
804   Address GetAddressToShrink();
805 
806   void ClearOutOfLiveRangeSlots(Address free_start);
807 
808   // A limit to guarantee that we do not overflow typed slot offset in
809   // the old to old remembered set.
810   // Note that this limit is higher than what assembler already imposes on
811   // x64 and ia32 architectures.
812   static const int kMaxCodePageSize = 512 * MB;
813 
814  private:
815   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
816                                       Executability executable, Space* owner);
817 
818   friend class MemoryAllocator;
819 };
820 
821 
822 // ----------------------------------------------------------------------------
823 // Space is the abstract superclass for all allocation spaces.
824 class Space : public Malloced {
825  public:
826   Space(Heap* heap, AllocationSpace id, Executability executable)
827       : allocation_observers_(new List<AllocationObserver*>()),
828         allocation_observers_paused_(false),
829         heap_(heap),
830         id_(id),
831         executable_(executable),
832         committed_(0),
833         max_committed_(0) {}
834 
835   virtual ~Space() {}
836
837   Heap* heap() const { return heap_; }
838
839   // Does the space need executable memory?
840   Executability executable() { return executable_; }
841
842   // Identity used in error reporting.
843   AllocationSpace identity() { return id_; }
844
845   virtual void AddAllocationObserver(AllocationObserver* observer) {
846     allocation_observers_->Add(observer);
847   }
848 
849   virtual void RemoveAllocationObserver(AllocationObserver* observer) {
850     bool removed = allocation_observers_->RemoveElement(observer);
851     USE(removed);
852     DCHECK(removed);
853   }
854 
855   virtual void PauseAllocationObservers() {
856     allocation_observers_paused_ = true;
857   }
858 
859   virtual void ResumeAllocationObservers() {
860     allocation_observers_paused_ = false;
861   }
862 
863   void AllocationStep(Address soon_object, int size);
864 
865   // Returns the total amount of committed memory for this space, i.e.,
866   // allocatable memory and page headers.
867   virtual size_t CommittedMemory() { return committed_; }
868
869   virtual size_t MaximumCommittedMemory() { return max_committed_; }
870 
871   // Returns allocated size.
872   virtual size_t Size() = 0;
873 
874   // Returns size of objects. Can differ from the allocated size
875   // (e.g. see LargeObjectSpace).
876   virtual size_t SizeOfObjects() { return Size(); }
877 
878   // Approximate amount of physical memory committed for this space.
879   virtual size_t CommittedPhysicalMemory() = 0;
880 
881   // Return the available bytes without growing.
882   virtual size_t Available() = 0;
883 
884   virtual int RoundSizeDownToObjectAlignment(int size) {
885     if (id_ == CODE_SPACE) {
886       return RoundDown(size, kCodeAlignment);
887     } else {
888       return RoundDown(size, kPointerSize);
889     }
890   }
891 
892   virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
893 
894   void AccountCommitted(size_t bytes) {
895     DCHECK_GE(committed_ + bytes, committed_);
896     committed_ += bytes;
897     if (committed_ > max_committed_) {
898       max_committed_ = committed_;
899     }
900   }
901 
902   void AccountUncommitted(size_t bytes) {
903     DCHECK_GE(committed_, committed_ - bytes);
904     committed_ -= bytes;
905   }
906 
907 #ifdef DEBUG
908   virtual void Print() = 0;
909 #endif
910 
911  protected:
912   std::unique_ptr<List<AllocationObserver*>> allocation_observers_;
913   bool allocation_observers_paused_;
914 
915  private:
916   Heap* heap_;
917   AllocationSpace id_;
918   Executability executable_;
919 
920   // Keeps track of committed memory in a space.
921   size_t committed_;
922   size_t max_committed_;
923 
924   DISALLOW_COPY_AND_ASSIGN(Space);
925 };
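
// Committed-memory bookkeeping sketch for a concrete space that maps one more
// page (illustrative; the calls would sit next to the actual commit/uncommit):
//
//   AccountCommitted(Page::kPageSize);
//   DCHECK_LE(CommittedMemory(), MaximumCommittedMemory());
//   ...
//   AccountUncommitted(Page::kPageSize);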
926 
927 
928 class MemoryChunkValidator {
929   // Computed offsets should match the compiler generated ones.
930   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
931 
932   // Validate our estimates on the header size.
933   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
934   STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
935   STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
936 };
937 
938 
939 // ----------------------------------------------------------------------------
940 // All heap objects containing executable code (code objects) must be allocated
941 // from a 2 GB range of memory, so that they can call each other using 32-bit
942 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
943 // displacements cover the entire 4GB virtual address space.  On 64-bit
944 // platforms, we support this using the CodeRange object, which reserves and
945 // manages a range of virtual memory.
946 class CodeRange {
947  public:
948   explicit CodeRange(Isolate* isolate);
949   ~CodeRange() { TearDown(); }
950 
951   // Reserves a range of virtual memory, but does not commit any of it.
952   // Can only be called once, at heap initialization time.
953   // Returns false on failure.
954   bool SetUp(size_t requested_size);
955 
956   bool valid() { return code_range_ != NULL; }
957   Address start() {
958     DCHECK(valid());
959     return static_cast<Address>(code_range_->address());
960   }
961   size_t size() {
962     DCHECK(valid());
963     return code_range_->size();
964   }
965   bool contains(Address address) {
966     if (!valid()) return false;
967     Address start = static_cast<Address>(code_range_->address());
968     return start <= address && address < start + code_range_->size();
969   }
970 
971   // Allocates a chunk of memory from the large-object portion of
972   // the code range.  On platforms with no separate code range, should
973   // not be called.
974   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
975                                             const size_t commit_size,
976                                             size_t* allocated);
977   bool CommitRawMemory(Address start, size_t length);
978   bool UncommitRawMemory(Address start, size_t length);
979   void FreeRawMemory(Address buf, size_t length);
980 
981  private:
982   class FreeBlock {
983    public:
984     FreeBlock() : start(0), size(0) {}
985     FreeBlock(Address start_arg, size_t size_arg)
986         : start(start_arg), size(size_arg) {
987       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
988       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
989     }
990     FreeBlock(void* start_arg, size_t size_arg)
991         : start(static_cast<Address>(start_arg)), size(size_arg) {
992       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
993       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
994     }
995 
996     Address start;
997     size_t size;
998   };
999 
1000   // Frees the range of virtual memory, and frees the data structures used to
1001   // manage it.
1002   void TearDown();
1003 
1004   // Finds a block on the allocation list that contains at least the
1005   // requested amount of memory.  If none is found, sorts and merges
1006   // the existing free memory blocks, and searches again.
1007   // If none can be found, returns false.
1008   bool GetNextAllocationBlock(size_t requested);
1009   // Compares the start addresses of two free blocks.
1010   static int CompareFreeBlockAddress(const FreeBlock* left,
1011                                      const FreeBlock* right);
1012   bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1013   void ReleaseBlock(const FreeBlock* block);
1014 
1015   Isolate* isolate_;
1016 
1017   // The reserved range of virtual memory that all code objects are put in.
1018   base::VirtualMemory* code_range_;
1019 
1020   // The global mutex guards free_list_ and allocation_list_ as GC threads may
1021   // access both lists concurrently with the main thread.
1022   base::Mutex code_range_mutex_;
1023 
1024   // Freed blocks of memory are added to the free list.  When the allocation
1025   // list is exhausted, the free list is sorted and merged to make the new
1026   // allocation list.
1027   List<FreeBlock> free_list_;
1028 
1029   // Memory is allocated from the free blocks on the allocation list.
1030   // The block at current_allocation_block_index_ is the current block.
1031   List<FreeBlock> allocation_list_;
1032   int current_allocation_block_index_;
1033 
1034   DISALLOW_COPY_AND_ASSIGN(CodeRange);
1035 };
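
// Illustrative round-trip against a set-up CodeRange (a sketch; error handling
// is elided and |requested| / |committed| are stand-in sizes):
//
//   size_t allocated = 0;
//   Address base =
//       code_range->AllocateRawMemory(requested, committed, &allocated);
//   if (base != NULL) {
//     ...
//     code_range->FreeRawMemory(base, allocated);
//   }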
1036 
1037 
1038 class SkipList {
1039  public:
1040   SkipList() { Clear(); }
1041
1042   void Clear() {
1043     for (int idx = 0; idx < kSize; idx++) {
1044       starts_[idx] = reinterpret_cast<Address>(-1);
1045     }
1046   }
1047 
1048   Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1049
1050   void AddObject(Address addr, int size) {
1051     int start_region = RegionNumber(addr);
1052     int end_region = RegionNumber(addr + size - kPointerSize);
1053     for (int idx = start_region; idx <= end_region; idx++) {
1054       if (starts_[idx] > addr) {
1055         starts_[idx] = addr;
1056       } else {
1057         // In the first region, there may already be an object closer to the
1058         // start of the region. Do not change the start in that case. If this
1059         // is not the first region, you probably added overlapping objects.
1060         DCHECK_EQ(start_region, idx);
1061       }
1062     }
1063   }
1064 
1065   static inline int RegionNumber(Address addr) {
1066     return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1067   }
1068 
1069   static void Update(Address addr, int size) {
1070     Page* page = Page::FromAddress(addr);
1071     SkipList* list = page->skip_list();
1072     if (list == NULL) {
1073       list = new SkipList();
1074       page->set_skip_list(list);
1075     }
1076 
1077     list->AddObject(addr, size);
1078   }
1079 
1080  private:
1081   static const int kRegionSizeLog2 = 13;
1082   static const int kRegionSize = 1 << kRegionSizeLog2;
1083   static const int kSize = Page::kPageSize / kRegionSize;
1084 
1085   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1086 
1087   Address starts_[kSize];
1088 };
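
// Maintaining and consulting the skip list (illustrative sketch; |addr| and
// |size| are the start address and size in bytes of a newly written object):
//
//   SkipList::Update(addr, size);  // records the object start for its regions
//   Address start = Page::FromAddress(addr)->skip_list()->StartFor(addr);
//   DCHECK_LE(start, addr);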
1089 
1090 
1091 // ----------------------------------------------------------------------------
1092 // A space acquires chunks of memory from the operating system. The memory
1093 // allocator allocates and deallocates pages for the paged heap spaces and large
1094 // pages for large object space.
1095 class MemoryAllocator {
1096  public:
1097   // Unmapper takes care of concurrently unmapping and uncommitting memory
1098   // chunks.
1099   class Unmapper {
1100    public:
1101     class UnmapFreeMemoryTask;
1102 
1103     explicit Unmapper(MemoryAllocator* allocator)
1104         : allocator_(allocator),
1105           pending_unmapping_tasks_semaphore_(0),
1106           concurrent_unmapping_tasks_active_(0) {}
1107 
1108     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1109       if ((chunk->size() == Page::kPageSize) &&
1110           (chunk->executable() != EXECUTABLE)) {
1111         AddMemoryChunkSafe<kRegular>(chunk);
1112       } else {
1113         AddMemoryChunkSafe<kNonRegular>(chunk);
1114       }
1115     }
1116 
1117     MemoryChunk* TryGetPooledMemoryChunkSafe() {
1118       // Procedure:
1119       // (1) Try to get a chunk that was declared as pooled and already has
1120       // been uncommitted.
1121       // (2) Try to steal any memory chunk of kPageSize that would've been
1122       // unmapped.
1123       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1124       if (chunk == nullptr) {
1125         chunk = GetMemoryChunkSafe<kRegular>();
1126         if (chunk != nullptr) {
1127           // For stolen chunks we need to manually free any allocated memory.
1128           chunk->ReleaseAllocatedMemory();
1129         }
1130       }
1131       return chunk;
1132     }
1133 
1134     void FreeQueuedChunks();
1135     bool WaitUntilCompleted();
1136     void TearDown();
1137 
1138    private:
1139     enum ChunkQueueType {
1140       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
1141                     // can thus be used for stealing.
1142       kNonRegular,  // Large chunks and executable chunks.
1143       kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
1144       kNumberOfChunkQueues,
1145     };
1146 
1147     template <ChunkQueueType type>
1148     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1149       base::LockGuard<base::Mutex> guard(&mutex_);
1150       if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
1151         chunks_[type].push_back(chunk);
1152       } else {
1153         DCHECK_EQ(type, kRegular);
1154         delayed_regular_chunks_.push_back(chunk);
1155       }
1156     }
1157 
1158     template <ChunkQueueType type>
1159     MemoryChunk* GetMemoryChunkSafe() {
1160       base::LockGuard<base::Mutex> guard(&mutex_);
1161       if (chunks_[type].empty()) return nullptr;
1162       MemoryChunk* chunk = chunks_[type].front();
1163       chunks_[type].pop_front();
1164       return chunk;
1165     }
1166 
1167     void ReconsiderDelayedChunks();
1168     void PerformFreeMemoryOnQueuedChunks();
1169 
1170     base::Mutex mutex_;
1171     MemoryAllocator* allocator_;
1172     std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1173     // Delayed chunks cannot be processed in the current unmapping cycle because
1174     // of dependencies such as an active sweeper.
1175     // See MemoryAllocator::CanFreeMemoryChunk.
1176     std::list<MemoryChunk*> delayed_regular_chunks_;
1177     base::Semaphore pending_unmapping_tasks_semaphore_;
1178     intptr_t concurrent_unmapping_tasks_active_;
1179 
1180     friend class MemoryAllocator;
1181   };
1182 
1183   enum AllocationMode {
1184     kRegular,
1185     kPooled,
1186   };
1187 
1188   enum FreeMode {
1189     kFull,
1190     kPreFreeAndQueue,
1191     kPooledAndQueue,
1192   };
1193 
1194   static size_t CodePageGuardStartOffset();
1195 
1196   static size_t CodePageGuardSize();
1197 
1198   static size_t CodePageAreaStartOffset();
1199 
1200   static size_t CodePageAreaEndOffset();
1201 
1202   static size_t CodePageAreaSize() {
1203     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1204   }
1205 
1206   static size_t PageAreaSize(AllocationSpace space) {
1207     DCHECK_NE(LO_SPACE, space);
1208     return (space == CODE_SPACE) ? CodePageAreaSize()
1209                                  : Page::kAllocatableMemory;
1210   }
1211 
1212   static intptr_t GetCommitPageSize();
1213 
1214   explicit MemoryAllocator(Isolate* isolate);
1215 
1216   // Initializes its internal bookkeeping structures.
1217   // Max capacity of the total space and executable memory limit.
1218   bool SetUp(size_t max_capacity, size_t capacity_executable,
1219              size_t code_range_size);
1220 
1221   void TearDown();
1222 
1223   // Allocates a Page from the allocator. AllocationMode is used to indicate
1224   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1225   // should be tried first.
1226   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1227             typename SpaceType>
1228   Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1229 
1230   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1231                                Executability executable);
1232 
1233   template <MemoryAllocator::FreeMode mode = kFull>
1234   void Free(MemoryChunk* chunk);
1235 
1236   bool CanFreeMemoryChunk(MemoryChunk* chunk);
1237 
1238   // Returns allocated spaces in bytes.
1239   size_t Size() { return size_.Value(); }
1240
1241   // Returns allocated executable spaces in bytes.
1242   size_t SizeExecutable() { return size_executable_.Value(); }
1243
1244   // Returns the maximum available bytes of heaps.
1245   size_t Available() {
1246     const size_t size = Size();
1247     return capacity_ < size ? 0 : capacity_ - size;
1248   }
1249 
1250   // Returns the maximum available executable bytes of heaps.
1251   size_t AvailableExecutable() {
1252     const size_t executable_size = SizeExecutable();
1253     if (capacity_executable_ < executable_size) return 0;
1254     return capacity_executable_ - executable_size;
1255   }
1256 
1257   // Returns maximum available bytes that the old space can have.
1258   size_t MaxAvailable() {
1259     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1260   }
1261 
1262   // Returns an indication of whether a pointer is in a space that has
1263   // been allocated by this MemoryAllocator.
1264   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1265     return address < lowest_ever_allocated_.Value() ||
1266            address >= highest_ever_allocated_.Value();
1267   }
1268 
1269   // Returns a MemoryChunk in which the memory region from commit_area_size to
1270   // reserve_area_size of the chunk area is reserved but not committed; it
1271   // could be committed later by calling MemoryChunk::CommitArea.
1272   MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1273                              Executability executable, Space* space);
1274 
1275   void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
1276 
1277   Address ReserveAlignedMemory(size_t requested, size_t alignment,
1278                                base::VirtualMemory* controller);
1279   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1280                                 size_t alignment, Executability executable,
1281                                 base::VirtualMemory* controller);
1282 
1283   bool CommitMemory(Address addr, size_t size, Executability executable);
1284 
1285   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1286   void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
1287   void FreeMemory(Address addr, size_t size, Executability executable);
1288 
1289   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1290   // the address is not NULL, the size is greater than zero, and that the
1291   // block is contained in the initial chunk.  Returns true if it succeeded
1292   // and false otherwise.
1293   bool CommitBlock(Address start, size_t size, Executability executable);
1294 
1295   // Uncommit a contiguous block of memory [start..(start+size)[.
1296   // start is not NULL, the size is greater than zero, and the
1297   // block is contained in the initial chunk.  Returns true if it succeeded
1298   // and false otherwise.
1299   bool UncommitBlock(Address start, size_t size);
1300 
1301   // Zaps a contiguous block of memory [start..(start+size)[ thus
1302   // filling it up with a recognizable non-NULL bit pattern.
1303   void ZapBlock(Address start, size_t size);
1304 
1305   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1306                                               Address start, size_t commit_size,
1307                                               size_t reserved_size);
1308 
1309   CodeRange* code_range() { return code_range_; }
1310   Unmapper* unmapper() { return &unmapper_; }
1311 
1312 #ifdef DEBUG
1313   // Reports statistic info of the space.
1314   void ReportStatistics();
1315 #endif
1316 
1317  private:
1318   // PreFree logically frees the object, i.e., it takes care of the size
1319   // bookkeeping and calls the allocation callback.
1320   void PreFreeMemory(MemoryChunk* chunk);
1321 
1322   // FreeMemory can be called concurrently when PreFree was executed before.
1323   void PerformFreeMemory(MemoryChunk* chunk);
1324 
1325   // See AllocatePage for public interface. Note that currently we only support
1326   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1327   template <typename SpaceType>
1328   MemoryChunk* AllocatePagePooled(SpaceType* owner);
1329 
1330   // Initializes pages in a chunk. Returns the first page address.
1331   // This function and GetChunkId() are provided for the mark-compact
1332   // collector to rebuild page headers in the from space, which is
1333   // used as a marking stack and its page headers are destroyed.
1334   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1335                                PagedSpace* owner);
1336 
1337   void UpdateAllocatedSpaceLimits(void* low, void* high) {
1338     // The use of atomic primitives does not guarantee correctness (wrt.
1339     // desired semantics) by default. The loop here ensures that we update the
1340     // values only if they did not change in between.
1341     void* ptr = nullptr;
1342     do {
1343       ptr = lowest_ever_allocated_.Value();
1344     } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1345     do {
1346       ptr = highest_ever_allocated_.Value();
1347     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
1348   }
1349 
1350   Isolate* isolate_;
1351   CodeRange* code_range_;
1352 
1353   // Maximum space size in bytes.
1354   size_t capacity_;
1355   // Maximum subset of capacity_ that can be executable
1356   size_t capacity_executable_;
1357 
1358   // Allocated space size in bytes.
1359   base::AtomicNumber<size_t> size_;
1360   // Allocated executable space size in bytes.
1361   base::AtomicNumber<size_t> size_executable_;
1362 
1363   // We keep the lowest and highest addresses allocated as a quick way
1364   // of determining that pointers are outside the heap. The estimate is
1365   // conservative, i.e. not all addresses in 'allocated' space are allocated
1366   // to our heap. The range is [lowest, highest[, inclusive on the low end
1367   // and exclusive on the high end.
1368   base::AtomicValue<void*> lowest_ever_allocated_;
1369   base::AtomicValue<void*> highest_ever_allocated_;
1370 
1371   base::VirtualMemory last_chunk_;
1372   Unmapper unmapper_;
1373 
1374   friend class TestCodeRangeScope;
1375 
1376   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1377 };
1378 
1379 
1380 // -----------------------------------------------------------------------------
1381 // Interface for heap object iterator to be implemented by all object space
1382 // object iterators.
1383 //
1384 // NOTE: The space-specific object iterators also implement their own next()
1385 //       method, which is used to avoid virtual function calls when iterating
1386 //       over a specific space.
1387 
1388 class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1389  public:
1390   virtual ~ObjectIterator() {}
1391   virtual HeapObject* Next() = 0;
1392 };
1393 
1394 template <class PAGE_TYPE>
1395 class PageIteratorImpl
1396     : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1397  public:
1398   explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1399   PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1400   PAGE_TYPE* operator*() { return p_; }
1401   bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1402     return rhs.p_ == p_;
1403   }
1404   bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1405     return rhs.p_ != p_;
1406   }
1407   inline PageIteratorImpl<PAGE_TYPE>& operator++();
1408   inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1409 
1410  private:
1411   PAGE_TYPE* p_;
1412 };
1413 
1414 typedef PageIteratorImpl<Page> PageIterator;
1415 typedef PageIteratorImpl<LargePage> LargePageIterator;
1416 
1417 class PageRange {
1418  public:
1419   typedef PageIterator iterator;
1420   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1421   explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1422   iterator begin() { return iterator(begin_); }
1423   iterator end() { return iterator(end_); }
1424 
1425  private:
1426   Page* begin_;
1427   Page* end_;
1428 };
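// Usage sketch (illustrative only): PageRange satisfies the requirements of a
// C++11 range-based for loop; |page| below stands for any live Page*.
//
//   for (Page* p : PageRange(page)) {
//     // visit the single page |page|
//   }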
1429 
1430 // -----------------------------------------------------------------------------
1431 // Heap object iterator in new/old/map spaces.
1432 //
1433 // A HeapObjectIterator iterates objects from the bottom of the given space
1434 // to its top or from the bottom of the given page to its top.
1435 //
1436 // If objects are allocated in the page during iteration the iterator may
1437 // or may not iterate over those objects.  The caller must create a new
1438 // iterator in order to be sure to visit these new objects.
1439 class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1440  public:
1441   // Creates a new object iterator in a given space.
1442   explicit HeapObjectIterator(PagedSpace* space);
1443   explicit HeapObjectIterator(Page* page);
1444 
1445   // Advance to the next object, skipping free spaces and other fillers and
1446   // skipping the special garbage section of which there is one per space.
1447   // Returns nullptr when the iteration has ended.
1448   inline HeapObject* Next() override;
1449 
1450  private:
1451   // Fast (inlined) path of next().
1452   inline HeapObject* FromCurrentPage();
1453 
1454   // Slow path of next(), goes into the next page.  Returns false if the
1455   // iteration has ended.
1456   bool AdvanceToNextPage();
1457 
1458   Address cur_addr_;  // Current iteration point.
1459   Address cur_end_;   // End iteration point.
1460   PagedSpace* space_;
1461   PageRange page_range_;
1462   PageRange::iterator current_page_;
1463 };
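// Usage sketch (illustrative only; |space| stands for any live PagedSpace*):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // visit |obj|
//   }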
1464 
1465 
1466 // -----------------------------------------------------------------------------
1467 // A space has a circular list of pages. The next page can be accessed via
1468 // Page::next_page() call.
1469 
1470 // An abstraction of allocation and relocation pointers in a page-structured
1471 // space.
1472 class AllocationInfo {
1473  public:
1474   AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
1475   AllocationInfo(Address top, Address limit)
1476       : original_top_(top), top_(top), limit_(limit) {}
1477 
1478   void Reset(Address top, Address limit) {
1479     original_top_ = top;
1480     set_top(top);
1481     set_limit(limit);
1482   }
1483 
1484   Address original_top() {
1485     SLOW_DCHECK(top_ == NULL ||
1486                 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1487     return original_top_;
1488   }
1489 
1490   INLINE(void set_top(Address top)) {
1491     SLOW_DCHECK(top == NULL ||
1492                 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
1493     top_ = top;
1494   }
1495 
1496   INLINE(Address top()) const {
1497     SLOW_DCHECK(top_ == NULL ||
1498                 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1499     return top_;
1500   }
1501 
1502   Address* top_address() { return &top_; }
1503 
1504   INLINE(void set_limit(Address limit)) {
1505     limit_ = limit;
1506   }
1507 
1508   INLINE(Address limit()) const {
1509     return limit_;
1510   }
1511 
1512   Address* limit_address() { return &limit_; }
1513 
1514 #ifdef DEBUG
1515   bool VerifyPagedAllocation() {
1516     return (Page::FromAllocationAreaAddress(top_) ==
1517             Page::FromAllocationAreaAddress(limit_)) &&
1518            (top_ <= limit_);
1519   }
1520 #endif
1521 
1522  private:
1523   // The original top address when the allocation info was initialized.
1524   Address original_top_;
1525   // Current allocation top.
1526   Address top_;
1527   // Current allocation limit.
1528   Address limit_;
1529 };
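// A minimal bump-pointer sketch of how top/limit are used (illustrative only;
// |area_start|, |area_end| and |size_in_bytes| are placeholders, and the real
// fast paths live in the space classes below and in spaces-inl.h):
//
//   AllocationInfo info(area_start, area_end);
//   Address new_top = info.top() + size_in_bytes;
//   if (new_top > info.limit()) {
//     // fall back to the slow path (free list allocation or a new page)
//   } else {
//     HeapObject* object = HeapObject::FromAddress(info.top());
//     info.set_top(new_top);
//   }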
1530 
1531 
1532 // An abstraction of the accounting statistics of a page-structured space.
1533 //
1534 // The stats are only set by functions that ensure they stay balanced. These
1535 // functions increase or decrease one of the non-capacity stats in conjunction
1536 // with capacity, or else they always balance increases and decreases to the
1537 // non-capacity stats.
1538 class AllocationStats BASE_EMBEDDED {
1539  public:
1540   AllocationStats() { Clear(); }
1541 
1542   // Zero out all the allocation statistics (i.e., no capacity).
1543   void Clear() {
1544     capacity_ = 0;
1545     max_capacity_ = 0;
1546     size_ = 0;
1547   }
1548 
1549   void ClearSize() { size_ = capacity_; }
1550 
1551   // Accessors for the allocation statistics.
1552   size_t Capacity() { return capacity_; }
1553   size_t MaxCapacity() { return max_capacity_; }
1554   size_t Size() { return size_; }
1555 
1556   // Grow the space by adding available bytes.  They are initially marked as
1557   // being in use (part of the size), but will normally be immediately freed,
1558   // putting them on the free list and removing them from size_.
1559   void ExpandSpace(size_t bytes) {
1560     DCHECK_GE(size_ + bytes, size_);
1561     DCHECK_GE(capacity_ + bytes, capacity_);
1562     capacity_ += bytes;
1563     size_ += bytes;
1564     if (capacity_ > max_capacity_) {
1565       max_capacity_ = capacity_;
1566     }
1567   }
1568 
1569   // Shrink the space by removing available bytes.  Since shrinking is done
1570   // during sweeping, bytes have been marked as being in use (part of the size)
1571   // and are hereby freed.
1572   void ShrinkSpace(size_t bytes) {
1573     DCHECK_GE(capacity_, bytes);
1574     DCHECK_GE(size_, bytes);
1575     capacity_ -= bytes;
1576     size_ -= bytes;
1577   }
1578 
1579   void AllocateBytes(size_t bytes) {
1580     DCHECK_GE(size_ + bytes, size_);
1581     size_ += bytes;
1582   }
1583 
1584   void DeallocateBytes(size_t bytes) {
1585     DCHECK_GE(size_, bytes);
1586     size_ -= bytes;
1587   }
1588 
1589   void DecreaseCapacity(size_t bytes) {
1590     DCHECK_GE(capacity_, bytes);
1591     DCHECK_GE(capacity_ - bytes, size_);
1592     capacity_ -= bytes;
1593   }
1594 
1595   void IncreaseCapacity(size_t bytes) {
1596     DCHECK_GE(capacity_ + bytes, capacity_);
1597     capacity_ += bytes;
1598   }
1599 
1600   // Merge |other| into |this|.
1601   void Merge(const AllocationStats& other) {
1602     DCHECK_GE(capacity_ + other.capacity_, capacity_);
1603     DCHECK_GE(size_ + other.size_, size_);
1604     capacity_ += other.capacity_;
1605     size_ += other.size_;
1606     if (other.max_capacity_ > max_capacity_) {
1607       max_capacity_ = other.max_capacity_;
1608     }
1609   }
1610 
1611  private:
1612   // |capacity_|: The number of object-area bytes (i.e., not including page
1613   // bookkeeping structures) currently in the space.
1614   size_t capacity_;
1615 
1616   // |max_capacity_|: The maximum capacity ever observed.
1617   size_t max_capacity_;
1618 
1619   // |size_|: The number of allocated bytes.
1620   size_t size_;
1621 };
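// Accounting sketch (illustrative only; |area_size| is a placeholder): adding
// a fresh page keeps the stats balanced by first counting the whole usable
// area as being in use and then releasing it again, as described above.
//
//   AllocationStats stats;
//   stats.ExpandSpace(area_size);      // capacity_ += area_size, size_ += area_size
//   stats.DeallocateBytes(area_size);  // size_ -= area_size; the area is free now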
1622 
1623 // A free list maintaining free blocks of memory. The free list is organized in
1624 // a way to encourage objects allocated around the same time to be near each
1625 // other. The normal way to allocate is intended to be by bumping a 'top'
1626 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1627 // find a new space to allocate from. This is done with the free list, which is
1628 // divided up into rough categories to cut down on waste. Having finer
1629 // categories would scatter allocation more.
1630 
1631 // The free list is organized in categories as follows:
1632 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1633 //   allocation, when categories >= small do not have entries anymore.
1634 // 11-31 words (tiny): The tiny blocks are only used for allocation, when
1635 //   categories >= small do not have entries anymore.
1636 // 32-255 words (small): Used for allocating free space between 1-31 words in
1637 //   size.
1638 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1639 //   in size.
1640 // 2048-16383 words (large): Used for allocating free space between 256-2047
1641 //   words in size.
1642 // At least 16384 words (huge): This list is for objects of 2048 words or
1643 //   larger. Empty pages are also added to this list.
1644 class FreeList {
1645  public:
1646   // This method returns how much memory can be allocated after freeing
1647   // maximum_freed memory.
1648   static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1649     if (maximum_freed <= kTiniestListMax) {
1650       // Since we are not iterating over all list entries, we cannot guarantee
1651       // that we can find the maximum freed block in that free list.
1652       return 0;
1653     } else if (maximum_freed <= kTinyListMax) {
1654       return kTinyAllocationMax;
1655     } else if (maximum_freed <= kSmallListMax) {
1656       return kSmallAllocationMax;
1657     } else if (maximum_freed <= kMediumListMax) {
1658       return kMediumAllocationMax;
1659     } else if (maximum_freed <= kLargeListMax) {
1660       return kLargeAllocationMax;
1661     }
1662     return maximum_freed;
1663   }
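  // Worked example: GuaranteedAllocatable(100 * kPointerSize) returns
  // kSmallAllocationMax (= kTinyListMax, i.e. 31 words), because a 100-word
  // block is kept on the small list and that list only serves requests of up
  // to 31 words, so nothing larger can be guaranteed.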
1664 
1665   explicit FreeList(PagedSpace* owner);
1666 
1667   // Adds a node on the free list. The block of size {size_in_bytes} starting
1668   // at {start} is placed on the free list. The return value is the number of
1669   // bytes that were not added to the free list because the freed memory block
1670   // was too small. Bookkeeping information will be written to the block, i.e.,
1671   // its contents will be destroyed. The start address should be word aligned,
1672   // and the size should be a non-zero multiple of the word size.
1673   size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1674 
1675   // Allocate a block of size {size_in_bytes} from the free list. The block is
1676   // uninitialized. A failure is returned if no block is available. The size
1677   // should be a non-zero multiple of the word size.
1678   MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
1679 
1680   // Clear the free list.
1681   void Reset();
1682 
1683   void ResetStats() {
1684     wasted_bytes_.SetValue(0);
1685     ForAllFreeListCategories(
1686         [](FreeListCategory* category) { category->ResetStats(); });
1687   }
1688 
1689   // Return the number of bytes available on the free list.
1690   size_t Available() {
1691     size_t available = 0;
1692     ForAllFreeListCategories([&available](FreeListCategory* category) {
1693       available += category->available();
1694     });
1695     return available;
1696   }
1697 
1698   bool IsEmpty() {
1699     bool empty = true;
1700     ForAllFreeListCategories([&empty](FreeListCategory* category) {
1701       if (!category->is_empty()) empty = false;
1702     });
1703     return empty;
1704   }
1705 
1706   // Used after booting the VM.
1707   void RepairLists(Heap* heap);
1708 
1709   size_t EvictFreeListItems(Page* page);
1710   bool ContainsPageFreeListItems(Page* page);
1711 
1712   PagedSpace* owner() { return owner_; }
1713   size_t wasted_bytes() { return wasted_bytes_.Value(); }
1714 
1715   template <typename Callback>
1716   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1717     FreeListCategory* current = categories_[type];
1718     while (current != nullptr) {
1719       FreeListCategory* next = current->next();
1720       callback(current);
1721       current = next;
1722     }
1723   }
1724 
1725   template <typename Callback>
1726   void ForAllFreeListCategories(Callback callback) {
1727     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1728       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1729     }
1730   }
1731 
1732   bool AddCategory(FreeListCategory* category);
1733   void RemoveCategory(FreeListCategory* category);
1734   void PrintCategories(FreeListCategoryType type);
1735 
1736 #ifdef DEBUG
1737   size_t SumFreeLists();
1738   bool IsVeryLong();
1739 #endif
1740 
1741  private:
1742   class FreeListCategoryIterator {
1743    public:
1744     FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1745         : current_(free_list->categories_[type]) {}
1746 
1747     bool HasNext() { return current_ != nullptr; }
1748 
1749     FreeListCategory* Next() {
1750       DCHECK(HasNext());
1751       FreeListCategory* tmp = current_;
1752       current_ = current_->next();
1753       return tmp;
1754     }
1755 
1756    private:
1757     FreeListCategory* current_;
1758   };
1759 
1760   // The size range of blocks, in bytes.
1761   static const size_t kMinBlockSize = 3 * kPointerSize;
1762   static const size_t kMaxBlockSize = Page::kAllocatableMemory;
1763 
1764   static const size_t kTiniestListMax = 0xa * kPointerSize;
1765   static const size_t kTinyListMax = 0x1f * kPointerSize;
1766   static const size_t kSmallListMax = 0xff * kPointerSize;
1767   static const size_t kMediumListMax = 0x7ff * kPointerSize;
1768   static const size_t kLargeListMax = 0x3fff * kPointerSize;
1769   static const size_t kTinyAllocationMax = kTiniestListMax;
1770   static const size_t kSmallAllocationMax = kTinyListMax;
1771   static const size_t kMediumAllocationMax = kSmallListMax;
1772   static const size_t kLargeAllocationMax = kMediumListMax;
1773 
1774   FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
1775 
1776   // Walks all available categories for a given |type| and tries to retrieve
1777   // a node. Returns nullptr if the category is empty.
1778   FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
1779 
1780   // Tries to retrieve a node from the first category in a given |type|.
1781   // Returns nullptr if the category is empty.
1782   FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
1783                            size_t minimum_size);
1784 
1785   // Searches a given |type| for a node of at least |minimum_size|.
1786   FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1787                                  size_t minimum_size);
1788 
1789   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1790     if (size_in_bytes <= kTiniestListMax) {
1791       return kTiniest;
1792     } else if (size_in_bytes <= kTinyListMax) {
1793       return kTiny;
1794     } else if (size_in_bytes <= kSmallListMax) {
1795       return kSmall;
1796     } else if (size_in_bytes <= kMediumListMax) {
1797       return kMedium;
1798     } else if (size_in_bytes <= kLargeListMax) {
1799       return kLarge;
1800     }
1801     return kHuge;
1802   }
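  // Worked example: a block of 100 words (100 * kPointerSize bytes) maps to
  // kSmall, since it is larger than kTinyListMax (31 words) but does not
  // exceed kSmallListMax (255 words).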
1803 
1804   // The tiny categories are not used for fast allocation.
1805   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1806       size_t size_in_bytes) {
1807     if (size_in_bytes <= kSmallAllocationMax) {
1808       return kSmall;
1809     } else if (size_in_bytes <= kMediumAllocationMax) {
1810       return kMedium;
1811     } else if (size_in_bytes <= kLargeAllocationMax) {
1812       return kLarge;
1813     }
1814     return kHuge;
1815   }
1816 
1817   FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1818 
1819   PagedSpace* owner_;
1820   base::AtomicNumber<size_t> wasted_bytes_;
1821   FreeListCategory* categories_[kNumberOfCategories];
1822 
1823   friend class FreeListCategory;
1824 
1825   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1826 };
1827 
1828 // LocalAllocationBuffer represents a linear allocation area that is created
1829 // from a given {AllocationResult} and can be used to allocate memory without
1830 // synchronization.
1831 //
1832 // The buffer is properly closed upon destruction and reassignment.
1833 // Example:
1834 //   {
1835 //     AllocationResult result = ...;
1836 //     LocalAllocationBuffer a(heap, result, size);
1837 //     LocalAllocationBuffer b = a;
1838 //     CHECK(!a.IsValid());
1839 //     CHECK(b.IsValid());
1840 //     // {a} is invalid now and cannot be used for further allocations.
1841 //   }
1842 //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
1843 //   // filler object for the remaining area.
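//
// Inside such a scope, allocating from the valid buffer {b} would look roughly
// like the following hedged sketch (|object_size| is a placeholder, and the
// AllocationResult::To() helper and kWordAligned alignment are assumed from
// heap.h and globals.h):
//
//   HeapObject* obj = nullptr;
//   AllocationResult res = b.AllocateRawAligned(object_size, kWordAligned);
//   if (res.To(&obj)) {
//     // initialize |obj|; no synchronization is needed for LAB allocations.
//   }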
1844 class LocalAllocationBuffer {
1845  public:
1846   // Indicates that a buffer cannot be used for allocations anymore. Can result
1847   // from either reassigning a buffer, or trying to construct it from an
1848   // invalid {AllocationResult}.
1849   static inline LocalAllocationBuffer InvalidBuffer();
1850 
1851   // Creates a new LAB from a given {AllocationResult}. Results in
1852   // InvalidBuffer if the result indicates a retry.
1853   static inline LocalAllocationBuffer FromResult(Heap* heap,
1854                                                  AllocationResult result,
1855                                                  intptr_t size);
1856 
1857   ~LocalAllocationBuffer() { Close(); }
1858 
1859   // Convert to C++11 move-semantics once allowed by the style guide.
1860   LocalAllocationBuffer(const LocalAllocationBuffer& other);
1861   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1862 
1863   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1864       int size_in_bytes, AllocationAlignment alignment);
1865 
1866   inline bool IsValid() { return allocation_info_.top() != nullptr; }
1867 
1868   // Try to merge LABs, which is only possible when they are adjacent in memory.
1869   // Returns true if the merge was successful, false otherwise.
1870   inline bool TryMerge(LocalAllocationBuffer* other);
1871 
1872  private:
1873   LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1874 
1875   void Close();
1876 
1877   Heap* heap_;
1878   AllocationInfo allocation_info_;
1879 };
1880 
1881 class NewSpacePageRange {
1882  public:
1883   typedef PageRange::iterator iterator;
1884   inline NewSpacePageRange(Address start, Address limit);
1885   iterator begin() { return range_.begin(); }
1886   iterator end() { return range_.end(); }
1887 
1888  private:
1889   PageRange range_;
1890 };
1891 
1892 class PagedSpace : public Space {
1893  public:
1894   typedef PageIterator iterator;
1895 
1896   static const intptr_t kCompactionMemoryWanted = 500 * KB;
1897 
1898   // Creates a space with an id.
1899   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1900 
1901   ~PagedSpace() override { TearDown(); }
1902 
1903   // Set up the space using the given address range of virtual memory (from
1904   // the memory allocator's initial chunk) if possible.  If the block of
1905   // addresses is not big enough to contain a single page-aligned page, a
1906   // fresh chunk will be allocated.
1907   bool SetUp();
1908 
1909   // Returns true if the space has been successfully set up and not
1910   // subsequently torn down.
1911   bool HasBeenSetUp();
1912 
1913   // Checks whether an object/address is in this space.
1914   inline bool Contains(Address a);
1915   inline bool Contains(Object* o);
1916   bool ContainsSlow(Address addr);
1917 
1918   // Given an address occupied by a live object, return that object if it is
1919   // in this space, or a Smi if it is not.  The implementation iterates over
1920   // objects in the page containing the address, the cost is linear in the
1921   // number of objects in the page.  It may be slow.
1922   Object* FindObject(Address addr);
1923 
1924   // During boot the free_space_map is created, and afterwards we may need
1925   // to write it into the free list nodes that were already created.
1926   void RepairFreeListsAfterDeserialization();
1927 
1928   // Prepares for a mark-compact GC.
1929   void PrepareForMarkCompact();
1930 
1931   // Current capacity without growing (Size() + Available()).
1932   size_t Capacity() { return accounting_stats_.Capacity(); }
1933 
1934   // Approximate amount of physical memory committed for this space.
1935   size_t CommittedPhysicalMemory() override;
1936 
1937   void ResetFreeListStatistics();
1938 
1939   // Sets the capacity, the available space and the wasted space to zero.
1940   // The stats are rebuilt during sweeping by adding each page to the
1941   // capacity and the size when it is encountered.  As free spaces are
1942   // discovered during the sweeping they are subtracted from the size and added
1943   // to the available and wasted totals.
1944   void ClearStats() {
1945     accounting_stats_.ClearSize();
1946     free_list_.ResetStats();
1947     ResetFreeListStatistics();
1948   }
1949 
1950   // Available bytes without growing.  These are the bytes on the free list.
1951   // The bytes in the linear allocation area are not included in this total
1952   // because updating the stats would slow down allocation.  New pages are
1953   // immediately added to the free list so they show up here.
1954   size_t Available() override { return free_list_.Available(); }
1955 
1956   // Allocated bytes in this space.  Garbage bytes that were not found due to
1957   // concurrent sweeping are counted as being allocated!  The bytes in the
1958   // current linear allocation area (between top and limit) are also counted
1959   // here.
1960   size_t Size() override { return accounting_stats_.Size(); }
1961 
1962   // Like Size(), but the bytes in lazily swept pages are estimated and the
1963   // bytes in the current linear allocation area are not included.
1964   size_t SizeOfObjects() override;
1965 
1966   // Wasted bytes in this space.  These are just the bytes that were thrown away
1967   // due to being too small to use for allocation.
1968   virtual size_t Waste() { return free_list_.wasted_bytes(); }
1969 
1970   // Returns the allocation pointer in this space.
1971   Address top() { return allocation_info_.top(); }
1972   Address limit() { return allocation_info_.limit(); }
1973 
1974   // The allocation top address.
1975   Address* allocation_top_address() { return allocation_info_.top_address(); }
1976 
1977   // The allocation limit address.
1978   Address* allocation_limit_address() {
1979     return allocation_info_.limit_address();
1980   }
1981 
1982   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
1983 
1984   // Allocate the requested number of bytes in the space if possible, return a
1985   // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
1986   // to be manually updated later.
1987   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
1988       int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
1989 
1990   MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
1991       int size_in_bytes);
1992 
1993   // Allocate the requested number of bytes in the space double aligned if
1994   // possible, return a failure object if not.
1995   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1996       int size_in_bytes, AllocationAlignment alignment);
1997 
1998   // Allocate the requested number of bytes in the space and consider allocation
1999   // alignment if needed.
2000   MUST_USE_RESULT inline AllocationResult AllocateRaw(
2001       int size_in_bytes, AllocationAlignment alignment);
2002 
2003   // Give a block of memory to the space's free list.  It might be added to
2004   // the free list or accounted as waste.  The return value is the number of
2005   // bytes that actually ended up on the free list.  UnaccountedFree() below
2006   // frees a block without updating the accounting stats.
2007   size_t Free(Address start, size_t size_in_bytes) {
2008     size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2009     accounting_stats_.DeallocateBytes(size_in_bytes);
2010     DCHECK_GE(size_in_bytes, wasted);
2011     return size_in_bytes - wasted;
2012   }
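  // Usage sketch (illustrative only): return the unused tail of the linear
  // allocation area to the free list before abandoning it.
  //
  //   size_t reusable =
  //       Free(top(), static_cast<size_t>(limit() - top()));
  //   // |reusable| excludes any tail too small to go on the free list.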
2013 
2014   size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2015     size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2016     DCHECK_GE(size_in_bytes, wasted);
2017     return size_in_bytes - wasted;
2018   }
2019 
2020   void ResetFreeList() { free_list_.Reset(); }
2021 
2022   // Set space allocation info.
2023   void SetTopAndLimit(Address top, Address limit) {
2024     DCHECK(top == limit ||
2025            Page::FromAddress(top) == Page::FromAddress(limit - 1));
2026     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2027     allocation_info_.Reset(top, limit);
2028   }
2029 
2030   void SetAllocationInfo(Address top, Address limit);
2031 
2032   // Empty space allocation info, returning unused area to free list.
2033   void EmptyAllocationInfo();
2034 
2035   void MarkAllocationInfoBlack();
2036 
2037   void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2038 
2039   void IncreaseCapacity(size_t bytes);
2040 
2041   // Releases an unused page and shrinks the space.
2042   void ReleasePage(Page* page);
2043 
2044   // The dummy page that anchors the linked list of pages.
2045   Page* anchor() { return &anchor_; }
2046 
2047 
2048 #ifdef VERIFY_HEAP
2049   // Verify integrity of this space.
2050   virtual void Verify(ObjectVisitor* visitor);
2051 
2052   // Overridden by subclasses to verify space-specific object
2053   // properties (e.g., only maps or free-list nodes are in map space).
2054   virtual void VerifyObject(HeapObject* obj) {}
2055 #endif
2056 
2057 #ifdef DEBUG
2058   // Print meta info and objects in this space.
2059   void Print() override;
2060 
2061   // Reports statistics for the space
2062   void ReportStatistics();
2063 
2064   // Report code object related statistics
2065   static void ReportCodeStatistics(Isolate* isolate);
2066   static void ResetCodeStatistics(Isolate* isolate);
2067 #endif
2068 
2069   Page* FirstPage() { return anchor_.next_page(); }
2070   Page* LastPage() { return anchor_.prev_page(); }
2071 
2072   bool CanExpand(size_t size);
2073 
2074   // Returns the number of total pages in this space.
2075   int CountTotalPages();
2076 
2077   // Return size of allocatable area on a page in this space.
2078   inline int AreaSize() { return static_cast<int>(area_size_); }
2079 
2080   virtual bool is_local() { return false; }
2081 
2082   // Merges {other} into the current space. Note that this modifies {other},
2083   // e.g., removes its bump pointer area and resets statistics.
2084   void MergeCompactionSpace(CompactionSpace* other);
2085 
2086   // Refills the free list from the corresponding free list filled by the
2087   // sweeper.
2088   virtual void RefillFreeList();
2089 
2090   FreeList* free_list() { return &free_list_; }
2091 
2092   base::Mutex* mutex() { return &space_mutex_; }
2093 
2094   inline void UnlinkFreeListCategories(Page* page);
2095   inline intptr_t RelinkFreeListCategories(Page* page);
2096 
2097   iterator begin() { return iterator(anchor_.next_page()); }
2098   iterator end() { return iterator(&anchor_); }
2099 
2100   // Shrink immortal immovable pages of the space to be exactly the size needed
2101   // using the high water mark.
2102   void ShrinkImmortalImmovablePages();
2103 
2104   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2105 
2106  protected:
2107   // PagedSpaces that should be included in snapshots have different, i.e.,
2108   // smaller, initial pages.
2109   virtual bool snapshotable() { return true; }
2110 
2111   bool HasPages() { return anchor_.next_page() != &anchor_; }
2112 
2113   // Cleans up the space, frees all pages in this space except those belonging
2114   // to the initial chunk, uncommits addresses in the initial chunk.
2115   void TearDown();
2116 
2117   // Expands the space by allocating a fixed number of pages. Returns false if
2118   // it cannot allocate the requested number of pages from the OS, or if the
2119   // hard heap size limit has been hit.
2120   bool Expand();
2121 
2122   // Generic fast case allocation function that tries linear allocation at the
2123   // address denoted by top in allocation_info_.
2124   inline HeapObject* AllocateLinearly(int size_in_bytes);
2125 
2126   // Generic fast case allocation function that tries aligned linear allocation
2127   // at the address denoted by top in allocation_info_. Writes the aligned
2128   // allocation size, which includes the filler size, to size_in_bytes.
2129   inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2130                                              AllocationAlignment alignment);
2131 
2132   // If sweeping is still in progress try to sweep unswept pages. If that is
2133   // not successful, wait for the sweeper threads and re-try free-list
2134   // allocation.
2135   MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2136       int size_in_bytes);
2137 
2138   // Slow path of AllocateRaw.  This function is space-dependent.
2139   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2140 
2141   size_t area_size_;
2142 
2143   // Accounting information for this space.
2144   AllocationStats accounting_stats_;
2145 
2146   // The dummy page that anchors the double linked list of pages.
2147   Page anchor_;
2148 
2149   // The space's free list.
2150   FreeList free_list_;
2151 
2152   // Normal allocation information.
2153   AllocationInfo allocation_info_;
2154 
2155   // Mutex guarding any concurrent access to the space.
2156   base::Mutex space_mutex_;
2157 
2158   friend class IncrementalMarking;
2159   friend class MarkCompactCollector;
2160 
2161   // Used in cctest.
2162   friend class HeapTester;
2163 };
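// Usage sketch (illustrative only): PagedSpace exposes begin()/end(), so its
// pages can be walked with a range-based for loop; |space| stands for any
// live PagedSpace*.
//
//   for (Page* page : *space) {
//     // visit |page|
//   }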
2164 
2165 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2166 
2167 // -----------------------------------------------------------------------------
2168 // SemiSpace in young generation
2169 //
2170 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2171 // The mark-compact collector  uses the memory of the first page in the from
2172 // space as a marking stack when tracing live objects.
2173 class SemiSpace : public Space {
2174  public:
2175   typedef PageIterator iterator;
2176 
2177   static void Swap(SemiSpace* from, SemiSpace* to);
2178 
2179   SemiSpace(Heap* heap, SemiSpaceId semispace)
2180       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2181         current_capacity_(0),
2182         maximum_capacity_(0),
2183         minimum_capacity_(0),
2184         age_mark_(nullptr),
2185         committed_(false),
2186         id_(semispace),
2187         anchor_(this),
2188         current_page_(nullptr),
2189         pages_used_(0) {}
2190 
2191   inline bool Contains(HeapObject* o);
2192   inline bool Contains(Object* o);
2193   inline bool ContainsSlow(Address a);
2194 
2195   void SetUp(size_t initial_capacity, size_t maximum_capacity);
2196   void TearDown();
2197   bool HasBeenSetUp() { return maximum_capacity_ != 0; }
2198 
2199   bool Commit();
2200   bool Uncommit();
2201   bool is_committed() { return committed_; }
2202 
2203   // Grow the semispace to the new capacity.  The new capacity requested must
2204   // be larger than the current capacity and less than the maximum capacity.
2205   bool GrowTo(size_t new_capacity);
2206 
2207   // Shrinks the semispace to the new capacity.  The new capacity requested
2208   // must be more than the amount of used memory in the semispace and less
2209   // than the current capacity.
2210   bool ShrinkTo(size_t new_capacity);
2211 
2212   bool EnsureCurrentCapacity();
2213 
2214   // Returns the start address of the first page of the space.
2215   Address space_start() {
2216     DCHECK_NE(anchor_.next_page(), anchor());
2217     return anchor_.next_page()->area_start();
2218   }
2219 
2220   Page* first_page() { return anchor_.next_page(); }
2221   Page* current_page() { return current_page_; }
2222   int pages_used() { return pages_used_; }
2223 
2224   // Returns one past the end address of the space.
2225   Address space_end() { return anchor_.prev_page()->area_end(); }
2226 
2227   // Returns the start address of the current page of the space.
2228   Address page_low() { return current_page_->area_start(); }
2229 
2230   // Returns one past the end address of the current page of the space.
2231   Address page_high() { return current_page_->area_end(); }
2232 
2233   bool AdvancePage() {
2234     Page* next_page = current_page_->next_page();
2235     // We cannot expand if we reached the maximum number of pages already. Note
2236     // that we need to account for the next page already for this check as we
2237     // could potentially fill the whole page after advancing.
2238     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2239     if (next_page == anchor() || reached_max_pages) {
2240       return false;
2241     }
2242     current_page_ = next_page;
2243     pages_used_++;
2244     return true;
2245   }
2246 
2247   // Resets the space to using the first page.
2248   void Reset();
2249 
2250   void RemovePage(Page* page);
2251   void PrependPage(Page* page);
2252 
2253   // Age mark accessors.
2254   Address age_mark() { return age_mark_; }
2255   void set_age_mark(Address mark);
2256 
2257   // Returns the current capacity of the semispace.
2258   size_t current_capacity() { return current_capacity_; }
2259 
2260   // Returns the maximum capacity of the semispace.
2261   size_t maximum_capacity() { return maximum_capacity_; }
2262 
2263   // Returns the initial capacity of the semispace.
2264   size_t minimum_capacity() { return minimum_capacity_; }
2265 
2266   SemiSpaceId id() { return id_; }
2267 
2268   // Approximate amount of physical memory committed for this space.
2269   size_t CommittedPhysicalMemory() override;
2270 
2271   // If we don't define these overrides here then SemiSpace will be abstract.
2272   // However, they should never be called:
2273 
2274   size_t Size() override {
2275     UNREACHABLE();
2276     return 0;
2277   }
2278 
2279   size_t SizeOfObjects() override { return Size(); }
2280 
2281   size_t Available() override {
2282     UNREACHABLE();
2283     return 0;
2284   }
2285 
2286   iterator begin() { return iterator(anchor_.next_page()); }
2287   iterator end() { return iterator(anchor()); }
2288 
2289   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2290 
2291 #ifdef DEBUG
2292   void Print() override;
2293   // Validate a range of addresses in a SemiSpace.
2294   // The "from" address must be on a page prior to the "to" address,
2295   // in the linked page order, or it must be earlier on the same page.
2296   static void AssertValidRange(Address from, Address to);
2297 #else
2298   // Do nothing.
2299   inline static void AssertValidRange(Address from, Address to) {}
2300 #endif
2301 
2302 #ifdef VERIFY_HEAP
2303   virtual void Verify();
2304 #endif
2305 
2306  private:
2307   void RewindPages(Page* start, int num_pages);
2308 
2309   inline Page* anchor() { return &anchor_; }
2310   inline int max_pages() {
2311     return static_cast<int>(current_capacity_ / Page::kPageSize);
2312   }
2313 
2314   // Copies the flags into the masked positions on all pages in the space.
2315   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2316 
2317   // The currently committed space capacity.
2318   size_t current_capacity_;
2319 
2320   // The maximum capacity that can be used by this space. A space cannot grow
2321   // beyond that size.
2322   size_t maximum_capacity_;
2323 
2324   // The minimum capacity for the space. A space cannot shrink below this size.
2325   size_t minimum_capacity_;
2326 
2327   // Used to govern object promotion during mark-compact collection.
2328   Address age_mark_;
2329 
2330   bool committed_;
2331   SemiSpaceId id_;
2332 
2333   Page anchor_;
2334   Page* current_page_;
2335   int pages_used_;
2336 
2337   friend class NewSpace;
2338   friend class SemiSpaceIterator;
2339 };
2340 
2341 
2342 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2343 // semispace of the heap's new space.  It iterates over the objects in the
2344 // semispace from a given start address (defaulting to the bottom of the
2345 // semispace) to the top of the semispace.  New objects allocated after the
2346 // iterator is created are not iterated.
2347 class SemiSpaceIterator : public ObjectIterator {
2348  public:
2349   // Create an iterator over the allocated objects in the given to-space.
2350   explicit SemiSpaceIterator(NewSpace* space);
2351 
2352   inline HeapObject* Next() override;
2353 
2354  private:
2355   void Initialize(Address start, Address end);
2356 
2357   // The current iteration point.
2358   Address current_;
2359   // The end of iteration.
2360   Address limit_;
2361 };
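// Usage sketch (illustrative only; as with HeapObjectIterator, Next() is
// expected to return nullptr once the iteration has ended):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // visit |obj|
//   }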
2362 
2363 // -----------------------------------------------------------------------------
2364 // The young generation space.
2365 //
2366 // The new space consists of a contiguous pair of semispaces.  It simply
2367 // forwards most functions to the appropriate semispace.
2368 
2369 class NewSpace : public Space {
2370  public:
2371   typedef PageIterator iterator;
2372 
2373   explicit NewSpace(Heap* heap)
2374       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2375         to_space_(heap, kToSpace),
2376         from_space_(heap, kFromSpace),
2377         reservation_(),
2378         top_on_previous_step_(0),
2379         allocated_histogram_(nullptr),
2380         promoted_histogram_(nullptr) {}
2381 
2382   inline bool Contains(HeapObject* o);
2383   inline bool ContainsSlow(Address a);
2384   inline bool Contains(Object* o);
2385 
2386   bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
2387 
2388   // Tears down the space.  Heap memory was not allocated by the space, so it
2389   // is not deallocated here.
2390   void TearDown();
2391 
2392   // True if the space has been set up but not torn down.
2393   bool HasBeenSetUp() {
2394     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2395   }
2396 
2397   // Flip the pair of spaces.
2398   void Flip();
2399 
2400   // Grow the capacity of the semispaces.  Assumes that they are not at
2401   // their maximum capacity.
2402   void Grow();
2403 
2404   // Shrink the capacity of the semispaces.
2405   void Shrink();
2406 
2407   // Return the allocated bytes in the active semispace.
2408   size_t Size() override {
2409     DCHECK_GE(top(), to_space_.page_low());
2410     return to_space_.pages_used() * Page::kAllocatableMemory +
2411            static_cast<size_t>(top() - to_space_.page_low());
2412   }
2413 
2414   size_t SizeOfObjects() override { return Size(); }
2415 
2416   // Return the allocatable capacity of a semispace.
2417   size_t Capacity() {
2418     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2419     return (to_space_.current_capacity() / Page::kPageSize) *
2420            Page::kAllocatableMemory;
2421   }
2422 
2423   // Return the current size of a semispace, allocatable and non-allocatable
2424   // memory.
2425   size_t TotalCapacity() {
2426     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2427     return to_space_.current_capacity();
2428   }
2429 
2430   // Committed memory for NewSpace is the committed memory of both semi-spaces
2431   // combined.
2432   size_t CommittedMemory() override {
2433     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2434   }
2435 
2436   size_t MaximumCommittedMemory() override {
2437     return from_space_.MaximumCommittedMemory() +
2438            to_space_.MaximumCommittedMemory();
2439   }
2440 
2441   // Approximate amount of physical memory committed for this space.
2442   size_t CommittedPhysicalMemory() override;
2443 
2444   // Return the available bytes without growing.
2445   size_t Available() override {
2446     DCHECK_GE(Capacity(), Size());
2447     return Capacity() - Size();
2448   }
2449 
2450   size_t AllocatedSinceLastGC() {
2451     bool seen_age_mark = false;
2452     Address age_mark = to_space_.age_mark();
2453     Page* current_page = to_space_.first_page();
2454     Page* age_mark_page = Page::FromAddress(age_mark);
2455     Page* last_page = Page::FromAddress(top() - kPointerSize);
2456     if (age_mark_page == last_page) {
2457       if (top() - age_mark >= 0) {
2458         return top() - age_mark;
2459       }
2460       // Top was reset at some point, invalidating this metric.
2461       return 0;
2462     }
2463     while (current_page != last_page) {
2464       if (current_page == age_mark_page) {
2465         seen_age_mark = true;
2466         break;
2467       }
2468       current_page = current_page->next_page();
2469     }
2470     if (!seen_age_mark) {
2471       // Top was reset at some point, invalidating this metric.
2472       return 0;
2473     }
2474     DCHECK_GE(age_mark_page->area_end(), age_mark);
2475     size_t allocated = age_mark_page->area_end() - age_mark;
2476     DCHECK_EQ(current_page, age_mark_page);
2477     current_page = age_mark_page->next_page();
2478     while (current_page != last_page) {
2479       allocated += Page::kAllocatableMemory;
2480       current_page = current_page->next_page();
2481     }
2482     DCHECK_GE(top(), current_page->area_start());
2483     allocated += top() - current_page->area_start();
2484     DCHECK_LE(allocated, Size());
2485     return allocated;
2486   }
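  // Worked example (illustrative only): if the age mark lies in page P1 and
  // top() lies in a later page P3, the result is
  //   (P1->area_end() - age_mark) + Page::kAllocatableMemory  // all of P2
  //     + (top() - P3->area_start()).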
2487 
2488   void MovePageFromSpaceToSpace(Page* page) {
2489     DCHECK(page->InFromSpace());
2490     from_space_.RemovePage(page);
2491     to_space_.PrependPage(page);
2492   }
2493 
2494   bool Rebalance();
2495 
2496   // Return the maximum capacity of a semispace.
2497   size_t MaximumCapacity() {
2498     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2499     return to_space_.maximum_capacity();
2500   }
2501 
2502   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2503 
2504   // Returns the initial capacity of a semispace.
2505   size_t InitialTotalCapacity() {
2506     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2507     return to_space_.minimum_capacity();
2508   }
2509 
2510   // Return the address of the allocation pointer in the active semispace.
2511   Address top() {
2512     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2513     return allocation_info_.top();
2514   }
2515 
2516   // Return the address of the allocation pointer limit in the active semispace.
2517   Address limit() {
2518     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2519     return allocation_info_.limit();
2520   }
2521 
2522   // Return the address of the first object in the active semispace.
2523   Address bottom() { return to_space_.space_start(); }
2524 
2525   // Get the age mark of the inactive semispace.
2526   Address age_mark() { return from_space_.age_mark(); }
2527   // Set the age mark in the active semispace.
2528   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2529 
2530   // The allocation top and limit address.
2531   Address* allocation_top_address() { return allocation_info_.top_address(); }
2532 
2533   // The allocation limit address.
2534   Address* allocation_limit_address() {
2535     return allocation_info_.limit_address();
2536   }
2537 
2538   MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2539       int size_in_bytes, AllocationAlignment alignment));
2540 
2541   MUST_USE_RESULT INLINE(
2542       AllocationResult AllocateRawUnaligned(int size_in_bytes));
2543 
2544   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2545       int size_in_bytes, AllocationAlignment alignment));
2546 
2547   MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2548       int size_in_bytes, AllocationAlignment alignment);
2549 
2550   // Reset the allocation pointer to the beginning of the active semispace.
2551   void ResetAllocationInfo();
2552 
2553   // When inline allocation stepping is active, either because of incremental
2554   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2555   // inline allocation every once in a while. This is done by setting
2556   // allocation_info_.limit to be lower than the actual limit and increasing
2557   // it in steps to guarantee that the observers are notified periodically.
2558   void UpdateInlineAllocationLimit(int size_in_bytes);
2559 
2560   void DisableInlineAllocationSteps() {
2561     top_on_previous_step_ = 0;
2562     UpdateInlineAllocationLimit(0);
2563   }
2564 
2565   // Allows observation of inline allocation. The observer->Step() method gets
2566   // called after every step_size bytes have been allocated (approximately).
2567   // This works by adjusting the allocation limit to a lower value and adjusting
2568   // it after each step.
2569   void AddAllocationObserver(AllocationObserver* observer) override;
2570 
2571   void RemoveAllocationObserver(AllocationObserver* observer) override;
2572 
2573   // Get the extent of the inactive semispace (for use as a marking stack,
2574   // or to zap it). Notice: space-addresses are not necessarily on the
2575   // same page, so FromSpaceStart() might be above FromSpaceEnd().
2576   Address FromSpacePageLow() { return from_space_.page_low(); }
2577   Address FromSpacePageHigh() { return from_space_.page_high(); }
2578   Address FromSpaceStart() { return from_space_.space_start(); }
2579   Address FromSpaceEnd() { return from_space_.space_end(); }
2580 
2581   // Get the extent of the active semispace's pages' memory.
2582   Address ToSpaceStart() { return to_space_.space_start(); }
2583   Address ToSpaceEnd() { return to_space_.space_end(); }
2584 
2585   inline bool ToSpaceContainsSlow(Address a);
2586   inline bool FromSpaceContainsSlow(Address a);
2587   inline bool ToSpaceContains(Object* o);
2588   inline bool FromSpaceContains(Object* o);
2589 
2590   // Try to switch the active semispace to a new, empty, page.
2591   // Returns false if this isn't possible or reasonable (i.e., there
2592   // are no pages, or the current page is already empty), or true
2593   // if successful.
2594   bool AddFreshPage();
2595   bool AddFreshPageSynchronized();
2596 
2597 #ifdef VERIFY_HEAP
2598   // Verify the active semispace.
2599   virtual void Verify();
2600 #endif
2601 
2602 #ifdef DEBUG
2603   // Print the active semispace.
2604   void Print() override { to_space_.Print(); }
2605 #endif
2606 
2607   // Iterates the active semispace to collect statistics.
2608   void CollectStatistics();
2609   // Reports previously collected statistics of the active semispace.
2610   void ReportStatistics();
2611   // Clears previously collected statistics.
2612   void ClearHistograms();
2613 
2614   // Record the allocation or promotion of a heap object.  Note that we don't
2615   // record every single allocation, but only those that happen in the
2616   // to space during a scavenge GC.
2617   void RecordAllocation(HeapObject* obj);
2618   void RecordPromotion(HeapObject* obj);
2619 
2620   // Return whether the operation succeeded.
2621   bool CommitFromSpaceIfNeeded() {
2622     if (from_space_.is_committed()) return true;
2623     return from_space_.Commit();
2624   }
2625 
2626   bool UncommitFromSpace() {
2627     if (!from_space_.is_committed()) return true;
2628     return from_space_.Uncommit();
2629   }
2630 
2631   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2632 
2633   SemiSpace* active_space() { return &to_space_; }
2634 
2635   void PauseAllocationObservers() override;
2636   void ResumeAllocationObservers() override;
2637 
2638   iterator begin() { return to_space_.begin(); }
2639   iterator end() { return to_space_.end(); }
2640 
2641   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2642 
2643  private:
2644   // Update allocation info to match the current to-space page.
2645   void UpdateAllocationInfo();
2646 
2647   base::Mutex mutex_;
2648 
2649   // The semispaces.
2650   SemiSpace to_space_;
2651   SemiSpace from_space_;
2652   base::VirtualMemory reservation_;
2653 
2654   // Allocation pointer and limit for normal allocation and allocation during
2655   // mark-compact collection.
2656   AllocationInfo allocation_info_;
2657 
2658   Address top_on_previous_step_;
2659 
2660   HistogramInfo* allocated_histogram_;
2661   HistogramInfo* promoted_histogram_;
2662 
2663   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2664 
2665   // If we are doing inline allocation in steps, this method performs the 'step'
2666   // operation. top is the memory address of the bump pointer at the last
2667   // inline allocation (i.e. it determines the number of bytes actually
2668   // allocated since the last step.) new_top is the address of the bump pointer
2669   // where the next byte is going to be allocated from. top and new_top may be
2670   // different when we cross a page boundary or reset the space.
2671   void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2672                             size_t size);
2673   intptr_t GetNextInlineAllocationStepSize();
2674   void StartNextInlineAllocationStep();
2675 
2676   friend class SemiSpaceIterator;
2677 };
2678 
2679 class PauseAllocationObserversScope {
2680  public:
2681   explicit PauseAllocationObserversScope(Heap* heap);
2682   ~PauseAllocationObserversScope();
2683 
2684  private:
2685   Heap* heap_;
2686   DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2687 };
2688 
2689 // -----------------------------------------------------------------------------
2690 // Compaction space that is used temporarily during compaction.
2691 
2692 class CompactionSpace : public PagedSpace {
2693  public:
2694   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2695       : PagedSpace(heap, id, executable) {}
2696 
2697   bool is_local() override { return true; }
2698 
2699  protected:
2700   // The space is temporary and not included in any snapshots.
2701   bool snapshotable() override { return false; }
2702 
2703   MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2704       int size_in_bytes) override;
2705 };
2706 
2707 
2708 // A collection of |CompactionSpace|s used by a single compaction task.
2709 class CompactionSpaceCollection : public Malloced {
2710  public:
2711   explicit CompactionSpaceCollection(Heap* heap)
2712       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2713         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2714 
2715   CompactionSpace* Get(AllocationSpace space) {
2716     switch (space) {
2717       case OLD_SPACE:
2718         return &old_space_;
2719       case CODE_SPACE:
2720         return &code_space_;
2721       default:
2722         UNREACHABLE();
2723     }
2724     UNREACHABLE();
2725     return nullptr;
2726   }
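  // Usage sketch (illustrative only; |heap| is a placeholder Heap*): each
  // compaction task owns one collection and picks the space that matches the
  // page it currently evacuates.
  //
  //   CompactionSpaceCollection spaces(heap);
  //   CompactionSpace* target = spaces.Get(OLD_SPACE);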
2727 
2728  private:
2729   CompactionSpace old_space_;
2730   CompactionSpace code_space_;
2731 };
2732 
2733 
2734 // -----------------------------------------------------------------------------
2735 // Old object space (includes the old space of objects and code space)
2736 
2737 class OldSpace : public PagedSpace {
2738  public:
2739   // Creates an old space object. The constructor does not allocate pages
2740   // from OS.
2741   OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2742       : PagedSpace(heap, id, executable) {}
2743 };
2744 
2745 
2746 // For contiguous spaces, top should be in the space (or at the end) and limit
2747 // should be the end of the space.
2748 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2749   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
2750               (info).top() <= (space).page_high() &&  \
2751               (info).limit() <= (space).page_high())
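// Usage sketch (illustrative only): inside NewSpace one would typically check
// the current allocation info against the active semispace, e.g.
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);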
2752 
2753 
2754 // -----------------------------------------------------------------------------
2755 // Old space for all map objects
2756 
2757 class MapSpace : public PagedSpace {
2758  public:
2759   // Creates a map space object.
2760   MapSpace(Heap* heap, AllocationSpace id)
2761       : PagedSpace(heap, id, NOT_EXECUTABLE) {}
2762 
2763   int RoundSizeDownToObjectAlignment(int size) override {
2764     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2765       return RoundDown(size, Map::kSize);
2766     } else {
2767       return (size / Map::kSize) * Map::kSize;
2768     }
2769   }
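
  // Worked example (sketch; the value of Map::kSize here is hypothetical):
  // with Map::kSize == 88, which is not a power of two, a request of 200
  // bytes rounds down to (200 / 88) * 88 == 176, the largest multiple of
  // Map::kSize that fits. If Map::kSize were a power of two, RoundDown would
  // produce the same result via masking.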

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif
};


// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). A large
// object always starts at offset Page::kObjectStartOffset within its page.
// Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  typedef LargePageIterator iterator;

  LargeObjectSpace(Heap* heap, AllocationSpace id);
  virtual ~LargeObjectSpace();

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static size_t ObjectSizeFor(size_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
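
  // Worked example (sketch; the numbers are purely illustrative): if the
  // fixed padding Page::kPageSize + Page::kObjectStartOffset came to
  // 512 KB + 32 bytes, a 1 MB chunk could hold an object of at most
  // 1 MB - 512 KB - 32 bytes, and ObjectSizeFor() of any chunk no larger
  // than the padding itself would return 0.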

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult
      AllocateRaw(int object_size, Executability executable);

  // Available bytes for objects in this space.
  inline size_t Available() override;

  size_t Size() override { return size_; }

  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address, returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page, Address free_start);

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  // Checks whether an address is in the object area of this space. Iterates
  // over all objects in the space. May be slow.
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  void AdjustLiveBytes(int by) { objects_size_ += by; }

  LargePage* first_page() { return first_page_; }

  // Collect code statistics.
  void CollectCodeStatistics();

  iterator begin() { return iterator(first_page_); }
  iterator end() { return iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  void Print() override;
  void ReportStatistics();
#endif

 private:
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  size_t size_;            // allocated bytes
  int page_count_;         // number of chunks
  size_t objects_size_;    // size of objects
  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them.
  base::HashMap chunk_map_;

  friend class LargeObjectIterator;
};
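
// Iteration sketch (illustrative; `lo_space` is a hypothetical
// LargeObjectSpace*): begin()/end() expose the large pages, so callers can
// walk them with a range-based for loop:
//
//   for (LargePage* page : *lo_space) {
//     // Each large page covers a single large object.
//   }
//
// For per-object iteration, GetObjectIterator() or the LargeObjectIterator
// below can be used instead.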


class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next() override;

 private:
  LargePage* current_;
};
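
// Usage sketch (illustrative; `lo_space` is a hypothetical LargeObjectSpace*):
// objects are visited by calling Next() until it returns NULL:
//
//   LargeObjectIterator it(lo_space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj ...
//   }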

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
 public:
  inline explicit MemoryChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
};
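
// Usage sketch (illustrative; `heap` is assumed to be a valid Heap*): chunks
// of the old, map, and code spaces as well as large object pages are visited
// by calling next() until it returns NULL:
//
//   MemoryChunkIterator it(heap);
//   while (MemoryChunk* chunk = it.next()) {
//     // ... visit chunk ...
//   }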

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_