// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/objects.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class CompactionSpaceCollection;
class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if a page belongs to old space or large object space it is
// essential to guarantee that the page does not contain any garbage pointers
// to new space: every pointer-aligned word which satisfies the
// Heap::InNewSpace() predicate must be a pointer to a live heap object in new
// space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
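//
// Illustrative sketch (not part of the API): linear allocation in the special
// garbage section amounts to bumping the space's top pointer, using the
// hypothetical names |top| and |limit| for the fields described above:
//
//   if (top + size_in_bytes <= limit) {
//     Address result = top;
//     top += size_in_bytes;
//     // result now points at uninitialized space for the new object.
//   }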

// Some assertion macros used in the debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space) \
  DCHECK((0 < size) && (size <= code_space->AreaSize()))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))

class AllocationInfo;
class CompactionSpace;
class FreeList;
class MemoryAllocator;
class MemoryChunk;
class PagedSpace;
class Space;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

 private:
  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    } else {
      return MarkBit(cell_, new_mask);
    }
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  CellType* cell_;
  CellType mask_;

  friend class Marking;
};


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  V8_INLINE static uint32_t IndexInCell(uint32_t index) {
    return index & kBitIndexMask;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1u << IndexInCell(index);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
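  // Example (using the constants above): for index 37, IndexInCell(37) == 5
  // and 37 >> kBitsPerCellLog2 == 1, so the mark bit is bit 5 of cell 1.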

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }

  // Clears all bits starting from {cell_base_index} up to and excluding
  // {index}. Note that {cell_base_index} is required to be cell aligned.
  void ClearRange(uint32_t cell_base_index, uint32_t index) {
    DCHECK_EQ(IndexInCell(cell_base_index), 0u);
    DCHECK_GE(index, cell_base_index);
    uint32_t start_cell_index = IndexToCell(cell_base_index);
    uint32_t end_cell_index = IndexToCell(index);
    DCHECK_GE(end_cell_index, start_cell_index);
    // Clear all cells till the cell containing the last index.
    for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
      cells()[i] = 0;
    }
    // Clear all bits in the last cell till the last bit before index.
    uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
    cells()[end_cell_index] &= clear_mask;
  }
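  // Example: ClearRange(32, 70) zeroes cell 1 completely and bits 0..5 of
  // cell 2, i.e. mark bits 32 through 69.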
};


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,
    NEVER_EVACUATE,  // May contain immortal immutables.
    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.

    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
    // otherwise marking bits are still intact.
    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // This flag is intended to be used for testing.
    NEVER_ALLOCATE_ON_PAGE,

    // The memory chunk is already logically freed, however the actual freeing
    // still has to be performed.
    PRE_FREED,

    // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
    //   has been aborted and needs special handling by the sweeper.
    COMPACTION_WAS_ABORTED,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
  // |kCompactingInProgress|:  Parallel compaction is currently in progress.
  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
  //   be finalized.
  // |kCompactingAborted|: Parallel compaction has been aborted, which should
  //   for now only happen in OOM scenarios.
  enum ParallelCompactingState {
    kCompactingDone,
    kCompactingInProgress,
    kCompactingFinalize,
    kCompactingAborted,
  };

  // |kSweepingDone|: The page state when sweeping is complete or sweeping must
  //   not be performed on that page.
  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
  //   not touch the page memory anymore.
  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
  // |kSweepingPending|: This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    kSweepingDone,
    kSweepingFinalize,
    kSweepingInProgress,
    kSweepingPending
  };

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code.  This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize  // size_t size
      + kIntptrSize               // intptr_t flags_
      + kPointerSize              // Address area_start_
      + kPointerSize              // Address area_end_
      + 2 * kPointerSize          // base::VirtualMemory reservation_
      + kPointerSize              // Address owner_
      + kPointerSize              // Heap* heap_
      + kIntSize;                 // int store_buffer_counter_

  static const size_t kSlotsBufferOffset =
      kLiveBytesOffset + kIntSize;  // int live_byte_count_

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
      + kPointerSize;                    // SkipList* skip_list_;

  static const size_t kMinHeaderSize =
      kWriteBarrierCounterOffset +
      kIntptrSize         // intptr_t write_barrier_counter_
      + kIntSize          // int progress_bar_
      + kPointerSize      // AtomicValue high_water_mark_
      + kPointerSize      // base::Mutex* mutex_
      + kPointerSize      // base::AtomicWord parallel_sweeping_
      + kPointerSize      // AtomicValue parallel_compaction_
      + 5 * kPointerSize  // AtomicNumber free-list statistics
      + kPointerSize      // AtomicValue next_chunk_
      + kPointerSize;     // AtomicValue prev_chunk_

  // We add some more space to the computed header size to account for missing
  // alignment requirements in our computation.
  // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
  static const size_t kHeaderSize = kMinHeaderSize + kIntSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.  Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
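  // Note: the expression above rounds kBodyOffset up to a multiple of
  // kObjectStartAlignment (32 words); e.g. a kBodyOffset of 1 yields
  // kObjectStartAlignment itself.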

  static const int kFlagsOffset = kPointerSize;

  static void IncrementLiveBytesFromMutator(HeapObject* object, int by);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                ~kAlignmentMask);
  }

  static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
    MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  static inline void UpdateHighWaterMark(Address mark) {
    if (mark == nullptr) return;
    // Need to subtract one from the mark because when a chunk is full the
    // top points to the next address after the chunk, which effectively
    // belongs to another chunk. See the comment to Page::FromAllocationTop.
    MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
    intptr_t old_mark = 0;
    do {
      old_mark = chunk->high_water_mark_.Value();
    } while ((new_mark > old_mark) &&
             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
  }

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() { return next_chunk_.Value(); }

  MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }

  void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }

  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }

  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
    return parallel_sweeping_;
  }

  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
    return parallel_compaction_;
  }

  bool TryLock() { return mutex_->TryLock(); }

  base::Mutex* mutex() { return mutex_; }

  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
  // progress. In particular, when we know that right before this call a
  // sweeper thread was sweeping this page.
  void WaitUntilSweepingCompleted() {
    mutex_->Lock();
    mutex_->Unlock();
    DCHECK(SweepingCompleted());
  }

  bool SweepingCompleted() {
    return parallel_sweeping_state().Value() <= kSweepingFinalize;
  }
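  // Note: given the ordering of ParallelSweepingState above, "completed" means
  // the state is either kSweepingDone or kSweepingFinalize.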

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
             live_byte_count_);
    }
    live_byte_count_ = 0;
  }

  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_GE(live_byte_count_, 0);
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }

  int LiveBytes() {
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
    return live_byte_count_;
  }

  void SetLiveBytes(int live_bytes) {
    DCHECK_GE(live_bytes, 0);
    DCHECK_LE(static_cast<unsigned>(live_bytes), size_);
    live_byte_count_ = live_bytes;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  size_t size() const { return size_; }

  void set_size(size_t size) { size_ = size; }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() const { return heap_; }

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool CanAllocate() {
    return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  void MarkEvacuationCandidate() {
    DCHECK(!IsFlagSet(NEVER_EVACUATE));
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }

  // Should be called when memory chunk is about to be freed.
  void ReleaseAllocatedMemory();

 protected:
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space.  This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  AtomicValue<intptr_t> high_water_mark_;

  base::Mutex* mutex_;
  AtomicValue<ParallelSweepingState> parallel_sweeping_;
  AtomicValue<ParallelCompactingState> parallel_compaction_;

  // PagedSpace free-list statistics.
  AtomicNumber<intptr_t> available_in_small_free_list_;
  AtomicNumber<intptr_t> available_in_medium_free_list_;
  AtomicNumber<intptr_t> available_in_large_free_list_;
  AtomicNumber<intptr_t> available_in_huge_free_list_;
  AtomicNumber<intptr_t> non_available_small_blocks_;

  // next_chunk_ holds a pointer of type MemoryChunk
  AtomicValue<MemoryChunk*> next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  AtomicValue<MemoryChunk*> prev_chunk_;

 private:
  void InitializeReservedMemory() { reservation_.Reset(); }

  friend class MemoryAllocator;
  friend class MemoryChunkValidator;
};


enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page.  See also
  // MemoryChunk::FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
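  // Example: if top equals a page's area_end() (i.e. the page is completely
  // full), FromAllocationTop(top) still returns that page rather than the
  // following one, thanks to the kPointerSize subtraction above.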

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page() {
    DCHECK(next_chunk()->owner() == owner());
    return static_cast<Page*>(next_chunk());
  }
  inline Page* prev_page() {
    DCHECK(prev_chunk()->owner() == owner());
    return static_cast<Page*>(prev_chunk());
  }
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes.  This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that gets allocated into regular pages. Objects larger
  // than that size are allocated in large object space and are never moved in
  // memory. This also applies to new space allocation, since objects are never
  // migrated from new space to large object space. Takes double alignment into
  // account.
  // TODO(hpayer): This limit should be way smaller but we currently have
  // short living objects >256K.
  static const int kMaxRegularHeapObjectSize = 600 * KB;

  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }

  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return static_cast<int>(
        area_size() - non_available_small_blocks() -
        available_in_small_free_list() - available_in_medium_free_list() -
        available_in_large_free_list() - available_in_huge_free_list());
  }

#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
  type name() { return name##_.Value(); }                \
  void set_##name(type name) { name##_.SetValue(name); } \
  void add_##name(type name) { name##_.Increment(name); }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS
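  // For reference, each FRAGMENTATION_STATS_ACCESSORS line above expands to
  // three accessors, e.g. for available_in_small_free_list:
  //   intptr_t available_in_small_free_list();
  //   void set_available_in_small_free_list(intptr_t);
  //   void add_available_in_small_free_list(intptr_t);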

  void add_available_in_free_list(FreeListCategoryType type, intptr_t bytes) {
    switch (type) {
      case kSmall:
        add_available_in_small_free_list(bytes);
        break;
      case kMedium:
        add_available_in_medium_free_list(bytes);
        break;
      case kLarge:
        add_available_in_large_free_list(bytes);
        break;
      case kHuge:
        add_available_in_huge_free_list(bytes);
        break;
      default:
        UNREACHABLE();
    }
  }

  intptr_t available_in_free_list(FreeListCategoryType type) {
    switch (type) {
      case kSmall:
        return available_in_small_free_list();
      case kMedium:
        return available_in_medium_free_list();
      case kLarge:
        return available_in_large_free_list();
      case kHuge:
        return available_in_huge_free_list();
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap),
        id_(id),
        executable_(executable),
        committed_(0),
        max_committed_(0) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Return the total amount of committed memory for this space, i.e.,
  // allocatable memory and page headers.
  virtual intptr_t CommittedMemory() { return committed_; }

  virtual intptr_t MaximumCommittedMemory() { return max_committed_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  void AccountCommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ += bytes;
    if (committed_ > max_committed_) {
      max_committed_ = committed_;
    }
  }

  void AccountUncommitted(intptr_t bytes) {
    DCHECK_GE(bytes, 0);
    committed_ -= bytes;
    DCHECK_GE(committed_, 0);
  }

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;

  // Keeps track of committed memory in a space.
  intptr_t committed_;
  intptr_t max_committed_;
};


class MemoryChunkValidator {
  // Computed offsets should match the compiler generated ones.
  STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
  STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                offsetof(MemoryChunk, live_byte_count_));
  STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
                offsetof(MemoryChunk, slots_buffer_));
  STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                offsetof(MemoryChunk, write_barrier_counter_));

  // Validate our estimates on the header size.
  STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
  STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements.  This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space.  On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range.  On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // The global mutex guards free_list_ and allocation_list_ as GC threads may
  // access both lists concurrently with the main thread.
  base::Mutex code_range_mutex_;

  // Freed blocks of memory are added to the free list.  When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;

  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory.  If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;
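  // Given the 1MB page size (see Page::kPageSize) and kRegionSizeLog2 == 13,
  // each region covers 8KB, so a page is divided into 128 regions.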
1196 
1197   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1198 
1199   Address starts_[kSize];
1200 };
1201 
1202 
1203 // ----------------------------------------------------------------------------
1204 // A space acquires chunks of memory from the operating system. The memory
1205 // allocator allocated and deallocates pages for the paged heap spaces and large
1206 // pages for large object space.
1207 //
1208 // Each space has to manage it's own pages.
1209 //
1210 class MemoryAllocator {
1211  public:
1212   explicit MemoryAllocator(Isolate* isolate);
1213 
1214   // Initializes its internal bookkeeping structures.
1215   // Max capacity of the total space and executable memory limit.
1216   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
1217 
1218   void TearDown();
1219 
1220   Page* AllocatePage(intptr_t size, PagedSpace* owner,
1221                      Executability executable);
1222 
1223   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
1224                                Executability executable);
1225 
1226   // PreFree logically frees the object, i.e., it takes care of the size
1227   // bookkeeping and calls the allocation callback.
1228   void PreFreeMemory(MemoryChunk* chunk);
1229 
1230   // FreeMemory can be called concurrently when PreFree was executed before.
1231   void PerformFreeMemory(MemoryChunk* chunk);
1232 
1233   // Free is a wrapper method, which calls PreFree and PerformFreeMemory
1234   // together.
1235   void Free(MemoryChunk* chunk);
1236 
1237   // Returns allocated spaces in bytes.
Size()1238   intptr_t Size() { return size_.Value(); }
1239 
1240   // Returns allocated executable spaces in bytes.
SizeExecutable()1241   intptr_t SizeExecutable() { return size_executable_.Value(); }
1242 
1243   // Returns the maximum available bytes of heaps.
Available()1244   intptr_t Available() {
1245     intptr_t size = Size();
1246     return capacity_ < size ? 0 : capacity_ - size;
1247   }
1248 
1249   // Returns the maximum available executable bytes of heaps.
AvailableExecutable()1250   intptr_t AvailableExecutable() {
1251     intptr_t executable_size = SizeExecutable();
1252     if (capacity_executable_ < executable_size) return 0;
1253     return capacity_executable_ - executable_size;
1254   }
1255 
1256   // Returns maximum available bytes that the old space can have.
MaxAvailable()1257   intptr_t MaxAvailable() {
1258     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1259   }
1260 
1261   // Returns an indication of whether a pointer is in a space that has
1262   // been allocated by this MemoryAllocator.
IsOutsideAllocatedSpace(const void * address)1263   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1264     return address < lowest_ever_allocated_.Value() ||
1265            address >= highest_ever_allocated_.Value();
1266   }
1267 
1268 #ifdef DEBUG
1269   // Reports statistic info of the space.
1270   void ReportStatistics();
1271 #endif
1272 
1273   // Returns a MemoryChunk in which the memory region from commit_area_size to
1274   // reserve_area_size of the chunk area is reserved but not committed, it
1275   // could be committed later by calling MemoryChunk::CommitArea.
1276   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1277                              intptr_t commit_area_size,
1278                              Executability executable, Space* space);
1279 
1280   Address ReserveAlignedMemory(size_t requested, size_t alignment,
1281                                base::VirtualMemory* controller);
1282   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1283                                 size_t alignment, Executability executable,
1284                                 base::VirtualMemory* controller);
1285 
1286   bool CommitMemory(Address addr, size_t size, Executability executable);
1287 
1288   void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
1289                           Executability executable);
1290   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1291   void FreeMemory(Address addr, size_t size, Executability executable);
1292 
1293   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1294   // the address is not NULL, the size is greater than zero, and that the
1295   // block is contained in the initial chunk.  Returns true if it succeeded
1296   // and false otherwise.
1297   bool CommitBlock(Address start, size_t size, Executability executable);
1298 
1299   // Uncommit a contiguous block of memory [start..(start+size)[.
1300   // start is not NULL, the size is greater than zero, and the
1301   // block is contained in the initial chunk.  Returns true if it succeeded
1302   // and false otherwise.
1303   bool UncommitBlock(Address start, size_t size);
1304 
1305   // Zaps a contiguous block of memory [start..(start+size)[ thus
1306   // filling it up with a recognizable non-NULL bit pattern.
1307   void ZapBlock(Address start, size_t size);
1308 
1309   void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
1310                                  size_t size);
1311 
1312   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1313                                    ObjectSpace space, AllocationAction action);
1314 
1315   void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
1316 
1317   bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
1318 
1319   static int CodePageGuardStartOffset();
1320 
1321   static int CodePageGuardSize();
1322 
1323   static int CodePageAreaStartOffset();
1324 
1325   static int CodePageAreaEndOffset();
1326 
CodePageAreaSize()1327   static int CodePageAreaSize() {
1328     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1329   }
1330 
PageAreaSize(AllocationSpace space)1331   static int PageAreaSize(AllocationSpace space) {
1332     DCHECK_NE(LO_SPACE, space);
1333     return (space == CODE_SPACE) ? CodePageAreaSize()
1334                                  : Page::kAllocatableMemory;
1335   }
1336 
1337   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1338                                               Address start, size_t commit_size,
1339                                               size_t reserved_size);
1340 
1341  private:
1342   Isolate* isolate_;
1343 
1344   // Maximum space size in bytes.
1345   intptr_t capacity_;
1346   // Maximum subset of capacity_ that can be executable
1347   intptr_t capacity_executable_;
1348 
1349   // Allocated space size in bytes.
1350   AtomicNumber<intptr_t> size_;
1351   // Allocated executable space size in bytes.
1352   AtomicNumber<intptr_t> size_executable_;
1353 
1354   // We keep the lowest and highest addresses allocated as a quick way
1355   // of determining that pointers are outside the heap. The estimate is
1356   // conservative, i.e. not all addrsses in 'allocated' space are allocated
1357   // to our heap. The range is [lowest, highest[, inclusive on the low end
1358   // and exclusive on the high end.
1359   AtomicValue<void*> lowest_ever_allocated_;
1360   AtomicValue<void*> highest_ever_allocated_;
1361 
1362   struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistrationMemoryAllocationCallbackRegistration1363     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1364                                          ObjectSpace space,
1365                                          AllocationAction action)
1366         : callback(callback), space(space), action(action) {}
1367     MemoryAllocationCallback callback;
1368     ObjectSpace space;
1369     AllocationAction action;
1370   };
1371 
1372   // A List of callback that are triggered when memory is allocated or free'd
1373   List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
1374 
1375   // Initializes pages in a chunk. Returns the first page address.
1376   // This function and GetChunkId() are provided for the mark-compact
1377   // collector to rebuild page headers in the from space, which is
1378   // used as a marking stack and its page headers are destroyed.
1379   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1380                                PagedSpace* owner);
1381 
UpdateAllocatedSpaceLimits(void * low,void * high)1382   void UpdateAllocatedSpaceLimits(void* low, void* high) {
1383     // The use of atomic primitives does not guarantee correctness (wrt.
1384     // desired semantics) by default. The loop here ensures that we update the
1385     // values only if they did not change in between.
1386     void* ptr = nullptr;
1387     do {
1388       ptr = lowest_ever_allocated_.Value();
1389     } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1390     do {
1391       ptr = highest_ever_allocated_.Value();
1392     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
1393   }
1394 
1395   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1396 };
1397 
1398 
1399 // -----------------------------------------------------------------------------
1400 // Interface for heap object iterator to be implemented by all object space
1401 // object iterators.
1402 //
// NOTE: The space-specific object iterators also implement their own next()
//       method, which is used to avoid virtual function calls when iterating
//       over a specific space.
1406 
1407 class ObjectIterator : public Malloced {
1408  public:
  virtual ~ObjectIterator() {}
1410 
1411   virtual HeapObject* next_object() = 0;
1412 };
1413 
1414 
1415 // -----------------------------------------------------------------------------
1416 // Heap object iterator in new/old/map spaces.
1417 //
1418 // A HeapObjectIterator iterates objects from the bottom of the given space
1419 // to its top or from the bottom of the given page to its top.
1420 //
1421 // If objects are allocated in the page during iteration the iterator may
1422 // or may not iterate over those objects.  The caller must create a new
1423 // iterator in order to be sure to visit these new objects.
1424 class HeapObjectIterator : public ObjectIterator {
1425  public:
1426   // Creates a new object iterator in a given space.
1427   explicit HeapObjectIterator(PagedSpace* space);
1428   explicit HeapObjectIterator(Page* page);
1429 
1430   // Advance to the next object, skipping free spaces and other fillers and
1431   // skipping the special garbage section of which there is one per space.
1432   // Returns NULL when the iteration has ended.
1433   inline HeapObject* Next();
1434   inline HeapObject* next_object() override;
1435 
1436  private:
1437   enum PageMode { kOnePageOnly, kAllPagesInSpace };
1438 
1439   Address cur_addr_;              // Current iteration point.
1440   Address cur_end_;               // End iteration point.
1441   PagedSpace* space_;
1442   PageMode page_mode_;
1443 
1444   // Fast (inlined) path of next().
1445   inline HeapObject* FromCurrentPage();
1446 
1447   // Slow path of next(), goes into the next page.  Returns false if the
1448   // iteration has ended.
1449   bool AdvanceToNextPage();
1450 
1451   // Initializes fields.
1452   inline void Initialize(PagedSpace* owner, Address start, Address end,
1453                          PageMode mode);
1454 };
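
// Illustrative usage sketch (an exposition-only example; |space| stands for
// some PagedSpace*): Next() skips fillers and the garbage section and returns
// NULL once the iteration has ended.
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj ...
//   }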
1455 
1456 
1457 // -----------------------------------------------------------------------------
1458 // A PageIterator iterates the pages in a paged space.
1459 
1460 class PageIterator BASE_EMBEDDED {
1461  public:
1462   explicit inline PageIterator(PagedSpace* space);
1463 
1464   inline bool has_next();
1465   inline Page* next();
1466 
1467  private:
1468   PagedSpace* space_;
1469   Page* prev_page_;  // Previous page returned.
1470   // Next page that will be returned.  Cached here so that we can use this
1471   // iterator for operations that deallocate pages.
1472   Page* next_page_;
1473 };
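
// Illustrative usage sketch (assumes |space| is a PagedSpace*). Because the
// next page is cached, the page returned last can be released while iterating.
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* page = it.next();
//     // ... process page ...
//   }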
1474 
1475 
1476 // -----------------------------------------------------------------------------
1477 // A space has a circular list of pages. The next page can be accessed via
1478 // Page::next_page() call.
1479 
1480 // An abstraction of allocation and relocation pointers in a page-structured
1481 // space.
1482 class AllocationInfo {
1483  public:
  AllocationInfo() : top_(nullptr), limit_(nullptr) {}
  AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    set_top(top);
    set_limit(limit);
  }

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == NULL ||
                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == NULL ||
                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  INLINE(void set_limit(Address limit)) {
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    return limit_;
  }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
           (top_ <= limit_);
  }
1521 #endif
1522 
1523  private:
1524   // Current allocation top.
1525   Address top_;
1526   // Current allocation limit.
1527   Address limit_;
1528 };
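
// Illustrative sketch of how these two pointers drive bump-pointer allocation
// (simplified; the real fast paths live in the space classes):
//
//   AllocationInfo info(area_start, area_end);
//   Address new_top = info.top() + size_in_bytes;
//   if (new_top <= info.limit()) {
//     info.set_top(new_top);  // The new object occupies [old top, new_top).
//   }  // Otherwise fall back to the free list / slow path.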
1529 
1530 
1531 // An abstraction of the accounting statistics of a page-structured space.
1532 //
1533 // The stats are only set by functions that ensure they stay balanced. These
1534 // functions increase or decrease one of the non-capacity stats in conjunction
1535 // with capacity, or else they always balance increases and decreases to the
1536 // non-capacity stats.
1537 class AllocationStats BASE_EMBEDDED {
1538  public:
  AllocationStats() { Clear(); }
1540 
1541   // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
1543     capacity_ = 0;
1544     max_capacity_ = 0;
1545     size_ = 0;
1546   }
1547 
  void ClearSize() { size_ = capacity_; }
1549 
1550   // Reset the allocation statistics (i.e., available = capacity with no wasted
1551   // or allocated bytes).
  void Reset() {
1553     size_ = 0;
1554   }
1555 
1556   // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() {
1560     CHECK_GE(size_, 0);
1561     return size_;
1562   }
1563 
1564   // Grow the space by adding available bytes.  They are initially marked as
1565   // being in use (part of the size), but will normally be immediately freed,
1566   // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
1568     capacity_ += size_in_bytes;
1569     size_ += size_in_bytes;
1570     if (capacity_ > max_capacity_) {
1571       max_capacity_ = capacity_;
1572     }
1573     CHECK(size_ >= 0);
1574   }
1575 
1576   // Shrink the space by removing available bytes.  Since shrinking is done
1577   // during sweeping, bytes have been marked as being in use (part of the size)
1578   // and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
1580     capacity_ -= size_in_bytes;
1581     size_ -= size_in_bytes;
1582     CHECK(size_ >= 0);
1583   }
1584 
1585   // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
1587     size_ += size_in_bytes;
1588     CHECK(size_ >= 0);
1589   }
1590 
1591   // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
1593     size_ -= size_in_bytes;
1594     CHECK_GE(size_, 0);
1595   }
1596 
1597   // Merge {other} into {this}.
  void Merge(const AllocationStats& other) {
1599     capacity_ += other.capacity_;
1600     size_ += other.size_;
1601     if (other.max_capacity_ > max_capacity_) {
1602       max_capacity_ = other.max_capacity_;
1603     }
1604     CHECK_GE(size_, 0);
1605   }
1606 
  void DecreaseCapacity(intptr_t size_in_bytes) {
1608     capacity_ -= size_in_bytes;
1609     CHECK_GE(capacity_, 0);
1610     CHECK_GE(capacity_, size_);
1611   }
1612 
  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
1614 
1615  private:
1616   // |capacity_|: The number of object-area bytes (i.e., not including page
1617   // bookkeeping structures) currently in the space.
1618   intptr_t capacity_;
1619 
1620   // |max_capacity_|: The maximum capacity ever observed.
1621   intptr_t max_capacity_;
1622 
1623   // |size_|: The number of allocated bytes.
1624   intptr_t size_;
1625 };
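
// Illustrative sketch of the balancing contract (hypothetical values): growing
// the space increases capacity and size together, freeing only moves bytes
// from size back to available.
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kAllocatableMemory);      // capacity == size
//   stats.DeallocateBytes(Page::kAllocatableMemory);  // whole page available
//   stats.AllocateBytes(256);                         // 256 bytes in use
//   DCHECK_EQ(256, stats.Size());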
1626 
1627 
1628 // A free list category maintains a linked list of free memory blocks.
1629 class FreeListCategory {
1630  public:
  explicit FreeListCategory(FreeList* owner, FreeListCategoryType type)
1632       : type_(type),
1633         top_(nullptr),
1634         end_(nullptr),
1635         available_(0),
1636         owner_(owner) {}
1637 
1638   // Concatenates {category} into {this}.
1639   //
1640   // Note: Thread-safe.
1641   intptr_t Concatenate(FreeListCategory* category);
1642 
1643   void Reset();
1644 
1645   void Free(FreeSpace* node, int size_in_bytes);
1646 
1647   // Pick a node from the list.
1648   FreeSpace* PickNodeFromList(int* node_size);
1649 
  // Pick a node from the list and compare it against {size_in_bytes}. If the
  // node's size is greater than or equal, return the node; otherwise return
  // null.
1652   FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
1653 
1654   // Search for a node of size {size_in_bytes}.
1655   FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
1656 
1657   intptr_t EvictFreeListItemsInList(Page* p);
1658   bool ContainsPageFreeListItemsInList(Page* p);
1659 
1660   void RepairFreeList(Heap* heap);
1661 
  bool IsEmpty() { return top() == nullptr; }
1663 
  FreeList* owner() { return owner_; }
  int available() const { return available_; }
1666 
1667 #ifdef DEBUG
1668   intptr_t SumFreeList();
1669   int FreeListLength();
1670   bool IsVeryLong();
1671 #endif
1672 
1673  private:
  // For debug builds we accurately compute free list lengths up until
  // {kVeryLongFreeList} by manually walking the list.
1676   static const int kVeryLongFreeList = 500;
1677 
  FreeSpace* top() { return top_.Value(); }
  void set_top(FreeSpace* top) { top_.SetValue(top); }
1680 
  FreeSpace* end() const { return end_; }
  void set_end(FreeSpace* end) { end_ = end; }
1683 
1684   // |type_|: The type of this free list category.
1685   FreeListCategoryType type_;
1686 
1687   // |top_|: Points to the top FreeSpace* in the free list category.
1688   AtomicValue<FreeSpace*> top_;
1689 
1690   // |end_|: Points to the end FreeSpace* in the free list category.
1691   FreeSpace* end_;
1692 
1693   // |available_|: Total available bytes in all blocks of this free list
1694   //   category.
1695   int available_;
1696 
1697   // |owner_|: The owning free list of this category.
1698   FreeList* owner_;
1699 };
1700 
1701 // A free list maintaining free blocks of memory. The free list is organized in
1702 // a way to encourage objects allocated around the same time to be near each
1703 // other. The normal way to allocate is intended to be by bumping a 'top'
1704 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1705 // find a new space to allocate from. This is done with the free list, which is
1706 // divided up into rough categories to cut down on waste. Having finer
1707 // categories would scatter allocation more.
1708 
1709 // The free list is organized in categories as follows:
1710 // 1-31 words (too small): Such small free areas are discarded for efficiency
1711 //   reasons. They can be reclaimed by the compactor. However the distance
1712 //   between top and limit may be this small.
1713 // 32-255 words (small): Used for allocating free space between 1-31 words in
1714 //   size.
1715 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1716 //   in size.
// 2048-16383 words (large): Used for allocating free space between 256-2047
1718 //   words in size.
1719 // At least 16384 words (huge): This list is for objects of 2048 words or
1720 //   larger. Empty pages are also added to this list.
1721 class FreeList {
1722  public:
1723   // This method returns how much memory can be allocated after freeing
1724   // maximum_freed memory.
  static inline int GuaranteedAllocatable(int maximum_freed) {
1726     if (maximum_freed <= kSmallListMin) {
1727       return 0;
1728     } else if (maximum_freed <= kSmallListMax) {
1729       return kSmallAllocationMax;
1730     } else if (maximum_freed <= kMediumListMax) {
1731       return kMediumAllocationMax;
1732     } else if (maximum_freed <= kLargeListMax) {
1733       return kLargeAllocationMax;
1734     }
1735     return maximum_freed;
1736   }
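
  // Worked example (illustrative): a freed block in the small range, e.g.
  // 0x80 * kPointerSize bytes, only guarantees a subsequent small allocation,
  // so GuaranteedAllocatable(0x80 * kPointerSize) == kSmallAllocationMax.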
1737 
1738   explicit FreeList(PagedSpace* owner);
1739 
1740   // The method concatenates {other} into {this} and returns the added bytes,
1741   // including waste.
1742   //
1743   // Note: Thread-safe.
1744   intptr_t Concatenate(FreeList* other);
1745 
1746   // Adds a node on the free list. The block of size {size_in_bytes} starting
1747   // at {start} is placed on the free list. The return value is the number of
  // bytes that were not added to the free list, because the freed memory block
1749   // was too small. Bookkeeping information will be written to the block, i.e.,
1750   // its contents will be destroyed. The start address should be word aligned,
1751   // and the size should be a non-zero multiple of the word size.
1752   int Free(Address start, int size_in_bytes);
1753 
1754   // Allocate a block of size {size_in_bytes} from the free list. The block is
  // uninitialized. A failure is returned if no block is available. The size
1756   // should be a non-zero multiple of the word size.
1757   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1758 
1759   // Clear the free list.
1760   void Reset();
1761 
  void ResetStats() { wasted_bytes_ = 0; }
1763 
1764   // Return the number of bytes available on the free list.
  intptr_t Available() {
1766     return small_list_.available() + medium_list_.available() +
1767            large_list_.available() + huge_list_.available();
1768   }
1769 
1770   // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
1771   // size in the free list category exactly matching the size. If no suitable
1772   // node could be found, the method falls back to retrieving a {FreeSpace}
1773   // from the large or huge free list category.
1774   //
1775   // Can be used concurrently.
1776   MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
1777 
  bool IsEmpty() {
1779     return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1780            large_list_.IsEmpty() && huge_list_.IsEmpty();
1781   }
1782 
1783   // Used after booting the VM.
1784   void RepairLists(Heap* heap);
1785 
1786   intptr_t EvictFreeListItems(Page* p);
1787   bool ContainsPageFreeListItems(Page* p);
1788 
  PagedSpace* owner() { return owner_; }
  intptr_t wasted_bytes() { return wasted_bytes_; }
  base::Mutex* mutex() { return &mutex_; }
1792 
1793 #ifdef DEBUG
1794   void Zap();
1795   intptr_t SumFreeLists();
1796   bool IsVeryLong();
1797 #endif
1798 
1799  private:
1800   // The size range of blocks, in bytes.
1801   static const int kMinBlockSize = 3 * kPointerSize;
1802   static const int kMaxBlockSize = Page::kAllocatableMemory;
1803 
1804   static const int kSmallListMin = 0x1f * kPointerSize;
1805   static const int kSmallListMax = 0xff * kPointerSize;
1806   static const int kMediumListMax = 0x7ff * kPointerSize;
1807   static const int kLargeListMax = 0x3fff * kPointerSize;
1808   static const int kSmallAllocationMax = kSmallListMin;
1809   static const int kMediumAllocationMax = kSmallListMax;
1810   static const int kLargeAllocationMax = kMediumListMax;
1811 
1812   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1813   FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
1814 
  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
1816     switch (category) {
1817       case kSmall:
1818         return &small_list_;
1819       case kMedium:
1820         return &medium_list_;
1821       case kLarge:
1822         return &large_list_;
1823       case kHuge:
1824         return &huge_list_;
1825       default:
1826         UNREACHABLE();
1827     }
1828     UNREACHABLE();
1829     return nullptr;
1830   }
1831 
1832   PagedSpace* owner_;
1833   base::Mutex mutex_;
1834   intptr_t wasted_bytes_;
1835   FreeListCategory small_list_;
1836   FreeListCategory medium_list_;
1837   FreeListCategory large_list_;
1838   FreeListCategory huge_list_;
1839 
1840   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1841 };
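
// Illustrative usage sketch (assumptions for exposition: |free_list| belongs
// to a paged space, [start, start + size_in_bytes) is a dead region,
// |object_size| is some requested size, and a failed allocation is signalled
// by a NULL result):
//
//   int wasted = free_list->Free(start, size_in_bytes);    // wasted is small.
//   HeapObject* object = free_list->Allocate(object_size);
//   if (object == NULL) {
//     // No suitable node was found; sweep more memory or expand the space.
//   }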
1842 
1843 
1844 class AllocationResult {
1845  public:
1846   // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
1848       : object_(object) {
1849     // AllocationResults can't return Smis, which are used to represent
1850     // failure and the space to retry in.
1851     CHECK(!object->IsSmi());
1852   }
1853 
  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
1855 
1856   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1857     return AllocationResult(space);
1858   }
1859 
  inline bool IsRetry() { return object_->IsSmi(); }
1861 
1862   template <typename T>
  bool To(T** obj) {
1864     if (IsRetry()) return false;
1865     *obj = T::cast(object_);
1866     return true;
1867   }
1868 
  Object* ToObjectChecked() {
1870     CHECK(!IsRetry());
1871     return object_;
1872   }
1873 
1874   inline AllocationSpace RetrySpace();
1875 
1876  private:
  explicit AllocationResult(AllocationSpace space)
1878       : object_(Smi::FromInt(static_cast<int>(space))) {}
1879 
1880   Object* object_;
1881 };
1882 
1883 
1884 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
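
// Illustrative usage sketch (assumes |space| is some space with an
// AllocateRawUnaligned() method): a failed allocation carries the space to
// retry in, encoded as a Smi.
//
//   AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!result.To(&object)) {
//     // result.IsRetry() is true; collect garbage in result.RetrySpace()
//     // and try again.
//   }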
1885 
1886 
1887 // LocalAllocationBuffer represents a linear allocation area that is created
1888 // from a given {AllocationResult} and can be used to allocate memory without
1889 // synchronization.
1890 //
1891 // The buffer is properly closed upon destruction and reassignment.
1892 // Example:
1893 //   {
1894 //     AllocationResult result = ...;
1895 //     LocalAllocationBuffer a(heap, result, size);
1896 //     LocalAllocationBuffer b = a;
1897 //     CHECK(!a.IsValid());
1898 //     CHECK(b.IsValid());
1899 //     // {a} is invalid now and cannot be used for further allocations.
1900 //   }
1901 //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
1902 //   // filler object for the remaining area.
1903 class LocalAllocationBuffer {
1904  public:
1905   // Indicates that a buffer cannot be used for allocations anymore. Can result
1906   // from either reassigning a buffer, or trying to construct it from an
1907   // invalid {AllocationResult}.
1908   static inline LocalAllocationBuffer InvalidBuffer();
1909 
1910   // Creates a new LAB from a given {AllocationResult}. Results in
1911   // InvalidBuffer if the result indicates a retry.
1912   static inline LocalAllocationBuffer FromResult(Heap* heap,
1913                                                  AllocationResult result,
1914                                                  intptr_t size);
1915 
  ~LocalAllocationBuffer() { Close(); }
1917 
1918   // Convert to C++11 move-semantics once allowed by the style guide.
1919   LocalAllocationBuffer(const LocalAllocationBuffer& other);
1920   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1921 
1922   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1923       int size_in_bytes, AllocationAlignment alignment);
1924 
  inline bool IsValid() { return allocation_info_.top() != nullptr; }
1926 
1927   // Try to merge LABs, which is only possible when they are adjacent in memory.
1928   // Returns true if the merge was successful, false otherwise.
1929   inline bool TryMerge(LocalAllocationBuffer* other);
1930 
1931  private:
1932   LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1933 
1934   void Close();
1935 
1936   Heap* heap_;
1937   AllocationInfo allocation_info_;
1938 };
1939 
1940 
1941 class PagedSpace : public Space {
1942  public:
1943   static const intptr_t kCompactionMemoryWanted = 500 * KB;
1944 
1945   // Creates a space with an id.
1946   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1947 
  ~PagedSpace() override { TearDown(); }
1949 
1950   // Set up the space using the given address range of virtual memory (from
1951   // the memory allocator's initial chunk) if possible.  If the block of
1952   // addresses is not big enough to contain a single page-aligned page, a
1953   // fresh chunk will be allocated.
1954   bool SetUp();
1955 
1956   // Returns true if the space has been successfully set up and not
1957   // subsequently torn down.
1958   bool HasBeenSetUp();
1959 
1960   // Checks whether an object/address is in this space.
1961   inline bool Contains(Address a);
1962   inline bool Contains(HeapObject* o);
1963   // Unlike Contains() methods it is safe to call this one even for addresses
1964   // of unmapped memory.
1965   bool ContainsSafe(Address addr);
1966 
1967   // Given an address occupied by a live object, return that object if it is
1968   // in this space, or a Smi if it is not.  The implementation iterates over
1969   // objects in the page containing the address, the cost is linear in the
1970   // number of objects in the page.  It may be slow.
1971   Object* FindObject(Address addr);
1972 
1973   // During boot the free_space_map is created, and afterwards we may need
1974   // to write it into the free list nodes that were already created.
1975   void RepairFreeListsAfterDeserialization();
1976 
1977   // Prepares for a mark-compact GC.
1978   void PrepareForMarkCompact();
1979 
1980   // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }
1982 
1983   // Approximate amount of physical memory committed for this space.
1984   size_t CommittedPhysicalMemory() override;
1985 
1986   void ResetFreeListStatistics();
1987 
1988   // Sets the capacity, the available space and the wasted space to zero.
1989   // The stats are rebuilt during sweeping by adding each page to the
1990   // capacity and the size when it is encountered.  As free spaces are
1991   // discovered during the sweeping they are subtracted from the size and added
1992   // to the available and wasted totals.
  void ClearStats() {
1994     accounting_stats_.ClearSize();
1995     free_list_.ResetStats();
1996     ResetFreeListStatistics();
1997   }
1998 
1999   // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
2001     accounting_stats_.DeallocateBytes(bytes);
2002   }
2003 
2004   // Available bytes without growing.  These are the bytes on the free list.
2005   // The bytes in the linear allocation area are not included in this total
2006   // because updating the stats would slow down allocation.  New pages are
2007   // immediately added to the free list so they show up here.
  intptr_t Available() override { return free_list_.Available(); }
2009 
2010   // Allocated bytes in this space.  Garbage bytes that were not found due to
2011   // concurrent sweeping are counted as being allocated!  The bytes in the
2012   // current linear allocation area (between top and limit) are also counted
2013   // here.
  intptr_t Size() override { return accounting_stats_.Size(); }
2015 
2016   // As size, but the bytes in lazily swept pages are estimated and the bytes
2017   // in the current linear allocation area are not included.
2018   intptr_t SizeOfObjects() override;
2019 
2020   // Wasted bytes in this space.  These are just the bytes that were thrown away
2021   // due to being too small to use for allocation.
  virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
2023 
2024   // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }
2027 
2028   // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }
2030 
2031   // The allocation limit address.
  Address* allocation_limit_address() {
2033     return allocation_info_.limit_address();
2034   }
2035 
2036   // Allocate the requested number of bytes in the space if possible, return a
2037   // failure object if not.
2038   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
2039       int size_in_bytes);
2040 
2041   MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2042       int size_in_bytes);
2043 
2044   // Allocate the requested number of bytes in the space double aligned if
2045   // possible, return a failure object if not.
2046   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2047       int size_in_bytes, AllocationAlignment alignment);
2048 
2049   // Allocate the requested number of bytes in the space and consider allocation
2050   // alignment if needed.
2051   MUST_USE_RESULT inline AllocationResult AllocateRaw(
2052       int size_in_bytes, AllocationAlignment alignment);
2053 
  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.  The freed bytes are always removed
  // from the allocated size; the return value is the number of bytes that
  // were actually added to the free list.
  int Free(Address start, int size_in_bytes) {
2059     int wasted = free_list_.Free(start, size_in_bytes);
2060     accounting_stats_.DeallocateBytes(size_in_bytes);
2061     return size_in_bytes - wasted;
2062   }
2063 
  void ResetFreeList() { free_list_.Reset(); }
2065 
2066   // Set space allocation info.
  void SetTopAndLimit(Address top, Address limit) {
2068     DCHECK(top == limit ||
2069            Page::FromAddress(top) == Page::FromAddress(limit - 1));
2070     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2071     allocation_info_.Reset(top, limit);
2072   }
2073 
2074   // Empty space allocation info, returning unused area to free list.
  void EmptyAllocationInfo() {
2076     // Mark the old linear allocation area with a free space map so it can be
2077     // skipped when scanning the heap.
2078     int old_linear_size = static_cast<int>(limit() - top());
2079     Free(top(), old_linear_size);
2080     SetTopAndLimit(NULL, NULL);
2081   }
2082 
  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2084 
2085   void IncreaseCapacity(int size);
2086 
2087   // Releases an unused page and shrinks the space.
2088   void ReleasePage(Page* page);
2089 
2090   // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }
2092 
2093 #ifdef VERIFY_HEAP
2094   // Verify integrity of this space.
2095   virtual void Verify(ObjectVisitor* visitor);
2096 
2097   // Overridden by subclasses to verify space-specific object
2098   // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}
2100 #endif
2101 
2102 #ifdef DEBUG
2103   // Print meta info and objects in this space.
2104   void Print() override;
2105 
2106   // Reports statistics for the space
2107   void ReportStatistics();
2108 
2109   // Report code object related statistics
2110   void CollectCodeStatistics();
2111   static void ReportCodeStatistics(Isolate* isolate);
2112   static void ResetCodeStatistics(Isolate* isolate);
2113 #endif
2114 
2115   // Evacuation candidates are swept by evacuator.  Needs to return a valid
2116   // result before _and_ after evacuation has finished.
  static bool ShouldBeSweptBySweeperThreads(Page* p) {
2118     return !p->IsEvacuationCandidate() &&
2119            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
2120   }
2121 
2122   // This function tries to steal size_in_bytes memory from the sweeper threads
2123   // free-lists. If it does not succeed stealing enough memory, it will wait
2124   // for the sweeper threads to finish sweeping.
2125   // It returns true when sweeping is completed and false otherwise.
2126   bool EnsureSweeperProgress(intptr_t size_in_bytes);
2127 
  void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
2129 
  Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
2131 
  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }
2134 
2135   void EvictEvacuationCandidatesFromLinearAllocationArea();
2136 
2137   bool CanExpand(size_t size);
2138 
2139   // Returns the number of total pages in this space.
2140   int CountTotalPages();
2141 
2142   // Return size of allocatable area on a page in this space.
  inline int AreaSize() { return area_size_; }
2144 
  virtual bool is_local() { return false; }
2146 
2147   // Merges {other} into the current space. Note that this modifies {other},
2148   // e.g., removes its bump pointer area and resets statistics.
2149   void MergeCompactionSpace(CompactionSpace* other);
2150 
2151   void DivideUponCompactionSpaces(CompactionSpaceCollection** other, int num,
2152                                   intptr_t limit = kCompactionMemoryWanted);
2153 
2154   // Refills the free list from the corresponding free list filled by the
2155   // sweeper.
2156   virtual void RefillFreeList();
2157 
2158  protected:
2159   void AddMemory(Address start, intptr_t size);
2160 
2161   FreeSpace* TryRemoveMemory(intptr_t size_in_bytes);
2162 
2163   void MoveOverFreeMemory(PagedSpace* other);
2164 
2165   // PagedSpaces that should be included in snapshots have different, i.e.,
2166   // smaller, initial pages.
  virtual bool snapshotable() { return true; }
2168 
  FreeList* free_list() { return &free_list_; }
2170 
  bool HasPages() { return anchor_.next_page() != &anchor_; }
2172 
2173   // Cleans up the space, frees all pages in this space except those belonging
2174   // to the initial chunk, uncommits addresses in the initial chunk.
2175   void TearDown();
2176 
2177   // Expands the space by allocating a fixed number of pages. Returns false if
2178   // it cannot allocate requested number of pages from OS, or if the hard heap
2179   // size limit has been hit.
2180   bool Expand();
2181 
2182   // Generic fast case allocation function that tries linear allocation at the
2183   // address denoted by top in allocation_info_.
2184   inline HeapObject* AllocateLinearly(int size_in_bytes);
2185 
2186   // Generic fast case allocation function that tries aligned linear allocation
2187   // at the address denoted by top in allocation_info_. Writes the aligned
2188   // allocation size, which includes the filler size, to size_in_bytes.
2189   inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2190                                              AllocationAlignment alignment);
2191 
2192   // If sweeping is still in progress try to sweep unswept pages. If that is
2193   // not successful, wait for the sweeper threads and re-try free-list
2194   // allocation.
2195   MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2196       int size_in_bytes);
2197 
2198   // Slow path of AllocateRaw.  This function is space-dependent.
2199   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2200 
2201   int area_size_;
2202 
2203   // Accounting information for this space.
2204   AllocationStats accounting_stats_;
2205 
2206   // The dummy page that anchors the double linked list of pages.
2207   Page anchor_;
2208 
2209   // The space's free list.
2210   FreeList free_list_;
2211 
2212   // Normal allocation information.
2213   AllocationInfo allocation_info_;
2214 
2215   // The sweeper threads iterate over the list of pointer and data space pages
2216   // and sweep these pages concurrently. They will stop sweeping after the
2217   // end_of_unswept_pages_ page.
2218   Page* end_of_unswept_pages_;
2219 
2220   // Mutex guarding any concurrent access to the space.
2221   base::Mutex space_mutex_;
2222 
2223   friend class MarkCompactCollector;
2224   friend class PageIterator;
2225 
2226   // Used in cctest.
2227   friend class HeapTester;
2228 };
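
// Illustrative sketch of returning memory to a paged space (assumes |space|
// and a dead region [start, start + size_in_bytes)): Free() updates the
// accounting stats and feeds the free list; leftovers that are too small to
// track end up in Waste().
//
//   int reclaimed = space->Free(start, size_in_bytes);
//   // reclaimed <= size_in_bytes; the difference is accounted as waste.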
2229 
2230 
2231 class NumberAndSizeInfo BASE_EMBEDDED {
2232  public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}
2234 
  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }
2237 
  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }
2240 
  void clear() {
2242     number_ = 0;
2243     bytes_ = 0;
2244   }
2245 
2246  private:
2247   int number_;
2248   int bytes_;
2249 };
2250 
2251 
2252 // HistogramInfo class for recording a single "bar" of a histogram.  This
2253 // class is used for collecting statistics to print to the log file.
2254 class HistogramInfo : public NumberAndSizeInfo {
2255  public:
  HistogramInfo() : NumberAndSizeInfo() {}
2257 
  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }
2260 
2261  private:
2262   const char* name_;
2263 };
2264 
2265 
2266 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2267 
2268 
2269 class SemiSpace;
2270 
2271 
2272 class NewSpacePage : public MemoryChunk {
2273  public:
2274   // GC related flags copied from from-space to to-space when
2275   // flipping semispaces.
2276   static const intptr_t kCopyOnFlipFlagsMask =
2277       (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2278       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2279       (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2280 
2281   static const int kAreaSize = Page::kAllocatableMemory;
2282 
  inline NewSpacePage* next_page() {
2284     return static_cast<NewSpacePage*>(next_chunk());
2285   }
2286 
  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }

  inline NewSpacePage* prev_page() {
    return static_cast<NewSpacePage*>(prev_chunk());
  }

  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }

  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }

  bool is_anchor() { return !this->InNewSpace(); }

  static bool IsAtStart(Address addr) {
2300     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2301            kObjectStartOffset;
2302   }
2303 
  static bool IsAtEnd(Address addr) {
2305     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2306   }
2307 
  Address address() { return reinterpret_cast<Address>(this); }
2309 
2310   // Finds the NewSpacePage containing the given address.
  static inline NewSpacePage* FromAddress(Address address_in_page) {
2312     Address page_start =
2313         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2314                                   ~Page::kPageAlignmentMask);
2315     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2316     return page;
2317   }
2318 
2319   // Find the page for a limit address. A limit address is either an address
2320   // inside a page, or the address right after the last byte of a page.
  static inline NewSpacePage* FromLimit(Address address_limit) {
2322     return NewSpacePage::FromAddress(address_limit - 1);
2323   }
2324 
2325   // Checks if address1 and address2 are on the same new space page.
  static inline bool OnSamePage(Address address1, Address address2) {
2327     return NewSpacePage::FromAddress(address1) ==
2328            NewSpacePage::FromAddress(address2);
2329   }
2330 
2331  private:
2332   // Create a NewSpacePage object that is only used as anchor
2333   // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
2335 
2336   static NewSpacePage* Initialize(Heap* heap, Address start,
2337                                   SemiSpace* semi_space);
2338 
  // Initialize a fake NewSpacePage used as sentinel at the ends
2340   // of a doubly-linked list of real NewSpacePages.
2341   // Only uses the prev/next links, and sets flags to not be in new-space.
2342   void InitializeAsAnchor(SemiSpace* owner);
2343 
2344   friend class SemiSpace;
2345   friend class SemiSpaceIterator;
2346 };
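
// Illustrative sketch (assumes |addr| points into a semispace): page lookup is
// pure address arithmetic, since new space pages are Page::kPageSize aligned.
//
//   NewSpacePage* page = NewSpacePage::FromAddress(addr);
//   SemiSpace* owning_semi_space = page->semi_space();
//   bool at_area_start = NewSpacePage::IsAtStart(addr);  // i.e. the offset in
//                                                        // the page equals
//                                                        // kObjectStartOffset.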
2347 
2348 
2349 // -----------------------------------------------------------------------------
2350 // SemiSpace in young generation
2351 //
2352 // A semispace is a contiguous chunk of memory holding page-like memory
2353 // chunks. The mark-compact collector  uses the memory of the first page in
2354 // the from space as a marking stack when tracing live objects.
2355 
2356 class SemiSpace : public Space {
2357  public:
2358   // Constructor.
  SemiSpace(Heap* heap, SemiSpaceId semispace)
2360       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2361         start_(NULL),
2362         age_mark_(NULL),
2363         id_(semispace),
2364         anchor_(this),
2365         current_page_(NULL) {}
2366 
2367   // Sets up the semispace using the given chunk.
2368   void SetUp(Address start, int initial_capacity, int target_capacity,
2369              int maximum_capacity);
2370 
2371   // Tear down the space.  Heap memory was not allocated by the space, so it
2372   // is not deallocated here.
2373   void TearDown();
2374 
2375   // True if the space has been set up but not torn down.
  bool HasBeenSetUp() { return start_ != NULL; }
2377 
2378   // Grow the semispace to the new capacity.  The new capacity
2379   // requested must be larger than the current capacity and less than
2380   // the maximum capacity.
2381   bool GrowTo(int new_capacity);
2382 
2383   // Shrinks the semispace to the new capacity.  The new capacity
2384   // requested must be more than the amount of used memory in the
2385   // semispace and less than the current capacity.
2386   bool ShrinkTo(int new_capacity);
2387 
2388   // Sets the total capacity. Only possible when the space is not committed.
2389   bool SetTotalCapacity(int new_capacity);
2390 
2391   // Returns the start address of the first page of the space.
  Address space_start() {
2393     DCHECK(anchor_.next_page() != &anchor_);
2394     return anchor_.next_page()->area_start();
2395   }
2396 
2397   // Returns the start address of the current page of the space.
  Address page_low() { return current_page_->area_start(); }

  // Returns one past the end address of the space.
  Address space_end() { return anchor_.prev_page()->area_end(); }

  // Returns one past the end address of the current page of the space.
  Address page_high() { return current_page_->area_end(); }

  bool AdvancePage() {
2407     NewSpacePage* next_page = current_page_->next_page();
2408     if (next_page == anchor()) return false;
2409     current_page_ = next_page;
2410     return true;
2411   }
2412 
2413   // Resets the space to using the first page.
2414   void Reset();
2415 
2416   // Age mark accessors.
  Address age_mark() { return age_mark_; }
2418   void set_age_mark(Address mark);
2419 
2420   // True if the address is in the address range of this semispace (not
2421   // necessarily below the allocation pointer).
  bool Contains(Address a) {
2423     return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2424            reinterpret_cast<uintptr_t>(start_);
2425   }
2426 
2427   // True if the object is a heap object in the address range of this
2428   // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
2430     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
2431   }
2432 
2433   // If we don't have these here then SemiSpace will be abstract.  However
2434   // they should never be called:
2435 
  intptr_t Size() override {
2437     UNREACHABLE();
2438     return 0;
2439   }
2440 
  intptr_t SizeOfObjects() override { return Size(); }
2442 
  intptr_t Available() override {
2444     UNREACHABLE();
2445     return 0;
2446   }
2447 
2448 
  bool is_committed() { return committed_; }
2450   bool Commit();
2451   bool Uncommit();
2452 
  NewSpacePage* first_page() { return anchor_.next_page(); }
  NewSpacePage* current_page() { return current_page_; }
2455 
2456 #ifdef VERIFY_HEAP
2457   virtual void Verify();
2458 #endif
2459 
2460 #ifdef DEBUG
2461   void Print() override;
  // Validate a range of addresses in a SemiSpace.
2463   // The "from" address must be on a page prior to the "to" address,
2464   // in the linked page order, or it must be earlier on the same page.
2465   static void AssertValidRange(Address from, Address to);
2466 #else
2467   // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
2469 #endif
2470 
2471   // Returns the current total capacity of the semispace.
  int TotalCapacity() { return total_capacity_; }

  // Returns the target for total capacity of the semispace.
  int TargetCapacity() { return target_capacity_; }

  // Returns the maximum total capacity of the semispace.
  int MaximumTotalCapacity() { return maximum_total_capacity_; }

  // Returns the initial capacity of the semispace.
  int InitialTotalCapacity() { return initial_total_capacity_; }

  SemiSpaceId id() { return id_; }
2484 
2485   static void Swap(SemiSpace* from, SemiSpace* to);
2486 
2487   // Approximate amount of physical memory committed for this space.
2488   size_t CommittedPhysicalMemory() override;
2489 
2490  private:
2491   // Flips the semispace between being from-space and to-space.
2492   // Copies the flags into the masked positions on all pages in the space.
2493   void FlipPages(intptr_t flags, intptr_t flag_mask);
2494 
2495   // Updates Capacity and MaximumCommitted based on new capacity.
2496   void SetCapacity(int new_capacity);
2497 
  NewSpacePage* anchor() { return &anchor_; }
2499 
2500   // The current and maximum total capacity of the space.
2501   int total_capacity_;
2502   int target_capacity_;
2503   int maximum_total_capacity_;
2504   int initial_total_capacity_;
2505 
2506   // The start address of the space.
2507   Address start_;
2508   // Used to govern object promotion during mark-compact collection.
2509   Address age_mark_;
2510 
2511   // Masks and comparison values to test for containment in this semispace.
2512   uintptr_t address_mask_;
2513   uintptr_t object_mask_;
2514   uintptr_t object_expected_;
2515 
2516   bool committed_;
2517   SemiSpaceId id_;
2518 
2519   NewSpacePage anchor_;
2520   NewSpacePage* current_page_;
2521 
2522   friend class SemiSpaceIterator;
2523   friend class NewSpacePageIterator;
2524 };
2525 
2526 
2527 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2528 // semispace of the heap's new space.  It iterates over the objects in the
2529 // semispace from a given start address (defaulting to the bottom of the
2530 // semispace) to the top of the semispace.  New objects allocated after the
2531 // iterator is created are not iterated.
2532 class SemiSpaceIterator : public ObjectIterator {
2533  public:
2534   // Create an iterator over the allocated objects in the given to-space.
2535   explicit SemiSpaceIterator(NewSpace* space);
2536 
2537   inline HeapObject* Next();
2538 
2539   // Implementation of the ObjectIterator functions.
2540   inline HeapObject* next_object() override;
2541 
2542  private:
2543   void Initialize(Address start, Address end);
2544 
2545   // The current iteration point.
2546   Address current_;
2547   // The end of iteration.
2548   Address limit_;
2549 };
2550 
2551 
2552 // -----------------------------------------------------------------------------
2553 // A PageIterator iterates the pages in a semi-space.
2554 class NewSpacePageIterator BASE_EMBEDDED {
2555  public:
2556   // Make an iterator that runs over all pages in to-space.
2557   explicit inline NewSpacePageIterator(NewSpace* space);
2558 
2559   // Make an iterator that runs over all pages in the given semispace,
2560   // even those not used in allocation.
2561   explicit inline NewSpacePageIterator(SemiSpace* space);
2562 
2563   // Make iterator that iterates from the page containing start
2564   // to the page that contains limit in the same semispace.
2565   inline NewSpacePageIterator(Address start, Address limit);
2566 
2567   inline bool has_next();
2568   inline NewSpacePage* next();
2569 
2570  private:
2571   NewSpacePage* prev_page_;  // Previous page returned.
2572   // Next page that will be returned.  Cached here so that we can use this
2573   // iterator for operations that deallocate pages.
2574   NewSpacePage* next_page_;
2575   // Last page returned.
2576   NewSpacePage* last_page_;
2577 };
2578 
2579 // -----------------------------------------------------------------------------
2580 // Allows observation of inline allocation in the new space.
2581 class InlineAllocationObserver {
2582  public:
  explicit InlineAllocationObserver(intptr_t step_size)
2584       : step_size_(step_size), bytes_to_next_step_(step_size) {
2585     DCHECK(step_size >= kPointerSize);
2586   }
  virtual ~InlineAllocationObserver() {}
2588 
2589  private:
  intptr_t step_size() const { return step_size_; }
  intptr_t bytes_to_next_step() const { return bytes_to_next_step_; }
2592 
  // Pure virtual method, implemented by subclasses, that gets called when at
  // least step_size bytes have been allocated. soon_object is the address just
  // allocated (but not yet initialized); size is the size of the object as
2596   // requested (i.e. w/o the alignment fillers). Some complexities to be aware
2597   // of:
2598   // 1) soon_object will be nullptr in cases where we end up observing an
2599   //    allocation that happens to be a filler space (e.g. page boundaries.)
2600   // 2) size is the requested size at the time of allocation. Right-trimming
2601   //    may change the object size dynamically.
2602   // 3) soon_object may actually be the first object in an allocation-folding
2603   //    group. In such a case size is the size of the group rather than the
2604   //    first object.
2605   virtual void Step(int bytes_allocated, Address soon_object, size_t size) = 0;
2606 
2607   // Called each time the new space does an inline allocation step. This may be
2608   // more frequently than the step_size we are monitoring (e.g. when there are
2609   // multiple observers, or when page or space boundary is encountered.)
  void InlineAllocationStep(int bytes_allocated, Address soon_object,
2611                             size_t size) {
2612     bytes_to_next_step_ -= bytes_allocated;
2613     if (bytes_to_next_step_ <= 0) {
2614       Step(static_cast<int>(step_size_ - bytes_to_next_step_), soon_object,
2615            size);
2616       bytes_to_next_step_ = step_size_;
2617     }
2618   }
2619 
2620   intptr_t step_size_;
2621   intptr_t bytes_to_next_step_;
2622 
2623   friend class NewSpace;
2624 
2625   DISALLOW_COPY_AND_ASSIGN(InlineAllocationObserver);
2626 };
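
// Illustrative sketch of an observer (a hypothetical subclass, not part of
// this header), stepping roughly every 64 KB of inline allocation:
//
//   class SampleObserver : public InlineAllocationObserver {
//    public:
//     SampleObserver() : InlineAllocationObserver(64 * KB) {}
//
//    private:
//     void Step(int bytes_allocated, Address soon_object, size_t size) override {
//       // Take a sample; soon_object may be NULL at page boundaries.
//     }
//   };
//
// Such an observer is registered with NewSpace::AddInlineAllocationObserver()
// and removed again with RemoveInlineAllocationObserver().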
2627 
2628 // -----------------------------------------------------------------------------
2629 // The young generation space.
2630 //
2631 // The new space consists of a contiguous pair of semispaces.  It simply
2632 // forwards most functions to the appropriate semispace.
2633 
2634 class NewSpace : public Space {
2635  public:
2636   // Constructor.
  explicit NewSpace(Heap* heap)
2638       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2639         to_space_(heap, kToSpace),
2640         from_space_(heap, kFromSpace),
2641         reservation_(),
2642         top_on_previous_step_(0),
2643         inline_allocation_observers_paused_(false) {}
2644 
2645   // Sets up the new space using the given chunk.
2646   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
2647 
2648   // Tears down the space.  Heap memory was not allocated by the space, so it
2649   // is not deallocated here.
2650   void TearDown();
2651 
2652   // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
2654     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2655   }
2656 
2657   // Flip the pair of spaces.
2658   void Flip();
2659 
2660   // Grow the capacity of the semispaces.  Assumes that they are not at
2661   // their maximum capacity.
2662   void Grow();
2663 
2664   // Grow the capacity of the semispaces by one page.
2665   bool GrowOnePage();
2666 
2667   // Shrink the capacity of the semispaces.
2668   void Shrink();
2669 
2670   // True if the address or object lies in the address range of either
2671   // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
2673     return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2674            reinterpret_cast<uintptr_t>(start_);
2675   }
2676 
  bool Contains(Object* o) {
2678     Address a = reinterpret_cast<Address>(o);
2679     return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2680   }
2681 
2682   // Return the allocated bytes in the active semispace.
  intptr_t Size() override {
2684     return pages_used_ * NewSpacePage::kAreaSize +
2685            static_cast<int>(top() - to_space_.page_low());
2686   }
2687 
  // The same, but returning an int.  We have to keep the version that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which cannot grow as big as the other spaces, this is useful:
  int SizeAsInt() { return static_cast<int>(Size()); }
2692 
2693   // Return the allocatable capacity of a semispace.
  intptr_t Capacity() {
2695     SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2696     return (to_space_.TotalCapacity() / Page::kPageSize) *
2697            NewSpacePage::kAreaSize;
2698   }
2699 
2700   // Return the current size of a semispace, allocatable and non-allocatable
2701   // memory.
  intptr_t TotalCapacity() {
2703     DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2704     return to_space_.TotalCapacity();
2705   }
2706 
2707   // Committed memory for NewSpace is the committed memory of both semi-spaces
2708   // combined.
  intptr_t CommittedMemory() override {
2710     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2711   }
2712 
  intptr_t MaximumCommittedMemory() override {
2714     return from_space_.MaximumCommittedMemory() +
2715            to_space_.MaximumCommittedMemory();
2716   }
2717 
2718   // Approximate amount of physical memory committed for this space.
2719   size_t CommittedPhysicalMemory() override;
2720 
2721   // Return the available bytes without growing.
  intptr_t Available() override { return Capacity() - Size(); }
2723 
  intptr_t PagesFromStart(Address addr) {
2725     return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
2726   }
2727 
  size_t AllocatedSinceLastGC() {
2729     intptr_t allocated = top() - to_space_.age_mark();
2730     if (allocated < 0) {
2731       // Runtime has lowered the top below the age mark.
2732       return 0;
2733     }
2734     // Correctly account for non-allocatable regions at the beginning of
2735     // each page from the age_mark() to the top().
2736     intptr_t pages =
2737         PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
2738     allocated -= pages * (NewSpacePage::kObjectStartOffset);
2739     DCHECK(0 <= allocated && allocated <= Size());
2740     return static_cast<size_t>(allocated);
2741   }
2742 
2743   // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
2745     DCHECK(to_space_.MaximumTotalCapacity() ==
2746            from_space_.MaximumTotalCapacity());
2747     return to_space_.MaximumTotalCapacity();
2748   }
2749 
  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2751 
2752   // Returns the initial capacity of a semispace.
  int InitialTotalCapacity() {
2754     DCHECK(to_space_.InitialTotalCapacity() ==
2755            from_space_.InitialTotalCapacity());
2756     return to_space_.InitialTotalCapacity();
2757   }
2758 
2759   // Return the address of the allocation pointer in the active semispace.
  Address top() {
2761     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2762     return allocation_info_.top();
2763   }
2764 
2765   // Return the address of the allocation pointer limit in the active semispace.
  Address limit() {
2767     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2768     return allocation_info_.limit();
2769   }
2770 
2771   // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }
2773 
2774   // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
2776   // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2778 
2779   // The start address of the space and a bit mask. Anding an address in the
2780   // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }
2783 
  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2785     DCHECK(Contains(addr));
2786     DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
2787            IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2788     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2789   }
2790 
  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2792     return reinterpret_cast<Address>(index << kPointerSizeLog2);
2793   }
2794 
2795   // The allocation top and limit address.
2796   Address* allocation_top_address() { return allocation_info_.top_address(); }
2797 
2798   // The allocation limit address.
2799   Address* allocation_limit_address() {
2800     return allocation_info_.limit_address();
2801   }
2802 
2803   MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2804       int size_in_bytes, AllocationAlignment alignment));
2805 
2806   MUST_USE_RESULT INLINE(
2807       AllocationResult AllocateRawUnaligned(int size_in_bytes));
2808 
2809   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2810       int size_in_bytes, AllocationAlignment alignment));
2811 
2812   MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2813       int size_in_bytes, AllocationAlignment alignment);
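
  // A minimal allocation sketch (illustrative only; assumes the
  // AllocationResult and AllocationAlignment types used above):
  //
  //   AllocationResult result =
  //       new_space->AllocateRaw(size_in_bytes, kWordAligned);
  //   HeapObject* object = nullptr;
  //   if (!result.To(&object)) {
  //     // Allocation failed; the caller retries, e.g. after a GC.
  //   }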
2814 
2815   // Reset the allocation pointer to the beginning of the active semispace.
2816   void ResetAllocationInfo();
2817 
2818   void UpdateInlineAllocationLimit(int size_in_bytes);
2819 
2820   // Allows observation of inline allocation. The observer->Step() method gets
2821   // called after every step_size bytes have been allocated (approximately).
2822   // This works by lowering the allocation limit below the real limit and
2823   // bumping it up again after each step.
2824   void AddInlineAllocationObserver(InlineAllocationObserver* observer);
2825 
2826   // Removes a previously installed observer.
2827   void RemoveInlineAllocationObserver(InlineAllocationObserver* observer);
2828 
2829   void DisableInlineAllocationSteps() {
2830     top_on_previous_step_ = 0;
2831     UpdateInlineAllocationLimit(0);
2832   }
2833 
2834   // Get the extent of the inactive semispace (for use as a marking stack,
2835   // or to zap it). Notice: space-addresses are not necessarily on the
2836   // same page, so FromSpaceStart() might be above FromSpaceEnd().
2837   Address FromSpacePageLow() { return from_space_.page_low(); }
2838   Address FromSpacePageHigh() { return from_space_.page_high(); }
2839   Address FromSpaceStart() { return from_space_.space_start(); }
2840   Address FromSpaceEnd() { return from_space_.space_end(); }
2841 
2842   // Get the extent of the active semispace's pages' memory.
2843   Address ToSpaceStart() { return to_space_.space_start(); }
2844   Address ToSpaceEnd() { return to_space_.space_end(); }
2845 
2846   inline bool ToSpaceContains(Address address) {
2847     return to_space_.Contains(address);
2848   }
2849   inline bool FromSpaceContains(Address address) {
2850     return from_space_.Contains(address);
2851   }
2852 
2853   // True if the object is a heap object in the address range of the
2854   // respective semispace (not necessarily below the allocation pointer of the
2855   // semispace).
2856   inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2857   inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2858 
2859   // Try to switch the active semispace to a new, empty, page.
2860   // Returns false if this isn't possible or reasonable (i.e., there
2861   // are no pages, or the current page is already empty), or true
2862   // if successful.
2863   bool AddFreshPage();
2864   bool AddFreshPageSynchronized();
2865 
2866 #ifdef VERIFY_HEAP
2867   // Verify the active semispace.
2868   virtual void Verify();
2869 #endif
2870 
2871 #ifdef DEBUG
2872   // Print the active semispace.
2873   void Print() override { to_space_.Print(); }
2874 #endif
2875 
2876   // Iterates the active semispace to collect statistics.
2877   void CollectStatistics();
2878   // Reports previously collected statistics of the active semispace.
2879   void ReportStatistics();
2880   // Clears previously collected statistics.
2881   void ClearHistograms();
2882 
2883   // Record the allocation or promotion of a heap object.  Note that we don't
2884   // record every single allocation, but only those that happen in the
2885   // to space during a scavenge GC.
2886   void RecordAllocation(HeapObject* obj);
2887   void RecordPromotion(HeapObject* obj);
2888 
2889   // Return whether the operation succeeded.
2890   bool CommitFromSpaceIfNeeded() {
2891     if (from_space_.is_committed()) return true;
2892     return from_space_.Commit();
2893   }
2894 
2895   bool UncommitFromSpace() {
2896     if (!from_space_.is_committed()) return true;
2897     return from_space_.Uncommit();
2898   }
2899 
2900   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2901 
2902   SemiSpace* active_space() { return &to_space_; }
2903 
2904  private:
2905   // Update allocation info to match the current to-space page.
2906   void UpdateAllocationInfo();
2907 
2908   base::Mutex mutex_;
2909 
2910   Address chunk_base_;
2911   uintptr_t chunk_size_;
2912 
2913   // The semispaces.
2914   SemiSpace to_space_;
2915   SemiSpace from_space_;
2916   base::VirtualMemory reservation_;
2917   int pages_used_;
2918 
2919   // Start address and bit mask for containment testing.
2920   Address start_;
2921   uintptr_t address_mask_;
2922   uintptr_t object_mask_;
2923   uintptr_t object_expected_;
2924 
2925   // Allocation pointer and limit for normal allocation and allocation during
2926   // mark-compact collection.
2927   AllocationInfo allocation_info_;
2928 
2929   // When inline allocation stepping is active, either because of incremental
2930   // marking or because of idle scavenge, we 'interrupt' inline allocation every
2931   // once in a while. This is done by setting allocation_info_.limit to be lower
2932   // than the actual limit and increasing it in steps to guarantee that the
2933   // observers are notified periodically.
2934   List<InlineAllocationObserver*> inline_allocation_observers_;
2935   Address top_on_previous_step_;
2936   bool inline_allocation_observers_paused_;
2937 
2938   HistogramInfo* allocated_histogram_;
2939   HistogramInfo* promoted_histogram_;
2940 
2941   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2942 
2943   // If we are doing inline allocation in steps, this method performs the 'step'
2944   // operation. top is the memory address of the bump pointer at the last
2945   // inline allocation (i.e. it determines the number of bytes actually
2946   // allocated since the last step.) new_top is the address of the bump pointer
2947   // where the next byte is going to be allocated from. top and new_top may be
2948   // different when we cross a page boundary or reset the space.
2949   void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2950                             size_t size);
2951   intptr_t GetNextInlineAllocationStepSize();
2952   void StartNextInlineAllocationStep();
2953   void PauseInlineAllocationObservers();
2954   void ResumeInlineAllocationObservers();
2955 
2956   friend class PauseInlineAllocationObserversScope;
2957   friend class SemiSpaceIterator;
2958 };
2959 
2960 class PauseInlineAllocationObserversScope {
2961  public:
2962   explicit PauseInlineAllocationObserversScope(NewSpace* new_space)
2963       : new_space_(new_space) {
2964     new_space_->PauseInlineAllocationObservers();
2965   }
2966   ~PauseInlineAllocationObserversScope() {
2967     new_space_->ResumeInlineAllocationObservers();
2968   }
2969 
2970  private:
2971   NewSpace* new_space_;
2972   DISALLOW_COPY_AND_ASSIGN(PauseInlineAllocationObserversScope);
2973 };
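
// A minimal usage sketch (illustrative only): observers stay muted for the
// dynamic extent of the scope, e.g. around a bulk operation on new space:
//
//   {
//     PauseInlineAllocationObserversScope pause_observers(heap->new_space());
//     // Allocations here do not trigger observer steps.
//   }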
2974 
2975 // -----------------------------------------------------------------------------
2976 // Compaction space that is used temporarily during compaction.
2977 
2978 class CompactionSpace : public PagedSpace {
2979  public:
2980   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2981       : PagedSpace(heap, id, executable) {}
2982 
2983   // Adds external memory starting at {start} of {size_in_bytes} to the space.
2984   void AddExternalMemory(Address start, int size_in_bytes) {
2985     IncreaseCapacity(size_in_bytes);
2986     Free(start, size_in_bytes);
2987   }
2988 
2989   bool is_local() override { return true; }
2990 
2991   void RefillFreeList() override;
2992 
2993  protected:
2994   // The space is temporary and not included in any snapshots.
2995   bool snapshotable() override { return false; }
2996 
2997   MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2998       int size_in_bytes) override;
2999 };
3000 
3001 
3002 // A collection of |CompactionSpace|s used by a single compaction task.
3003 class CompactionSpaceCollection : public Malloced {
3004  public:
3005   explicit CompactionSpaceCollection(Heap* heap)
3006       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
3007         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE),
3008         duration_(0.0),
3009         bytes_compacted_(0) {}
3010 
3011   CompactionSpace* Get(AllocationSpace space) {
3012     switch (space) {
3013       case OLD_SPACE:
3014         return &old_space_;
3015       case CODE_SPACE:
3016         return &code_space_;
3017       default:
3018         UNREACHABLE();
3019     }
3020     UNREACHABLE();
3021     return nullptr;
3022   }
3023 
3024   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3025     duration_ += duration;
3026     bytes_compacted_ += bytes_compacted;
3027   }
3028 
3029   double duration() const { return duration_; }
3030   intptr_t bytes_compacted() const { return bytes_compacted_; }
3031 
3032  private:
3033   CompactionSpace old_space_;
3034   CompactionSpace code_space_;
3035 
3036   // Book keeping.
3037   double duration_;
3038   intptr_t bytes_compacted_;
3039 };
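
// A minimal usage sketch (illustrative only; the local names are hypothetical):
// each compaction task owns one collection and allocates into the space that
// matches the page it is evacuating:
//
//   CompactionSpaceCollection* compaction_spaces =
//       new CompactionSpaceCollection(heap);
//   CompactionSpace* target = compaction_spaces->Get(OLD_SPACE);
//   // ... evacuate live objects into |target| ...
//   compaction_spaces->ReportCompactionProgress(duration_ms, live_bytes);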
3040 
3041 
3042 // -----------------------------------------------------------------------------
3043 // Old object space (includes the old space of objects and code space)
3044 
3045 class OldSpace : public PagedSpace {
3046  public:
3047   // Creates an old space object. The constructor does not allocate pages
3048   // from the OS.
3049   OldSpace(Heap* heap, AllocationSpace id, Executability executable)
3050       : PagedSpace(heap, id, executable) {}
3051 };
3052 
3053 
3054 // For contiguous spaces, top should be in the space (or at the end) and limit
3055 // should be the end of the space.
3056 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
3057   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
3058               (info).top() <= (space).page_high() &&  \
3059               (info).limit() <= (space).page_high())
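
// Typical use (illustrative): verifying the new-space allocation info against
// its active semispace, e.g.
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);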
3060 
3061 
3062 // -----------------------------------------------------------------------------
3063 // Old space for all map objects
3064 
3065 class MapSpace : public PagedSpace {
3066  public:
3067   // Creates a map space object.
3068   MapSpace(Heap* heap, AllocationSpace id)
3069       : PagedSpace(heap, id, NOT_EXECUTABLE),
3070         max_map_space_pages_(kMaxMapPageIndex - 1) {}
3071 
3072   // Given an index, returns the page address.
3073   // TODO(1600): this limit is artificial, just to keep the code compilable.
3074   static const int kMaxMapPageIndex = 1 << 16;
3075 
3076   int RoundSizeDownToObjectAlignment(int size) override {
3077     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
3078       return RoundDown(size, Map::kSize);
3079     } else {
3080       return (size / Map::kSize) * Map::kSize;
3081     }
3082   }
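
  // E.g. (with a hypothetical Map::kSize of 88, which is not a power of two):
  // a size of 200 bytes rounds down to 176 == 2 * Map::kSize.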
3083 
3084 #ifdef VERIFY_HEAP
3085   void VerifyObject(HeapObject* obj) override;
3086 #endif
3087 
3088  private:
3089   static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
3090 
3091   // Do map space compaction if there is a page gap.
3092   int CompactionThreshold() {
3093     return kMapsPerPage * (max_map_space_pages_ - 1);
3094   }
3095 
3096   const int max_map_space_pages_;
3097 };
3098 
3099 
3100 // -----------------------------------------------------------------------------
3101 // Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
3102 // managed by the large object space. A large object is allocated from OS
3103 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
3104   // A large object always starts at offset Page::kObjectStartOffset within a page.
3105 // Large objects do not move during garbage collections.
3106 
3107 class LargeObjectSpace : public Space {
3108  public:
3109   LargeObjectSpace(Heap* heap, AllocationSpace id);
3110   virtual ~LargeObjectSpace();
3111 
3112   // Initializes internal data structures.
3113   bool SetUp();
3114 
3115   // Releases internal resources, frees objects in this space.
3116   void TearDown();
3117 
3118   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
3119     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
3120     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
3121   }
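
  // E.g. (illustrative): a chunk of Page::kPageSize + Page::kObjectStartOffset
  // + 4 * KB bytes leaves 4 * KB for the object itself; chunks at or below the
  // fixed overhead yield 0.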
3122 
3123   // Shared implementation of AllocateRaw, AllocateRawCode and
3124   // AllocateRawFixedArray.
3125   MUST_USE_RESULT AllocationResult
3126       AllocateRaw(int object_size, Executability executable);
3127 
3128   // Available bytes for objects in this space.
3129   inline intptr_t Available() override;
3130 
3131   intptr_t Size() override { return size_; }
3132 
3133   intptr_t SizeOfObjects() override { return objects_size_; }
3134 
3135   // Approximate amount of physical memory committed for this space.
3136   size_t CommittedPhysicalMemory() override;
3137 
3138   int PageCount() { return page_count_; }
3139 
3140   // Finds an object for a given address, returns a Smi if it is not found.
3141   // The function iterates through all objects in this space and may be slow.
3142   Object* FindObject(Address a);
3143 
3144   // Finds a large object page containing the given address, returns NULL
3145   // if such a page doesn't exist.
3146   LargePage* FindPage(Address a);
3147 
3148   // Clears the marking state of live objects.
3149   void ClearMarkingStateOfLiveObjects();
3150 
3151   // Frees unmarked objects.
3152   void FreeUnmarkedObjects();
3153 
3154   // Checks whether a heap object is in this space; O(1).
3155   bool Contains(HeapObject* obj);
3156   bool Contains(Address address);
3157 
3158   // Checks whether the space is empty.
3159   bool IsEmpty() { return first_page_ == NULL; }
3160 
3161   LargePage* first_page() { return first_page_; }
3162 
3163 #ifdef VERIFY_HEAP
3164   virtual void Verify();
3165 #endif
3166 
3167 #ifdef DEBUG
3168   void Print() override;
3169   void ReportStatistics();
3170   void CollectCodeStatistics();
3171 #endif
3172   // Checks whether an address is in the object area in this space.  It
3173   // iterates all objects in the space. May be slow.
3174   bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
3175 
3176  private:
3177   // The head of the linked list of large object chunks.
3178   LargePage* first_page_;
3179   intptr_t size_;          // allocated bytes
3180   int page_count_;         // number of chunks
3181   intptr_t objects_size_;  // size of objects
3182   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
3183   HashMap chunk_map_;
3184 
3185   friend class LargeObjectIterator;
3186 };
3187 
3188 
3189 class LargeObjectIterator : public ObjectIterator {
3190  public:
3191   explicit LargeObjectIterator(LargeObjectSpace* space);
3192 
3193   HeapObject* Next();
3194 
3195   // implementation of ObjectIterator.
3196   virtual HeapObject* next_object() { return Next(); }
3197 
3198  private:
3199   LargePage* current_;
3200 };
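
// A minimal iteration sketch (illustrative only); Next() returns NULL once all
// large objects have been visited:
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit |obj| ...
//   }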
3201 
3202 
3203 // Iterates over the chunks (pages and large object pages) that can contain
3204 // pointers to new space.
3205 class PointerChunkIterator BASE_EMBEDDED {
3206  public:
3207   inline explicit PointerChunkIterator(Heap* heap);
3208 
3209   // Return NULL when the iterator is done.
3210   inline MemoryChunk* next();
3211 
3212  private:
3213   enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
3214   State state_;
3215   PageIterator old_iterator_;
3216   PageIterator map_iterator_;
3217   LargeObjectIterator lo_iterator_;
3218 };
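
// A minimal iteration sketch (illustrative only); next() returns NULL when all
// candidate chunks have been returned:
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... process |chunk| ...
//   }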
3219 
3220 
3221 #ifdef DEBUG
3222 struct CommentStatistic {
3223   const char* comment;
3224   int size;
3225   int count;
3226   void Clear() {
3227     comment = NULL;
3228     size = 0;
3229     count = 0;
3230   }
3231   // Must be small, since an iteration is used for lookup.
3232   static const int kMaxComments = 64;
3233 };
3234 #endif
3235 }  // namespace internal
3236 }  // namespace v8
3237 
3238 #endif  // V8_HEAP_SPACES_H_
3239