1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/spaces.h"
6
7 #include <utility>
8
9 #include "src/base/bits.h"
10 #include "src/base/platform/platform.h"
11 #include "src/base/platform/semaphore.h"
12 #include "src/full-codegen/full-codegen.h"
13 #include "src/heap/array-buffer-tracker.h"
14 #include "src/heap/slot-set.h"
15 #include "src/macro-assembler.h"
16 #include "src/msan.h"
17 #include "src/snapshot/snapshot.h"
18 #include "src/v8.h"
19
20 namespace v8 {
21 namespace internal {
22
23
24 // ----------------------------------------------------------------------------
25 // HeapObjectIterator
26
27 HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
28 : cur_addr_(nullptr),
29 cur_end_(nullptr),
30 space_(space),
31 page_range_(space->anchor()->next_page(), space->anchor()),
32 current_page_(page_range_.begin()) {}
33
34 HeapObjectIterator::HeapObjectIterator(Page* page)
35 : cur_addr_(nullptr),
36 cur_end_(nullptr),
37 space_(reinterpret_cast<PagedSpace*>(page->owner())),
38 page_range_(page),
39 current_page_(page_range_.begin()) {
40 #ifdef DEBUG
41 Space* owner = page->owner();
42 DCHECK(owner == page->heap()->old_space() ||
43 owner == page->heap()->map_space() ||
44 owner == page->heap()->code_space());
45 #endif // DEBUG
46 }
47
48 // We have hit the end of the current page's object area and should advance
49 // to the next page.
50 bool HeapObjectIterator::AdvanceToNextPage() {
51 DCHECK_EQ(cur_addr_, cur_end_);
52 if (current_page_ == page_range_.end()) return false;
53 Page* cur_page = *(current_page_++);
54 space_->heap()
55 ->mark_compact_collector()
56 ->sweeper()
57 .SweepOrWaitUntilSweepingCompleted(cur_page);
58 cur_addr_ = cur_page->area_start();
59 cur_end_ = cur_page->area_end();
60 DCHECK(cur_page->SweepingDone());
61 return true;
62 }
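// Illustrative usage sketch (not part of the original source): iterating all
// objects in a paged space. The same Next() loop is used by
// Page::ShrinkToHighWaterMark() and PagedSpace::FindObject() later in this
// file.
//
//   HeapObjectIterator it(heap->old_space());  // `heap` is any Heap*.
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // obj is an object on a fully swept page of the space.
//   }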
63
64 PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
65 : heap_(heap) {
66 AllSpaces spaces(heap_);
67 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
68 space->PauseAllocationObservers();
69 }
70 }
71
72 PauseAllocationObserversScope::~PauseAllocationObserversScope() {
73 AllSpaces spaces(heap_);
74 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
75 space->ResumeAllocationObservers();
76 }
77 }
78
79 // -----------------------------------------------------------------------------
80 // CodeRange
81
82
83 CodeRange::CodeRange(Isolate* isolate)
84 : isolate_(isolate),
85 code_range_(NULL),
86 free_list_(0),
87 allocation_list_(0),
88 current_allocation_block_index_(0) {}
89
90
91 bool CodeRange::SetUp(size_t requested) {
92 DCHECK(code_range_ == NULL);
93
94 if (requested == 0) {
95 // When a target requires the code range feature, we put all code objects
96 // in a kMaximalCodeRangeSize range of virtual address space, so that
97 // they can call each other with near calls.
98 if (kRequiresCodeRange) {
99 requested = kMaximalCodeRangeSize;
100 } else {
101 return true;
102 }
103 }
104
105 if (requested <= kMinimumCodeRangeSize) {
106 requested = kMinimumCodeRangeSize;
107 }
108
109 const size_t reserved_area =
110 kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
111 if (requested < (kMaximalCodeRangeSize - reserved_area))
112 requested += reserved_area;
113
114 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
115
116 code_range_ = new base::VirtualMemory(
117 requested, Max(kCodeRangeAreaAlignment,
118 static_cast<size_t>(base::OS::AllocateAlignment())));
119 CHECK(code_range_ != NULL);
120 if (!code_range_->IsReserved()) {
121 delete code_range_;
122 code_range_ = NULL;
123 return false;
124 }
125
126 // We are sure that we have mapped a block of requested addresses.
127 DCHECK(code_range_->size() == requested);
128 Address base = reinterpret_cast<Address>(code_range_->address());
129
130 // On some platforms, specifically Win64, we need to reserve some pages at
131 // the beginning of an executable space.
132 if (reserved_area > 0) {
133 if (!code_range_->Commit(base, reserved_area, true)) {
134 delete code_range_;
135 code_range_ = NULL;
136 return false;
137 }
138 base += reserved_area;
139 }
140 Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
141 size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
142 allocation_list_.Add(FreeBlock(aligned_base, size));
143 current_allocation_block_index_ = 0;
144
145 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
146 return true;
147 }
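// Worked sizing example (illustrative values, not from the source): assuming
// a 4-KB commit page size and kReservedCodeRangePages == 1, reserved_area is
// 4 KB, so SetUp(64 * MB) reserves 64 MB + 4 KB of virtual address space,
// commits the leading 4 KB only where the platform requires it (the Win64
// case noted above), and publishes the remaining kAlignment-aligned tail as a
// single FreeBlock on allocation_list_.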
148
149
150 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
151 const FreeBlock* right) {
152 // The entire point of CodeRange is that the difference between two
153 // addresses in the range can be represented as a signed 32-bit int,
154 // so the cast is semantically correct.
155 return static_cast<int>(left->start - right->start);
156 }
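// Illustrative example (hypothetical addresses): for free blocks starting at
// base + 0x1000 and base + 0x3000 inside the code range, the comparator
// returns -0x2000, so free_list_.Sort(&CompareFreeBlockAddress) in
// GetNextAllocationBlock() below orders blocks by ascending start address,
// which the coalescing loop there relies on.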
157
158
159 bool CodeRange::GetNextAllocationBlock(size_t requested) {
160 for (current_allocation_block_index_++;
161 current_allocation_block_index_ < allocation_list_.length();
162 current_allocation_block_index_++) {
163 if (requested <= allocation_list_[current_allocation_block_index_].size) {
164 return true; // Found a large enough allocation block.
165 }
166 }
167
168 // Sort and merge the free blocks on the free list and the allocation list.
169 free_list_.AddAll(allocation_list_);
170 allocation_list_.Clear();
171 free_list_.Sort(&CompareFreeBlockAddress);
172 for (int i = 0; i < free_list_.length();) {
173 FreeBlock merged = free_list_[i];
174 i++;
175 // Add adjacent free blocks to the current merged block.
176 while (i < free_list_.length() &&
177 free_list_[i].start == merged.start + merged.size) {
178 merged.size += free_list_[i].size;
179 i++;
180 }
181 if (merged.size > 0) {
182 allocation_list_.Add(merged);
183 }
184 }
185 free_list_.Clear();
186
187 for (current_allocation_block_index_ = 0;
188 current_allocation_block_index_ < allocation_list_.length();
189 current_allocation_block_index_++) {
190 if (requested <= allocation_list_[current_allocation_block_index_].size) {
191 return true; // Found a large enough allocation block.
192 }
193 }
194 current_allocation_block_index_ = 0;
195 // Code range is full or too fragmented.
196 return false;
197 }
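// Illustrative merge (hypothetical numbers): free blocks {start: 0x10000,
// size: 0x10000} and {start: 0x20000, size: 0x30000} are adjacent because
// 0x10000 + 0x10000 == 0x20000, so the loop above folds them into one
// {start: 0x10000, size: 0x40000} entry on allocation_list_; a block starting
// at 0x70000 would begin a new merged entry.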
198
199
200 Address CodeRange::AllocateRawMemory(const size_t requested_size,
201 const size_t commit_size,
202 size_t* allocated) {
203 // requested_size includes the guard pages while commit_size does not. Make
204 // sure callers know about the invariant.
205 CHECK_LE(commit_size,
206 requested_size - 2 * MemoryAllocator::CodePageGuardSize());
207 FreeBlock current;
208 if (!ReserveBlock(requested_size, &current)) {
209 *allocated = 0;
210 return NULL;
211 }
212 *allocated = current.size;
213 DCHECK(*allocated <= current.size);
214 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
215 if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
216 code_range_, current.start, commit_size, *allocated)) {
217 *allocated = 0;
218 ReleaseBlock(&current);
219 return NULL;
220 }
221 return current.start;
222 }
223
224
225 bool CodeRange::CommitRawMemory(Address start, size_t length) {
226 return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
227 EXECUTABLE);
228 }
229
230
231 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
232 return code_range_->Uncommit(start, length);
233 }
234
235
236 void CodeRange::FreeRawMemory(Address address, size_t length) {
237 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
238 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
239 free_list_.Add(FreeBlock(address, length));
240 code_range_->Uncommit(address, length);
241 }
242
243
244 void CodeRange::TearDown() {
245 delete code_range_; // Frees all memory in the virtual memory range.
246 code_range_ = NULL;
247 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
248 free_list_.Free();
249 allocation_list_.Free();
250 }
251
252
253 bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
254 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
255 DCHECK(allocation_list_.length() == 0 ||
256 current_allocation_block_index_ < allocation_list_.length());
257 if (allocation_list_.length() == 0 ||
258 requested_size > allocation_list_[current_allocation_block_index_].size) {
259 // Find an allocation block large enough.
260 if (!GetNextAllocationBlock(requested_size)) return false;
261 }
262 // Commit the requested memory at the start of the current allocation block.
263 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
264 *block = allocation_list_[current_allocation_block_index_];
265 // Don't leave a small free block, useless for a large object or chunk.
266 if (aligned_requested < (block->size - Page::kPageSize)) {
267 block->size = aligned_requested;
268 }
269 DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
270 allocation_list_[current_allocation_block_index_].start += block->size;
271 allocation_list_[current_allocation_block_index_].size -= block->size;
272 return true;
273 }
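// Illustrative example (hypothetical sizes): with a current FreeBlock of
// 3 * Page::kPageSize and an aligned request of one page, the leftover
// (2 pages) is larger than Page::kPageSize, so only one page is handed out
// and the rest stays on allocation_list_. If the aligned request were two
// pages, the one-page leftover would be considered useless and the caller
// would receive the entire three-page block.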
274
275
276 void CodeRange::ReleaseBlock(const FreeBlock* block) {
277 base::LockGuard<base::Mutex> guard(&code_range_mutex_);
278 free_list_.Add(*block);
279 }
280
281
282 // -----------------------------------------------------------------------------
283 // MemoryAllocator
284 //
285
286 MemoryAllocator::MemoryAllocator(Isolate* isolate)
287 : isolate_(isolate),
288 code_range_(nullptr),
289 capacity_(0),
290 capacity_executable_(0),
291 size_(0),
292 size_executable_(0),
293 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
294 highest_ever_allocated_(reinterpret_cast<void*>(0)),
295 unmapper_(this) {}
296
297 bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
298 size_t code_range_size) {
299 capacity_ = RoundUp(capacity, Page::kPageSize);
300 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
301 DCHECK_GE(capacity_, capacity_executable_);
302
303 size_ = 0;
304 size_executable_ = 0;
305
306 code_range_ = new CodeRange(isolate_);
307 if (!code_range_->SetUp(code_range_size)) return false;
308
309 return true;
310 }
311
312
313 void MemoryAllocator::TearDown() {
314 unmapper()->TearDown();
315
316 // Check that spaces were torn down before MemoryAllocator.
317 DCHECK_EQ(size_.Value(), 0u);
318 // TODO(gc) this will be true again when we fix FreeMemory.
319 // DCHECK(size_executable_ == 0);
320 capacity_ = 0;
321 capacity_executable_ = 0;
322
323 if (last_chunk_.IsReserved()) {
324 last_chunk_.Release();
325 }
326
327 delete code_range_;
328 code_range_ = nullptr;
329 }
330
331 class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
332 public:
333 explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}
334
335 private:
336 // v8::Task overrides.
337 void Run() override {
338 unmapper_->PerformFreeMemoryOnQueuedChunks();
339 unmapper_->pending_unmapping_tasks_semaphore_.Signal();
340 }
341
342 Unmapper* unmapper_;
343 DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
344 };
345
346 void MemoryAllocator::Unmapper::FreeQueuedChunks() {
347 ReconsiderDelayedChunks();
348 if (FLAG_concurrent_sweeping) {
349 V8::GetCurrentPlatform()->CallOnBackgroundThread(
350 new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
351 concurrent_unmapping_tasks_active_++;
352 } else {
353 PerformFreeMemoryOnQueuedChunks();
354 }
355 }
356
357 bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
358 bool waited = false;
359 while (concurrent_unmapping_tasks_active_ > 0) {
360 pending_unmapping_tasks_semaphore_.Wait();
361 concurrent_unmapping_tasks_active_--;
362 waited = true;
363 }
364 return waited;
365 }
366
367 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
368 MemoryChunk* chunk = nullptr;
369 // Regular chunks.
370 while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
371 bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
372 allocator_->PerformFreeMemory(chunk);
373 if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
374 }
375 // Non-regular chunks.
376 while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
377 allocator_->PerformFreeMemory(chunk);
378 }
379 }
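// Flow summary (descriptive only, no new API): Free<kPreFreeAndQueue>() and
// Free<kPooledAndQueue>() enqueue chunks here; FreeQueuedChunks() then either
// posts an UnmapFreeMemoryTask to a background thread (with
// --concurrent-sweeping) or frees the queues synchronously. Chunks flagged
// POOLED are kept on the kPooled queue (uncommitted but still reserved) and
// reused by AllocatePagePooled() below.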
380
381 void MemoryAllocator::Unmapper::TearDown() {
382 WaitUntilCompleted();
383 ReconsiderDelayedChunks();
384 CHECK(delayed_regular_chunks_.empty());
385 PerformFreeMemoryOnQueuedChunks();
386 }
387
388 void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
389 std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
390 // Move constructed, so the permanent list should be empty.
391 DCHECK(delayed_regular_chunks_.empty());
392 for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
393 AddMemoryChunkSafe<kRegular>(*it);
394 }
395 }
396
397 bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
398 MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
399 // We cannot free a memory chunk in new space while the sweeper is running
400 // because the memory chunk can be in the queue of a sweeper task.
401 // Chunks in old generation are unmapped if they are empty.
402 DCHECK(chunk->InNewSpace() || chunk->SweepingDone());
403 return !chunk->InNewSpace() || mc == nullptr || !FLAG_concurrent_sweeping ||
404 mc->sweeper().IsSweepingCompleted(NEW_SPACE);
405 }
406
407 bool MemoryAllocator::CommitMemory(Address base, size_t size,
408 Executability executable) {
409 if (!base::VirtualMemory::CommitRegion(base, size,
410 executable == EXECUTABLE)) {
411 return false;
412 }
413 UpdateAllocatedSpaceLimits(base, base + size);
414 return true;
415 }
416
417
418 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
419 Executability executable) {
420 // TODO(gc) make code_range part of memory allocator?
421 // Code which is part of the code-range does not have its own VirtualMemory.
422 DCHECK(code_range() == NULL ||
423 !code_range()->contains(static_cast<Address>(reservation->address())));
424 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
425 reservation->size() <= Page::kPageSize);
426
427 reservation->Release();
428 }
429
430
431 void MemoryAllocator::FreeMemory(Address base, size_t size,
432 Executability executable) {
433 // TODO(gc) make code_range part of memory allocator?
434 if (code_range() != NULL &&
435 code_range()->contains(static_cast<Address>(base))) {
436 DCHECK(executable == EXECUTABLE);
437 code_range()->FreeRawMemory(base, size);
438 } else {
439 DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
440 bool result = base::VirtualMemory::ReleaseRegion(base, size);
441 USE(result);
442 DCHECK(result);
443 }
444 }
445
446 Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
447 base::VirtualMemory* controller) {
448 base::VirtualMemory reservation(size, alignment);
449
450 if (!reservation.IsReserved()) return NULL;
451 size_.Increment(reservation.size());
452 Address base =
453 RoundUp(static_cast<Address>(reservation.address()), alignment);
454 controller->TakeControl(&reservation);
455 return base;
456 }
457
458 Address MemoryAllocator::AllocateAlignedMemory(
459 size_t reserve_size, size_t commit_size, size_t alignment,
460 Executability executable, base::VirtualMemory* controller) {
461 DCHECK(commit_size <= reserve_size);
462 base::VirtualMemory reservation;
463 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
464 if (base == NULL) return NULL;
465
466 if (executable == EXECUTABLE) {
467 if (!CommitExecutableMemory(&reservation, base, commit_size,
468 reserve_size)) {
469 base = NULL;
470 }
471 } else {
472 if (reservation.Commit(base, commit_size, false)) {
473 UpdateAllocatedSpaceLimits(base, base + commit_size);
474 } else {
475 base = NULL;
476 }
477 }
478
479 if (base == NULL) {
480 // Failed to commit the body. Release the mapping and any partially
481 // committed regions inside it.
482 reservation.Release();
483 size_.Decrement(reserve_size);
484 return NULL;
485 }
486
487 controller->TakeControl(&reservation);
488 return base;
489 }
490
491 void Page::InitializeAsAnchor(Space* space) {
492 set_owner(space);
493 set_next_chunk(this);
494 set_prev_chunk(this);
495 SetFlags(0, ~0);
496 SetFlag(ANCHOR);
497 }
498
499 MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
500 Address area_start, Address area_end,
501 Executability executable, Space* owner,
502 base::VirtualMemory* reservation) {
503 MemoryChunk* chunk = FromAddress(base);
504
505 DCHECK(base == chunk->address());
506
507 chunk->heap_ = heap;
508 chunk->size_ = size;
509 chunk->area_start_ = area_start;
510 chunk->area_end_ = area_end;
511 chunk->flags_ = Flags(NO_FLAGS);
512 chunk->set_owner(owner);
513 chunk->InitializeReservedMemory();
514 chunk->old_to_new_slots_.SetValue(nullptr);
515 chunk->old_to_old_slots_ = nullptr;
516 chunk->typed_old_to_new_slots_.SetValue(nullptr);
517 chunk->typed_old_to_old_slots_ = nullptr;
518 chunk->skip_list_ = nullptr;
519 chunk->progress_bar_ = 0;
520 chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
521 chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
522 chunk->mutex_ = new base::Mutex();
523 chunk->available_in_free_list_ = 0;
524 chunk->wasted_memory_ = 0;
525 chunk->ResetLiveBytes();
526 chunk->ClearLiveness();
527 chunk->set_next_chunk(nullptr);
528 chunk->set_prev_chunk(nullptr);
529 chunk->local_tracker_ = nullptr;
530
531 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
532
533 if (executable == EXECUTABLE) {
534 chunk->SetFlag(IS_EXECUTABLE);
535 }
536
537 if (reservation != nullptr) {
538 chunk->reservation_.TakeControl(reservation);
539 }
540
541 return chunk;
542 }
543
544
545 // Commit MemoryChunk area to the requested size.
546 bool MemoryChunk::CommitArea(size_t requested) {
547 size_t guard_size =
548 IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
549 size_t header_size = area_start() - address() - guard_size;
550 size_t commit_size =
551 RoundUp(header_size + requested, MemoryAllocator::GetCommitPageSize());
552 size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
553 MemoryAllocator::GetCommitPageSize());
554
555 if (commit_size > committed_size) {
556 // Commit size should be less than or equal to the reserved size.
557 DCHECK(commit_size <= size() - 2 * guard_size);
558 // Append the committed area.
559 Address start = address() + committed_size + guard_size;
560 size_t length = commit_size - committed_size;
561 if (reservation_.IsReserved()) {
562 Executability executable =
563 IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
564 if (!heap()->memory_allocator()->CommitMemory(start, length,
565 executable)) {
566 return false;
567 }
568 } else {
569 CodeRange* code_range = heap_->memory_allocator()->code_range();
570 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
571 if (!code_range->CommitRawMemory(start, length)) return false;
572 }
573
574 if (Heap::ShouldZapGarbage()) {
575 heap_->memory_allocator()->ZapBlock(start, length);
576 }
577 } else if (commit_size < committed_size) {
578 DCHECK(commit_size > 0);
579 // Shrink the committed area.
580 size_t length = committed_size - commit_size;
581 Address start = address() + committed_size + guard_size - length;
582 if (reservation_.IsReserved()) {
583 if (!reservation_.Uncommit(start, length)) return false;
584 } else {
585 CodeRange* code_range = heap_->memory_allocator()->code_range();
586 DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
587 if (!code_range->UncommitRawMemory(start, length)) return false;
588 }
589 }
590
591 area_end_ = area_start_ + requested;
592 return true;
593 }
594
595 size_t MemoryChunk::CommittedPhysicalMemory() {
596 if (!base::VirtualMemory::HasLazyCommits() || owner()->identity() == LO_SPACE)
597 return size();
598 return high_water_mark_.Value();
599 }
600
601 void MemoryChunk::InsertAfter(MemoryChunk* other) {
602 MemoryChunk* other_next = other->next_chunk();
603
604 set_next_chunk(other_next);
605 set_prev_chunk(other);
606 other_next->set_prev_chunk(this);
607 other->set_next_chunk(this);
608 }
609
610
611 void MemoryChunk::Unlink() {
612 MemoryChunk* next_element = next_chunk();
613 MemoryChunk* prev_element = prev_chunk();
614 next_element->set_prev_chunk(prev_element);
615 prev_element->set_next_chunk(next_element);
616 set_prev_chunk(NULL);
617 set_next_chunk(NULL);
618 }
619
620 void MemoryAllocator::ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink) {
621 DCHECK_GE(bytes_to_shrink, static_cast<size_t>(GetCommitPageSize()));
622 DCHECK_EQ(0u, bytes_to_shrink % GetCommitPageSize());
623 Address free_start = chunk->area_end_ - bytes_to_shrink;
624 // Don't adjust the size of the page. The area is just uncommitted but not
625 // released.
626 chunk->area_end_ -= bytes_to_shrink;
627 UncommitBlock(free_start, bytes_to_shrink);
628 if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
629 if (chunk->reservation_.IsReserved())
630 chunk->reservation_.Guard(chunk->area_end_);
631 else
632 base::OS::Guard(chunk->area_end_, GetCommitPageSize());
633 }
634 }
635
636 MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
637 size_t commit_area_size,
638 Executability executable,
639 Space* owner) {
640 DCHECK_LE(commit_area_size, reserve_area_size);
641
642 size_t chunk_size;
643 Heap* heap = isolate_->heap();
644 Address base = nullptr;
645 base::VirtualMemory reservation;
646 Address area_start = nullptr;
647 Address area_end = nullptr;
648
649 //
650 // MemoryChunk layout:
651 //
652 // Executable
653 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
654 // | Header |
655 // +----------------------------+<- base + CodePageGuardStartOffset
656 // | Guard |
657 // +----------------------------+<- area_start_
658 // | Area |
659 // +----------------------------+<- area_end_ (area_start + commit_area_size)
660 // | Committed but not used |
661 // +----------------------------+<- aligned at OS page boundary
662 // | Reserved but not committed |
663 // +----------------------------+<- aligned at OS page boundary
664 // | Guard |
665 // +----------------------------+<- base + chunk_size
666 //
667 // Non-executable
668 // +----------------------------+<- base aligned with MemoryChunk::kAlignment
669 // | Header |
670 // +----------------------------+<- area_start_ (base + kObjectStartOffset)
671 // | Area |
672 // +----------------------------+<- area_end_ (area_start + commit_area_size)
673 // | Committed but not used |
674 // +----------------------------+<- aligned at OS page boundary
675 // | Reserved but not committed |
676 // +----------------------------+<- base + chunk_size
677 //
678
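// Worked example (illustrative, assuming a 4-KB commit page size and a chunk
// header smaller than one commit page): CodePageGuardStartOffset() rounds up
// to 4 KB, CodePageGuardSize() is 4 KB, so CodePageAreaStartOffset() is 8 KB.
// An executable request with reserve_area_size == 32 KB then gives
// chunk_size = RoundUp(8 KB + 32 KB, 4 KB) + 4 KB = 44 KB, where the final
// 4 KB is the trailing guard page in the layout sketched above.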
679 if (executable == EXECUTABLE) {
680 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
681 GetCommitPageSize()) +
682 CodePageGuardSize();
683
684 // Check executable memory limit.
685 if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
686 LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
687 "V8 Executable Allocation capacity exceeded"));
688 return NULL;
689 }
690
691 // Size of header (not executable) plus area (executable).
692 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
693 GetCommitPageSize());
694 // Allocate executable memory either from code range or from the
695 // OS.
696 #ifdef V8_TARGET_ARCH_MIPS64
697 // Use code range only for large object space on mips64 to keep address
698 // range within 256-MB memory region.
699 if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
700 #else
701 if (code_range()->valid()) {
702 #endif
703 base =
704 code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
705 DCHECK(
706 IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
707 if (base == NULL) return NULL;
708 size_.Increment(chunk_size);
709 // Update executable memory size.
710 size_executable_.Increment(chunk_size);
711 } else {
712 base = AllocateAlignedMemory(chunk_size, commit_size,
713 MemoryChunk::kAlignment, executable,
714 &reservation);
715 if (base == NULL) return NULL;
716 // Update executable memory size.
717 size_executable_.Increment(reservation.size());
718 }
719
720 if (Heap::ShouldZapGarbage()) {
721 ZapBlock(base, CodePageGuardStartOffset());
722 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
723 }
724
725 area_start = base + CodePageAreaStartOffset();
726 area_end = area_start + commit_area_size;
727 } else {
728 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
729 GetCommitPageSize());
730 size_t commit_size =
731 RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
732 GetCommitPageSize());
733 base =
734 AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
735 executable, &reservation);
736
737 if (base == NULL) return NULL;
738
739 if (Heap::ShouldZapGarbage()) {
740 ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
741 }
742
743 area_start = base + Page::kObjectStartOffset;
744 area_end = area_start + commit_area_size;
745 }
746
747 // Use chunk_size for statistics and callbacks because we assume that they
748 // treat reserved but not-yet committed memory regions of chunks as allocated.
749 isolate_->counters()->memory_allocated()->Increment(
750 static_cast<int>(chunk_size));
751
752 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
753
754 // We cannot use the last chunk in the address space because we would
755 // overflow when comparing top and limit if this chunk is used for a
756 // linear allocation area.
757 if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
758 CHECK(!last_chunk_.IsReserved());
759 last_chunk_.TakeControl(&reservation);
760 UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
761 last_chunk_.size());
762 size_.Decrement(chunk_size);
763 if (executable == EXECUTABLE) {
764 size_executable_.Decrement(chunk_size);
765 }
766 CHECK(last_chunk_.IsReserved());
767 return AllocateChunk(reserve_area_size, commit_area_size, executable,
768 owner);
769 }
770
771 return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
772 executable, owner, &reservation);
773 }
774
775
776 void Page::ResetFreeListStatistics() {
777 wasted_memory_ = 0;
778 available_in_free_list_ = 0;
779 }
780
781 size_t Page::AvailableInFreeList() {
782 size_t sum = 0;
783 ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
784 sum += category->available();
785 });
786 return sum;
787 }
788
789 size_t Page::ShrinkToHighWaterMark() {
790 // Shrink pages to high water mark. The water mark points either to a filler
791 // or the area_end.
792 HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
793 if (filler->address() == area_end()) return 0;
794 CHECK(filler->IsFiller());
795 if (!filler->IsFreeSpace()) return 0;
796
797 #ifdef DEBUG
798 // Check that the filler is indeed the last filler on the page.
799 HeapObjectIterator it(this);
800 HeapObject* filler2 = nullptr;
801 for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
802 filler2 = HeapObject::FromAddress(obj->address() + obj->Size());
803 }
804 if (filler2 == nullptr || filler2->address() == area_end()) return 0;
805 DCHECK(filler2->IsFiller());
806 // The deserializer might leave behind fillers. In this case we need to
807 // iterate even further.
808 while ((filler2->address() + filler2->Size()) != area_end()) {
809 filler2 = HeapObject::FromAddress(filler2->address() + filler2->Size());
810 DCHECK(filler2->IsFiller());
811 }
812 DCHECK_EQ(filler->address(), filler2->address());
813 #endif // DEBUG
814
815 size_t unused = RoundDown(
816 static_cast<size_t>(area_end() - filler->address() - FreeSpace::kSize),
817 MemoryAllocator::GetCommitPageSize());
818 if (unused > 0) {
819 if (FLAG_trace_gc_verbose) {
820 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
821 reinterpret_cast<void*>(this),
822 reinterpret_cast<void*>(area_end()),
823 reinterpret_cast<void*>(area_end() - unused));
824 }
825 heap()->CreateFillerObjectAt(
826 filler->address(),
827 static_cast<int>(area_end() - filler->address() - unused),
828 ClearRecordedSlots::kNo);
829 heap()->memory_allocator()->ShrinkChunk(this, unused);
830 CHECK(filler->IsFiller());
831 CHECK_EQ(filler->address() + filler->Size(), area_end());
832 }
833 return unused;
834 }
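// Worked example (hypothetical numbers, assuming 4-KB commit pages): if the
// trailing free-space filler begins 10,000 bytes before area_end(), then
// unused = RoundDown(10000 - FreeSpace::kSize, 4096) = 8192. A smaller filler
// is recreated over the leading 1,808 bytes and ShrinkChunk() uncommits the
// last two commit pages, moving area_end() back by 8 KB.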
835
836 void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
837 Address start_free) {
838 // We do not allow partial shrink for code.
839 DCHECK(chunk->executable() == NOT_EXECUTABLE);
840
841 intptr_t size;
842 base::VirtualMemory* reservation = chunk->reserved_memory();
843 DCHECK(reservation->IsReserved());
844 size = static_cast<intptr_t>(reservation->size());
845
846 size_t to_free_size = size - (start_free - chunk->address());
847
848 DCHECK(size_.Value() >= to_free_size);
849 size_.Decrement(to_free_size);
850 isolate_->counters()->memory_allocated()->Decrement(
851 static_cast<int>(to_free_size));
852 chunk->set_size(size - to_free_size);
853
854 reservation->ReleasePartial(start_free);
855 }
856
857 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
858 DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
859 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
860
861 isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
862 chunk->IsEvacuationCandidate());
863
864 base::VirtualMemory* reservation = chunk->reserved_memory();
865 const size_t size =
866 reservation->IsReserved() ? reservation->size() : chunk->size();
867 DCHECK_GE(size_.Value(), static_cast<size_t>(size));
868 size_.Decrement(size);
869 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
870 if (chunk->executable() == EXECUTABLE) {
871 DCHECK_GE(size_executable_.Value(), size);
872 size_executable_.Decrement(size);
873 }
874
875 chunk->SetFlag(MemoryChunk::PRE_FREED);
876 }
877
878
879 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
880 DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
881 chunk->ReleaseAllocatedMemory();
882
883 base::VirtualMemory* reservation = chunk->reserved_memory();
884 if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
885 UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
886 } else {
887 if (reservation->IsReserved()) {
888 FreeMemory(reservation, chunk->executable());
889 } else {
890 FreeMemory(chunk->address(), chunk->size(), chunk->executable());
891 }
892 }
893 }
894
895 template <MemoryAllocator::FreeMode mode>
896 void MemoryAllocator::Free(MemoryChunk* chunk) {
897 switch (mode) {
898 case kFull:
899 PreFreeMemory(chunk);
900 PerformFreeMemory(chunk);
901 break;
902 case kPooledAndQueue:
903 DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
904 DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
905 chunk->SetFlag(MemoryChunk::POOLED);
906 // Fall through to kPreFreeAndQueue.
907 case kPreFreeAndQueue:
908 PreFreeMemory(chunk);
909 // The chunks added to this queue will be freed by a concurrent thread.
910 unmapper()->AddMemoryChunkSafe(chunk);
911 break;
912 default:
913 UNREACHABLE();
914 }
915 }
916
917 template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
918
919 template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
920 MemoryChunk* chunk);
921
922 template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
923 MemoryChunk* chunk);
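// Usage note (cross-references within this file, not new behavior):
// PagedSpace::TearDown() releases pages eagerly via Free<kFull>(),
// PagedSpace::ReleasePage() defers unmapping to the Unmapper via
// Free<kPreFreeAndQueue>(), and SemiSpace::EnsureCurrentCapacity() returns
// surplus new-space pages through Free<kPooledAndQueue>() so they can be
// recycled by AllocatePagePooled().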
924
925 template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
926 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
927 Executability executable) {
928 MemoryChunk* chunk = nullptr;
929 if (alloc_mode == kPooled) {
930 DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
931 DCHECK_EQ(executable, NOT_EXECUTABLE);
932 chunk = AllocatePagePooled(owner);
933 }
934 if (chunk == nullptr) {
935 chunk = AllocateChunk(size, size, executable, owner);
936 }
937 if (chunk == nullptr) return nullptr;
938 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
939 }
940
941 template Page*
942 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
943 size_t size, PagedSpace* owner, Executability executable);
944 template Page*
945 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
946 size_t size, SemiSpace* owner, Executability executable);
947 template Page*
948 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
949 size_t size, SemiSpace* owner, Executability executable);
950
951 LargePage* MemoryAllocator::AllocateLargePage(size_t size,
952 LargeObjectSpace* owner,
953 Executability executable) {
954 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
955 if (chunk == nullptr) return nullptr;
956 return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
957 }
958
959 template <typename SpaceType>
960 MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
961 MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
962 if (chunk == nullptr) return nullptr;
963 const int size = MemoryChunk::kPageSize;
964 const Address start = reinterpret_cast<Address>(chunk);
965 const Address area_start = start + MemoryChunk::kObjectStartOffset;
966 const Address area_end = start + size;
967 if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
968 return nullptr;
969 }
970 base::VirtualMemory reservation(start, size);
971 MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
972 NOT_EXECUTABLE, owner, &reservation);
973 size_.Increment(size);
974 return chunk;
975 }
976
977 bool MemoryAllocator::CommitBlock(Address start, size_t size,
978 Executability executable) {
979 if (!CommitMemory(start, size, executable)) return false;
980
981 if (Heap::ShouldZapGarbage()) {
982 ZapBlock(start, size);
983 }
984
985 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
986 return true;
987 }
988
989
990 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
991 if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
992 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
993 return true;
994 }
995
996
997 void MemoryAllocator::ZapBlock(Address start, size_t size) {
998 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
999 Memory::Address_at(start + s) = kZapValue;
1000 }
1001 }
1002
1003 #ifdef DEBUG
1004 void MemoryAllocator::ReportStatistics() {
1005 size_t size = Size();
1006 float pct = static_cast<float>(capacity_ - size) / capacity_;
1007 PrintF(" capacity: %zu , used: %" V8PRIdPTR ", available: %%%d\n\n",
1008 capacity_, size, static_cast<int>(pct * 100));
1009 }
1010 #endif
1011
1012 size_t MemoryAllocator::CodePageGuardStartOffset() {
1013 // We are guarding code pages: the first OS page after the header
1014 // will be protected as non-writable.
1015 return RoundUp(Page::kObjectStartOffset, GetCommitPageSize());
1016 }
1017
1018 size_t MemoryAllocator::CodePageGuardSize() {
1019 return static_cast<int>(GetCommitPageSize());
1020 }
1021
1022 size_t MemoryAllocator::CodePageAreaStartOffset() {
1023 // We are guarding code pages: the first OS page after the header
1024 // will be protected as non-writable.
1025 return CodePageGuardStartOffset() + CodePageGuardSize();
1026 }
1027
1028 size_t MemoryAllocator::CodePageAreaEndOffset() {
1029 // We are guarding code pages: the last OS page will be protected as
1030 // non-writable.
1031 return Page::kPageSize - static_cast<int>(GetCommitPageSize());
1032 }
1033
1034 intptr_t MemoryAllocator::GetCommitPageSize() {
1035 if (FLAG_v8_os_page_size != 0) {
1036 DCHECK(base::bits::IsPowerOfTwo32(FLAG_v8_os_page_size));
1037 return FLAG_v8_os_page_size * KB;
1038 } else {
1039 return base::OS::CommitPageSize();
1040 }
1041 }
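// Illustrative example: running with --v8-os-page-size=16 makes the heap use
// 16-KB commit pages regardless of the real OS page size; when the flag is 0,
// the value comes from base::OS::CommitPageSize().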
1042
1043
1044 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
1045 Address start, size_t commit_size,
1046 size_t reserved_size) {
1047 // Commit page header (not executable).
1048 Address header = start;
1049 size_t header_size = CodePageGuardStartOffset();
1050 if (vm->Commit(header, header_size, false)) {
1051 // Create guard page after the header.
1052 if (vm->Guard(start + CodePageGuardStartOffset())) {
1053 // Commit page body (executable).
1054 Address body = start + CodePageAreaStartOffset();
1055 size_t body_size = commit_size - CodePageGuardStartOffset();
1056 if (vm->Commit(body, body_size, true)) {
1057 // Create guard page before the end.
1058 if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
1059 UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
1060 commit_size -
1061 CodePageGuardStartOffset());
1062 return true;
1063 }
1064 vm->Uncommit(body, body_size);
1065 }
1066 }
1067 vm->Uncommit(header, header_size);
1068 }
1069 return false;
1070 }
1071
1072
1073 // -----------------------------------------------------------------------------
1074 // MemoryChunk implementation
1075
1076 void MemoryChunk::ReleaseAllocatedMemory() {
1077 if (skip_list_ != nullptr) {
1078 delete skip_list_;
1079 skip_list_ = nullptr;
1080 }
1081 if (mutex_ != nullptr) {
1082 delete mutex_;
1083 mutex_ = nullptr;
1084 }
1085 if (old_to_new_slots_.Value() != nullptr) ReleaseOldToNewSlots();
1086 if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
1087 if (typed_old_to_new_slots_.Value() != nullptr) ReleaseTypedOldToNewSlots();
1088 if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
1089 if (local_tracker_ != nullptr) ReleaseLocalTracker();
1090 }
1091
1092 static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
1093 size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
1094 DCHECK(pages > 0);
1095 SlotSet* slot_set = new SlotSet[pages];
1096 for (size_t i = 0; i < pages; i++) {
1097 slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
1098 }
1099 return slot_set;
1100 }
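// Illustrative example (hypothetical chunk size): a large-object chunk
// spanning three pages gets a SlotSet array of length three, where
// slot_set[i] covers the Page::kPageSize-sized range beginning at
// page_start + i * Page::kPageSize; a regular one-page chunk gets a
// single-element array.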
1101
1102 void MemoryChunk::AllocateOldToNewSlots() {
1103 DCHECK(nullptr == old_to_new_slots_.Value());
1104 old_to_new_slots_.SetValue(AllocateSlotSet(size_, address()));
1105 }
1106
1107 void MemoryChunk::ReleaseOldToNewSlots() {
1108 SlotSet* old_to_new_slots = old_to_new_slots_.Value();
1109 delete[] old_to_new_slots;
1110 old_to_new_slots_.SetValue(nullptr);
1111 }
1112
1113 void MemoryChunk::AllocateOldToOldSlots() {
1114 DCHECK(nullptr == old_to_old_slots_);
1115 old_to_old_slots_ = AllocateSlotSet(size_, address());
1116 }
1117
1118 void MemoryChunk::ReleaseOldToOldSlots() {
1119 delete[] old_to_old_slots_;
1120 old_to_old_slots_ = nullptr;
1121 }
1122
1123 void MemoryChunk::AllocateTypedOldToNewSlots() {
1124 DCHECK(nullptr == typed_old_to_new_slots_.Value());
1125 typed_old_to_new_slots_.SetValue(new TypedSlotSet(address()));
1126 }
1127
1128 void MemoryChunk::ReleaseTypedOldToNewSlots() {
1129 TypedSlotSet* typed_old_to_new_slots = typed_old_to_new_slots_.Value();
1130 delete typed_old_to_new_slots;
1131 typed_old_to_new_slots_.SetValue(nullptr);
1132 }
1133
1134 void MemoryChunk::AllocateTypedOldToOldSlots() {
1135 DCHECK(nullptr == typed_old_to_old_slots_);
1136 typed_old_to_old_slots_ = new TypedSlotSet(address());
1137 }
1138
1139 void MemoryChunk::ReleaseTypedOldToOldSlots() {
1140 delete typed_old_to_old_slots_;
1141 typed_old_to_old_slots_ = nullptr;
1142 }
1143
1144 void MemoryChunk::AllocateLocalTracker() {
1145 DCHECK_NULL(local_tracker_);
1146 local_tracker_ = new LocalArrayBufferTracker(heap());
1147 }
1148
1149 void MemoryChunk::ReleaseLocalTracker() {
1150 DCHECK_NOT_NULL(local_tracker_);
1151 delete local_tracker_;
1152 local_tracker_ = nullptr;
1153 }
1154
1155 void MemoryChunk::ClearLiveness() {
1156 markbits()->Clear();
1157 ResetLiveBytes();
1158 }
1159
1160 // -----------------------------------------------------------------------------
1161 // PagedSpace implementation
1162
1163 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
1164 ObjectSpace::kObjectSpaceNewSpace);
1165 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
1166 ObjectSpace::kObjectSpaceOldSpace);
1167 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
1168 ObjectSpace::kObjectSpaceCodeSpace);
1169 STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
1170 ObjectSpace::kObjectSpaceMapSpace);
1171
1172 void Space::AllocationStep(Address soon_object, int size) {
1173 if (!allocation_observers_paused_) {
1174 for (int i = 0; i < allocation_observers_->length(); ++i) {
1175 AllocationObserver* o = (*allocation_observers_)[i];
1176 o->AllocationStep(size, soon_object, size);
1177 }
1178 }
1179 }
1180
1181 PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
1182 Executability executable)
1183 : Space(heap, space, executable), anchor_(this), free_list_(this) {
1184 area_size_ = MemoryAllocator::PageAreaSize(space);
1185 accounting_stats_.Clear();
1186
1187 allocation_info_.Reset(nullptr, nullptr);
1188 }
1189
1190
1191 bool PagedSpace::SetUp() { return true; }
1192
1193
1194 bool PagedSpace::HasBeenSetUp() { return true; }
1195
1196
1197 void PagedSpace::TearDown() {
1198 for (auto it = begin(); it != end();) {
1199 Page* page = *(it++); // Will be erased.
1200 ArrayBufferTracker::FreeAll(page);
1201 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
1202 }
1203 anchor_.set_next_page(&anchor_);
1204 anchor_.set_prev_page(&anchor_);
1205 accounting_stats_.Clear();
1206 }
1207
1208 void PagedSpace::RefillFreeList() {
1209 // Any PagedSpace might invoke RefillFreeList. We filter all but our old
1210 // generation spaces out.
1211 if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
1212 identity() != MAP_SPACE) {
1213 return;
1214 }
1215 MarkCompactCollector* collector = heap()->mark_compact_collector();
1216 intptr_t added = 0;
1217 {
1218 Page* p = nullptr;
1219 while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
1220 // Pages can actually change ownership only during compaction. This is
1221 // safe because there exists no other competing action on the page links
1222 // during compaction.
1223 if (is_local() && (p->owner() != this)) {
1224 base::LockGuard<base::Mutex> guard(
1225 reinterpret_cast<PagedSpace*>(p->owner())->mutex());
1226 p->Unlink();
1227 p->set_owner(this);
1228 p->InsertAfter(anchor_.prev_page());
1229 }
1230 added += RelinkFreeListCategories(p);
1231 added += p->wasted_memory();
1232 if (is_local() && (added > kCompactionMemoryWanted)) break;
1233 }
1234 }
1235 accounting_stats_.IncreaseCapacity(added);
1236 }
1237
1238 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
1239 DCHECK(identity() == other->identity());
1240 // Unmerged fields:
1241 // area_size_
1242 // anchor_
1243
1244 other->EmptyAllocationInfo();
1245
1246 // Update and clear accounting statistics.
1247 accounting_stats_.Merge(other->accounting_stats_);
1248 other->accounting_stats_.Clear();
1249
1250 // The linear allocation area of {other} should be destroyed now.
1251 DCHECK(other->top() == nullptr);
1252 DCHECK(other->limit() == nullptr);
1253
1254 AccountCommitted(other->CommittedMemory());
1255
1256 // Move over pages.
1257 for (auto it = other->begin(); it != other->end();) {
1258 Page* p = *(it++);
1259
1260 // Relinking requires the category to be unlinked.
1261 other->UnlinkFreeListCategories(p);
1262
1263 p->Unlink();
1264 p->set_owner(this);
1265 p->InsertAfter(anchor_.prev_page());
1266 RelinkFreeListCategories(p);
1267 DCHECK_EQ(p->AvailableInFreeList(), p->available_in_free_list());
1268 }
1269 }
1270
1271
1272 size_t PagedSpace::CommittedPhysicalMemory() {
1273 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1274 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1275 size_t size = 0;
1276 for (Page* page : *this) {
1277 size += page->CommittedPhysicalMemory();
1278 }
1279 return size;
1280 }
1281
1282 bool PagedSpace::ContainsSlow(Address addr) {
1283 Page* p = Page::FromAddress(addr);
1284 for (Page* page : *this) {
1285 if (page == p) return true;
1286 }
1287 return false;
1288 }
1289
1290
1291 Object* PagedSpace::FindObject(Address addr) {
1292 // Note: this function can only be called on iterable spaces.
1293 DCHECK(!heap()->mark_compact_collector()->in_use());
1294
1295 if (!Contains(addr)) return Smi::kZero; // Signaling not found.
1296
1297 Page* p = Page::FromAddress(addr);
1298 HeapObjectIterator it(p);
1299 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1300 Address cur = obj->address();
1301 Address next = cur + obj->Size();
1302 if ((cur <= addr) && (addr < next)) return obj;
1303 }
1304
1305 UNREACHABLE();
1306 return Smi::kZero;
1307 }
1308
1309 void PagedSpace::ShrinkImmortalImmovablePages() {
1310 DCHECK(!heap()->deserialization_complete());
1311 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1312 EmptyAllocationInfo();
1313 ResetFreeList();
1314
1315 for (Page* page : *this) {
1316 DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
1317 size_t unused = page->ShrinkToHighWaterMark();
1318 accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
1319 AccountUncommitted(unused);
1320 }
1321 }
1322
1323 bool PagedSpace::Expand() {
1324 const int size = AreaSize();
1325
1326 if (!heap()->CanExpandOldGeneration(size)) return false;
1327
1328 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1329 if (p == nullptr) return false;
1330
1331 AccountCommitted(p->size());
1332
1333 // Pages created during bootstrapping may contain immortal immovable objects.
1334 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1335
1336 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
1337
1338 p->InsertAfter(anchor_.prev_page());
1339
1340 return true;
1341 }
1342
1343
1344 int PagedSpace::CountTotalPages() {
1345 int count = 0;
1346 for (Page* page : *this) {
1347 count++;
1348 USE(page);
1349 }
1350 return count;
1351 }
1352
1353
1354 void PagedSpace::ResetFreeListStatistics() {
1355 for (Page* page : *this) {
1356 page->ResetFreeListStatistics();
1357 }
1358 }
1359
1360 void PagedSpace::SetAllocationInfo(Address top, Address limit) {
1361 SetTopAndLimit(top, limit);
1362 if (top != nullptr && top != limit &&
1363 heap()->incremental_marking()->black_allocation()) {
1364 Page* page = Page::FromAllocationAreaAddress(top);
1365 page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
1366 page->AddressToMarkbitIndex(limit));
1367 page->IncrementLiveBytes(static_cast<int>(limit - top));
1368 }
1369 }
1370
1371 void PagedSpace::MarkAllocationInfoBlack() {
1372 DCHECK(heap()->incremental_marking()->black_allocation());
1373 Address current_top = top();
1374 Address current_limit = limit();
1375 if (current_top != nullptr && current_top != current_limit) {
1376 Page* page = Page::FromAllocationAreaAddress(current_top);
1377 page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
1378 page->AddressToMarkbitIndex(current_limit));
1379 page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
1380 }
1381 }
1382
1383 // Empty space allocation info, returning unused area to free list.
1384 void PagedSpace::EmptyAllocationInfo() {
1385 // Mark the old linear allocation area with a free space map so it can be
1386 // skipped when scanning the heap.
1387 Address current_top = top();
1388 Address current_limit = limit();
1389 if (current_top == nullptr) {
1390 DCHECK(current_limit == nullptr);
1391 return;
1392 }
1393
1394 if (heap()->incremental_marking()->black_allocation()) {
1395 Page* page = Page::FromAllocationAreaAddress(current_top);
1396
1397 // Clear the bits in the unused black area.
1398 if (current_top != current_limit) {
1399 page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
1400 page->AddressToMarkbitIndex(current_limit));
1401 page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top));
1402 }
1403 }
1404
1405 SetTopAndLimit(NULL, NULL);
1406 DCHECK_GE(current_limit, current_top);
1407 Free(current_top, current_limit - current_top);
1408 }
1409
1410 void PagedSpace::IncreaseCapacity(size_t bytes) {
1411 accounting_stats_.ExpandSpace(bytes);
1412 }
1413
1414 void PagedSpace::ReleasePage(Page* page) {
1415 DCHECK_EQ(page->LiveBytes(), 0);
1416 DCHECK_EQ(page->owner(), this);
1417
1418 free_list_.EvictFreeListItems(page);
1419 DCHECK(!free_list_.ContainsPageFreeListItems(page));
1420
1421 if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
1422 allocation_info_.Reset(nullptr, nullptr);
1423 }
1424
1425 // If page is still in a list, unlink it from that list.
1426 if (page->next_chunk() != NULL) {
1427 DCHECK(page->prev_chunk() != NULL);
1428 page->Unlink();
1429 }
1430
1431 AccountUncommitted(page->size());
1432 accounting_stats_.ShrinkSpace(page->area_size());
1433 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
1434 }
1435
1436 std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
1437 return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
1438 }
1439
1440 #ifdef DEBUG
1441 void PagedSpace::Print() {}
1442 #endif
1443
1444 #ifdef VERIFY_HEAP
1445 void PagedSpace::Verify(ObjectVisitor* visitor) {
1446 bool allocation_pointer_found_in_space =
1447 (allocation_info_.top() == allocation_info_.limit());
1448 for (Page* page : *this) {
1449 CHECK(page->owner() == this);
1450 if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
1451 allocation_pointer_found_in_space = true;
1452 }
1453 CHECK(page->SweepingDone());
1454 HeapObjectIterator it(page);
1455 Address end_of_previous_object = page->area_start();
1456 Address top = page->area_end();
1457 int black_size = 0;
1458 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1459 CHECK(end_of_previous_object <= object->address());
1460
1461 // The first word should be a map, and we expect all map pointers to
1462 // be in map space.
1463 Map* map = object->map();
1464 CHECK(map->IsMap());
1465 CHECK(heap()->map_space()->Contains(map));
1466
1467 // Perform space-specific object verification.
1468 VerifyObject(object);
1469
1470 // The object itself should look OK.
1471 object->ObjectVerify();
1472
1473 // All the interior pointers should be contained in the heap.
1474 int size = object->Size();
1475 object->IterateBody(map->instance_type(), size, visitor);
1476 if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
1477 black_size += size;
1478 }
1479
1480 CHECK(object->address() + size <= top);
1481 end_of_previous_object = object->address() + size;
1482 }
1483 CHECK_LE(black_size, page->LiveBytes());
1484 }
1485 CHECK(allocation_pointer_found_in_space);
1486 }
1487 #endif // VERIFY_HEAP
1488
1489 // -----------------------------------------------------------------------------
1490 // NewSpace implementation
1491
1492 bool NewSpace::SetUp(size_t initial_semispace_capacity,
1493 size_t maximum_semispace_capacity) {
1494 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1495 DCHECK(base::bits::IsPowerOfTwo32(
1496 static_cast<uint32_t>(maximum_semispace_capacity)));
1497
1498 to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1499 from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1500 if (!to_space_.Commit()) {
1501 return false;
1502 }
1503 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1504 ResetAllocationInfo();
1505
1506 // Allocate and set up the histogram arrays if necessary.
1507 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1508 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1509 #define SET_NAME(name) \
1510 allocated_histogram_[name].set_name(#name); \
1511 promoted_histogram_[name].set_name(#name);
1512 INSTANCE_TYPE_LIST(SET_NAME)
1513 #undef SET_NAME
1514
1515 return true;
1516 }
1517
1518
1519 void NewSpace::TearDown() {
1520 if (allocated_histogram_) {
1521 DeleteArray(allocated_histogram_);
1522 allocated_histogram_ = NULL;
1523 }
1524 if (promoted_histogram_) {
1525 DeleteArray(promoted_histogram_);
1526 promoted_histogram_ = NULL;
1527 }
1528
1529 allocation_info_.Reset(nullptr, nullptr);
1530
1531 to_space_.TearDown();
1532 from_space_.TearDown();
1533 }
1534
1535 void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1536
1537
1538 void NewSpace::Grow() {
1539 // Double the semispace size but only up to maximum capacity.
1540 DCHECK(TotalCapacity() < MaximumCapacity());
1541 size_t new_capacity =
1542 Min(MaximumCapacity(),
1543 static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
1544 if (to_space_.GrowTo(new_capacity)) {
1545 // Only grow from space if we managed to grow to-space.
1546 if (!from_space_.GrowTo(new_capacity)) {
1547 // If we managed to grow to-space but couldn't grow from-space,
1548 // attempt to shrink to-space.
1549 if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
1550 // We are in an inconsistent state because we could not
1551 // commit/uncommit memory from new space.
1552 CHECK(false);
1553 }
1554 }
1555 }
1556 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1557 }
1558
1559
1560 void NewSpace::Shrink() {
1561 size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
1562 size_t rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1563 if (rounded_new_capacity < TotalCapacity() &&
1564 to_space_.ShrinkTo(rounded_new_capacity)) {
1565 // Only shrink from-space if we managed to shrink to-space.
1566 from_space_.Reset();
1567 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1568 // If we managed to shrink to-space but couldn't shrink from
1569 // space, attempt to grow to-space again.
1570 if (!to_space_.GrowTo(from_space_.current_capacity())) {
1571 // We are in an inconsistent state because we could not
1572 // commit/uncommit memory from new space.
1573 CHECK(false);
1574 }
1575 }
1576 }
1577 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1578 }
1579
1580 bool NewSpace::Rebalance() {
1581 CHECK(heap()->promotion_queue()->is_empty());
1582 // Order here is important to make use of the page pool.
1583 return to_space_.EnsureCurrentCapacity() &&
1584 from_space_.EnsureCurrentCapacity();
1585 }
1586
1587 bool SemiSpace::EnsureCurrentCapacity() {
1588 if (is_committed()) {
1589 const int expected_pages =
1590 static_cast<int>(current_capacity_ / Page::kPageSize);
1591 int actual_pages = 0;
1592 Page* current_page = anchor()->next_page();
1593 while (current_page != anchor()) {
1594 actual_pages++;
1595 current_page = current_page->next_page();
1596 if (actual_pages > expected_pages) {
1597 Page* to_remove = current_page->prev_page();
1598 // Make sure we don't overtake the actual top pointer.
1599 CHECK_NE(to_remove, current_page_);
1600 to_remove->Unlink();
1601 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
1602 to_remove);
1603 }
1604 }
1605 while (actual_pages < expected_pages) {
1606 actual_pages++;
1607 current_page =
1608 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1609 Page::kAllocatableMemory, this, executable());
1610 if (current_page == nullptr) return false;
1611 DCHECK_NOT_NULL(current_page);
1612 current_page->InsertAfter(anchor());
1613 current_page->ClearLiveness();
1614 current_page->SetFlags(anchor()->prev_page()->GetFlags(),
1615 Page::kCopyAllFlags);
1616 heap()->CreateFillerObjectAt(current_page->area_start(),
1617 static_cast<int>(current_page->area_size()),
1618 ClearRecordedSlots::kNo);
1619 }
1620 }
1621 return true;
1622 }
1623
1624 void LocalAllocationBuffer::Close() {
1625 if (IsValid()) {
1626 heap_->CreateFillerObjectAt(
1627 allocation_info_.top(),
1628 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1629 ClearRecordedSlots::kNo);
1630 }
1631 }
1632
1633
1634 LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1635 AllocationInfo allocation_info)
1636 : heap_(heap), allocation_info_(allocation_info) {
1637 if (IsValid()) {
1638 heap_->CreateFillerObjectAt(
1639 allocation_info_.top(),
1640 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1641 ClearRecordedSlots::kNo);
1642 }
1643 }
1644
1645
1646 LocalAllocationBuffer::LocalAllocationBuffer(
1647 const LocalAllocationBuffer& other) {
1648 *this = other;
1649 }
1650
1651
1652 LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1653 const LocalAllocationBuffer& other) {
1654 Close();
1655 heap_ = other.heap_;
1656 allocation_info_ = other.allocation_info_;
1657
1658 // This is needed since we (a) cannot yet use move semantics, (b) want to
1659 // make the class easy to use by passing it around by value, and (c)
1660 // implicitly call {Close} upon copy.
1661 const_cast<LocalAllocationBuffer&>(other)
1662 .allocation_info_.Reset(nullptr, nullptr);
1663 return *this;
1664 }
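
// Usage sketch (hypothetical buffers, for illustration only): assigning one
// LocalAllocationBuffer to another closes the destination's old area and
// transfers ownership of the source's area, invalidating the source:
//
//   LocalAllocationBuffer lab_a = ...;  // valid buffer
//   LocalAllocationBuffer lab_b = ...;  // another valid buffer
//   lab_b = lab_a;  // lab_b's old area is filled and closed, lab_b takes over
//                   // lab_a's area, and lab_a is reset to (nullptr, nullptr).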
1665
1666
1667 void NewSpace::UpdateAllocationInfo() {
1668 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1669 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
1670 UpdateInlineAllocationLimit(0);
1671 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1672 }
1673
1674
1675 void NewSpace::ResetAllocationInfo() {
1676 Address old_top = allocation_info_.top();
1677 to_space_.Reset();
1678 UpdateAllocationInfo();
1679 // Clear all mark-bits in the to-space.
1680 for (Page* p : to_space_) {
1681 p->ClearLiveness();
1682 }
1683 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1684 }
1685
1686
1687 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1688 if (heap()->inline_allocation_disabled()) {
1689 // Lowest limit when linear allocation is disabled.
1690 Address high = to_space_.page_high();
1691 Address new_top = allocation_info_.top() + size_in_bytes;
1692 allocation_info_.set_limit(Min(new_top, high));
1693 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
1694 // Normal limit is the end of the current page.
1695 allocation_info_.set_limit(to_space_.page_high());
1696 } else {
1697 // Lower limit during incremental marking.
1698 Address high = to_space_.page_high();
1699 Address new_top = allocation_info_.top() + size_in_bytes;
1700 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
1701 allocation_info_.set_limit(Min(new_limit, high));
1702 }
1703 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1704 }
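
// The three regimes above, sketched with hypothetical addresses
// (top = 0x1000, page_high = 0x5000):
//
//   inline allocation disabled: limit = Min(0x1000 + size_in_bytes, 0x5000),
//     i.e. just enough room for the pending allocation;
//   observers paused or no step taken yet: limit = 0x5000 (the whole page);
//   observers active: limit = Min(0x1000 + size_in_bytes +
//     GetNextInlineAllocationStepSize() - 1, 0x5000), so the slow path (and
//     hence InlineAllocationStep) is reached after roughly one observer step
//     worth of allocation.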
1705
1706
1707 bool NewSpace::AddFreshPage() {
1708 Address top = allocation_info_.top();
1709 DCHECK(!Page::IsAtObjectStart(top));
1710 if (!to_space_.AdvancePage()) {
1711 // No more pages left to advance.
1712 return false;
1713 }
1714
1715 // Clear remainder of current page.
1716 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
1717 if (heap()->gc_state() == Heap::SCAVENGE) {
1718 heap()->promotion_queue()->SetNewLimit(limit);
1719 }
1720
1721 int remaining_in_page = static_cast<int>(limit - top);
1722 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
1723 UpdateAllocationInfo();
1724
1725 return true;
1726 }
1727
1728
1729 bool NewSpace::AddFreshPageSynchronized() {
1730 base::LockGuard<base::Mutex> guard(&mutex_);
1731 return AddFreshPage();
1732 }
1733
1734
1735 bool NewSpace::EnsureAllocation(int size_in_bytes,
1736 AllocationAlignment alignment) {
1737 Address old_top = allocation_info_.top();
1738 Address high = to_space_.page_high();
1739 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1740 int aligned_size_in_bytes = size_in_bytes + filler_size;
1741
1742 if (old_top + aligned_size_in_bytes > high) {
1743 // Not enough room in the page, try to allocate a new one.
1744 if (!AddFreshPage()) {
1745 return false;
1746 }
1747
1748 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1749
1750 old_top = allocation_info_.top();
1751 high = to_space_.page_high();
1752 filler_size = Heap::GetFillToAlign(old_top, alignment);
1753 }
1754
1755 DCHECK(old_top + aligned_size_in_bytes <= high);
1756
1757 if (allocation_info_.limit() < high) {
1758 // Either the limit has been lowered because linear allocation was disabled
1759 // or because incremental marking wants to get a chance to do a step,
1760 // or because the idle scavenge job wants to get a chance to post a task.
1761 // Set the new limit accordingly.
1762 Address new_top = old_top + aligned_size_in_bytes;
1763 Address soon_object = old_top + filler_size;
1764 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1765 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1766 }
1767 return true;
1768 }
1769
1770
1771 void NewSpace::StartNextInlineAllocationStep() {
1772 if (!allocation_observers_paused_) {
1773 top_on_previous_step_ =
1774 allocation_observers_->length() ? allocation_info_.top() : 0;
1775 UpdateInlineAllocationLimit(0);
1776 }
1777 }
1778
1779
1780 intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1781 intptr_t next_step = 0;
1782 for (int i = 0; i < allocation_observers_->length(); ++i) {
1783 AllocationObserver* o = (*allocation_observers_)[i];
1784 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1785 : o->bytes_to_next_step();
1786 }
1787 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
1788 return next_step;
1789 }
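
// For example, with two observers whose bytes_to_next_step() are 32 KB and
// 512 KB, the returned step size is Min(32 KB, 512 KB) == 32 KB, which keeps
// the inline allocation limit low enough that neither observer misses its
// step.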
1790
1791 void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1792 Space::AddAllocationObserver(observer);
1793 StartNextInlineAllocationStep();
1794 }
1795
1796 void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1797 Space::RemoveAllocationObserver(observer);
1798 StartNextInlineAllocationStep();
1799 }
1800
1801 void NewSpace::PauseAllocationObservers() {
1802 // Do a step to account for memory allocated so far.
1803 InlineAllocationStep(top(), top(), nullptr, 0);
1804 Space::PauseAllocationObservers();
1805 top_on_previous_step_ = 0;
1806 UpdateInlineAllocationLimit(0);
1807 }
1808
1809 void NewSpace::ResumeAllocationObservers() {
1810 DCHECK(top_on_previous_step_ == 0);
1811 Space::ResumeAllocationObservers();
1812 StartNextInlineAllocationStep();
1813 }
1814
1815
1816 void NewSpace::InlineAllocationStep(Address top, Address new_top,
1817 Address soon_object, size_t size) {
1818 if (top_on_previous_step_) {
1819 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
1820 for (int i = 0; i < allocation_observers_->length(); ++i) {
1821 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1822 size);
1823 }
1824 top_on_previous_step_ = new_top;
1825 }
1826 }
1827
1828 std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
1829 return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
1830 }
1831
1832 #ifdef VERIFY_HEAP
1833 // We do not use the SemiSpaceIterator because verification must not assume
1834 // that it works: the iterator itself depends on the invariants being checked.
1835 void NewSpace::Verify() {
1836 // The allocation pointer should be in the space or at the very end.
1837 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1838
1839 // There should be objects packed in from the low address up to the
1840 // allocation pointer.
1841 Address current = to_space_.first_page()->area_start();
1842 CHECK_EQ(current, to_space_.space_start());
1843
1844 while (current != top()) {
1845 if (!Page::IsAlignedToPageSize(current)) {
1846 // The allocation pointer should not be in the middle of an object.
1847 CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
1848 current < top());
1849
1850 HeapObject* object = HeapObject::FromAddress(current);
1851
1852 // The first word should be a map, and we expect all map pointers to
1853 // be in map space.
1854 Map* map = object->map();
1855 CHECK(map->IsMap());
1856 CHECK(heap()->map_space()->Contains(map));
1857
1858 // The object should not be code or a map.
1859 CHECK(!object->IsMap());
1860 CHECK(!object->IsAbstractCode());
1861
1862 // The object itself should look OK.
1863 object->ObjectVerify();
1864
1865 // All the interior pointers should be contained in the heap.
1866 VerifyPointersVisitor visitor;
1867 int size = object->Size();
1868 object->IterateBody(map->instance_type(), size, &visitor);
1869
1870 current += size;
1871 } else {
1872 // At end of page, switch to next page.
1873 Page* page = Page::FromAllocationAreaAddress(current)->next_page();
1874 // Next page should be valid.
1875 CHECK(!page->is_anchor());
1876 current = page->area_start();
1877 }
1878 }
1879
1880 // Check semi-spaces.
1881 CHECK_EQ(from_space_.id(), kFromSpace);
1882 CHECK_EQ(to_space_.id(), kToSpace);
1883 from_space_.Verify();
1884 to_space_.Verify();
1885 }
1886 #endif
1887
1888 // -----------------------------------------------------------------------------
1889 // SemiSpace implementation
1890
1891 void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
1892 DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
1893 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1894 current_capacity_ = minimum_capacity_;
1895 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1896 committed_ = false;
1897 }
1898
1899
1900 void SemiSpace::TearDown() {
1901 // Properly uncommit memory to keep the allocator counters in sync.
1902 if (is_committed()) {
1903 for (Page* p : *this) {
1904 ArrayBufferTracker::FreeAll(p);
1905 }
1906 Uncommit();
1907 }
1908 current_capacity_ = maximum_capacity_ = 0;
1909 }
1910
1911
1912 bool SemiSpace::Commit() {
1913 DCHECK(!is_committed());
1914 Page* current = anchor();
1915 const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
1916 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
1917 Page* new_page =
1918 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1919 Page::kAllocatableMemory, this, executable());
1920 if (new_page == nullptr) {
1921 RewindPages(current, pages_added);
1922 return false;
1923 }
1924 new_page->InsertAfter(current);
1925 current = new_page;
1926 }
1927 Reset();
1928 AccountCommitted(current_capacity_);
1929 if (age_mark_ == nullptr) {
1930 age_mark_ = first_page()->area_start();
1931 }
1932 committed_ = true;
1933 return true;
1934 }
1935
1936
1937 bool SemiSpace::Uncommit() {
1938 DCHECK(is_committed());
1939 for (auto it = begin(); it != end();) {
1940 Page* p = *(it++);
1941 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
1942 }
1943 anchor()->set_next_page(anchor());
1944 anchor()->set_prev_page(anchor());
1945 AccountUncommitted(current_capacity_);
1946 committed_ = false;
1947 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
1948 return true;
1949 }
1950
1951
1952 size_t SemiSpace::CommittedPhysicalMemory() {
1953 if (!is_committed()) return 0;
1954 size_t size = 0;
1955 for (Page* p : *this) {
1956 size += p->CommittedPhysicalMemory();
1957 }
1958 return size;
1959 }
1960
1961 bool SemiSpace::GrowTo(size_t new_capacity) {
1962 if (!is_committed()) {
1963 if (!Commit()) return false;
1964 }
1965 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
1966 DCHECK_LE(new_capacity, maximum_capacity_);
1967 DCHECK_GT(new_capacity, current_capacity_);
1968 const size_t delta = new_capacity - current_capacity_;
1969 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1970 const int delta_pages = static_cast<int>(delta / Page::kPageSize);
1971 Page* last_page = anchor()->prev_page();
1972 DCHECK_NE(last_page, anchor());
1973 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
1974 Page* new_page =
1975 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1976 Page::kAllocatableMemory, this, executable());
1977 if (new_page == nullptr) {
1978 RewindPages(last_page, pages_added);
1979 return false;
1980 }
1981 new_page->InsertAfter(last_page);
1982 new_page->ClearLiveness();
1983 // Duplicate the flags that were set on the old page.
1984 new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
1985 last_page = new_page;
1986 }
1987 AccountCommitted(delta);
1988 current_capacity_ = new_capacity;
1989 return true;
1990 }
1991
1992 void SemiSpace::RewindPages(Page* start, int num_pages) {
1993 Page* new_last_page = nullptr;
1994 Page* last_page = start;
1995 while (num_pages > 0) {
1996 DCHECK_NE(last_page, anchor());
1997 new_last_page = last_page->prev_page();
1998 last_page->prev_page()->set_next_page(last_page->next_page());
1999 last_page->next_page()->set_prev_page(last_page->prev_page());
2000 last_page = new_last_page;
2001 num_pages--;
2002 }
2003 }
2004
2005 bool SemiSpace::ShrinkTo(size_t new_capacity) {
2006 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0u);
2007 DCHECK_GE(new_capacity, minimum_capacity_);
2008 DCHECK_LT(new_capacity, current_capacity_);
2009 if (is_committed()) {
2010 const size_t delta = current_capacity_ - new_capacity;
2011 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
2012 int delta_pages = static_cast<int>(delta / Page::kPageSize);
2013 Page* new_last_page;
2014 Page* last_page;
2015 while (delta_pages > 0) {
2016 last_page = anchor()->prev_page();
2017 new_last_page = last_page->prev_page();
2018 new_last_page->set_next_page(anchor());
2019 anchor()->set_prev_page(new_last_page);
2020 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
2021 last_page);
2022 delta_pages--;
2023 }
2024 AccountUncommitted(delta);
2025 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2026 }
2027 current_capacity_ = new_capacity;
2028 return true;
2029 }
2030
2031 void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
2032 anchor_.set_owner(this);
2033 anchor_.prev_page()->set_next_page(&anchor_);
2034 anchor_.next_page()->set_prev_page(&anchor_);
2035
2036 for (Page* page : *this) {
2037 page->set_owner(this);
2038 page->SetFlags(flags, mask);
2039 if (id_ == kToSpace) {
2040 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
2041 page->SetFlag(MemoryChunk::IN_TO_SPACE);
2042 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2043 page->ResetLiveBytes();
2044 } else {
2045 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
2046 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
2047 }
2048 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
2049 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
2050 }
2051 }
2052
2053
2054 void SemiSpace::Reset() {
2055 DCHECK_NE(anchor_.next_page(), &anchor_);
2056 current_page_ = anchor_.next_page();
2057 pages_used_ = 0;
2058 }
2059
2060 void SemiSpace::RemovePage(Page* page) {
2061 if (current_page_ == page) {
2062 current_page_ = page->prev_page();
2063 }
2064 page->Unlink();
2065 }
2066
2067 void SemiSpace::PrependPage(Page* page) {
2068 page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
2069 page->set_owner(this);
2070 page->InsertAfter(anchor());
2071 pages_used_++;
2072 }
2073
2074 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
2075 // We won't be swapping semispaces without data in them.
2076 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
2077 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
2078
2079 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
2080
2081 // We swap all properties but id_.
2082 std::swap(from->current_capacity_, to->current_capacity_);
2083 std::swap(from->maximum_capacity_, to->maximum_capacity_);
2084 std::swap(from->minimum_capacity_, to->minimum_capacity_);
2085 std::swap(from->age_mark_, to->age_mark_);
2086 std::swap(from->committed_, to->committed_);
2087 std::swap(from->anchor_, to->anchor_);
2088 std::swap(from->current_page_, to->current_page_);
2089
2090 to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
2091 from->FixPagesFlags(0, 0);
2092 }
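
// Note on the flip: after Swap() the pages themselves still carry the flags
// of their former role, which is why FixPagesFlags() is called for both
// spaces. The new to-space reapplies the flip-relevant flags saved from the
// old to-space and marks its pages IN_TO_SPACE; the new from-space simply
// marks its pages IN_FROM_SPACE.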
2093
2094 void SemiSpace::set_age_mark(Address mark) {
2095 DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
2096 age_mark_ = mark;
2097 // Mark all pages up to the one containing mark.
2098 for (Page* p : NewSpacePageRange(space_start(), mark)) {
2099 p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2100 }
2101 }
2102
2103 std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
2104 // Use NewSpace::GetObjectIterator() (a SemiSpaceIterator) to iterate to-space.
2105 UNREACHABLE();
2106 return std::unique_ptr<ObjectIterator>();
2107 }
2108
2109 #ifdef DEBUG
2110 void SemiSpace::Print() {}
2111 #endif
2112
2113 #ifdef VERIFY_HEAP
2114 void SemiSpace::Verify() {
2115 bool is_from_space = (id_ == kFromSpace);
2116 Page* page = anchor_.next_page();
2117 CHECK(anchor_.owner() == this);
2118 while (page != &anchor_) {
2119 CHECK_EQ(page->owner(), this);
2120 CHECK(page->InNewSpace());
2121 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
2122 : MemoryChunk::IN_TO_SPACE));
2123 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
2124 : MemoryChunk::IN_FROM_SPACE));
2125 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
2126 if (!is_from_space) {
2127 // The pointers-from-here-are-interesting flag isn't updated dynamically
2128 // on from-space pages, so it might be out of sync with the marking state.
2129 if (page->heap()->incremental_marking()->IsMarking()) {
2130 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2131 } else {
2132 CHECK(
2133 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2134 }
2135 // TODO(gc): Check that the live_bytes_count_ field matches the
2136 // black marking on the page (if we make it match in new-space).
2137 }
2138 CHECK_EQ(page->prev_page()->next_page(), page);
2139 page = page->next_page();
2140 }
2141 }
2142 #endif
2143
2144 #ifdef DEBUG
2145 void SemiSpace::AssertValidRange(Address start, Address end) {
2146 // Addresses must belong to the same semi-space.
2147 Page* page = Page::FromAllocationAreaAddress(start);
2148 Page* end_page = Page::FromAllocationAreaAddress(end);
2149 SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
2150 CHECK_EQ(space, end_page->owner());
2151 // Start address is before end address, either on same page,
2152 // or end address is on a later page in the linked list of
2153 // semi-space pages.
2154 if (page == end_page) {
2155 CHECK_LE(start, end);
2156 } else {
2157 while (page != end_page) {
2158 page = page->next_page();
2159 CHECK_NE(page, space->anchor());
2160 }
2161 }
2162 }
2163 #endif
2164
2165
2166 // -----------------------------------------------------------------------------
2167 // SemiSpaceIterator implementation.
2168
2169 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
2170 Initialize(space->bottom(), space->top());
2171 }
2172
2173
2174 void SemiSpaceIterator::Initialize(Address start, Address end) {
2175 SemiSpace::AssertValidRange(start, end);
2176 current_ = start;
2177 limit_ = end;
2178 }
2179
2180 #ifdef DEBUG
2181 // heap_histograms is shared; always clear it before using it.
2182 static void ClearHistograms(Isolate* isolate) {
2183 // We reset the name each time, though it hasn't changed.
2184 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
2185 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2186 #undef DEF_TYPE_NAME
2187
2188 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
2189 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
2190 #undef CLEAR_HISTOGRAM
2191
2192 isolate->js_spill_information()->Clear();
2193 }
2194
2195 static int CollectHistogramInfo(HeapObject* obj) {
2196 Isolate* isolate = obj->GetIsolate();
2197 InstanceType type = obj->map()->instance_type();
2198 DCHECK(0 <= type && type <= LAST_TYPE);
2199 DCHECK(isolate->heap_histograms()[type].name() != NULL);
2200 isolate->heap_histograms()[type].increment_number(1);
2201 isolate->heap_histograms()[type].increment_bytes(obj->Size());
2202
2203 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2204 JSObject::cast(obj)
2205 ->IncrementSpillStatistics(isolate->js_spill_information());
2206 }
2207
2208 return obj->Size();
2209 }
2210
2211
2212 static void ReportHistogram(Isolate* isolate, bool print_spill) {
2213 PrintF("\n Object Histogram:\n");
2214 for (int i = 0; i <= LAST_TYPE; i++) {
2215 if (isolate->heap_histograms()[i].number() > 0) {
2216 PrintF(" %-34s%10d (%10d bytes)\n",
2217 isolate->heap_histograms()[i].name(),
2218 isolate->heap_histograms()[i].number(),
2219 isolate->heap_histograms()[i].bytes());
2220 }
2221 }
2222 PrintF("\n");
2223
2224 // Summarize string types.
2225 int string_number = 0;
2226 int string_bytes = 0;
2227 #define INCREMENT(type, size, name, camel_name) \
2228 string_number += isolate->heap_histograms()[type].number(); \
2229 string_bytes += isolate->heap_histograms()[type].bytes();
2230 STRING_TYPE_LIST(INCREMENT)
2231 #undef INCREMENT
2232 if (string_number > 0) {
2233 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2234 string_bytes);
2235 }
2236
2237 if (FLAG_collect_heap_spill_statistics && print_spill) {
2238 isolate->js_spill_information()->Print();
2239 }
2240 }
2241 #endif // DEBUG
2242
2243
2244 // Support for statistics gathering for --heap-stats and --log-gc.
2245 void NewSpace::ClearHistograms() {
2246 for (int i = 0; i <= LAST_TYPE; i++) {
2247 allocated_histogram_[i].clear();
2248 promoted_histogram_[i].clear();
2249 }
2250 }
2251
2252
2253 // Because the copying collector does not touch garbage objects, we iterate
2254 // the new space before a collection to get a histogram of allocated objects.
2255 // This only happens when the --log-gc flag is set.
2256 void NewSpace::CollectStatistics() {
2257 ClearHistograms();
2258 SemiSpaceIterator it(this);
2259 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2260 RecordAllocation(obj);
2261 }
2262
2263
2264 static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2265 const char* description) {
2266 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2267 // Lump all the string types together.
2268 int string_number = 0;
2269 int string_bytes = 0;
2270 #define INCREMENT(type, size, name, camel_name) \
2271 string_number += info[type].number(); \
2272 string_bytes += info[type].bytes();
2273 STRING_TYPE_LIST(INCREMENT)
2274 #undef INCREMENT
2275 if (string_number > 0) {
2276 LOG(isolate,
2277 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2278 }
2279
2280 // Then do the other types.
2281 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2282 if (info[i].number() > 0) {
2283 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2284 info[i].bytes()));
2285 }
2286 }
2287 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2288 }
2289
2290
2291 void NewSpace::ReportStatistics() {
2292 #ifdef DEBUG
2293 if (FLAG_heap_stats) {
2294 float pct = static_cast<float>(Available()) / TotalCapacity();
2295 PrintF(" capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
2296 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2297 PrintF("\n Object Histogram:\n");
2298 for (int i = 0; i <= LAST_TYPE; i++) {
2299 if (allocated_histogram_[i].number() > 0) {
2300 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2301 allocated_histogram_[i].number(),
2302 allocated_histogram_[i].bytes());
2303 }
2304 }
2305 PrintF("\n");
2306 }
2307 #endif // DEBUG
2308
2309 if (FLAG_log_gc) {
2310 Isolate* isolate = heap()->isolate();
2311 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2312 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2313 }
2314 }
2315
2316
2317 void NewSpace::RecordAllocation(HeapObject* obj) {
2318 InstanceType type = obj->map()->instance_type();
2319 DCHECK(0 <= type && type <= LAST_TYPE);
2320 allocated_histogram_[type].increment_number(1);
2321 allocated_histogram_[type].increment_bytes(obj->Size());
2322 }
2323
2324
2325 void NewSpace::RecordPromotion(HeapObject* obj) {
2326 InstanceType type = obj->map()->instance_type();
2327 DCHECK(0 <= type && type <= LAST_TYPE);
2328 promoted_histogram_[type].increment_number(1);
2329 promoted_histogram_[type].increment_bytes(obj->Size());
2330 }
2331
2332
2333 size_t NewSpace::CommittedPhysicalMemory() {
2334 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2335 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2336 size_t size = to_space_.CommittedPhysicalMemory();
2337 if (from_space_.is_committed()) {
2338 size += from_space_.CommittedPhysicalMemory();
2339 }
2340 return size;
2341 }
2342
2343
2344 // -----------------------------------------------------------------------------
2345 // Free-list implementation for old object spaces
2346
2347
2348 void FreeListCategory::Reset() {
2349 set_top(nullptr);
2350 set_prev(nullptr);
2351 set_next(nullptr);
2352 available_ = 0;
2353 }
2354
2355 FreeSpace* FreeListCategory::PickNodeFromList(size_t* node_size) {
2356 DCHECK(page()->CanAllocate());
2357
2358 FreeSpace* node = top();
2359 if (node == nullptr) return nullptr;
2360 set_top(node->next());
2361 *node_size = node->Size();
2362 available_ -= *node_size;
2363 return node;
2364 }
2365
2366 FreeSpace* FreeListCategory::TryPickNodeFromList(size_t minimum_size,
2367 size_t* node_size) {
2368 DCHECK(page()->CanAllocate());
2369
2370 FreeSpace* node = PickNodeFromList(node_size);
2371 if ((node != nullptr) && (*node_size < minimum_size)) {
2372 Free(node, *node_size, kLinkCategory);
2373 *node_size = 0;
2374 return nullptr;
2375 }
2376 return node;
2377 }
2378
2379 FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
2380 size_t* node_size) {
2381 DCHECK(page()->CanAllocate());
2382
2383 FreeSpace* prev_non_evac_node = nullptr;
2384 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2385 cur_node = cur_node->next()) {
2386 size_t size = cur_node->size();
2387 if (size >= minimum_size) {
2388 DCHECK_GE(available_, size);
2389 available_ -= size;
2390 if (cur_node == top()) {
2391 set_top(cur_node->next());
2392 }
2393 if (prev_non_evac_node != nullptr) {
2394 prev_non_evac_node->set_next(cur_node->next());
2395 }
2396 *node_size = size;
2397 return cur_node;
2398 }
2399
2400 prev_non_evac_node = cur_node;
2401 }
2402 return nullptr;
2403 }
2404
2405 bool FreeListCategory::Free(FreeSpace* free_space, size_t size_in_bytes,
2406 FreeMode mode) {
2407 if (!page()->CanAllocate()) return false;
2408
2409 free_space->set_next(top());
2410 set_top(free_space);
2411 available_ += size_in_bytes;
2412 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2413 owner()->AddCategory(this);
2414 }
2415 return true;
2416 }
2417
2418
2419 void FreeListCategory::RepairFreeList(Heap* heap) {
2420 FreeSpace* n = top();
2421 while (n != NULL) {
2422 Map** map_location = reinterpret_cast<Map**>(n->address());
2423 if (*map_location == NULL) {
2424 *map_location = heap->free_space_map();
2425 } else {
2426 DCHECK(*map_location == heap->free_space_map());
2427 }
2428 n = n->next();
2429 }
2430 }
2431
2432 void FreeListCategory::Relink() {
2433 DCHECK(!is_linked());
2434 owner()->AddCategory(this);
2435 }
2436
2437 void FreeListCategory::Invalidate() {
2438 page()->remove_available_in_free_list(available());
2439 Reset();
2440 type_ = kInvalidCategory;
2441 }
2442
2443 FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2444 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2445 categories_[i] = nullptr;
2446 }
2447 Reset();
2448 }
2449
2450
2451 void FreeList::Reset() {
2452 ForAllFreeListCategories(
2453 [](FreeListCategory* category) { category->Reset(); });
2454 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2455 categories_[i] = nullptr;
2456 }
2457 ResetStats();
2458 }
2459
2460 size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
2461 if (size_in_bytes == 0) return 0;
2462
2463 owner()->heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2464 ClearRecordedSlots::kNo);
2465
2466 Page* page = Page::FromAddress(start);
2467
2468 // Blocks have to be a minimum size to hold free list items.
2469 if (size_in_bytes < kMinBlockSize) {
2470 page->add_wasted_memory(size_in_bytes);
2471 wasted_bytes_.Increment(size_in_bytes);
2472 return size_in_bytes;
2473 }
2474
2475 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
2476 // Insert other blocks at the head of a free list of the appropriate
2477 // magnitude.
2478 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2479 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2480 page->add_available_in_free_list(size_in_bytes);
2481 }
2482 DCHECK_EQ(page->AvailableInFreeList(), page->available_in_free_list());
2483 return 0;
2484 }
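
// Usage sketch with hypothetical sizes (assume tiny < kMinBlockSize <= large):
//
//   free_list->Free(start, tiny, kLinkCategory);   // accounted as wasted
//                                                  // memory, returns tiny.
//   free_list->Free(start, large, kLinkCategory);  // becomes a FreeSpace node
//                                                  // in the category picked by
//                                                  // SelectFreeListCategoryType,
//                                                  // returns 0.
//
// In both cases the region is first covered by a filler object so the heap
// stays iterable.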
2485
2486 FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t* node_size) {
2487 FreeListCategoryIterator it(this, type);
2488 FreeSpace* node = nullptr;
2489 while (it.HasNext()) {
2490 FreeListCategory* current = it.Next();
2491 node = current->PickNodeFromList(node_size);
2492 if (node != nullptr) {
2493 Page::FromAddress(node->address())
2494 ->remove_available_in_free_list(*node_size);
2495 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2496 return node;
2497 }
2498 RemoveCategory(current);
2499 }
2500 return node;
2501 }
2502
2503 FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
2504 size_t minimum_size) {
2505 if (categories_[type] == nullptr) return nullptr;
2506 FreeSpace* node =
2507 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
2508 if (node != nullptr) {
2509 Page::FromAddress(node->address())
2510 ->remove_available_in_free_list(*node_size);
2511 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2512 }
2513 return node;
2514 }
2515
2516 FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2517 size_t* node_size,
2518 size_t minimum_size) {
2519 FreeListCategoryIterator it(this, type);
2520 FreeSpace* node = nullptr;
2521 while (it.HasNext()) {
2522 FreeListCategory* current = it.Next();
2523 node = current->SearchForNodeInList(minimum_size, node_size);
2524 if (node != nullptr) {
2525 Page::FromAddress(node->address())
2526 ->remove_available_in_free_list(*node_size);
2527 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2528 return node;
2529 }
2530 if (current->is_empty()) {
2531 RemoveCategory(current);
2532 }
2533 }
2534 return node;
2535 }
2536
2537 FreeSpace* FreeList::FindNodeFor(size_t size_in_bytes, size_t* node_size) {
2538 FreeSpace* node = nullptr;
2539
2540 // First try the allocation fast path: try to allocate the minimum element
2541 // size of a free list category. This operation is constant time.
2542 FreeListCategoryType type =
2543 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2544 for (int i = type; i < kHuge; i++) {
2545 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
2546 if (node != nullptr) return node;
2547 }
2548
2549 // Next search the huge list for free list nodes. This takes linear time in
2550 // the number of huge elements.
2551 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
2552 if (node != nullptr) {
2553 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2554 return node;
2555 }
2556
2557 // We need a huge block of memory, but we didn't find anything in the huge
2558 // list.
2559 if (type == kHuge) return nullptr;
2560
2561 // Now search the best fitting free list for a node that has at least the
2562 // requested size.
2563 type = SelectFreeListCategoryType(size_in_bytes);
2564 node = TryFindNodeIn(type, node_size, size_in_bytes);
2565
2566 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2567 return node;
2568 }
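
// In short, FindNodeFor() tries three stages for a request of size_in_bytes:
//   1. a constant-time fast path over categories whose minimum element size
//      already satisfies the request (starting at the type returned by
//      SelectFastAllocationFreeListCategoryType);
//   2. a linear search of the kHuge list for any node of sufficient size;
//   3. a final attempt in the category that best matches size_in_bytes, which
//      may still fail if the node found there is too small.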
2569
2570 // Allocation on the old-space free list. If it succeeds then a new linear
2571 // allocation area has been set up with the top and limit of the space. If
2572 // the allocation fails then nullptr is returned, and the caller can perform
2573 // a GC or allocate a new page before retrying.
2574 HeapObject* FreeList::Allocate(size_t size_in_bytes) {
2575 DCHECK(size_in_bytes <= kMaxBlockSize);
2576 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2577 DCHECK_LE(owner_->top(), owner_->limit());
2578 #ifdef DEBUG
2579 if (owner_->top() != owner_->limit()) {
2580 DCHECK_EQ(Page::FromAddress(owner_->top()),
2581 Page::FromAddress(owner_->limit() - 1));
2582 }
2583 #endif
2584 // Don't free list allocate if there is linear space available.
2585 DCHECK_LT(static_cast<size_t>(owner_->limit() - owner_->top()),
2586 size_in_bytes);
2587
2588 // Mark the old linear allocation area with a free space map so it can be
2589 // skipped when scanning the heap. This also puts it back in the free list
2590 // if it is big enough.
2591 owner_->EmptyAllocationInfo();
2592
2593 owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
2594 Heap::kNoGCFlags, kNoGCCallbackFlags);
2595
2596 size_t new_node_size = 0;
2597 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2598 if (new_node == nullptr) return nullptr;
2599
2600 DCHECK_GE(new_node_size, size_in_bytes);
2601 size_t bytes_left = new_node_size - size_in_bytes;
2602
2603 #ifdef DEBUG
2604 for (size_t i = 0; i < size_in_bytes / kPointerSize; i++) {
2605 reinterpret_cast<Object**>(new_node->address())[i] =
2606 Smi::FromInt(kCodeZapValue);
2607 }
2608 #endif
2609
2610 // The old-space-step might have finished sweeping and restarted marking.
2611 // Verify that it did not turn the page of the new node into an evacuation
2612 // candidate.
2613 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2614
2615 const size_t kThreshold = IncrementalMarking::kAllocatedThreshold;
2616
2617 // Memory in the linear allocation area is counted as allocated. We may free
2618 // a little of this again immediately; see below.
2619 owner_->Allocate(static_cast<int>(new_node_size));
2620
2621 if (owner_->heap()->inline_allocation_disabled()) {
2622 // Keep the linear allocation area empty if requested to do so, and
2623 // return the area back to the free list instead.
2624 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2625 owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
2626 new_node->address() + size_in_bytes);
2627 } else if (bytes_left > kThreshold &&
2628 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
2629 FLAG_incremental_marking) {
2630 size_t linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2631 // We don't want to give too large linear areas to the allocator while
2632 // incremental marking is going on, because we won't check again whether
2633 // we want to do another increment until the linear area is used up.
2634 DCHECK_GE(new_node_size, size_in_bytes + linear_size);
2635 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2636 new_node_size - size_in_bytes - linear_size);
2637 owner_->SetAllocationInfo(
2638 new_node->address() + size_in_bytes,
2639 new_node->address() + size_in_bytes + linear_size);
2640 } else {
2641 // Normally we give the rest of the node to the allocator as its new
2642 // linear allocation area.
2643 owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
2644 new_node->address() + new_node_size);
2645 }
2646
2647 return new_node;
2648 }
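
// The remainder of the chosen node (bytes_left) is handled in one of three
// ways above: returned to the free list right away when inline allocation is
// disabled, split so that only a roughly kThreshold-sized linear area is kept
// while incremental marking still wants regular steps, or kept in full as the
// new linear allocation area in the common case.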
2649
2650 size_t FreeList::EvictFreeListItems(Page* page) {
2651 size_t sum = 0;
2652 page->ForAllFreeListCategories(
2653 [this, &sum, page](FreeListCategory* category) {
2654 DCHECK_EQ(this, category->owner());
2655 sum += category->available();
2656 RemoveCategory(category);
2657 category->Invalidate();
2658 });
2659 return sum;
2660 }
2661
2662 bool FreeList::ContainsPageFreeListItems(Page* page) {
2663 bool contained = false;
2664 page->ForAllFreeListCategories(
2665 [this, &contained](FreeListCategory* category) {
2666 if (category->owner() == this && category->is_linked()) {
2667 contained = true;
2668 }
2669 });
2670 return contained;
2671 }
2672
2673 void FreeList::RepairLists(Heap* heap) {
2674 ForAllFreeListCategories(
2675 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2676 }
2677
2678 bool FreeList::AddCategory(FreeListCategory* category) {
2679 FreeListCategoryType type = category->type_;
2680 FreeListCategory* top = categories_[type];
2681
2682 if (category->is_empty()) return false;
2683 if (top == category) return false;
2684
2685 // Common double-linked list insertion.
2686 if (top != nullptr) {
2687 top->set_prev(category);
2688 }
2689 category->set_next(top);
2690 categories_[type] = category;
2691 return true;
2692 }
2693
2694 void FreeList::RemoveCategory(FreeListCategory* category) {
2695 FreeListCategoryType type = category->type_;
2696 FreeListCategory* top = categories_[type];
2697
2698 // Common double-linked list removal.
2699 if (top == category) {
2700 categories_[type] = category->next();
2701 }
2702 if (category->prev() != nullptr) {
2703 category->prev()->set_next(category->next());
2704 }
2705 if (category->next() != nullptr) {
2706 category->next()->set_prev(category->prev());
2707 }
2708 category->set_next(nullptr);
2709 category->set_prev(nullptr);
2710 }
2711
2712 void FreeList::PrintCategories(FreeListCategoryType type) {
2713 FreeListCategoryIterator it(this, type);
2714 PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
2715 static_cast<void*>(categories_[type]), type);
2716 while (it.HasNext()) {
2717 FreeListCategory* current = it.Next();
2718 PrintF("%p -> ", static_cast<void*>(current));
2719 }
2720 PrintF("null\n");
2721 }
2722
2723
2724 #ifdef DEBUG
2725 size_t FreeListCategory::SumFreeList() {
2726 size_t sum = 0;
2727 FreeSpace* cur = top();
2728 while (cur != NULL) {
2729 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2730 sum += cur->nobarrier_size();
2731 cur = cur->next();
2732 }
2733 return sum;
2734 }
2735
2736 int FreeListCategory::FreeListLength() {
2737 int length = 0;
2738 FreeSpace* cur = top();
2739 while (cur != NULL) {
2740 length++;
2741 cur = cur->next();
2742 if (length == kVeryLongFreeList) return length;
2743 }
2744 return length;
2745 }
2746
2747 bool FreeList::IsVeryLong() {
2748 int len = 0;
2749 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2750 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2751 while (it.HasNext()) {
2752 len += it.Next()->FreeListLength();
2753 if (len >= FreeListCategory::kVeryLongFreeList) return true;
2754 }
2755 }
2756 return false;
2757 }
2758
2759
2760 // This can take a very long time because it is linear in the number of entries
2761 // on the free list, so it should not be called if FreeListLength returns
2762 // kVeryLongFreeList.
2763 size_t FreeList::SumFreeLists() {
2764 size_t sum = 0;
2765 ForAllFreeListCategories(
2766 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
2767 return sum;
2768 }
2769 #endif
2770
2771
2772 // -----------------------------------------------------------------------------
2773 // OldSpace implementation
2774
2775 void PagedSpace::PrepareForMarkCompact() {
2776 // We don't have a linear allocation area while sweeping. It will be restored
2777 // on the first allocation after the sweep.
2778 EmptyAllocationInfo();
2779
2780 // Clear the free list before a full GC; it will be rebuilt afterward.
2781 free_list_.Reset();
2782 }
2783
2784 size_t PagedSpace::SizeOfObjects() {
2785 CHECK_GE(limit(), top());
2786 DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
2787 return Size() - (limit() - top());
2788 }
2789
2790
2791 // By the time we have booted, a map has been created that represents free
2792 // space on the heap. If there was already a free list then the elements on it
2793 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
2794 // fix them.
2795 void PagedSpace::RepairFreeListsAfterDeserialization() {
2796 free_list_.RepairLists(heap());
2797 // Each page may have a small free space that is not tracked by a free list.
2798 // Update the maps for those free space objects.
2799 for (Page* page : *this) {
2800 size_t size = page->wasted_memory();
2801 if (size == 0) continue;
2802 DCHECK_GE(static_cast<size_t>(Page::kPageSize), size);
2803 Address address = page->OffsetToAddress(Page::kPageSize - size);
2804 heap()->CreateFillerObjectAt(address, static_cast<int>(size),
2805 ClearRecordedSlots::kNo);
2806 }
2807 }
2808
2809
2810 HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
2811 MarkCompactCollector* collector = heap()->mark_compact_collector();
2812 if (collector->sweeping_in_progress()) {
2813 // Wait for the sweeper threads here and complete the sweeping phase.
2814 collector->EnsureSweepingCompleted();
2815
2816 // After waiting for the sweeper threads, there may be new free-list
2817 // entries.
2818 return free_list_.Allocate(size_in_bytes);
2819 }
2820 return nullptr;
2821 }
2822
2823
2824 HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2825 MarkCompactCollector* collector = heap()->mark_compact_collector();
2826 if (collector->sweeping_in_progress()) {
2827 collector->SweepAndRefill(this);
2828 return free_list_.Allocate(size_in_bytes);
2829 }
2830 return nullptr;
2831 }
2832
2833 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2834 DCHECK_GE(size_in_bytes, 0);
2835 const int kMaxPagesToSweep = 1;
2836
2837 // Allocation in this space has failed.
2838
2839 MarkCompactCollector* collector = heap()->mark_compact_collector();
2840 // Sweeping is still in progress.
2841 if (collector->sweeping_in_progress()) {
2842 // First try to refill the free-list, concurrent sweeper threads
2843 // may have freed some objects in the meantime.
2844 RefillFreeList();
2845
2846 // Retry the free list allocation.
2847 HeapObject* object =
2848 free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2849 if (object != NULL) return object;
2850
2851 // If sweeping is still in progress try to sweep pages on the main thread.
2852 int max_freed = collector->sweeper().ParallelSweepSpace(
2853 identity(), size_in_bytes, kMaxPagesToSweep);
2854 RefillFreeList();
2855 if (max_freed >= size_in_bytes) {
2856 object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2857 if (object != nullptr) return object;
2858 }
2859 }
2860
2861 if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
2862 DCHECK((CountTotalPages() > 1) ||
2863 (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
2864 return free_list_.Allocate(static_cast<size_t>(size_in_bytes));
2865 }
2866
2867 // If sweeper threads are active, wait for them at that point and steal
2868 // elements from their free lists. Allocation may still fail here, which
2869 // would indicate that there is not enough memory for the given allocation.
2870 return SweepAndRetryAllocation(size_in_bytes);
2871 }
2872
2873 #ifdef DEBUG
2874 void PagedSpace::ReportStatistics() {
2875 int pct = static_cast<int>(Available() * 100 / Capacity());
2876 PrintF(" capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
2877 ", available: %" V8PRIdPTR ", %%%d\n",
2878 Capacity(), Waste(), Available(), pct);
2879
2880 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2881 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2882 }
2883 ClearHistograms(heap()->isolate());
2884 HeapObjectIterator obj_it(this);
2885 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2886 CollectHistogramInfo(obj);
2887 ReportHistogram(heap()->isolate(), true);
2888 }
2889 #endif
2890
2891
2892 // -----------------------------------------------------------------------------
2893 // MapSpace implementation
2894
2895 #ifdef VERIFY_HEAP
2896 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
2897 #endif
2898
2899 Address LargePage::GetAddressToShrink() {
2900 HeapObject* object = GetObject();
2901 if (executable() == EXECUTABLE) {
2902 return 0;
2903 }
2904 size_t used_size = RoundUp((object->address() - address()) + object->Size(),
2905 MemoryAllocator::GetCommitPageSize());
2906 if (used_size < CommittedPhysicalMemory()) {
2907 return address() + used_size;
2908 }
2909 return 0;
2910 }
2911
2912 void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
2913 RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
2914 SlotSet::FREE_EMPTY_BUCKETS);
2915 RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
2916 SlotSet::FREE_EMPTY_BUCKETS);
2917 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
2918 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
2919 }
2920
2921 // -----------------------------------------------------------------------------
2922 // LargeObjectIterator
2923
2924 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2925 current_ = space->first_page_;
2926 }
2927
2928
2929 HeapObject* LargeObjectIterator::Next() {
2930 if (current_ == NULL) return NULL;
2931
2932 HeapObject* object = current_->GetObject();
2933 current_ = current_->next_page();
2934 return object;
2935 }
2936
2937
2938 // -----------------------------------------------------------------------------
2939 // LargeObjectSpace
2940
2941 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2942 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2943 first_page_(NULL),
2944 size_(0),
2945 page_count_(0),
2946 objects_size_(0),
2947 chunk_map_(1024) {}
2948
2949 LargeObjectSpace::~LargeObjectSpace() {}
2950
2951
2952 bool LargeObjectSpace::SetUp() {
2953 first_page_ = NULL;
2954 size_ = 0;
2955 page_count_ = 0;
2956 objects_size_ = 0;
2957 chunk_map_.Clear();
2958 return true;
2959 }
2960
2961
2962 void LargeObjectSpace::TearDown() {
2963 while (first_page_ != NULL) {
2964 LargePage* page = first_page_;
2965 first_page_ = first_page_->next_page();
2966 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2967 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
2968 }
2969 SetUp();
2970 }
2971
2972
2973 AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2974 Executability executable) {
2975 // Check if we want to force a GC before growing the old space further.
2976 // If so, fail the allocation.
2977 if (!heap()->CanExpandOldGeneration(object_size) ||
2978 !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
2979 return AllocationResult::Retry(identity());
2980 }
2981
2982 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
2983 object_size, this, executable);
2984 if (page == NULL) return AllocationResult::Retry(identity());
2985 DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
2986
2987 size_ += static_cast<int>(page->size());
2988 AccountCommitted(page->size());
2989 objects_size_ += object_size;
2990 page_count_++;
2991 page->set_next_page(first_page_);
2992 first_page_ = page;
2993
2994 InsertChunkMapEntries(page);
2995
2996 HeapObject* object = page->GetObject();
2997 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2998
2999 if (Heap::ShouldZapGarbage()) {
3000 // Make the object consistent so the heap can be verified in OldSpaceStep.
3001 // We only need to do this in debug builds or if verify_heap is on.
3002 reinterpret_cast<Object**>(object->address())[0] =
3003 heap()->fixed_array_map();
3004 reinterpret_cast<Object**>(object->address())[1] = Smi::kZero;
3005 }
3006
3007 heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
3008 kNoGCCallbackFlags);
3009 AllocationStep(object->address(), object_size);
3010
3011 if (heap()->incremental_marking()->black_allocation()) {
3012 Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
3013 MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
3014 }
3015 return object;
3016 }
3017
3018
3019 size_t LargeObjectSpace::CommittedPhysicalMemory() {
3020 // On a platform that provides lazy committing of memory, we over-account
3021 // the actually committed memory. There is no easy way right now to support
3022 // precise accounting of committed memory in large object space.
3023 return CommittedMemory();
3024 }
3025
3026
3027 // GC support
3028 Object* LargeObjectSpace::FindObject(Address a) {
3029 LargePage* page = FindPage(a);
3030 if (page != NULL) {
3031 return page->GetObject();
3032 }
3033 return Smi::kZero; // Signaling not found.
3034 }
3035
3036
3037 LargePage* LargeObjectSpace::FindPage(Address a) {
3038 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3039 base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3040 static_cast<uint32_t>(key));
3041 if (e != NULL) {
3042 DCHECK(e->value != NULL);
3043 LargePage* page = reinterpret_cast<LargePage*>(e->value);
3044 DCHECK(LargePage::IsValid(page));
3045 if (page->Contains(a)) {
3046 return page;
3047 }
3048 }
3049 return NULL;
3050 }
3051
3052
3053 void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3054 LargePage* current = first_page_;
3055 while (current != NULL) {
3056 HeapObject* object = current->GetObject();
3057 MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
3058 DCHECK(Marking::IsBlack(mark_bit));
3059 Marking::BlackToWhite(mark_bit);
3060 Page::FromAddress(object->address())->ResetProgressBar();
3061 Page::FromAddress(object->address())->ResetLiveBytes();
3062 current = current->next_page();
3063 }
3064 }
3065
3066 void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
3067 // Register all MemoryChunk::kAlignment-aligned chunks covered by
3068 // this large page in the chunk map.
3069 uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
3070 uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
3071 MemoryChunk::kAlignment;
3072 for (uintptr_t key = start; key <= limit; key++) {
3073 base::HashMap::Entry* entry = chunk_map_.InsertNew(
3074 reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
3075 DCHECK(entry != NULL);
3076 entry->value = page;
3077 }
3078 }
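
// Sketch with a hypothetical page address: for a large page at 0x40000000
// spanning 3 * MemoryChunk::kAlignment bytes, the loop above registers keys
// 0x40000000 / kAlignment through (0x40000000 + 3 * kAlignment - 1) /
// kAlignment, i.e. one chunk-map entry per alignment-sized slice the page
// covers, so FindPage() can map any interior address back to this LargePage.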
3079
3080 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
3081 RemoveChunkMapEntries(page, page->address());
3082 }
3083
3084 void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
3085 Address free_start) {
3086 uintptr_t start = RoundUp(reinterpret_cast<uintptr_t>(free_start),
3087 MemoryChunk::kAlignment) /
3088 MemoryChunk::kAlignment;
3089 uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
3090 MemoryChunk::kAlignment;
3091 for (uintptr_t key = start; key <= limit; key++) {
3092 chunk_map_.Remove(reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
3093 }
3094 }
3095
3096 void LargeObjectSpace::FreeUnmarkedObjects() {
3097 LargePage* previous = NULL;
3098 LargePage* current = first_page_;
3099 while (current != NULL) {
3100 HeapObject* object = current->GetObject();
3101 MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
3102 DCHECK(!Marking::IsGrey(mark_bit));
3103 if (Marking::IsBlack(mark_bit)) {
3104 Address free_start;
3105 if ((free_start = current->GetAddressToShrink()) != 0) {
3106 // TODO(hpayer): Perform partial free concurrently.
3107 current->ClearOutOfLiveRangeSlots(free_start);
3108 RemoveChunkMapEntries(current, free_start);
3109 heap()->memory_allocator()->PartialFreeMemory(current, free_start);
3110 }
3111 previous = current;
3112 current = current->next_page();
3113 } else {
3114 LargePage* page = current;
3115 // Cut the chunk out from the chunk list.
3116 current = current->next_page();
3117 if (previous == NULL) {
3118 first_page_ = current;
3119 } else {
3120 previous->set_next_page(current);
3121 }
3122
3123 // Free the chunk.
3124 size_ -= static_cast<int>(page->size());
3125 AccountUncommitted(page->size());
3126 objects_size_ -= object->Size();
3127 page_count_--;
3128
3129 RemoveChunkMapEntries(page);
3130 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
3131 }
3132 }
3133 }
3134
3135
3136 bool LargeObjectSpace::Contains(HeapObject* object) {
3137 Address address = object->address();
3138 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3139
3140 bool owned = (chunk->owner() == this);
3141
3142 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3143
3144 return owned;
3145 }
3146
3147 std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
3148 return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
3149 }
3150
3151 #ifdef VERIFY_HEAP
3152 // We do not assume that the large object iterator works, because it depends
3153 // on the invariants we are checking during verification.
3154 void LargeObjectSpace::Verify() {
3155 for (LargePage* chunk = first_page_; chunk != NULL;
3156 chunk = chunk->next_page()) {
3157 // Each chunk contains an object that starts at the large object page's
3158 // object area start.
3159 HeapObject* object = chunk->GetObject();
3160 Page* page = Page::FromAddress(object->address());
3161 CHECK(object->address() == page->area_start());
3162
3163 // The first word should be a map, and we expect all map pointers to be
3164 // in map space.
3165 Map* map = object->map();
3166 CHECK(map->IsMap());
3167 CHECK(heap()->map_space()->Contains(map));
3168
3169 // We have only code, sequential strings, external strings
3170 // (sequential strings that have been morphed into external
3171 // strings), fixed arrays, fixed double arrays, and byte arrays in the
3172 // large object space.
3173 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
3174 object->IsExternalString() || object->IsFixedArray() ||
3175 object->IsFixedDoubleArray() || object->IsByteArray());
3176
3177 // The object itself should look OK.
3178 object->ObjectVerify();
3179
3180 // Byte arrays and strings don't have interior pointers.
3181 if (object->IsAbstractCode()) {
3182 VerifyPointersVisitor code_visitor;
3183 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3184 } else if (object->IsFixedArray()) {
3185 FixedArray* array = FixedArray::cast(object);
3186 for (int j = 0; j < array->length(); j++) {
3187 Object* element = array->get(j);
3188 if (element->IsHeapObject()) {
3189 HeapObject* element_object = HeapObject::cast(element);
3190 CHECK(heap()->Contains(element_object));
3191 CHECK(element_object->map()->IsMap());
3192 }
3193 }
3194 }
3195 }
3196 }
3197 #endif
3198
3199 #ifdef DEBUG
3200 void LargeObjectSpace::Print() {
3201 OFStream os(stdout);
3202 LargeObjectIterator it(this);
3203 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3204 obj->Print(os);
3205 }
3206 }
3207
3208
3209 void LargeObjectSpace::ReportStatistics() {
3210 PrintF(" size: %" V8PRIdPTR "\n", size_);
3211 int num_objects = 0;
3212 ClearHistograms(heap()->isolate());
3213 LargeObjectIterator it(this);
3214 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3215 num_objects++;
3216 CollectHistogramInfo(obj);
3217 }
3218
3219 PrintF(
3220 " number of objects %d, "
3221 "size of objects %" V8PRIdPTR "\n",
3222 num_objects, objects_size_);
3223 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3224 }
3225
3226
3227 void Page::Print() {
3228 // Make a best effort to print the objects in the page.
3229 PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
3230 AllocationSpaceName(this->owner()->identity()));
3231 printf(" --------------------------------------\n");
3232 HeapObjectIterator objects(this);
3233 unsigned mark_size = 0;
3234 for (HeapObject* object = objects.Next(); object != NULL;
3235 object = objects.Next()) {
3236 bool is_marked = Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object));
3237 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3238 if (is_marked) {
3239 mark_size += object->Size();
3240 }
3241 object->ShortPrint();
3242 PrintF("\n");
3243 }
3244 printf(" --------------------------------------\n");
3245 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3246 }
3247
3248 #endif // DEBUG
3249 } // namespace internal
3250 } // namespace v8
3251