// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/store-buffer.h"

#include <algorithm>

#include "src/counters.h"
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

StoreBuffer(Heap * heap)18 StoreBuffer::StoreBuffer(Heap* heap)
19 : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
20 for (int i = 0; i < kStoreBuffers; i++) {
21 start_[i] = nullptr;
22 limit_[i] = nullptr;
23 lazy_top_[i] = nullptr;
24 }
25 task_running_ = false;
26 }
27
SetUp()28 void StoreBuffer::SetUp() {
29 // Allocate 3x the buffer size, so that we can start the new store buffer
30 // aligned to 2x the size. This lets us use a bit test to detect the end of
31 // the area.
32 virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
33 uintptr_t start_as_int =
34 reinterpret_cast<uintptr_t>(virtual_memory_->address());
35 start_[0] =
36 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize));
37 limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
38 start_[1] = limit_[0];
39 limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
40
41 Address* vm_limit = reinterpret_cast<Address*>(
42 reinterpret_cast<char*>(virtual_memory_->address()) +
43 virtual_memory_->size());
44
45 USE(vm_limit);
46 for (int i = 0; i < kStoreBuffers; i++) {
47 DCHECK(reinterpret_cast<Address>(start_[i]) >= virtual_memory_->address());
48 DCHECK(reinterpret_cast<Address>(limit_[i]) >= virtual_memory_->address());
49 DCHECK(start_[i] <= vm_limit);
50 DCHECK(limit_[i] <= vm_limit);
51 DCHECK((reinterpret_cast<uintptr_t>(limit_[i]) & kStoreBufferMask) == 0);
52 }
53
54 if (!virtual_memory_->Commit(reinterpret_cast<Address>(start_[0]),
55 kStoreBufferSize * kStoreBuffers,
56 false)) { // Not executable.
57 V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
58 }
59 current_ = 0;
60 top_ = start_[current_];
61 }
62
63
TearDown()64 void StoreBuffer::TearDown() {
65 delete virtual_memory_;
66 top_ = nullptr;
67 for (int i = 0; i < kStoreBuffers; i++) {
68 start_[i] = nullptr;
69 limit_[i] = nullptr;
70 lazy_top_[i] = nullptr;
71 }
72 }
73
74
StoreBufferOverflow(Isolate * isolate)75 void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
76 isolate->heap()->store_buffer()->FlipStoreBuffers();
77 isolate->counters()->store_buffer_overflows()->Increment();
78 }
79
FlipStoreBuffers()80 void StoreBuffer::FlipStoreBuffers() {
81 base::LockGuard<base::Mutex> guard(&mutex_);
82 int other = (current_ + 1) % kStoreBuffers;
83 MoveEntriesToRememberedSet(other);
84 lazy_top_[current_] = top_;
85 current_ = other;
86 top_ = start_[current_];
87
88 if (!task_running_) {
89 task_running_ = true;
90 Task* task = new Task(heap_->isolate(), this);
91 V8::GetCurrentPlatform()->CallOnBackgroundThread(
92 task, v8::Platform::kShortRunningTask);
93 }
94 }
95
MoveEntriesToRememberedSet(int index)96 void StoreBuffer::MoveEntriesToRememberedSet(int index) {
97 if (!lazy_top_[index]) return;
98 DCHECK_GE(index, 0);
99 DCHECK_LT(index, kStoreBuffers);
100 for (Address* current = start_[index]; current < lazy_top_[index];
101 current++) {
102 DCHECK(!heap_->code_space()->Contains(*current));
103 Address addr = *current;
104 Page* page = Page::FromAnyPointerAddress(heap_, addr);
105 if (IsDeletionAddress(addr)) {
106 current++;
107 Address end = *current;
108 DCHECK(!IsDeletionAddress(end));
109 addr = UnmarkDeletionAddress(addr);
110 if (end) {
111 RememberedSet<OLD_TO_NEW>::RemoveRange(page, addr, end,
112 SlotSet::PREFREE_EMPTY_BUCKETS);
113 } else {
114 RememberedSet<OLD_TO_NEW>::Remove(page, addr);
115 }
116 } else {
117 DCHECK(!IsDeletionAddress(addr));
118 RememberedSet<OLD_TO_NEW>::Insert(page, addr);
119 }
120 }
121 lazy_top_[index] = nullptr;
122 }
123
MoveAllEntriesToRememberedSet()124 void StoreBuffer::MoveAllEntriesToRememberedSet() {
125 base::LockGuard<base::Mutex> guard(&mutex_);
126 int other = (current_ + 1) % kStoreBuffers;
127 MoveEntriesToRememberedSet(other);
128 lazy_top_[current_] = top_;
129 MoveEntriesToRememberedSet(current_);
130 top_ = start_[current_];
131 }
132
ConcurrentlyProcessStoreBuffer()133 void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
134 base::LockGuard<base::Mutex> guard(&mutex_);
135 int other = (current_ + 1) % kStoreBuffers;
136 MoveEntriesToRememberedSet(other);
137 task_running_ = false;
138 }
139
DeleteEntry(Address start,Address end)140 void StoreBuffer::DeleteEntry(Address start, Address end) {
141 // Deletions coming from the GC are directly deleted from the remembered
142 // set. Deletions coming from the runtime are added to the store buffer
143 // to allow concurrent processing.
144 if (heap_->gc_state() == Heap::NOT_IN_GC) {
145 if (top_ + sizeof(Address) * 2 > limit_[current_]) {
146 StoreBufferOverflow(heap_->isolate());
147 }
148 *top_ = MarkDeletionAddress(start);
149 top_++;
150 *top_ = end;
151 top_++;
152 } else {
153 // In GC the store buffer has to be empty at any time.
154 DCHECK(Empty());
155 Page* page = Page::FromAddress(start);
156 if (end) {
157 RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
158 SlotSet::PREFREE_EMPTY_BUCKETS);
159 } else {
160 RememberedSet<OLD_TO_NEW>::Remove(page, start);
161 }
162 }
163 }
}  // namespace internal
}  // namespace v8