// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/zone/accounting-allocator.h"

#include <cstdlib>

#if V8_LIBC_BIONIC
#include <malloc.h>  // NOLINT
#endif

namespace v8 {
namespace internal {

AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
  static const size_t kDefaultBucketMaxSize = 5;

  memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
            nullptr);
  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
  std::fill(unused_segments_max_sizes_,
            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
}

AccountingAllocator::~AccountingAllocator() { ClearPool(); }

void AccountingAllocator::MemoryPressureNotification(
    MemoryPressureLevel level) {
  memory_pressure_level_.SetValue(level);

  if (level != MemoryPressureLevel::kNone) {
    ClearPool();
  }
}

void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
  // The sum of the bytes of one segment of each size.
  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
                                  (size_t(1) << kMinSegmentSizePower);
  size_t fits_fully = max_pool_size / full_size;

  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  // We assume that few zones (fewer than 'fits_fully') are active at the same
  // time. When zones grow regularly, they keep requesting segments of
  // increasing size, so we try to fit as many 'complete sets' (one segment of
  // each size) into the pool as possible. The remaining space is used for an
  // incomplete set, starting with the smaller sizes.
  // This works best if max_pool_size is a multiple of full_size. If
  // max_pool_size is not a sum of segment sizes, the actual pool size may be
  // smaller than max_pool_size; note that no actual memory is wasted, though.
  // TODO(heimbuef): Determine a better strategy for generating a segment size
  // distribution that is closer to real/benchmark use cases and uses the
  // given max_pool_size more efficiently.
  size_t total_size = fits_fully * full_size;

  for (size_t power = 0; power < kNumberBuckets; ++power) {
    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
        max_pool_size) {
      unused_segments_max_sizes_[power] = fits_fully + 1;
      // Account for the extra segment in bytes, matching the check above.
      total_size += size_t(1) << (power + kMinSegmentSizePower);
    } else {
      unused_segments_max_sizes_[power] = fits_fully;
    }
  }
}

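// A worked example of the sizing above. Assuming the constants in
// accounting-allocator.h are kMinSegmentSizePower = 13 and
// kMaxSegmentSizePower = 18 (an assumption here; six buckets, 8KB..256KB),
// with max_pool_size = 4MB:
//   full_size  = 2^19 - 2^13 = 516096 bytes (one segment of each size);
//   fits_fully = 4194304 / 516096 = 8 complete sets (4128768 bytes);
//   the 65536 leftover bytes admit one extra 8KB, 16KB and 32KB segment,
//   so the per-bucket limits become {9, 9, 9, 8, 8, 8}.
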
Segment* AccountingAllocator::GetSegment(size_t bytes) {
  Segment* result = GetSegmentFromPool(bytes);
  if (result == nullptr) {
    result = AllocateSegment(bytes);
    // AllocateSegment returns nullptr when malloc fails, so only initialize
    // segments that were actually allocated.
    if (result != nullptr) {
      result->Initialize(bytes);
    }
  }

  return result;
}

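// Typical flow (see zone.cc): a growing Zone calls GetSegment(), and returns
// its segments via ReturnSegment() when it dies. The pool keeps recently used
// segments around so that frequent zone churn avoids malloc/free round trips.
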
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
  void* memory = malloc(bytes);
  if (memory) {
    base::AtomicWord current =
        base::NoBarrier_AtomicIncrement(&current_memory_usage_, bytes);
    base::AtomicWord max = base::NoBarrier_Load(&max_memory_usage_);
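    // Lock-free "atomic max": NoBarrier_CompareAndSwap returns the value it
    // observed before the swap, so after a lost race 'max' holds the
    // competing value and the loop keeps retrying until max_memory_usage_ is
    // at least 'current'.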
    while (current > max) {
      max = base::NoBarrier_CompareAndSwap(&max_memory_usage_, max, current);
    }
  }
  return reinterpret_cast<Segment*>(memory);
}

void AccountingAllocator::ReturnSegment(Segment* segment) {
  segment->ZapContents();

  // Under memory pressure, bypass the pool entirely; otherwise pool the
  // segment and free it only if the pool has no room for it.
  if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
    FreeSegment(segment);
  } else if (!AddSegmentToPool(segment)) {
    FreeSegment(segment);
  }
}

void AccountingAllocator::FreeSegment(Segment* memory) {
  base::NoBarrier_AtomicIncrement(
      &current_memory_usage_, -static_cast<base::AtomicWord>(memory->size()));
  memory->ZapHeader();
  free(memory);
}

size_t AccountingAllocator::GetCurrentMemoryUsage() const {
  return base::NoBarrier_Load(&current_memory_usage_);
}

size_t AccountingAllocator::GetMaxMemoryUsage() const {
  return base::NoBarrier_Load(&max_memory_usage_);
}

size_t AccountingAllocator::GetCurrentPoolSize() const {
  return base::NoBarrier_Load(&current_pool_size_);
}

Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
  if (requested_size > (size_t(1) << kMaxSegmentSizePower)) {
    return nullptr;
  }

  // Round the request up to the next power of two and map it to its bucket.
  size_t power = kMinSegmentSizePower;
  while (requested_size > (static_cast<size_t>(1) << power)) power++;

  // The '+ 0' avoids odr-using the static constant, which DCHECK_GE would
  // otherwise bind by reference.
  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  Segment* segment;
  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    segment = unused_segments_heads_[power];

    if (segment != nullptr) {
      unused_segments_heads_[power] = segment->next();
      segment->set_next(nullptr);

      unused_segments_sizes_[power]--;
      base::NoBarrier_AtomicIncrement(
          &current_pool_size_,
          -static_cast<base::AtomicWord>(segment->size()));
    }
  }

  if (segment) {
    DCHECK_GE(segment->size(), requested_size);
  }
  return segment;
}

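// For illustration, with the assumed kMinSegmentSizePower = 13: a request for
// 5000 bytes rounds up to 8192 (power 13) and is served from bucket 0, while
// a request for 10000 bytes rounds up to 16384 (power 14), bucket 1.
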
bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
  size_t size = segment->size();

  if (size >= (size_t(1) << (kMaxSegmentSizePower + 1))) return false;

  if (size < (size_t(1) << kMinSegmentSizePower)) return false;

  size_t power = kMaxSegmentSizePower;

  while (size < (static_cast<size_t>(1) << power)) power--;

  DCHECK_GE(power, kMinSegmentSizePower + 0);
  power -= kMinSegmentSizePower;

  {
    base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
      return false;
    }

    segment->set_next(unused_segments_heads_[power]);
    unused_segments_heads_[power] = segment;
    base::NoBarrier_AtomicIncrement(&current_pool_size_, size);
    unused_segments_sizes_[power]++;
  }

  return true;
}

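// Note the asymmetry with GetSegmentFromPool: requests are rounded *up* to a
// bucket, while returned segments are rounded *down*. A pooled segment is
// therefore always at least as large as any request routed to its bucket,
// which is exactly what the DCHECK_GE(segment->size(), requested_size) in
// GetSegmentFromPool relies on.
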
void AccountingAllocator::ClearPool() {
  base::LockGuard<base::Mutex> lock_guard(&unused_segments_mutex_);

  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
       power++) {
    Segment* current = unused_segments_heads_[power];
    while (current) {
      Segment* next = current->next();
      FreeSegment(current);
      current = next;
    }
    unused_segments_heads_[power] = nullptr;
    // The bucket is empty now; keep its count consistent.
    unused_segments_sizes_[power] = 0;
  }
  // All pooled segments have been freed, so the pool holds zero bytes.
  base::NoBarrier_Store(&current_pool_size_, 0);
}

}  // namespace internal
}  // namespace v8