// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SLOT_SET_H
#define V8_SLOT_SET_H

#include <map>
#include <stack>

#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

// Data structure for maintaining a set of slots in a standard (non-large)
// page. The base address of the page must be set with SetPageStart before any
// operation.
// The data structure assumes that the slots are pointer size aligned and
// splits the valid slot offset range into kBuckets buckets.
// Each bucket is a bitmap with a bit corresponding to a single slot offset.
class SlotSet : public Malloced {
 public:
  enum EmptyBucketMode {
    FREE_EMPTY_BUCKETS,     // An empty bucket will be deallocated immediately.
    PREFREE_EMPTY_BUCKETS,  // An empty bucket will be unlinked from the slot
                            // set, but deallocated on demand by a sweeper
                            // thread.
    KEEP_EMPTY_BUCKETS      // An empty bucket will be kept.
  };

  SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      bucket[i].SetValue(nullptr);
    }
  }

  ~SlotSet() {
    for (int i = 0; i < kBuckets; i++) {
      ReleaseBucket(i);
    }
    FreeToBeFreedBuckets();
  }

  void SetPageStart(Address page_start) { page_start_ = page_start; }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  // This method should only be called on the main thread because concurrent
  // allocation of the bucket is not thread-safe.
  void Insert(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket == nullptr) {
      current_bucket = AllocateBucket();
      bucket[bucket_index].SetValue(current_bucket);
    }
    if (!(current_bucket[cell_index].Value() & (1u << bit_index))) {
      current_bucket[cell_index].SetBit(bit_index);
    }
  }

  // The slot offset specifies a slot at address page_start_ + slot_offset.
  void Remove(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
    if (current_bucket != nullptr) {
      uint32_t cell = current_bucket[cell_index].Value();
      if (cell) {
        uint32_t bit_mask = 1u << bit_index;
        if (cell & bit_mask) {
          current_bucket[cell_index].ClearBit(bit_index);
        }
      }
    }
  }
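  // Minimal usage sketch (illustrative only; `page` stands for the base
  // address of a regular page and offsets must be pointer-size aligned):
  //   SlotSet* slots = new SlotSet();
  //   slots->SetPageStart(page);
  //   slots->Insert(2 * kPointerSize);                 // record page + 2 words
  //   bool present = slots->Lookup(2 * kPointerSize);  // true
  //   slots->Remove(2 * kPointerSize);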
  // The slot offsets specify a range of slots at addresses:
  // [page_start_ + start_offset ... page_start_ + end_offset).
  void RemoveRange(int start_offset, int end_offset, EmptyBucketMode mode) {
    CHECK_LE(end_offset, 1 << kPageSizeBits);
    DCHECK_LE(start_offset, end_offset);
    int start_bucket, start_cell, start_bit;
    SlotToIndices(start_offset, &start_bucket, &start_cell, &start_bit);
    int end_bucket, end_cell, end_bit;
    SlotToIndices(end_offset, &end_bucket, &end_cell, &end_bit);
    uint32_t start_mask = (1u << start_bit) - 1;
    uint32_t end_mask = ~((1u << end_bit) - 1);
    if (start_bucket == end_bucket && start_cell == end_cell) {
      ClearCell(start_bucket, start_cell, ~(start_mask | end_mask));
      return;
    }
    int current_bucket = start_bucket;
    int current_cell = start_cell;
    ClearCell(current_bucket, current_cell, ~start_mask);
    current_cell++;
    base::AtomicValue<uint32_t>* bucket_ptr = bucket[current_bucket].Value();
    if (current_bucket < end_bucket) {
      if (bucket_ptr != nullptr) {
        ClearBucket(bucket_ptr, current_cell, kCellsPerBucket);
      }
      // The rest of the current bucket is cleared.
      // Move on to the next bucket.
      current_bucket++;
      current_cell = 0;
    }
    DCHECK(current_bucket == end_bucket ||
           (current_bucket < end_bucket && current_cell == 0));
    while (current_bucket < end_bucket) {
      if (mode == PREFREE_EMPTY_BUCKETS) {
        PreFreeEmptyBucket(current_bucket);
      } else if (mode == FREE_EMPTY_BUCKETS) {
        ReleaseBucket(current_bucket);
      } else {
        DCHECK(mode == KEEP_EMPTY_BUCKETS);
        bucket_ptr = bucket[current_bucket].Value();
        if (bucket_ptr) {
          ClearBucket(bucket_ptr, 0, kCellsPerBucket);
        }
      }
      current_bucket++;
    }
    // All buckets between start_bucket and end_bucket are cleared.
    bucket_ptr = bucket[current_bucket].Value();
    DCHECK(current_bucket == end_bucket && current_cell <= end_cell);
    if (current_bucket == kBuckets || bucket_ptr == nullptr) {
      return;
    }
    while (current_cell < end_cell) {
      bucket_ptr[current_cell].SetValue(0);
      current_cell++;
    }
    // All cells between start_cell and end_cell are cleared.
    DCHECK(current_bucket == end_bucket && current_cell == end_cell);
    ClearCell(end_bucket, end_cell, ~end_mask);
  }
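  // Note on the masks above (an informal reading, assuming SetBits(0, mask)
  // in ClearCell clears exactly the bits selected by mask): with
  // start_bit == 3, start_mask is 0b111, so passing ~start_mask clears bits
  // 3..31 of the start cell; symmetrically, ~end_mask clears the bits below
  // end_bit in the end cell. Buckets strictly between the two endpoints are
  // cleared or freed wholesale, depending on mode.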
  // The slot offset specifies a slot at address page_start_ + slot_offset.
  bool Lookup(int slot_offset) {
    int bucket_index, cell_index, bit_index;
    SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
    if (bucket[bucket_index].Value() != nullptr) {
      uint32_t cell = bucket[bucket_index].Value()[cell_index].Value();
      return (cell & (1u << bit_index)) != 0;
    }
    return false;
  }

  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  // This method should only be called on the main thread.
  //
  // Sample usage:
  // Iterate([](Address slot_address) {
  //    if (good(slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, EmptyBucketMode mode) {
    int new_count = 0;
    for (int bucket_index = 0; bucket_index < kBuckets; bucket_index++) {
      base::AtomicValue<uint32_t>* current_bucket =
          bucket[bucket_index].Value();
      if (current_bucket != nullptr) {
        int in_bucket_count = 0;
        int cell_offset = bucket_index * kBitsPerBucket;
        for (int i = 0; i < kCellsPerBucket; i++, cell_offset += kBitsPerCell) {
          if (current_bucket[i].Value()) {
            uint32_t cell = current_bucket[i].Value();
            uint32_t old_cell = cell;
            uint32_t mask = 0;
            while (cell) {
              int bit_offset = base::bits::CountTrailingZeros32(cell);
              uint32_t bit_mask = 1u << bit_offset;
              uint32_t slot = (cell_offset + bit_offset) << kPointerSizeLog2;
              if (callback(page_start_ + slot) == KEEP_SLOT) {
                ++in_bucket_count;
              } else {
                mask |= bit_mask;
              }
              cell ^= bit_mask;
            }
            uint32_t new_cell = old_cell & ~mask;
            if (old_cell != new_cell) {
              while (!current_bucket[i].TrySetValue(old_cell, new_cell)) {
                // If TrySetValue fails, the cell must have changed. We just
                // have to read the current value of the cell, & it with the
                // computed value, and retry. We can do this, because this
                // method will only be called on the main thread and filtering
                // threads will only remove slots.
                old_cell = current_bucket[i].Value();
                new_cell = old_cell & ~mask;
              }
            }
          }
        }
        if (mode == PREFREE_EMPTY_BUCKETS && in_bucket_count == 0) {
          PreFreeEmptyBucket(bucket_index);
        }
        new_count += in_bucket_count;
      }
    }
    return new_count;
  }

  void FreeToBeFreedBuckets() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
    while (!to_be_freed_buckets_.empty()) {
      base::AtomicValue<uint32_t>* top = to_be_freed_buckets_.top();
      to_be_freed_buckets_.pop();
      DeleteArray<base::AtomicValue<uint32_t>>(top);
    }
  }

 private:
  static const int kMaxSlots = (1 << kPageSizeBits) / kPointerSize;
  static const int kCellsPerBucket = 32;
  static const int kCellsPerBucketLog2 = 5;
  static const int kBitsPerCell = 32;
  static const int kBitsPerCellLog2 = 5;
  static const int kBitsPerBucket = kCellsPerBucket * kBitsPerCell;
  static const int kBitsPerBucketLog2 = kCellsPerBucketLog2 + kBitsPerCellLog2;
  static const int kBuckets = kMaxSlots / kCellsPerBucket / kBitsPerCell;
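  // Index arithmetic sketch (illustrative, assuming a 64-bit build where
  // kPointerSize == 8): slot_offset 8000 denotes slot 1000, which lands in
  // bucket 0 (1000 >> kBitsPerBucketLog2), cell 31
  // ((1000 >> kBitsPerCellLog2) & (kCellsPerBucket - 1)) and bit 8
  // (1000 & (kBitsPerCell - 1)); see SlotToIndices below.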
  base::AtomicValue<uint32_t>* AllocateBucket() {
    base::AtomicValue<uint32_t>* result =
        NewArray<base::AtomicValue<uint32_t>>(kCellsPerBucket);
    for (int i = 0; i < kCellsPerBucket; i++) {
      result[i].SetValue(0);
    }
    return result;
  }

  void ClearBucket(base::AtomicValue<uint32_t>* bucket, int start_cell,
                   int end_cell) {
    DCHECK_GE(start_cell, 0);
    DCHECK_LE(end_cell, kCellsPerBucket);
    int current_cell = start_cell;
    while (current_cell < end_cell) {
      bucket[current_cell].SetValue(0);
      current_cell++;
    }
  }

  void PreFreeEmptyBucket(int bucket_index) {
    base::AtomicValue<uint32_t>* bucket_ptr = bucket[bucket_index].Value();
    if (bucket_ptr != nullptr) {
      base::LockGuard<base::Mutex> guard(&to_be_freed_buckets_mutex_);
      to_be_freed_buckets_.push(bucket_ptr);
      bucket[bucket_index].SetValue(nullptr);
    }
  }

  void ReleaseBucket(int bucket_index) {
    DeleteArray<base::AtomicValue<uint32_t>>(bucket[bucket_index].Value());
    bucket[bucket_index].SetValue(nullptr);
  }

  void ClearCell(int bucket_index, int cell_index, uint32_t mask) {
    if (bucket_index < kBuckets) {
      base::AtomicValue<uint32_t>* cells = bucket[bucket_index].Value();
      if (cells != nullptr) {
        uint32_t cell = cells[cell_index].Value();
        if (cell) cells[cell_index].SetBits(0, mask);
      }
    } else {
      // GCC bug 59124: Emits wrong warnings
      // "array subscript is above array bounds"
      UNREACHABLE();
    }
  }

  // Converts the slot offset into bucket/cell/bit index.
  void SlotToIndices(int slot_offset, int* bucket_index, int* cell_index,
                     int* bit_index) {
    DCHECK_EQ(slot_offset % kPointerSize, 0);
    int slot = slot_offset >> kPointerSizeLog2;
    DCHECK(slot >= 0 && slot <= kMaxSlots);
    *bucket_index = slot >> kBitsPerBucketLog2;
    *cell_index = (slot >> kBitsPerCellLog2) & (kCellsPerBucket - 1);
    *bit_index = slot & (kBitsPerCell - 1);
  }

  base::AtomicValue<base::AtomicValue<uint32_t>*> bucket[kBuckets];
  Address page_start_;
  base::Mutex to_be_freed_buckets_mutex_;
  std::stack<base::AtomicValue<uint32_t>*> to_be_freed_buckets_;
};
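// A sketch of the intended deferred-freeing protocol (inferred from the
// PREFREE_EMPTY_BUCKETS comment above, not a prescribed call sequence): the
// main thread runs
//   slots->Iterate(callback, SlotSet::PREFREE_EMPTY_BUCKETS);
// which unlinks empty buckets into an internal queue, and a sweeper thread
// later runs
//   slots->FreeToBeFreedBuckets();
// to actually deallocate them.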
enum SlotType {
  EMBEDDED_OBJECT_SLOT,
  OBJECT_SLOT,
  CELL_TARGET_SLOT,
  CODE_TARGET_SLOT,
  CODE_ENTRY_SLOT,
  DEBUG_TARGET_SLOT,
  CLEARED_SLOT
};

// Data structure for maintaining a multiset of typed slots in a page.
// Typed slots can only appear in Code and JSFunction objects, so
// the maximum possible offset is limited by the LargePage::kMaxCodePageSize.
// The implementation is a chain of chunks, where each chunk is an array of
// encoded (slot type, slot offset) pairs.
// There is no duplicate detection and we do not expect many duplicates because
// typed slots contain V8 internal pointers that are not directly exposed to
// JS.
class TypedSlotSet {
 public:
  enum IterationMode { PREFREE_EMPTY_CHUNKS, KEEP_EMPTY_CHUNKS };

  typedef std::pair<SlotType, uint32_t> TypeAndOffset;

  struct TypedSlot {
    TypedSlot() {
      type_and_offset_.SetValue(0);
      host_offset_.SetValue(0);
    }

    TypedSlot(SlotType type, uint32_t host_offset, uint32_t offset) {
      type_and_offset_.SetValue(TypeField::encode(type) |
                                OffsetField::encode(offset));
      host_offset_.SetValue(host_offset);
    }

    bool operator==(const TypedSlot other) {
      return type_and_offset_.Value() == other.type_and_offset_.Value() &&
             host_offset_.Value() == other.host_offset_.Value();
    }

    bool operator!=(const TypedSlot other) { return !(*this == other); }

    SlotType type() { return TypeField::decode(type_and_offset_.Value()); }

    uint32_t offset() { return OffsetField::decode(type_and_offset_.Value()); }

    TypeAndOffset GetTypeAndOffset() {
      uint32_t type_and_offset = type_and_offset_.Value();
      return std::make_pair(TypeField::decode(type_and_offset),
                            OffsetField::decode(type_and_offset));
    }

    uint32_t host_offset() { return host_offset_.Value(); }

    void Set(TypedSlot slot) {
      type_and_offset_.SetValue(slot.type_and_offset_.Value());
      host_offset_.SetValue(slot.host_offset_.Value());
    }

    void Clear() {
      type_and_offset_.SetValue(TypeField::encode(CLEARED_SLOT) |
                                OffsetField::encode(0));
      host_offset_.SetValue(0);
    }

    base::AtomicValue<uint32_t> type_and_offset_;
    base::AtomicValue<uint32_t> host_offset_;
  };
  static const int kMaxOffset = 1 << 29;

  explicit TypedSlotSet(Address page_start) : page_start_(page_start) {
    chunk_.SetValue(new Chunk(nullptr, kInitialBufferSize));
  }

  ~TypedSlotSet() {
    Chunk* chunk = chunk_.Value();
    while (chunk != nullptr) {
      Chunk* next = chunk->next.Value();
      delete chunk;
      chunk = next;
    }
    FreeToBeFreedChunks();
  }

  // The slot offset specifies a slot at address page_start_ + offset.
  // This method can only be called on the main thread.
  void Insert(SlotType type, uint32_t host_offset, uint32_t offset) {
    TypedSlot slot(type, host_offset, offset);
    Chunk* top_chunk = chunk_.Value();
    if (!top_chunk) {
      top_chunk = new Chunk(nullptr, kInitialBufferSize);
      chunk_.SetValue(top_chunk);
    }
    if (!top_chunk->AddSlot(slot)) {
      Chunk* new_top_chunk =
          new Chunk(top_chunk, NextCapacity(top_chunk->capacity.Value()));
      bool added = new_top_chunk->AddSlot(slot);
      chunk_.SetValue(new_top_chunk);
      DCHECK(added);
      USE(added);
    }
  }
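  // Minimal usage sketch (illustrative only; `code_page`, `host_offset` and
  // `slot_offset` are placeholder values for a code page base address and
  // byte offsets within it):
  //   TypedSlotSet* typed = new TypedSlotSet(code_page);
  //   typed->Insert(CODE_TARGET_SLOT, host_offset, slot_offset);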
  // Iterate over all slots in the set and for each slot invoke the callback.
  // If the callback returns REMOVE_SLOT then the slot is removed from the set.
  // Returns the new number of slots.
  //
  // Sample usage:
  // Iterate([](SlotType slot_type, Address host_addr, Address slot_address) {
  //    if (good(slot_type, slot_address)) return KEEP_SLOT;
  //    else return REMOVE_SLOT;
  // });
  template <typename Callback>
  int Iterate(Callback callback, IterationMode mode) {
    STATIC_ASSERT(CLEARED_SLOT < 8);
    Chunk* chunk = chunk_.Value();
    Chunk* previous = nullptr;
    int new_count = 0;
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer.Value();
      int count = chunk->count.Value();
      bool empty = true;
      for (int i = 0; i < count; i++) {
        // Order is important here. We have to read out the slot type last to
        // observe the concurrent removal case consistently.
        Address host_addr = page_start_ + buffer[i].host_offset();
        TypeAndOffset type_and_offset = buffer[i].GetTypeAndOffset();
        SlotType type = type_and_offset.first;
        if (type != CLEARED_SLOT) {
          Address addr = page_start_ + type_and_offset.second;
          if (callback(type, host_addr, addr) == KEEP_SLOT) {
            new_count++;
            empty = false;
          } else {
            buffer[i].Clear();
          }
        }
      }

      Chunk* next = chunk->next.Value();
      if (mode == PREFREE_EMPTY_CHUNKS && empty) {
        // We remove the chunk from the list but let it still point to its next
        // chunk to allow concurrent iteration.
        if (previous) {
          previous->next.SetValue(next);
        } else {
          chunk_.SetValue(next);
        }
        base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
        to_be_freed_chunks_.push(chunk);
      } else {
        previous = chunk;
      }
      chunk = next;
    }
    return new_count;
  }

  void FreeToBeFreedChunks() {
    base::LockGuard<base::Mutex> guard(&to_be_freed_chunks_mutex_);
    while (!to_be_freed_chunks_.empty()) {
      Chunk* top = to_be_freed_chunks_.top();
      to_be_freed_chunks_.pop();
      delete top;
    }
  }
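  // Clears every slot whose host offset falls into one of the given ranges.
  // Each map entry is read here as a half-open range [first, second) of host
  // offsets; this reading is inferred from the upper_bound/second comparison
  // in the implementation below.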
  void RemoveInvalidSlots(std::map<uint32_t, uint32_t>& invalid_ranges) {
    Chunk* chunk = chunk_.Value();
    while (chunk != nullptr) {
      TypedSlot* buffer = chunk->buffer.Value();
      int count = chunk->count.Value();
      for (int i = 0; i < count; i++) {
        uint32_t host_offset = buffer[i].host_offset();
        std::map<uint32_t, uint32_t>::iterator upper_bound =
            invalid_ranges.upper_bound(host_offset);
        if (upper_bound == invalid_ranges.begin()) continue;
        // upper_bound points to the invalid range after the given slot. Hence,
        // we have to go to the previous element.
        upper_bound--;
        DCHECK_LE(upper_bound->first, host_offset);
        if (upper_bound->second > host_offset) {
          buffer[i].Clear();
        }
      }
      chunk = chunk->next.Value();
    }
  }

 private:
  static const int kInitialBufferSize = 100;
  static const int kMaxBufferSize = 16 * KB;

  static int NextCapacity(int capacity) {
    return Min(kMaxBufferSize, capacity * 2);
  }

  class OffsetField : public BitField<int, 0, 29> {};
  class TypeField : public BitField<SlotType, 29, 3> {};

  struct Chunk : Malloced {
    explicit Chunk(Chunk* next_chunk, int chunk_capacity) {
      count.SetValue(0);
      capacity.SetValue(chunk_capacity);
      buffer.SetValue(NewArray<TypedSlot>(chunk_capacity));
      next.SetValue(next_chunk);
    }

    bool AddSlot(TypedSlot slot) {
      int current_count = count.Value();
      if (current_count == capacity.Value()) return false;
      TypedSlot* current_buffer = buffer.Value();
      // Order is important here. We have to write the slot first before
      // increasing the counter to guarantee that a consistent state is
      // observed by concurrent threads.
      current_buffer[current_count].Set(slot);
      count.SetValue(current_count + 1);
      return true;
    }

    ~Chunk() { DeleteArray(buffer.Value()); }

    base::AtomicValue<Chunk*> next;
    base::AtomicValue<int> count;
    base::AtomicValue<int> capacity;
    base::AtomicValue<TypedSlot*> buffer;
  };

  Address page_start_;
  base::AtomicValue<Chunk*> chunk_;
  base::Mutex to_be_freed_chunks_mutex_;
  std::stack<Chunk*> to_be_freed_chunks_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SLOT_SET_H