//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <limits>
#include <new>
#include <utility>

namespace llvm {

namespace detail {

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};

} // end namespace detail

template <
    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
public:
  typedef unsigned size_type;
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
      const_iterator;

  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    return empty() ? end()
                   : const_iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  LLVM_NODISCARD bool empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }
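  // For example (illustrative): calling M.reserve(N) before a loop that
  // inserts N known keys avoids intermediate rehashing during the insertions.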

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    unsigned NumEntries = getNumEntries();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
        if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          P->getSecond().~ValueT();
          --NumEntries;
        }
        P->getFirst() = EmptyKey;
      }
    }
    assert(NumEntries == 0 && "Node count imbalance!");
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
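  // Illustrative sketch (not part of this header): a DenseMapInfo for a
  // hypothetical StringKey type that also supplies StringRef overloads, so
  // find_as(StringRef) can look up entries without constructing a temporary
  // StringKey.
  //
  // \code
  //   struct StringKeyInfo {
  //     static StringKey getEmptyKey();
  //     static StringKey getTombstoneKey();
  //     static unsigned getHashValue(const StringKey &K);
  //     static bool isEqual(const StringKey &LHS, const StringKey &RHS);
  //     // Extra overloads that enable find_as/insert_as with StringRef:
  //     static unsigned getHashValue(StringRef Lookup);
  //     static bool isEqual(StringRef Lookup, const StringKey &K);
  //   };
  // \endcode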

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }
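  // For example (illustrative): for a DenseMap<int, int> M, M.lookup(K)
  // returns 0 when K is absent and, unlike operator[], never inserts.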

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }
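  // Usage sketch (illustrative, assuming a DenseMap<int, const char *> M):
  //
  // \code
  //   auto R = M.try_emplace(42, "first");   // inserted: R.second == true
  //   auto S = M.try_emplace(42, "second");  // no-op: S.second == false and
  //                                          // M[42] still maps to "first"
  // \endcode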

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, Key);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(TheBucket, std::move(Key));
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
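  // A minimal sketch of that pattern (illustrative only):
  //
  // \code
  //   const void *OldBuckets = Map.getPointerIntoBucketsArray();
  //   Map.insert({K, V});
  //   bool Reallocated = OldBuckets != Map.getPointerIntoBucketsArray();
  // \endcode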

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }
  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48 we need to return 128: 64 buckets would
    // make the load factor hit the 3/4 growth threshold on the last insertion.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }

  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&... Values) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
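    // Concretely (illustrative numbers): with 64 buckets, the table doubles
    // when the 48th entry is inserted (48 * 4 >= 64 * 3), and it is rebuilt at
    // the same size (clearing tombstones) when at most 8 of the 64 buckets
    // would remain empty.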
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
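    // Probe offsets from the home bucket follow the triangular numbers
    // (1, 3, 6, 10, ...). With a power-of-two bucket count this sequence
    // eventually visits every bucket, so the loop below terminates as long as
    // at least one bucket is empty.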
    while (true) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map; return
      // the bucket the caller should insert into.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not being in the
      // map, we prefer to return the tombstone rather than a bucket that would
      // require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// is not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

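// A minimal usage sketch (illustrative, not part of the interface):
//
// \code
//   llvm::DenseMap<unsigned, const char *> Names;
//   Names[1] = "one";                     // insert or overwrite
//   Names.insert({2, "two"});             // insert only if the key is absent
//   if (const char *S = Names.lookup(2))  // value, or nullptr if absent
//     (void)S;
//   Names.erase(1);
// \endcode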
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without calling grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

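// SmallDenseMap stores up to InlineBuckets buckets inline (no heap
// allocation) and transparently switches to a heap-allocated table once it
// grows past that. A small illustrative sketch:
//
// \code
//   llvm::SmallDenseMap<int, int, 8> Squares;  // up to 8 buckets kept inline
//   for (int i = 0; i != 4; ++i)
//     Squares[i] = i * i;                      // still in the inline storage
// \endcode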
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing a large
  /// bucket array. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
      value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;

private:
  pointer Ptr, End;

public:
  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined
  // copy constructor.
  template <bool IsConstSrc,
            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr != RHS.Ptr;
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }
};

template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif // LLVM_ADT_DENSEMAP_H