//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <new>
#include <utility>

namespace llvm {

namespace detail {
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
}

template <
    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
public:
  typedef unsigned size_type;
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
      const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    return empty() ? end()
                   : const_iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it has at least Size buckets. Does not shrink
  void resize(size_type Size) {
    incrementEpoch();
    if (Size > getNumBuckets())
      grow(Size);
  }
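
  // A hedged pre-sizing sketch (names and counts below are illustrative, not
  // part of this header): the table grows once it reaches 3/4 load, so
  // reserving somewhat more buckets than the expected number of entries lets
  // a bulk insert proceed without rehashing.
  //
  //   DenseMap<unsigned, unsigned> M;
  //   M.resize(ExpectedNumEntries * 4 / 3 + 1);
  //   // ... insert up to ExpectedNumEntries entries; no growth occurs.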

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    unsigned NumEntries = getNumEntries();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
        if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          P->getSecond().~ValueT();
          --NumEntries;
        }
        P->getFirst() = EmptyKey;
      }
    }
    assert(NumEntries == 0 && "Node count imbalance!");
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
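
  // A hedged sketch of the contract find_as relies on: a hypothetical map
  // keyed by std::string but probed with StringRef, so lookups need not build
  // a temporary std::string. All names below are illustrative, not part of
  // this header, and the empty/tombstone strings are assumed never to occur
  // as real keys.
  //
  //   struct StringKeyInfo {
  //     static std::string getEmptyKey()     { return "<empty>"; }
  //     static std::string getTombstoneKey() { return "<tombstone>"; }
  //     static unsigned getHashValue(const std::string &K);
  //     static unsigned getHashValue(StringRef K);               // lookup key
  //     static bool isEqual(const std::string &L, const std::string &R);
  //     static bool isEqual(StringRef L, const std::string &R);  // lookup key
  //   };
  //
  //   DenseMap<std::string, unsigned, StringKeyInfo> M;
  //   auto It = M.find_as(StringRef("name"));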

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }
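
  // A hedged contrast with operator[] (illustrative names): lookup() never
  // inserts, so it is usable on a const map, whereas operator[] inserts a
  // default-constructed value for a missing key.
  //
  //   const DenseMap<unsigned, unsigned> &M = getMap();
  //   unsigned V = M.lookup(42);  // 0 if 42 is absent; M is left unchanged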

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }
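
  // A hedged usage note (illustrative names): since insert() never overwrites
  // an existing value, the returned pair is how callers detect and update a
  // key that was already present.
  //
  //   auto Res = M.insert(std::make_pair(Key, NewValue));
  //   if (!Res.second)
  //     Res.first->second = NewValue;  // key existed; update it explicitly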

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(std::move(KV.first),
                                 std::move(KV.second),
                                 TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }
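
  // A hedged sketch of the reallocation check described above (map, key and
  // value names are illustrative):
  //
  //   const void *OldArray = M.getPointerIntoBucketsArray();
  //   M.insert(std::make_pair(K, V));
  //   bool Reallocated = !M.isPointerIntoBucketsArray(OldArray);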

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }

#ifndef NDEBUG
    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      new (&B->getFirst()) KeyT(EmptyKey);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }

#ifndef NDEBUG
    if (OldBucketsBegin != OldBucketsEnd)
      memset((void*)OldBucketsBegin, 0x5a,
             sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
#endif
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = Key;
    new (&TheBucket->getSecond()) ValueT(Value);
    return TheBucket;
  }

  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = Key;
    new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
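    //
    // As a worked instance of these thresholds: with 64 buckets, the table
    // grows when an insertion would create the 48th entry (48 * 4 >= 64 * 3),
    // and it rehashes at the same size once tombstones leave no more than
    // 8 of the 64 buckets truly empty.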
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Key, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map.
      // Report the bucket it would be inserted into and return false.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
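      // (With ProbeAmt starting at 1, the offsets probed from the home bucket
      // are the triangular numbers 1, 3, 6, 10, ...; for a power-of-two bucket
      // count that sequence visits every bucket, so the loop terminates as
      // long as at least one empty bucket exists.)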
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// is not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

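/// DenseMap is a quadratically-probed, open-addressed hash table keyed by
/// KeyT. A hedged usage sketch (key/value types and literals are illustrative
/// only, not taken from this header):
/// \code
///   DenseMap<int, int> Squares;
///   Squares.insert(std::make_pair(3, 9)); // returns {iterator, bool}
///   Squares[4] = 16;                      // default-constructs, then assigns
///   if (Squares.count(3))
///     int Nine = Squares.lookup(3);       // copies the mapped value out
/// \endcode
/// KeyT must have a DenseMapInfo specialization supplying empty and tombstone
/// keys, so those two key values can never be stored in the map.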
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  explicit DenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitBuckets) {
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

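/// SmallDenseMap behaves like DenseMap, but the first InlineBuckets buckets
/// are stored inside the map object itself; a heap allocation is only made
/// once the table grows past them. A hedged usage sketch (types, sizes and
/// names are illustrative assumptions):
/// \code
///   SmallDenseMap<const void *, unsigned, 8> Counts;
///   Counts[SomePointer] += 1;  // stays in the inline buckets while small
/// \endcode
/// Note that InlineBuckets should be a power of two, since the bucket count
/// is always kept a power of two and the inline array is used as-is.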
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
  value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr != RHS.Ptr;
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }
};

template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif