1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // An open-addressing
16 // hashtable with quadratic probing.
17 //
18 // This is a low level hashtable on top of which different interfaces can be
19 // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20 //
// The table interface is similar to that of std::unordered_set. A notable
// difference is that most member functions support heterogeneous keys when
23 // BOTH the hash and eq functions are marked as transparent. They do so by
24 // providing a typedef called `is_transparent`.
25 //
26 // When heterogeneous lookup is enabled, functions that take key_type act as if
27 // they have an overload set like:
28 //
29 //   iterator find(const key_type& key);
30 //   template <class K>
31 //   iterator find(const K& key);
32 //
33 //   size_type erase(const key_type& key);
34 //   template <class K>
35 //   size_type erase(const K& key);
36 //
37 //   std::pair<iterator, iterator> equal_range(const key_type& key);
38 //   template <class K>
39 //   std::pair<iterator, iterator> equal_range(const K& key);
40 //
41 // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42 // exist.
43 //
44 // find() also supports passing the hash explicitly:
45 //
46 //   iterator find(const key_type& key, size_t hash);
47 //   template <class U>
48 //   iterator find(const U& key, size_t hash);
49 //
// In addition, the pointer-to-element and iterator stability guarantees are
51 // weaker: all iterators and pointers are invalidated after a new element is
52 // inserted.
53 //
54 // IMPLEMENTATION DETAILS
55 //
56 // The table stores elements inline in a slot array. In addition to the slot
57 // array the table maintains some control state per slot. The extra state is one
58 // byte per slot and stores empty or deleted marks, or alternatively 7 bits from
59 // the hash of an occupied slot. The table is split into logical groups of
60 // slots, like so:
61 //
62 //      Group 1         Group 2        Group 3
63 // +---------------+---------------+---------------+
64 // | | | | | | | | | | | | | | | | | | | | | | | | |
65 // +---------------+---------------+---------------+
66 //
67 // On lookup the hash is split into two parts:
68 // - H2: 7 bits (those stored in the control bytes)
69 // - H1: the rest of the bits
70 // The groups are probed using H1. For each group the slots are matched to H2 in
71 // parallel. Because H2 is 7 bits (128 states) and the number of slots per group
// is low (8 or 16), in almost all cases a match in H2 is also a lookup hit.
73 //
74 // On insert, once the right group is found (as in lookup), its slots are
75 // filled in order.
76 //
77 // On erase a slot is cleared. In case the group did not have any empty slots
78 // before the erase, the erased slot is marked as deleted.
79 //
80 // Groups without empty slots (but maybe with deleted slots) extend the probe
81 // sequence. The probing algorithm is quadratic. Given N the number of groups,
82 // the probing function for the i'th probe is:
83 //
84 //   P(0) = H1 % N
85 //
86 //   P(i) = (P(i - 1) + i) % N
87 //
// This probing function guarantees that the first N probes visit every group of
// the table exactly once.
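//
// For example (a small worked case): with N = 4 groups and H1 % N = 1, the
// probes visit groups
//
//   P(0) = 1, P(1) = (1 + 1) % 4 = 2, P(2) = (2 + 2) % 4 = 0, P(3) = (0 + 3) % 4 = 3
//
// i.e. each of the 4 groups exactly once.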
90 
91 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
92 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
93 
94 #include <algorithm>
95 #include <cmath>
96 #include <cstdint>
97 #include <cstring>
98 #include <iterator>
99 #include <limits>
100 #include <memory>
101 #include <tuple>
102 #include <type_traits>
103 #include <utility>
104 
105 #include "absl/base/internal/bits.h"
106 #include "absl/base/internal/endian.h"
107 #include "absl/base/port.h"
108 #include "absl/container/internal/common.h"
109 #include "absl/container/internal/compressed_tuple.h"
110 #include "absl/container/internal/container_memory.h"
111 #include "absl/container/internal/hash_policy_traits.h"
112 #include "absl/container/internal/hashtable_debug_hooks.h"
113 #include "absl/container/internal/hashtablez_sampler.h"
114 #include "absl/container/internal/have_sse.h"
115 #include "absl/container/internal/layout.h"
116 #include "absl/memory/memory.h"
117 #include "absl/meta/type_traits.h"
118 #include "absl/utility/utility.h"
119 
120 namespace absl {
121 ABSL_NAMESPACE_BEGIN
122 namespace container_internal {
123 
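// The probe sequence from the file comment above, expressed in slot offsets
// rather than group indexes: the sequence starts at `hash & mask`, and each
// call to next() advances the offset by one more `Width` than the previous
// call did (Width, 2 * Width, 3 * Width, ...), all modulo `mask + 1`.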
124 template <size_t Width>
125 class probe_seq {
126  public:
  probe_seq(size_t hash, size_t mask) {
128     assert(((mask + 1) & mask) == 0 && "not a mask");
129     mask_ = mask;
130     offset_ = hash & mask_;
131   }
  size_t offset() const { return offset_; }
  size_t offset(size_t i) const { return (offset_ + i) & mask_; }
134 
  void next() {
136     index_ += Width;
137     offset_ += index_;
138     offset_ &= mask_;
139   }
140   // 0-based probe index. The i-th probe in the probe sequence.
  size_t index() const { return index_; }
142 
143  private:
144   size_t mask_;
145   size_t offset_;
146   size_t index_ = 0;
147 };
148 
149 template <class ContainerKey, class Hash, class Eq>
150 struct RequireUsableKey {
151   template <class PassedKey, class... Args>
152   std::pair<
153       decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
154       decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
155                                          std::declval<const PassedKey&>()))>*
156   operator()(const PassedKey&, const Args&...) const;
157 };
158 
159 template <class E, class Policy, class Hash, class Eq, class... Ts>
160 struct IsDecomposable : std::false_type {};
161 
162 template <class Policy, class Hash, class Eq, class... Ts>
163 struct IsDecomposable<
164     absl::void_t<decltype(
165         Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
166                       std::declval<Ts>()...))>,
167     Policy, Hash, Eq, Ts...> : std::true_type {};
168 
169 // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
170 template <class T>
171 constexpr bool IsNoThrowSwappable() {
172   using std::swap;
173   return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
174 }
175 
176 template <typename T>
177 int TrailingZeros(T x) {
178   return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
179                               static_cast<uint64_t>(x))
180                         : base_internal::CountTrailingZerosNonZero32(
181                               static_cast<uint32_t>(x));
182 }
183 
184 template <typename T>
185 int LeadingZeros(T x) {
186   return sizeof(T) == 8
187              ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
188              : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
189 }
190 
191 // An abstraction over a bitmask. It provides an easy way to iterate through the
192 // indexes of the set bits of a bitmask.  When Shift=0 (platforms with SSE),
// this is a true bitmask.  On non-SSE platforms, the arithmetic used to
// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
195 // either 0x00 or 0x80.
196 //
197 // For example:
198 //   for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
199 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
200 template <class T, int SignificantBits, int Shift = 0>
201 class BitMask {
202   static_assert(std::is_unsigned<T>::value, "");
203   static_assert(Shift == 0 || Shift == 3, "");
204 
205  public:
206   // These are useful for unit tests (gunit).
207   using value_type = int;
208   using iterator = BitMask;
209   using const_iterator = BitMask;
210 
211   explicit BitMask(T mask) : mask_(mask) {}
212   BitMask& operator++() {
213     mask_ &= (mask_ - 1);
214     return *this;
215   }
216   explicit operator bool() const { return mask_ != 0; }
217   int operator*() const { return LowestBitSet(); }
218   int LowestBitSet() const {
219     return container_internal::TrailingZeros(mask_) >> Shift;
220   }
221   int HighestBitSet() const {
222     return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
223             1) >>
224            Shift;
225   }
226 
227   BitMask begin() const { return *this; }
228   BitMask end() const { return BitMask(0); }
229 
230   int TrailingZeros() const {
231     return container_internal::TrailingZeros(mask_) >> Shift;
232   }
233 
234   int LeadingZeros() const {
235     constexpr int total_significant_bits = SignificantBits << Shift;
236     constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
237     return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
238   }
239 
240  private:
241   friend bool operator==(const BitMask& a, const BitMask& b) {
242     return a.mask_ == b.mask_;
243   }
244   friend bool operator!=(const BitMask& a, const BitMask& b) {
245     return a.mask_ != b.mask_;
246   }
247 
248   T mask_;
249 };
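// A typical use of BitMask together with Group (defined further below); this is
// a sketch of the pattern that find() uses:
//
//   Group g{ctrl + seq.offset()};
//   for (int i : g.Match(H2(hash))) {
//     // The control byte at seq.offset(i) equals H2(hash); compare the element
//     // stored in the corresponding slot to confirm a real hit.
//   }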
250 
251 using ctrl_t = signed char;
252 using h2_t = uint8_t;
253 
254 // The values here are selected for maximum performance. See the static asserts
255 // below for details.
256 enum Ctrl : ctrl_t {
257   kEmpty = -128,   // 0b10000000
258   kDeleted = -2,   // 0b11111110
259   kSentinel = -1,  // 0b11111111
260 };
261 static_assert(
262     kEmpty & kDeleted & kSentinel & 0x80,
263     "Special markers need to have the MSB to make checking for them efficient");
264 static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
265               "kEmpty and kDeleted must be smaller than kSentinel to make the "
266               "SIMD test of IsEmptyOrDeleted() efficient");
267 static_assert(kSentinel == -1,
268               "kSentinel must be -1 to elide loading it from memory into SIMD "
269               "registers (pcmpeqd xmm, xmm)");
270 static_assert(kEmpty == -128,
271               "kEmpty must be -128 to make the SIMD check for its "
272               "existence efficient (psignb xmm, xmm)");
273 static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
274               "kEmpty and kDeleted must share an unset bit that is not shared "
275               "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
276               "efficient");
277 static_assert(kDeleted == -2,
278               "kDeleted must be -2 to make the implementation of "
279               "ConvertSpecialToEmptyAndFullToDeleted efficient");
280 
281 // A single block of empty control bytes for tables without any slots allocated.
282 // This enables removing a branch in the hot path of find().
283 inline ctrl_t* EmptyGroup() {
284   alignas(16) static constexpr ctrl_t empty_group[] = {
285       kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
286       kEmpty,    kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
287   return const_cast<ctrl_t*>(empty_group);
288 }
289 
290 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
291 // randomize insertion order within groups.
292 bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
293 
294 // Returns a hash seed.
295 //
296 // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
297 // non-determinism of iteration order in most cases.
298 inline size_t HashSeed(const ctrl_t* ctrl) {
299   // The low bits of the pointer have little or no entropy because of
300   // alignment. We shift the pointer to try to use higher entropy bits. A
301   // good number seems to be 12 bits, because that aligns with page size.
302   return reinterpret_cast<uintptr_t>(ctrl) >> 12;
303 }
304 
305 inline size_t H1(size_t hash, const ctrl_t* ctrl) {
306   return (hash >> 7) ^ HashSeed(ctrl);
307 }
308 inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
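// For example (ignoring the per-table seed mixed into H1):
//   hash      = 0x1234
//   H2(hash)  = 0x34   (the low 7 bits, stored in the control byte)
//   hash >> 7 = 0x24   (the H1 part, used to select the starting group)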
309 
310 inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
311 inline bool IsFull(ctrl_t c) { return c >= 0; }
312 inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
313 inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
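// Note: a full slot stores the H2 of its element, which is in [0, 127], so any
// non-negative control byte means "full". The special markers are all negative,
// and kSentinel (-1) is the largest of them, which is why IsEmptyOrDeleted()
// can simply test `c < kSentinel`.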
314 
315 #if SWISSTABLE_HAVE_SSE2
316 
317 // https://github.com/abseil/abseil-cpp/issues/209
318 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
319 // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
320 // Work around this by using the portable implementation of Group
321 // when using -funsigned-char under GCC.
322 inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
323 #if defined(__GNUC__) && !defined(__clang__)
324   if (std::is_unsigned<char>::value) {
325     const __m128i mask = _mm_set1_epi8(0x80);
326     const __m128i diff = _mm_subs_epi8(b, a);
327     return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
328   }
329 #endif
330   return _mm_cmpgt_epi8(a, b);
331 }
332 
333 struct GroupSse2Impl {
334   static constexpr size_t kWidth = 16;  // the number of slots per group
335 
336   explicit GroupSse2Impl(const ctrl_t* pos) {
337     ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
338   }
339 
340   // Returns a bitmask representing the positions of slots that match hash.
341   BitMask<uint32_t, kWidth> Match(h2_t hash) const {
342     auto match = _mm_set1_epi8(hash);
343     return BitMask<uint32_t, kWidth>(
344         _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
345   }
346 
347   // Returns a bitmask representing the positions of empty slots.
348   BitMask<uint32_t, kWidth> MatchEmpty() const {
349 #if SWISSTABLE_HAVE_SSSE3
350     // This only works because kEmpty is -128.
351     return BitMask<uint32_t, kWidth>(
352         _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
353 #else
354     return Match(static_cast<h2_t>(kEmpty));
355 #endif
356   }
357 
358   // Returns a bitmask representing the positions of empty or deleted slots.
359   BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
360     auto special = _mm_set1_epi8(kSentinel);
361     return BitMask<uint32_t, kWidth>(
362         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
363   }
364 
  // Returns the number of leading (lowest-index) empty or deleted slots in the
  // group, i.e. the length of the run of empty/deleted slots that starts at the
  // first slot of the group.
366   uint32_t CountLeadingEmptyOrDeleted() const {
367     auto special = _mm_set1_epi8(kSentinel);
368     return TrailingZeros(
369         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
370   }
371 
372   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
373     auto msbs = _mm_set1_epi8(static_cast<char>(-128));
374     auto x126 = _mm_set1_epi8(126);
375 #if SWISSTABLE_HAVE_SSSE3
376     auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
377 #else
378     auto zero = _mm_setzero_si128();
379     auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
380     auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
381 #endif
382     _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
383   }
384 
385   __m128i ctrl;
386 };
387 #endif  // SWISSTABLE_HAVE_SSE2
388 
389 struct GroupPortableImpl {
390   static constexpr size_t kWidth = 8;
391 
392   explicit GroupPortableImpl(const ctrl_t* pos)
393       : ctrl(little_endian::Load64(pos)) {}
394 
395   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
396     // For the technique, see:
397     // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
398     // (Determine if a word has a byte equal to n).
399     //
400     // Caveat: there are false positives but:
401     // - they only occur if there is a real match
402     // - they never occur on kEmpty, kDeleted, kSentinel
403     // - they will be handled gracefully by subsequent checks in code
404     //
405     // Example:
406     //   v = 0x1716151413121110
407     //   hash = 0x12
408     //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
409     constexpr uint64_t msbs = 0x8080808080808080ULL;
410     constexpr uint64_t lsbs = 0x0101010101010101ULL;
411     auto x = ctrl ^ (lsbs * hash);
412     return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
413   }
414 
415   BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
416     constexpr uint64_t msbs = 0x8080808080808080ULL;
417     return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
418   }
419 
420   BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
421     constexpr uint64_t msbs = 0x8080808080808080ULL;
422     return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
423   }
424 
425   uint32_t CountLeadingEmptyOrDeleted() const {
426     constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
427     return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
428   }
429 
430   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
431     constexpr uint64_t msbs = 0x8080808080808080ULL;
432     constexpr uint64_t lsbs = 0x0101010101010101ULL;
433     auto x = ctrl & msbs;
434     auto res = (~x + (x >> 7)) & ~lsbs;
435     little_endian::Store64(dst, res);
436   }
437 
438   uint64_t ctrl;
439 };
440 
441 #if SWISSTABLE_HAVE_SSE2
442 using Group = GroupSse2Impl;
443 #else
444 using Group = GroupPortableImpl;
445 #endif
446 
447 template <class Policy, class Hash, class Eq, class Alloc>
448 class raw_hash_set;
449 
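// Valid capacities are of the form 2^m - 1 (i.e. capacity + 1 is a power of
// two), so that `hash & capacity` can be used to index into the table.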
450 inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
451 
452 // PRECONDITION:
453 //   IsValidCapacity(capacity)
454 //   ctrl[capacity] == kSentinel
455 //   ctrl[i] != kSentinel for all i < capacity
456 // Applies mapping for every byte in ctrl:
457 //   DELETED -> EMPTY
458 //   EMPTY -> EMPTY
459 //   FULL -> DELETED
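//
// This is the first step of rehashing in place: afterwards, previously deleted
// slots can be reused, while previously full slots are marked kDeleted so that
// their elements can be found and moved to their new positions.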
460 inline void ConvertDeletedToEmptyAndFullToDeleted(
461     ctrl_t* ctrl, size_t capacity) {
462   assert(ctrl[capacity] == kSentinel);
463   assert(IsValidCapacity(capacity));
464   for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
465     Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
466   }
467   // Copy the cloned ctrl bytes.
468   std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
469   ctrl[capacity] = kSentinel;
470 }
471 
472 // Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
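// For example: NormalizeCapacity(3) == 3, NormalizeCapacity(4) == 7, and
// NormalizeCapacity(8) == 15.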
473 inline size_t NormalizeCapacity(size_t n) {
474   return n ? ~size_t{} >> LeadingZeros(n) : 1;
475 }
476 
477 // We use 7/8th as maximum load factor.
478 // For 16-wide groups, that gives an average of two empty slots per group.
479 inline size_t CapacityToGrowth(size_t capacity) {
480   assert(IsValidCapacity(capacity));
481   // `capacity*7/8`
482   if (Group::kWidth == 8 && capacity == 7) {
483     // x-x/8 does not work when x==7.
484     return 6;
485   }
486   return capacity - capacity / 8;
487 }
// From the desired "growth" to a lower bound of the necessary capacity.
// The result might not be a valid capacity and may require NormalizeCapacity().
490 inline size_t GrowthToLowerboundCapacity(size_t growth) {
491   // `growth*8/7`
492   if (Group::kWidth == 8 && growth == 7) {
493     // x+(x-1)/7 does not work when x==7.
494     return 8;
495   }
496   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
497 }
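// For example, with Group::kWidth == 16, CapacityToGrowth(15) == 14 and
// GrowthToLowerboundCapacity(14) == 15: the two functions are (approximately)
// inverses of each other.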
498 
499 // Policy: a policy defines how to perform different operations on
500 // the slots of the hashtable (see hash_policy_traits.h for the full interface
501 // of policy).
502 //
503 // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
504 // functor should accept a key and return size_t as hash. For best performance
505 // it is important that the hash function provides high entropy across all bits
506 // of the hash.
507 //
508 // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
509 // should accept two (of possibly different type) keys and return a bool: true
510 // if they are equal, false if they are not. If two keys compare equal, then
511 // their hash values as defined by Hash MUST be equal.
512 //
513 // Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
514 // the storage of the hashtable will be allocated and the elements will be
515 // constructed and destroyed.
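//
// A minimal policy for a set of ints might look roughly like this (an
// illustrative sketch only; `IntSetPolicy` is not a real type -- see the
// policies in flat_hash_set.h and hash_policy_traits.h for the actual
// interface and its optional extensions):
//
//   struct IntSetPolicy {
//     using slot_type = int;
//     using key_type = int;
//     using init_type = int;
//
//     template <class Alloc, class... Args>
//     static void construct(Alloc* alloc, slot_type* slot, Args&&... args);
//     template <class Alloc>
//     static void destroy(Alloc* alloc, slot_type* slot);
//     static int& element(slot_type* slot) { return *slot; }
//
//     // Calls `f` with the key (and remaining args) extracted from `args`.
//     template <class F, class... Args>
//     static decltype(auto) apply(F&& f, Args&&... args);
//   };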
516 template <class Policy, class Hash, class Eq, class Alloc>
517 class raw_hash_set {
518   using PolicyTraits = hash_policy_traits<Policy>;
519   using KeyArgImpl =
520       KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
521 
522  public:
523   using init_type = typename PolicyTraits::init_type;
524   using key_type = typename PolicyTraits::key_type;
525   // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
526   // code fixes!
527   using slot_type = typename PolicyTraits::slot_type;
528   using allocator_type = Alloc;
529   using size_type = size_t;
530   using difference_type = ptrdiff_t;
531   using hasher = Hash;
532   using key_equal = Eq;
533   using policy_type = Policy;
534   using value_type = typename PolicyTraits::value_type;
535   using reference = value_type&;
536   using const_reference = const value_type&;
537   using pointer = typename absl::allocator_traits<
538       allocator_type>::template rebind_traits<value_type>::pointer;
539   using const_pointer = typename absl::allocator_traits<
540       allocator_type>::template rebind_traits<value_type>::const_pointer;
541 
542   // Alias used for heterogeneous lookup functions.
543   // `key_arg<K>` evaluates to `K` when the functors are transparent and to
544   // `key_type` otherwise. It permits template argument deduction on `K` for the
545   // transparent case.
546   template <class K>
547   using key_arg = typename KeyArgImpl::template type<K, key_type>;
548 
549  private:
550   // Give an early error when key_type is not hashable/eq.
551   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
552   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
553 
554   using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
555 
556   static Layout MakeLayout(size_t capacity) {
557     assert(IsValidCapacity(capacity));
558     return Layout(capacity + Group::kWidth + 1, capacity);
559   }
560 
561   using AllocTraits = absl::allocator_traits<allocator_type>;
562   using SlotAlloc = typename absl::allocator_traits<
563       allocator_type>::template rebind_alloc<slot_type>;
564   using SlotAllocTraits = typename absl::allocator_traits<
565       allocator_type>::template rebind_traits<slot_type>;
566 
567   static_assert(std::is_lvalue_reference<reference>::value,
568                 "Policy::element() must return a reference");
569 
570   template <typename T>
571   struct SameAsElementReference
572       : std::is_same<typename std::remove_cv<
573                          typename std::remove_reference<reference>::type>::type,
574                      typename std::remove_cv<
575                          typename std::remove_reference<T>::type>::type> {};
576 
577   // An enabler for insert(T&&): T must be convertible to init_type or be the
578   // same as [cv] value_type [ref].
579   // Note: we separate SameAsElementReference into its own type to avoid using
580   // reference unless we need to. MSVC doesn't seem to like it in some
581   // cases.
582   template <class T>
583   using RequiresInsertable = typename std::enable_if<
584       absl::disjunction<std::is_convertible<T, init_type>,
585                         SameAsElementReference<T>>::value,
586       int>::type;
587 
588   // RequiresNotInit is a workaround for gcc prior to 7.1.
589   // See https://godbolt.org/g/Y4xsUh.
590   template <class T>
591   using RequiresNotInit =
592       typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
593 
594   template <class... Ts>
595   using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
596 
597  public:
598   static_assert(std::is_same<pointer, value_type*>::value,
599                 "Allocators with custom pointer types are not supported");
600   static_assert(std::is_same<const_pointer, const value_type*>::value,
601                 "Allocators with custom pointer types are not supported");
602 
603   class iterator {
604     friend class raw_hash_set;
605 
606    public:
607     using iterator_category = std::forward_iterator_tag;
608     using value_type = typename raw_hash_set::value_type;
609     using reference =
610         absl::conditional_t<PolicyTraits::constant_iterators::value,
611                             const value_type&, value_type&>;
612     using pointer = absl::remove_reference_t<reference>*;
613     using difference_type = typename raw_hash_set::difference_type;
614 
615     iterator() {}
616 
617     // PRECONDITION: not an end() iterator.
618     reference operator*() const {
619       assert_is_full();
620       return PolicyTraits::element(slot_);
621     }
622 
623     // PRECONDITION: not an end() iterator.
624     pointer operator->() const { return &operator*(); }
625 
626     // PRECONDITION: not an end() iterator.
627     iterator& operator++() {
628       assert_is_full();
629       ++ctrl_;
630       ++slot_;
631       skip_empty_or_deleted();
632       return *this;
633     }
634     // PRECONDITION: not an end() iterator.
635     iterator operator++(int) {
636       auto tmp = *this;
637       ++*this;
638       return tmp;
639     }
640 
641     friend bool operator==(const iterator& a, const iterator& b) {
642       a.assert_is_valid();
643       b.assert_is_valid();
644       return a.ctrl_ == b.ctrl_;
645     }
646     friend bool operator!=(const iterator& a, const iterator& b) {
647       return !(a == b);
648     }
649 
650    private:
651     iterator(ctrl_t* ctrl) : ctrl_(ctrl) {}  // for end()
652     iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
653 
654     void assert_is_full() const { assert(IsFull(*ctrl_)); }
655     void assert_is_valid() const {
656       assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
657     }
658 
659     void skip_empty_or_deleted() {
660       while (IsEmptyOrDeleted(*ctrl_)) {
661         // ctrl is not necessarily aligned to Group::kWidth. It is also likely
662         // to read past the space for ctrl bytes and into slots. This is ok
663         // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
664         // is no way to read outside the combined slot array.
665         uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
666         ctrl_ += shift;
667         slot_ += shift;
668       }
669     }
670 
671     ctrl_t* ctrl_ = nullptr;
672     // To avoid uninitialized member warnings, put slot_ in an anonymous union.
673     // The member is not initialized on singleton and end iterators.
674     union {
675       slot_type* slot_;
676     };
677   };
678 
679   class const_iterator {
680     friend class raw_hash_set;
681 
682    public:
683     using iterator_category = typename iterator::iterator_category;
684     using value_type = typename raw_hash_set::value_type;
685     using reference = typename raw_hash_set::const_reference;
686     using pointer = typename raw_hash_set::const_pointer;
687     using difference_type = typename raw_hash_set::difference_type;
688 
689     const_iterator() {}
690     // Implicit construction from iterator.
691     const_iterator(iterator i) : inner_(std::move(i)) {}
692 
693     reference operator*() const { return *inner_; }
694     pointer operator->() const { return inner_.operator->(); }
695 
696     const_iterator& operator++() {
697       ++inner_;
698       return *this;
699     }
700     const_iterator operator++(int) { return inner_++; }
701 
702     friend bool operator==(const const_iterator& a, const const_iterator& b) {
703       return a.inner_ == b.inner_;
704     }
705     friend bool operator!=(const const_iterator& a, const const_iterator& b) {
706       return !(a == b);
707     }
708 
709    private:
710     const_iterator(const ctrl_t* ctrl, const slot_type* slot)
711         : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
712 
713     iterator inner_;
714   };
715 
716   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
717   using insert_return_type = InsertReturnType<iterator, node_type>;
718 
719   raw_hash_set() noexcept(
720       std::is_nothrow_default_constructible<hasher>::value&&
721           std::is_nothrow_default_constructible<key_equal>::value&&
722               std::is_nothrow_default_constructible<allocator_type>::value) {}
723 
724   explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
725                         const key_equal& eq = key_equal(),
726                         const allocator_type& alloc = allocator_type())
727       : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
728     if (bucket_count) {
729       capacity_ = NormalizeCapacity(bucket_count);
730       reset_growth_left();
731       initialize_slots();
732     }
733   }
734 
735   raw_hash_set(size_t bucket_count, const hasher& hash,
736                const allocator_type& alloc)
737       : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
738 
739   raw_hash_set(size_t bucket_count, const allocator_type& alloc)
740       : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
741 
742   explicit raw_hash_set(const allocator_type& alloc)
743       : raw_hash_set(0, hasher(), key_equal(), alloc) {}
744 
745   template <class InputIter>
746   raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
747                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
748                const allocator_type& alloc = allocator_type())
749       : raw_hash_set(bucket_count, hash, eq, alloc) {
750     insert(first, last);
751   }
752 
753   template <class InputIter>
754   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
755                const hasher& hash, const allocator_type& alloc)
756       : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
757 
758   template <class InputIter>
759   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
760                const allocator_type& alloc)
761       : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
762 
763   template <class InputIter>
764   raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
765       : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
766 
767   // Instead of accepting std::initializer_list<value_type> as the first
768   // argument like std::unordered_set<value_type> does, we have two overloads
769   // that accept std::initializer_list<T> and std::initializer_list<init_type>.
770   // This is advantageous for performance.
771   //
772   //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
773   //   // copies the strings into the set.
774   //   std::unordered_set<std::string> s = {"abc", "def"};
775   //
776   //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
  //   // constructs the std::strings directly in the set from the char
  //   // arrays, avoiding temporary std::string copies.
778   //   absl::flat_hash_set<std::string> s = {"abc", "def"};
779   //
780   // The same trick is used in insert().
781   //
782   // The enabler is necessary to prevent this constructor from triggering where
783   // the copy constructor is meant to be called.
784   //
785   //   absl::flat_hash_set<int> a, b{a};
786   //
787   // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
788   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
789   raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
790                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
791                const allocator_type& alloc = allocator_type())
792       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
793 
794   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
795                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
796                const allocator_type& alloc = allocator_type())
797       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
798 
799   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
800   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
801                const hasher& hash, const allocator_type& alloc)
802       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
803 
804   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
805                const hasher& hash, const allocator_type& alloc)
806       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
807 
808   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
809   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
810                const allocator_type& alloc)
811       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
812 
813   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
814                const allocator_type& alloc)
815       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
816 
817   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
818   raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
819       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
820 
821   raw_hash_set(std::initializer_list<init_type> init,
822                const allocator_type& alloc)
823       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
824 
825   raw_hash_set(const raw_hash_set& that)
826       : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
827                                that.alloc_ref())) {}
828 
829   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
830       : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
831     reserve(that.size());
832     // Because the table is guaranteed to be empty, we can do something faster
833     // than a full `insert`.
834     for (const auto& v : that) {
835       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
836       auto target = find_first_non_full(hash);
837       set_ctrl(target.offset, H2(hash));
838       emplace_at(target.offset, v);
839       infoz_.RecordInsert(hash, target.probe_length);
840     }
841     size_ = that.size();
842     growth_left() -= that.size();
843   }
844 
845   raw_hash_set(raw_hash_set&& that) noexcept(
846       std::is_nothrow_copy_constructible<hasher>::value&&
847           std::is_nothrow_copy_constructible<key_equal>::value&&
848               std::is_nothrow_copy_constructible<allocator_type>::value)
849       : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
850         slots_(absl::exchange(that.slots_, nullptr)),
851         size_(absl::exchange(that.size_, 0)),
852         capacity_(absl::exchange(that.capacity_, 0)),
853         infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
854         // Hash, equality and allocator are copied instead of moved because
855         // `that` must be left valid. If Hash is std::function<Key>, moving it
856         // would create a nullptr functor that cannot be called.
857         settings_(that.settings_) {
858     // growth_left was copied above, reset the one from `that`.
859     that.growth_left() = 0;
860   }
861 
862   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
863       : ctrl_(EmptyGroup()),
864         slots_(nullptr),
865         size_(0),
866         capacity_(0),
867         settings_(0, that.hash_ref(), that.eq_ref(), a) {
868     if (a == that.alloc_ref()) {
869       std::swap(ctrl_, that.ctrl_);
870       std::swap(slots_, that.slots_);
871       std::swap(size_, that.size_);
872       std::swap(capacity_, that.capacity_);
873       std::swap(growth_left(), that.growth_left());
874       std::swap(infoz_, that.infoz_);
875     } else {
876       reserve(that.size());
877       // Note: this will copy elements of dense_set and unordered_set instead of
878       // moving them. This can be fixed if it ever becomes an issue.
879       for (auto& elem : that) insert(std::move(elem));
880     }
881   }
882 
883   raw_hash_set& operator=(const raw_hash_set& that) {
884     raw_hash_set tmp(that,
885                      AllocTraits::propagate_on_container_copy_assignment::value
886                          ? that.alloc_ref()
887                          : alloc_ref());
888     swap(tmp);
889     return *this;
890   }
891 
892   raw_hash_set& operator=(raw_hash_set&& that) noexcept(
893       absl::allocator_traits<allocator_type>::is_always_equal::value&&
894           std::is_nothrow_move_assignable<hasher>::value&&
895               std::is_nothrow_move_assignable<key_equal>::value) {
896     // TODO(sbenza): We should only use the operations from the noexcept clause
897     // to make sure we actually adhere to that contract.
898     return move_assign(
899         std::move(that),
900         typename AllocTraits::propagate_on_container_move_assignment());
901   }
902 
903   ~raw_hash_set() { destroy_slots(); }
904 
905   iterator begin() {
906     auto it = iterator_at(0);
907     it.skip_empty_or_deleted();
908     return it;
909   }
910   iterator end() { return {ctrl_ + capacity_}; }
911 
912   const_iterator begin() const {
913     return const_cast<raw_hash_set*>(this)->begin();
914   }
915   const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
916   const_iterator cbegin() const { return begin(); }
917   const_iterator cend() const { return end(); }
918 
919   bool empty() const { return !size(); }
920   size_t size() const { return size_; }
921   size_t capacity() const { return capacity_; }
922   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
923 
924   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
925     // Iterating over this container is O(bucket_count()). When bucket_count()
926     // is much greater than size(), iteration becomes prohibitively expensive.
    // For clear() it is more important to reuse the allocated array when the
    // container is small, because allocation is slow compared to destroying the
    // elements of the container. So we pick the largest bucket_count() threshold
    // for which iteration is still fast, and past that we simply deallocate the
    // array.
932     if (capacity_ > 127) {
933       destroy_slots();
934     } else if (capacity_) {
935       for (size_t i = 0; i != capacity_; ++i) {
936         if (IsFull(ctrl_[i])) {
937           PolicyTraits::destroy(&alloc_ref(), slots_ + i);
938         }
939       }
940       size_ = 0;
941       reset_ctrl();
942       reset_growth_left();
943     }
944     assert(empty());
945     infoz_.RecordStorageChanged(0, capacity_);
946   }
947 
948   // This overload kicks in when the argument is an rvalue of insertable and
949   // decomposable type other than init_type.
950   //
951   //   flat_hash_map<std::string, int> m;
952   //   m.insert(std::make_pair("abc", 42));
953   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
954   // bug.
955   template <class T, RequiresInsertable<T> = 0,
956             class T2 = T,
957             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
958             T* = nullptr>
959   std::pair<iterator, bool> insert(T&& value) {
960     return emplace(std::forward<T>(value));
961   }
962 
963   // This overload kicks in when the argument is a bitfield or an lvalue of
964   // insertable and decomposable type.
965   //
966   //   union { int n : 1; };
967   //   flat_hash_set<int> s;
968   //   s.insert(n);
969   //
970   //   flat_hash_set<std::string> s;
971   //   const char* p = "hello";
972   //   s.insert(p);
973   //
974   // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
975   // RequiresInsertable<T> with RequiresInsertable<const T&>.
976   // We are hitting this bug: https://godbolt.org/g/1Vht4f.
977   template <
978       class T, RequiresInsertable<T> = 0,
979       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
980   std::pair<iterator, bool> insert(const T& value) {
981     return emplace(value);
982   }
983 
984   // This overload kicks in when the argument is an rvalue of init_type. Its
985   // purpose is to handle brace-init-list arguments.
986   //
987   //   flat_hash_map<std::string, int> s;
988   //   s.insert({"abc", 42});
989   std::pair<iterator, bool> insert(init_type&& value) {
990     return emplace(std::move(value));
991   }
992 
993   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
994   // bug.
995   template <class T, RequiresInsertable<T> = 0, class T2 = T,
996             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
997             T* = nullptr>
998   iterator insert(const_iterator, T&& value) {
999     return insert(std::forward<T>(value)).first;
1000   }
1001 
1002   // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
1003   // RequiresInsertable<T> with RequiresInsertable<const T&>.
1004   // We are hitting this bug: https://godbolt.org/g/1Vht4f.
1005   template <
1006       class T, RequiresInsertable<T> = 0,
1007       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
1008   iterator insert(const_iterator, const T& value) {
1009     return insert(value).first;
1010   }
1011 
1012   iterator insert(const_iterator, init_type&& value) {
1013     return insert(std::move(value)).first;
1014   }
1015 
1016   template <class InputIt>
1017   void insert(InputIt first, InputIt last) {
1018     for (; first != last; ++first) insert(*first);
1019   }
1020 
1021   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
1022   void insert(std::initializer_list<T> ilist) {
1023     insert(ilist.begin(), ilist.end());
1024   }
1025 
1026   void insert(std::initializer_list<init_type> ilist) {
1027     insert(ilist.begin(), ilist.end());
1028   }
1029 
1030   insert_return_type insert(node_type&& node) {
1031     if (!node) return {end(), false, node_type()};
1032     const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
1033     auto res = PolicyTraits::apply(
1034         InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
1035         elem);
1036     if (res.second) {
1037       CommonAccess::Reset(&node);
1038       return {res.first, true, node_type()};
1039     } else {
1040       return {res.first, false, std::move(node)};
1041     }
1042   }
1043 
1044   iterator insert(const_iterator, node_type&& node) {
1045     return insert(std::move(node)).first;
1046   }
1047 
1048   // This overload kicks in if we can deduce the key from args. This enables us
1049   // to avoid constructing value_type if an entry with the same key already
1050   // exists.
1051   //
1052   // For example:
1053   //
1054   //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
1055   //   // Creates no std::string copies and makes no heap allocations.
1056   //   m.emplace("abc", "xyz");
1057   template <class... Args, typename std::enable_if<
1058                                IsDecomposable<Args...>::value, int>::type = 0>
1059   std::pair<iterator, bool> emplace(Args&&... args) {
1060     return PolicyTraits::apply(EmplaceDecomposable{*this},
1061                                std::forward<Args>(args)...);
1062   }
1063 
1064   // This overload kicks in if we cannot deduce the key from args. It constructs
1065   // value_type unconditionally and then either moves it into the table or
  // destroys it.
1067   template <class... Args, typename std::enable_if<
1068                                !IsDecomposable<Args...>::value, int>::type = 0>
1069   std::pair<iterator, bool> emplace(Args&&... args) {
1070     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
1071     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
1072 
1073     PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
1074     const auto& elem = PolicyTraits::element(slot);
1075     return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
1076   }
1077 
1078   template <class... Args>
1079   iterator emplace_hint(const_iterator, Args&&... args) {
1080     return emplace(std::forward<Args>(args)...).first;
1081   }
1082 
1083   // Extension API: support for lazy emplace.
1084   //
1085   // Looks up key in the table. If found, returns the iterator to the element.
1086   // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
1087   //
1088   // `f` must abide by several restrictions:
1089   //  - it MUST call `raw_hash_set::constructor` with arguments as if a
1090   //    `raw_hash_set::value_type` is constructed,
1091   //  - it MUST NOT access the container before the call to
1092   //    `raw_hash_set::constructor`, and
1093   //  - it MUST NOT erase the lazily emplaced element.
1094   // Doing any of these is undefined behavior.
1095   //
1096   // For example:
1097   //
1098   //   std::unordered_set<ArenaString> s;
  //   // Makes an ArenaString even if "abc" is already in the set.
1100   //   s.insert(ArenaString(&arena, "abc"));
1101   //
  //   flat_hash_set<ArenaString> s;
  //   // Makes an ArenaString only if "abc" is not already in the set.
1104   //   s.lazy_emplace("abc", [&](const constructor& ctor) {
1105   //     ctor(&arena, "abc");
1106   //   });
1107   //
1108   // WARNING: This API is currently experimental. If there is a way to implement
1109   // the same thing with the rest of the API, prefer that.
1110   class constructor {
1111     friend class raw_hash_set;
1112 
1113    public:
1114     template <class... Args>
1115     void operator()(Args&&... args) const {
1116       assert(*slot_);
1117       PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
1118       *slot_ = nullptr;
1119     }
1120 
1121    private:
1122     constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
1123 
1124     allocator_type* alloc_;
1125     slot_type** slot_;
1126   };
1127 
1128   template <class K = key_type, class F>
1129   iterator lazy_emplace(const key_arg<K>& key, F&& f) {
1130     auto res = find_or_prepare_insert(key);
1131     if (res.second) {
1132       slot_type* slot = slots_ + res.first;
1133       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
1134       assert(!slot);
1135     }
1136     return iterator_at(res.first);
1137   }
1138 
1139   // Extension API: support for heterogeneous keys.
1140   //
1141   //   std::unordered_set<std::string> s;
1142   //   // Turns "abc" into std::string.
1143   //   s.erase("abc");
1144   //
1145   //   flat_hash_set<std::string> s;
1146   //   // Uses "abc" directly without copying it into std::string.
1147   //   s.erase("abc");
1148   template <class K = key_type>
1149   size_type erase(const key_arg<K>& key) {
1150     auto it = find(key);
1151     if (it == end()) return 0;
1152     erase(it);
1153     return 1;
1154   }
1155 
1156   // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
1157   // this method returns void to reduce algorithmic complexity to O(1).  The
1158   // iterator is invalidated, so any increment should be done before calling
1159   // erase.  In order to erase while iterating across a map, use the following
1160   // idiom (which also works for standard containers):
1161   //
1162   // for (auto it = m.begin(), end = m.end(); it != end;) {
1163   //   // `erase()` will invalidate `it`, so advance `it` first.
1164   //   auto copy_it = it++;
1165   //   if (<pred>) {
1166   //     m.erase(copy_it);
1167   //   }
1168   // }
1169   void erase(const_iterator cit) { erase(cit.inner_); }
1170 
1171   // This overload is necessary because otherwise erase<K>(const K&) would be
1172   // a better match if non-const iterator is passed as an argument.
1173   void erase(iterator it) {
1174     it.assert_is_full();
1175     PolicyTraits::destroy(&alloc_ref(), it.slot_);
1176     erase_meta_only(it);
1177   }
1178 
1179   iterator erase(const_iterator first, const_iterator last) {
1180     while (first != last) {
1181       erase(first++);
1182     }
1183     return last.inner_;
1184   }
1185 
1186   // Moves elements from `src` into `this`.
1187   // If the element already exists in `this`, it is left unmodified in `src`.
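  //
  // For example (a sketch):
  //
  //   absl::flat_hash_set<std::string> dst = {"x"};
  //   absl::flat_hash_set<std::string> src = {"x", "y"};
  //   dst.merge(src);
  //   // dst == {"x", "y"}; src == {"x"}. "x" already existed in dst, so it
  //   // stays in src; "y" was moved (not copied) into dst.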
1188   template <typename H, typename E>
1189   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
1190     assert(this != &src);
1191     for (auto it = src.begin(), e = src.end(); it != e;) {
1192       auto next = std::next(it);
1193       if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
1194                               PolicyTraits::element(it.slot_))
1195               .second) {
1196         src.erase_meta_only(it);
1197       }
1198       it = next;
1199     }
1200   }
1201 
1202   template <typename H, typename E>
1203   void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
1204     merge(src);
1205   }
1206 
1207   node_type extract(const_iterator position) {
1208     position.inner_.assert_is_full();
1209     auto node =
1210         CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
1211     erase_meta_only(position);
1212     return node;
1213   }
1214 
1215   template <
1216       class K = key_type,
1217       typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
1218   node_type extract(const key_arg<K>& key) {
1219     auto it = find(key);
1220     return it == end() ? node_type() : extract(const_iterator{it});
1221   }
1222 
1223   void swap(raw_hash_set& that) noexcept(
1224       IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
1225       (!AllocTraits::propagate_on_container_swap::value ||
1226        IsNoThrowSwappable<allocator_type>())) {
1227     using std::swap;
1228     swap(ctrl_, that.ctrl_);
1229     swap(slots_, that.slots_);
1230     swap(size_, that.size_);
1231     swap(capacity_, that.capacity_);
1232     swap(growth_left(), that.growth_left());
1233     swap(hash_ref(), that.hash_ref());
1234     swap(eq_ref(), that.eq_ref());
1235     swap(infoz_, that.infoz_);
1236     if (AllocTraits::propagate_on_container_swap::value) {
1237       swap(alloc_ref(), that.alloc_ref());
1238     } else {
1239       // If the allocators do not compare equal it is officially undefined
1240       // behavior. We choose to do nothing.
1241     }
1242   }
1243 
1244   void rehash(size_t n) {
1245     if (n == 0 && capacity_ == 0) return;
1246     if (n == 0 && size_ == 0) {
1247       destroy_slots();
1248       infoz_.RecordStorageChanged(0, 0);
1249       return;
1250     }
1251     // bitor is a faster way of doing `max` here. We will round up to the next
1252     // power-of-2-minus-1, so bitor is good enough.
1253     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
1254     // n == 0 unconditionally rehashes as per the standard.
1255     if (n == 0 || m > capacity_) {
1256       resize(m);
1257     }
1258   }
1259 
1260   void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
1261 
1262   // Extension API: support for heterogeneous keys.
1263   //
1264   //   std::unordered_set<std::string> s;
1265   //   // Turns "abc" into std::string.
1266   //   s.count("abc");
1267   //
  //   flat_hash_set<std::string> s;
1269   //   // Uses "abc" directly without copying it into std::string.
1270   //   s.count("abc");
1271   template <class K = key_type>
1272   size_t count(const key_arg<K>& key) const {
1273     return find(key) == end() ? 0 : 1;
1274   }
1275 
1276   // Issues CPU prefetch instructions for the memory needed to find or insert
  // a key.  Like all lookup functions, this supports heterogeneous keys.
1278   //
1279   // NOTE: This is a very low level operation and should not be used without
1280   // specific benchmarks indicating its importance.
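  //
  // For example (a sketch): when processing a batch of keys, prefetch the next
  // key's probe location while working on the current one. `keys` and
  // `Process` are hypothetical.
  //
  //   for (size_t i = 0; i < keys.size(); ++i) {
  //     if (i + 1 < keys.size()) s.prefetch(keys[i + 1]);
  //     Process(s.find(keys[i]));
  //   }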
1281   template <class K = key_type>
1282   void prefetch(const key_arg<K>& key) const {
1283     (void)key;
1284 #if defined(__GNUC__)
1285     auto seq = probe(hash_ref()(key));
1286     __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
1287     __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
1288 #endif  // __GNUC__
1289   }
1290 
1291   // The API of find() has two extensions.
1292   //
1293   // 1. The hash can be passed by the user. It must be equal to the hash of the
1294   // key.
1295   //
  // 2. The type of the key argument doesn't have to be key_type. This is the
  // so-called heterogeneous key support.
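  //
  // For example (a sketch), when the caller has already computed the hash:
  //
  //   const size_t h = s.hash_function()(key);
  //   auto it = s.find(key, h);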
1298   template <class K = key_type>
1299   iterator find(const key_arg<K>& key, size_t hash) {
1300     auto seq = probe(hash);
1301     while (true) {
1302       Group g{ctrl_ + seq.offset()};
1303       for (int i : g.Match(H2(hash))) {
1304         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
1305                 EqualElement<K>{key, eq_ref()},
1306                 PolicyTraits::element(slots_ + seq.offset(i)))))
1307           return iterator_at(seq.offset(i));
1308       }
1309       if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
1310       seq.next();
1311     }
1312   }
1313   template <class K = key_type>
1314   iterator find(const key_arg<K>& key) {
1315     return find(key, hash_ref()(key));
1316   }
1317 
1318   template <class K = key_type>
1319   const_iterator find(const key_arg<K>& key, size_t hash) const {
1320     return const_cast<raw_hash_set*>(this)->find(key, hash);
1321   }
1322   template <class K = key_type>
1323   const_iterator find(const key_arg<K>& key) const {
1324     return find(key, hash_ref()(key));
1325   }
1326 
1327   template <class K = key_type>
1328   bool contains(const key_arg<K>& key) const {
1329     return find(key) != end();
1330   }
1331 
1332   template <class K = key_type>
1333   std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
1334     auto it = find(key);
1335     if (it != end()) return {it, std::next(it)};
1336     return {it, it};
1337   }
1338   template <class K = key_type>
1339   std::pair<const_iterator, const_iterator> equal_range(
1340       const key_arg<K>& key) const {
1341     auto it = find(key);
1342     if (it != end()) return {it, std::next(it)};
1343     return {it, it};
1344   }
1345 
1346   size_t bucket_count() const { return capacity_; }
1347   float load_factor() const {
1348     return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
1349   }
1350   float max_load_factor() const { return 1.0f; }
1351   void max_load_factor(float) {
1352     // Does nothing.
1353   }
1354 
1355   hasher hash_function() const { return hash_ref(); }
1356   key_equal key_eq() const { return eq_ref(); }
1357   allocator_type get_allocator() const { return alloc_ref(); }
1358 
1359   friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
1360     if (a.size() != b.size()) return false;
1361     const raw_hash_set* outer = &a;
1362     const raw_hash_set* inner = &b;
1363     if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
1364     for (const value_type& elem : *outer)
1365       if (!inner->has_element(elem)) return false;
1366     return true;
1367   }
1368 
1369   friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
1370     return !(a == b);
1371   }
1372 
1373   friend void swap(raw_hash_set& a,
1374                    raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
1375     a.swap(b);
1376   }
1377 
1378  private:
1379   template <class Container, typename Enabler>
1380   friend struct absl::container_internal::hashtable_debug_internal::
1381       HashtableDebugAccess;
1382 
1383   struct FindElement {
1384     template <class K, class... Args>
1385     const_iterator operator()(const K& key, Args&&...) const {
1386       return s.find(key);
1387     }
1388     const raw_hash_set& s;
1389   };
1390 
1391   struct HashElement {
1392     template <class K, class... Args>
1393     size_t operator()(const K& key, Args&&...) const {
1394       return h(key);
1395     }
1396     const hasher& h;
1397   };
1398 
1399   template <class K1>
1400   struct EqualElement {
1401     template <class K2, class... Args>
1402     bool operator()(const K2& lhs, Args&&...) const {
1403       return eq(lhs, rhs);
1404     }
1405     const K1& rhs;
1406     const key_equal& eq;
1407   };
1408 
1409   struct EmplaceDecomposable {
1410     template <class K, class... Args>
1411     std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
1412       auto res = s.find_or_prepare_insert(key);
1413       if (res.second) {
1414         s.emplace_at(res.first, std::forward<Args>(args)...);
1415       }
1416       return {s.iterator_at(res.first), res.second};
1417     }
1418     raw_hash_set& s;
1419   };
1420 
1421   template <bool do_destroy>
1422   struct InsertSlot {
1423     template <class K, class... Args>
1424     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
1425       auto res = s.find_or_prepare_insert(key);
1426       if (res.second) {
1427         PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
1428       } else if (do_destroy) {
1429         PolicyTraits::destroy(&s.alloc_ref(), &slot);
1430       }
1431       return {s.iterator_at(res.first), res.second};
1432     }
1433     raw_hash_set& s;
1434     // Constructed slot. Either moved into place or destroyed.
1435     slot_type&& slot;
1436   };
1437 
  // "Erases" the element pointed to by `it` from the container, except that it
  // does not actually destroy the element: only the control byte and the
  // bookkeeping metadata of the table are updated. This can be used together
  // with Policy::transfer to move the element to another place.
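  //
  // A sketch of that pattern (with hypothetical caller-owned `dst` and
  // `src_slot` slot pointers; neither is a member of this class):
  //
  //   PolicyTraits::transfer(&alloc_ref(), dst, src_slot);  // move element out
  //   erase_meta_only(it);                                   // forget the slot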
1442   void erase_meta_only(const_iterator it) {
1443     assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
1444     --size_;
1445     const size_t index = it.inner_.ctrl_ - ctrl_;
1446     const size_t index_before = (index - Group::kWidth) & capacity_;
1447     const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
1448     const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
1449 
    // We count how many consecutive non-empty slots we have to the right and
    // to the left of `it`. If the sum is >= kWidth then there is at least one
    // probe window that might have seen the group as full, so the slot must be
    // marked kDeleted; otherwise kEmpty is safe.
1453     bool was_never_full =
1454         empty_before && empty_after &&
1455         static_cast<size_t>(empty_after.TrailingZeros() +
1456                             empty_before.LeadingZeros()) < Group::kWidth;
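    // For example (illustrative numbers, assuming Group::kWidth == 16): a run
    // of 7 non-empty slots on one side of `it` and 8 on the other sums to
    // 15 < 16, so no 16-slot probe window around `it` can ever have appeared
    // completely full, and the slot can safely be re-marked kEmpty.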
1457 
1458     set_ctrl(index, was_never_full ? kEmpty : kDeleted);
1459     growth_left() += was_never_full;
1460     infoz_.RecordErase();
1461   }
1462 
1463   void initialize_slots() {
1464     assert(capacity_);
    // Folks with custom allocators often make unwarranted assumptions about the
    // behavior of their classes vis-a-vis trivial destructibility and what
    // calls they will or won't make.  Avoid sampling for people with custom
    // allocators to get us out of this mess.  This is not a hard guarantee but
    // a workaround while we plan the exact guarantee we want to provide.
1470     //
1471     // People are often sloppy with the exact type of their allocator (sometimes
1472     // it has an extra const or is missing the pair, but rebinds made it work
1473     // anyway).  To avoid the ambiguity, we work off SlotAlloc which we have
1474     // bound more carefully.
1475     if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
1476         slots_ == nullptr) {
1477       infoz_ = Sample();
1478     }
1479 
1480     auto layout = MakeLayout(capacity_);
1481     char* mem = static_cast<char*>(
1482         Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
1483     ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
1484     slots_ = layout.template Pointer<1>(mem);
1485     reset_ctrl();
1486     reset_growth_left();
1487     infoz_.RecordStorageChanged(size_, capacity_);
1488   }
1489 
1490   void destroy_slots() {
1491     if (!capacity_) return;
1492     for (size_t i = 0; i != capacity_; ++i) {
1493       if (IsFull(ctrl_[i])) {
1494         PolicyTraits::destroy(&alloc_ref(), slots_ + i);
1495       }
1496     }
1497     auto layout = MakeLayout(capacity_);
1498     // Unpoison before returning the memory to the allocator.
1499     SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
1500     Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
1501     ctrl_ = EmptyGroup();
1502     slots_ = nullptr;
1503     size_ = 0;
1504     capacity_ = 0;
1505     growth_left() = 0;
1506   }
1507 
1508   void resize(size_t new_capacity) {
1509     assert(IsValidCapacity(new_capacity));
1510     auto* old_ctrl = ctrl_;
1511     auto* old_slots = slots_;
1512     const size_t old_capacity = capacity_;
1513     capacity_ = new_capacity;
1514     initialize_slots();
1515 
1516     size_t total_probe_length = 0;
1517     for (size_t i = 0; i != old_capacity; ++i) {
1518       if (IsFull(old_ctrl[i])) {
1519         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
1520                                           PolicyTraits::element(old_slots + i));
1521         auto target = find_first_non_full(hash);
1522         size_t new_i = target.offset;
1523         total_probe_length += target.probe_length;
1524         set_ctrl(new_i, H2(hash));
1525         PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
1526       }
1527     }
1528     if (old_capacity) {
1529       SanitizerUnpoisonMemoryRegion(old_slots,
1530                                     sizeof(slot_type) * old_capacity);
1531       auto layout = MakeLayout(old_capacity);
1532       Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
1533                                       layout.AllocSize());
1534     }
1535     infoz_.RecordRehash(total_probe_length);
1536   }
1537 
1538   void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
1539     assert(IsValidCapacity(capacity_));
1540     assert(!is_small());
1541     // Algorithm:
1542     // - mark all DELETED slots as EMPTY
1543     // - mark all FULL slots as DELETED
1544     // - for each slot marked as DELETED
1545     //     hash = Hash(element)
1546     //     target = find_first_non_full(hash)
1547     //     if target is in the same group
1548     //       mark slot as FULL
1549     //     else if target is EMPTY
1550     //       transfer element to target
1551     //       mark slot as EMPTY
1552     //       mark target as FULL
1553     //     else if target is DELETED
1554     //       swap current element with target element
1555     //       mark target as FULL
1556     //       repeat procedure for current slot with moved from element (target)
1557     ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
1558     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
1559     size_t total_probe_length = 0;
1560     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
1561     for (size_t i = 0; i != capacity_; ++i) {
1562       if (!IsDeleted(ctrl_[i])) continue;
1563       size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
1564                                         PolicyTraits::element(slots_ + i));
1565       auto target = find_first_non_full(hash);
1566       size_t new_i = target.offset;
1567       total_probe_length += target.probe_length;
1568 
      // Check whether the old and new positions fall within the same group
      // with respect to the hash. If they do, we don't need to move the
      // object: it already sits in the best probe group it can reach.
1572       const auto probe_index = [&](size_t pos) {
1573         return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
1574       };
1575 
1576       // Element doesn't move.
1577       if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
1578         set_ctrl(i, H2(hash));
1579         continue;
1580       }
1581       if (IsEmpty(ctrl_[new_i])) {
1582         // Transfer element to the empty spot.
1583         // set_ctrl poisons/unpoisons the slots so we have to call it at the
1584         // right time.
1585         set_ctrl(new_i, H2(hash));
1586         PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
1587         set_ctrl(i, kEmpty);
1588       } else {
1589         assert(IsDeleted(ctrl_[new_i]));
1590         set_ctrl(new_i, H2(hash));
1591         // Until we are done rehashing, DELETED marks previously FULL slots.
1592         // Swap i and new_i elements.
1593         PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
1594         PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
1595         PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
1596         --i;  // repeat
1597       }
1598     }
1599     reset_growth_left();
1600     infoz_.RecordRehash(total_probe_length);
1601   }
1602 
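  // Called when the growth budget is exhausted on insert. Grows an empty table
  // to capacity 1, reclaims deleted slots in place when live elements occupy
  // at most half of the growth budget, and otherwise roughly doubles the
  // capacity.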
1603   void rehash_and_grow_if_necessary() {
1604     if (capacity_ == 0) {
1605       resize(1);
1606     } else if (size() <= CapacityToGrowth(capacity()) / 2) {
1607       // Squash DELETED without growing if there is enough capacity.
1608       drop_deletes_without_resize();
1609     } else {
1610       // Otherwise grow the container.
1611       resize(capacity_ * 2 + 1);
1612     }
1613   }
1614 
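  // Returns whether an element equal to `elem` is present, comparing whole
  // elements with operator== rather than key_eq(). Used by operator== above.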
1615   bool has_element(const value_type& elem) const {
1616     size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
1617     auto seq = probe(hash);
1618     while (true) {
1619       Group g{ctrl_ + seq.offset()};
1620       for (int i : g.Match(H2(hash))) {
1621         if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
1622                               elem))
1623           return true;
1624       }
1625       if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
1626       seq.next();
1627       assert(seq.index() < capacity_ && "full table!");
1628     }
1629     return false;
1630   }
1631 
  // Probes the raw_hash_set with the probe sequence for `hash` and returns the
  // offset of (and probe length to) the first empty or deleted slot.
  // NOTE: this function must work with tables having both kEmpty and kDeleted
  // in one group. Such tables appear during drop_deletes_without_resize.
  //
  // This function is especially profitable during insertion when:
  // - the input is already a set
  // - there are enough slots
  // - the element with the hash is not in the table
  struct FindInfo {
    // Index of the first empty or deleted slot found.
    size_t offset;
    // How far the probe sequence had advanced when that slot was found;
    // recorded for hashtablez statistics.
    size_t probe_length;
  };
1645   FindInfo find_first_non_full(size_t hash) {
1646     auto seq = probe(hash);
1647     while (true) {
1648       Group g{ctrl_ + seq.offset()};
1649       auto mask = g.MatchEmptyOrDeleted();
1650       if (mask) {
1651 #if !defined(NDEBUG)
1652         // We want to add entropy even when ASLR is not enabled.
1653         // In debug build we will randomly insert in either the front or back of
1654         // the group.
1655         // TODO(kfm,sbenza): revisit after we do unconditional mixing
1656         if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
1657           return {seq.offset(mask.HighestBitSet()), seq.index()};
1658         }
1659 #endif
1660         return {seq.offset(mask.LowestBitSet()), seq.index()};
1661       }
1662       assert(seq.index() < capacity_ && "full table!");
1663       seq.next();
1664     }
1665   }
1666 
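  // Tag-dispatched move assignment: the std::true_type overload may adopt
  // `that`'s storage and allocator wholesale via move construction, while the
  // std::false_type overload rebuilds the elements using our own allocator.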
1667   // TODO(alkis): Optimize this assuming *this and that don't overlap.
1668   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
1669     raw_hash_set tmp(std::move(that));
1670     swap(tmp);
1671     return *this;
1672   }
1673   raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
1674     raw_hash_set tmp(std::move(that), alloc_ref());
1675     swap(tmp);
1676     return *this;
1677   }
1678 
1679  protected:
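  // Attempts to find `key` in the table. If found, returns the index of its
  // slot and `false`. Otherwise reserves a slot for it (growing or rehashing
  // if necessary) and returns the new slot's index and `true`; the caller must
  // then construct the element with emplace_at().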
1680   template <class K>
1681   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
1682     auto hash = hash_ref()(key);
1683     auto seq = probe(hash);
1684     while (true) {
1685       Group g{ctrl_ + seq.offset()};
1686       for (int i : g.Match(H2(hash))) {
1687         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
1688                 EqualElement<K>{key, eq_ref()},
1689                 PolicyTraits::element(slots_ + seq.offset(i)))))
1690           return {seq.offset(i), false};
1691       }
1692       if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
1693       seq.next();
1694     }
1695     return {prepare_insert(hash), true};
1696   }
1697 
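  // Given the hash of a key that is not in the table, finds the first
  // non-full slot in its probe sequence, growing or squashing deleted entries
  // first if the growth budget is exhausted, and claims that slot. Returns the
  // slot's index.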
1698   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
1699     auto target = find_first_non_full(hash);
1700     if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
1701                            !IsDeleted(ctrl_[target.offset]))) {
1702       rehash_and_grow_if_necessary();
1703       target = find_first_non_full(hash);
1704     }
1705     ++size_;
1706     growth_left() -= IsEmpty(ctrl_[target.offset]);
1707     set_ctrl(target.offset, H2(hash));
1708     infoz_.RecordInsert(hash, target.probe_length);
1709     return target.offset;
1710   }
1711 
  // Constructs the value in the slot at index `i`. This only works after a
  // find_or_prepare_insert() that did not find the key, and before any other
  // modifications happen to the raw_hash_set.
1715   //
1716   // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
1717   // k is the key decomposed from `forward<Args>(args)...`, and the bool
1718   // returned by find_or_prepare_insert(k) was true.
1719   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
1720   template <class... Args>
1721   void emplace_at(size_t i, Args&&... args) {
1722     PolicyTraits::construct(&alloc_ref(), slots_ + i,
1723                             std::forward<Args>(args)...);
1724 
1725     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
1726                iterator_at(i) &&
1727            "constructed value does not match the lookup key");
1728   }
1729 
1730   iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
1731   const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
1732 
1733  private:
1734   friend struct RawHashSetTestOnlyAccess;
1735 
1736   probe_seq<Group::kWidth> probe(size_t hash) const {
1737     return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
1738   }
1739 
1740   // Reset all ctrl bytes back to kEmpty, except the sentinel.
1741   void reset_ctrl() {
1742     std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
1743     ctrl_[capacity_] = kSentinel;
1744     SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
1745   }
1746 
1747   void reset_growth_left() {
1748     growth_left() = CapacityToGrowth(capacity()) - size_;
1749   }
1750 
  // Sets the control byte, and if `i < Group::kWidth`, sets the cloned byte at
  // the end too.
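  //
  // For example (illustrative numbers, assuming Group::kWidth == 16): with
  // capacity_ == 31, set_ctrl(3, h) also writes ctrl_[35] (capacity_ + 1 + 3),
  // the cloned copy read when a group load wraps past the sentinel; for
  // i >= Group::kWidth the mirror index below is i itself, so the second
  // store is a harmless overwrite.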
1753   void set_ctrl(size_t i, ctrl_t h) {
1754     assert(i < capacity_);
1755 
1756     if (IsFull(h)) {
1757       SanitizerUnpoisonObject(slots_ + i);
1758     } else {
1759       SanitizerPoisonObject(slots_ + i);
1760     }
1761 
1762     ctrl_[i] = h;
1763     ctrl_[((i - Group::kWidth) & capacity_) + 1 +
1764           ((Group::kWidth - 1) & capacity_)] = h;
1765   }
1766 
1767   size_t& growth_left() { return settings_.template get<0>(); }
1768 
1769   // The representation of the object has two modes:
1770   //  - small: For capacities < kWidth-1
1771   //  - large: For the rest.
1772   //
1773   // Differences:
1774   //  - In small mode we are able to use the whole capacity. The extra control
1775   //  bytes give us at least one "empty" control byte to stop the iteration.
1776   //  This is important to make 1 a valid capacity.
1777   //
  //  - In small mode only the first `capacity()` control bytes after the
  //  sentinel are valid. The rest contain dummy kEmpty values that do not
  //  represent a real slot. This is important to take into account in
  //  find_first_non_full(), where we never try ShouldInsertBackwards() for
  //  small tables.
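  //
  //  For example, with Group::kWidth == 16 the capacities 1, 3 and 7 are
  //  "small", while 15 and above are "large".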
1783   bool is_small() const { return capacity_ < Group::kWidth - 1; }
1784 
1785   hasher& hash_ref() { return settings_.template get<1>(); }
1786   const hasher& hash_ref() const { return settings_.template get<1>(); }
1787   key_equal& eq_ref() { return settings_.template get<2>(); }
1788   const key_equal& eq_ref() const { return settings_.template get<2>(); }
1789   allocator_type& alloc_ref() { return settings_.template get<3>(); }
1790   const allocator_type& alloc_ref() const {
1791     return settings_.template get<3>();
1792   }
1793 
1794   // TODO(alkis): Investigate removing some of these fields:
1795   // - ctrl/slots can be derived from each other
1796   // - size can be moved into the slot array
  ctrl_t* ctrl_ = EmptyGroup();    // [(capacity + 1 + Group::kWidth) * ctrl_t],
                                   // including sentinel and cloned bytes
1798   slot_type* slots_ = nullptr;     // [capacity * slot_type]
1799   size_t size_ = 0;                // number of full slots
1800   size_t capacity_ = 0;            // total number of slots
1801   HashtablezInfoHandle infoz_;
1802   absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
1803                                             key_equal, allocator_type>
1804       settings_{0, hasher{}, key_equal{}, allocator_type{}};
1805 };
1806 
1807 // Erases all elements that satisfy the predicate `pred` from the container `c`.
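//
// For example (sketch; `set` stands for any container built on raw_hash_set
// whose value_type is int):
//
//   EraseIf([](int v) { return v < 0; }, &set);
//
// removes every negative element in one pass. Advancing the iterator before
// calling erase() keeps the loop iterator valid, since erase() never rehashes.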
1808 template <typename P, typename H, typename E, typename A, typename Predicate>
1809 void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
1810   for (auto it = c->begin(), last = c->end(); it != last;) {
1811     auto copy_it = it++;
1812     if (pred(*copy_it)) {
1813       c->erase(copy_it);
1814     }
1815   }
1816 }
1817 
1818 namespace hashtable_debug_internal {
1819 template <typename Set>
1820 struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
1821   using Traits = typename Set::PolicyTraits;
1822   using Slot = typename Traits::slot_type;
1823 
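  // Counts the probes performed while looking up `key`: one per candidate slot
  // whose control byte matches H2(hash) but whose element does not compare
  // equal, plus one per additional group visited.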
1824   static size_t GetNumProbes(const Set& set,
1825                              const typename Set::key_type& key) {
1826     size_t num_probes = 0;
1827     size_t hash = set.hash_ref()(key);
1828     auto seq = set.probe(hash);
1829     while (true) {
1830       container_internal::Group g{set.ctrl_ + seq.offset()};
1831       for (int i : g.Match(container_internal::H2(hash))) {
1832         if (Traits::apply(
1833                 typename Set::template EqualElement<typename Set::key_type>{
1834                     key, set.eq_ref()},
1835                 Traits::element(set.slots_ + seq.offset(i))))
1836           return num_probes;
1837         ++num_probes;
1838       }
1839       if (g.MatchEmpty()) return num_probes;
1840       seq.next();
1841       ++num_probes;
1842     }
1843   }
1844 
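  // Returns the number of bytes allocated for the backing arrays, plus the
  // per-element heap usage reported by the policy (summed element by element
  // when it cannot be computed per slot).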
1845   static size_t AllocatedByteSize(const Set& c) {
1846     size_t capacity = c.capacity_;
1847     if (capacity == 0) return 0;
1848     auto layout = Set::MakeLayout(capacity);
1849     size_t m = layout.AllocSize();
1850 
1851     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
1852     if (per_slot != ~size_t{}) {
1853       m += per_slot * c.size();
1854     } else {
1855       for (size_t i = 0; i != capacity; ++i) {
1856         if (container_internal::IsFull(c.ctrl_[i])) {
1857           m += Traits::space_used(c.slots_ + i);
1858         }
1859       }
1860     }
1861     return m;
1862   }
1863 
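  // Returns a lower bound on the bytes a table holding `size` elements would
  // allocate, counting per-element heap usage only when the policy can report
  // it without an element.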
1864   static size_t LowerBoundAllocatedByteSize(size_t size) {
1865     size_t capacity = GrowthToLowerboundCapacity(size);
1866     if (capacity == 0) return 0;
1867     auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
1868     size_t m = layout.AllocSize();
1869     size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
1870     if (per_slot != ~size_t{}) {
1871       m += per_slot * size;
1872     }
1873     return m;
1874   }
1875 };
1876 
1877 }  // namespace hashtable_debug_internal
1878 }  // namespace container_internal
1879 ABSL_NAMESPACE_END
1880 }  // namespace absl
1881 
1882 #endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
1883