/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_HASH_SET_H_
#define ART_RUNTIME_BASE_HASH_SET_H_

#include <functional>
#include <iterator>
#include <memory>
#include <stdint.h>
#include <utility>

#include "bit_utils.h"
#include "logging.h"

namespace art {

// Returns true if an item is empty.
template <class T>
class DefaultEmptyFn {
 public:
  void MakeEmpty(T& item) const {
    item = T();
  }
  bool IsEmpty(const T& item) const {
    return item == T();
  }
};

template <class T>
class DefaultEmptyFn<T*> {
 public:
  void MakeEmpty(T*& item) const {
    item = nullptr;
  }
  bool IsEmpty(T* const& item) const {
    return item == nullptr;
  }
};
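
// A custom EmptyFn can reserve any sentinel as the empty-slot marker. A minimal hypothetical
// sketch for a set of non-negative ints, reserving -1 (nothing in this file depends on it):
//
//   struct NegativeOneEmptyFn {
//     void MakeEmpty(int& item) const { item = -1; }
//     bool IsEmpty(const int& item) const { return item == -1; }
//   };
//
// HashSet<int, NegativeOneEmptyFn> can then store 0 as a real element, which the default
// DefaultEmptyFn<int> cannot (it reserves the default-constructed value, 0).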

// Low memory version of a hash set; uses less memory than std::unordered_set since elements
// aren't boxed. Uses linear probing to resolve collisions.
// EmptyFn needs to implement two functions: MakeEmpty(T& item) and IsEmpty(const T& item).
// TODO: We could get rid of this requirement by using a bitmap, though maybe this would be slower
// and more complicated.
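//
// Example usage, a minimal sketch (note that DefaultEmptyFn<std::string> reserves the
// default-constructed value, the empty string, as the empty-slot marker):
//
//   HashSet<std::string> names;
//   names.Insert("foo");
//   auto it = names.Find("foo");
//   if (it != names.end()) {
//     names.Erase(it);
//   }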
template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
          class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
class HashSet {
  template <class Elem, class HashSetType>
  class BaseIterator : std::iterator<std::forward_iterator_tag, Elem> {
   public:
    BaseIterator(const BaseIterator&) = default;
    BaseIterator(BaseIterator&&) = default;
    BaseIterator(HashSetType* hash_set, size_t index) : index_(index), hash_set_(hash_set) {
    }
    BaseIterator& operator=(const BaseIterator&) = default;
    BaseIterator& operator=(BaseIterator&&) = default;

    bool operator==(const BaseIterator& other) const {
      return hash_set_ == other.hash_set_ && this->index_ == other.index_;
    }

    bool operator!=(const BaseIterator& other) const {
      return !(*this == other);
    }

    BaseIterator operator++() {  // Value after modification.
      this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
      return *this;
    }

    BaseIterator operator++(int) {
      BaseIterator temp = *this;
      this->index_ = this->NextNonEmptySlot(this->index_, hash_set_);
      return temp;
    }

    Elem& operator*() const {
      DCHECK(!hash_set_->IsFreeSlot(this->index_));
      return hash_set_->ElementForIndex(this->index_);
    }

    Elem* operator->() const {
      return &**this;
    }

    // TODO: Operator -- --(int) (and use std::bidirectional_iterator_tag)

   private:
    size_t index_;
    HashSetType* hash_set_;

    size_t NextNonEmptySlot(size_t index, const HashSet* hash_set) const {
      const size_t num_buckets = hash_set->NumBuckets();
      DCHECK_LT(index, num_buckets);
      do {
        ++index;
      } while (index < num_buckets && hash_set->IsFreeSlot(index));
      return index;
    }

    friend class HashSet;
  };

 public:
  using value_type = T;
  using allocator_type = Alloc;
  using reference = T&;
  using const_reference = const T&;
  using pointer = T*;
  using const_pointer = const T*;
  using iterator = BaseIterator<T, HashSet>;
  using const_iterator = BaseIterator<const T, const HashSet>;
  using size_type = size_t;
  using difference_type = ptrdiff_t;

  static constexpr double kDefaultMinLoadFactor = 0.4;
  static constexpr double kDefaultMaxLoadFactor = 0.7;
  static constexpr size_t kMinBuckets = 1000;

  // Clear the hash set. Deallocates the backing storage if we own it; otherwise the external
  // array is simply forgotten, and the next insertion will create a new array which owns the
  // data.
  void Clear() {
    DeallocateStorage();
    num_elements_ = 0;
    elements_until_expand_ = 0;
  }

  HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}

  HashSet(double min_load_factor, double max_load_factor) noexcept
      : num_elements_(0u),
        num_buckets_(0u),
        elements_until_expand_(0u),
        owns_data_(false),
        data_(nullptr),
        min_load_factor_(min_load_factor),
        max_load_factor_(max_load_factor) {
    DCHECK_GT(min_load_factor, 0.0);
    DCHECK_LT(max_load_factor, 1.0);
  }

  explicit HashSet(const allocator_type& alloc) noexcept
      : allocfn_(alloc),
        hashfn_(),
        emptyfn_(),
        pred_(),
        num_elements_(0u),
        num_buckets_(0u),
        elements_until_expand_(0u),
        owns_data_(false),
        data_(nullptr),
        min_load_factor_(kDefaultMinLoadFactor),
        max_load_factor_(kDefaultMaxLoadFactor) {
  }

  HashSet(const HashSet& other) noexcept
      : allocfn_(other.allocfn_),
        hashfn_(other.hashfn_),
        emptyfn_(other.emptyfn_),
        pred_(other.pred_),
        num_elements_(other.num_elements_),
        num_buckets_(0),
        elements_until_expand_(other.elements_until_expand_),
        owns_data_(false),
        data_(nullptr),
        min_load_factor_(other.min_load_factor_),
        max_load_factor_(other.max_load_factor_) {
    AllocateStorage(other.NumBuckets());
    for (size_t i = 0; i < num_buckets_; ++i) {
      ElementForIndex(i) = other.data_[i];
    }
  }

  // noexcept required so that the move constructor is used instead of copy constructor.
  // b/27860101
  HashSet(HashSet&& other) noexcept
      : allocfn_(std::move(other.allocfn_)),
        hashfn_(std::move(other.hashfn_)),
        emptyfn_(std::move(other.emptyfn_)),
        pred_(std::move(other.pred_)),
        num_elements_(other.num_elements_),
        num_buckets_(other.num_buckets_),
        elements_until_expand_(other.elements_until_expand_),
        owns_data_(other.owns_data_),
        data_(other.data_),
        min_load_factor_(other.min_load_factor_),
        max_load_factor_(other.max_load_factor_) {
    other.num_elements_ = 0u;
    other.num_buckets_ = 0u;
    other.elements_until_expand_ = 0u;
    other.owns_data_ = false;
    other.data_ = nullptr;
  }

  // Construct from existing data.
  // Read from a block of memory; if make_copy_of_data is false, then data_ points into the
  // passed-in ptr.
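  //
  // The serialized layout, as read below, is: num_elements (uint64_t), num_buckets (uint64_t),
  // elements_until_expand (uint64_t), min_load_factor (double), max_load_factor (double),
  // followed by num_buckets elements of type T.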
  HashSet(const uint8_t* ptr, bool make_copy_of_data, size_t* read_count) noexcept {
    uint64_t temp;
    size_t offset = 0;
    offset = ReadFromBytes(ptr, offset, &temp);
    num_elements_ = static_cast<size_t>(temp);
    offset = ReadFromBytes(ptr, offset, &temp);
    num_buckets_ = static_cast<size_t>(temp);
    CHECK_LE(num_elements_, num_buckets_);
    offset = ReadFromBytes(ptr, offset, &temp);
    elements_until_expand_ = static_cast<size_t>(temp);
    offset = ReadFromBytes(ptr, offset, &min_load_factor_);
    offset = ReadFromBytes(ptr, offset, &max_load_factor_);
    if (!make_copy_of_data) {
      owns_data_ = false;
      data_ = const_cast<T*>(reinterpret_cast<const T*>(ptr + offset));
      offset += sizeof(*data_) * num_buckets_;
    } else {
      AllocateStorage(num_buckets_);
      // Read elements one by one; note that this may not be safe for cross compilation if the
      // elements are pointer sized.
      for (size_t i = 0; i < num_buckets_; ++i) {
        offset = ReadFromBytes(ptr, offset, &data_[i]);
      }
    }
    // Caller responsible for aligning.
    *read_count = offset;
  }

  // Returns how large the table is after being written. If ptr is null, then no writing happens
  // but the size is still returned. ptr must be 8 byte aligned.
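  //
  // A minimal round-trip sketch (the buffer management here is illustrative, not prescribed):
  //
  //   const size_t size = set.WriteToMemory(nullptr);     // Measure only.
  //   std::unique_ptr<uint8_t[]> buf(new uint8_t[size]);  // operator new[] is suitably aligned.
  //   set.WriteToMemory(buf.get());                       // Serialize.
  //   size_t read_count;
  //   HashSet<std::string> copy(buf.get(), /*make_copy_of_data=*/true, &read_count);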
  size_t WriteToMemory(uint8_t* ptr) const {
    size_t offset = 0;
    offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_elements_));
    offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(num_buckets_));
    offset = WriteToBytes(ptr, offset, static_cast<uint64_t>(elements_until_expand_));
    offset = WriteToBytes(ptr, offset, min_load_factor_);
    offset = WriteToBytes(ptr, offset, max_load_factor_);
    // Write elements one by one; note that this may not be safe for cross compilation if the
    // elements are pointer sized.
    for (size_t i = 0; i < num_buckets_; ++i) {
      offset = WriteToBytes(ptr, offset, data_[i]);
    }
    // Caller responsible for aligning.
    return offset;
  }

  ~HashSet() {
    DeallocateStorage();
  }

  HashSet& operator=(HashSet&& other) noexcept {
    HashSet(std::move(other)).swap(*this);
    return *this;
  }

  HashSet& operator=(const HashSet& other) noexcept {
    HashSet(other).swap(*this);  // NOLINT(runtime/explicit) - a case of lint gone mad.
    return *this;
  }

  // Lowercase begin/end for C++11 range-based for loops.
  iterator begin() {
    iterator ret(this, 0);
    if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
      ++ret;  // Skip all the empty slots.
    }
    return ret;
  }

  // Lowercase begin/end for C++11 range-based for loops. Const version.
  const_iterator begin() const {
    const_iterator ret(this, 0);
    if (num_buckets_ != 0 && IsFreeSlot(ret.index_)) {
      ++ret;  // Skip all the empty slots.
    }
    return ret;
  }

  // Lowercase begin/end for C++11 range-based for loops.
  iterator end() {
    return iterator(this, NumBuckets());
  }

  // Lowercase begin/end for C++11 range-based for loops. Const version.
  const_iterator end() const {
    return const_iterator(this, NumBuckets());
  }

  bool Empty() const {
    return Size() == 0;
  }

  // Return true if the hash set has ownership of the underlying data.
  bool OwnsData() const {
    return owns_data_;
  }

  // Erase algorithm:
  // Make an empty slot where the iterator is pointing.
  // Scan forwards until we hit another empty slot.
  // If an element in between doesn't rehash to the range from the current empty slot to the
  // iterator, it must have probed from before the empty slot; in that case we can move it to the
  // empty slot and make the location we just moved from the new empty slot.
  // Relies on maintaining the invariant that there are no empty slots from the 'ideal' index of
  // an element to its actual location/index.
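  //
  // A hypothetical trace with 4 buckets: A and B both have ideal index 1 and occupy slots 1 and
  // 2, and slot 3 is empty. Erasing A empties slot 1; the scan visits B, whose ideal index (1)
  // is not strictly inside (1, 2], so B moves back into slot 1 and slot 2 becomes the empty one.
  // The scan then hits the empty slot 3 and stops.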
  iterator Erase(iterator it) {
    // empty_index is the index that will become empty.
    size_t empty_index = it.index_;
    DCHECK(!IsFreeSlot(empty_index));
    size_t next_index = empty_index;
    bool filled = false;  // True if we filled the empty index.
    while (true) {
      next_index = NextIndex(next_index);
      T& next_element = ElementForIndex(next_index);
      // If the next element is empty, we are done. Make sure to clear the current empty index.
      if (emptyfn_.IsEmpty(next_element)) {
        emptyfn_.MakeEmpty(ElementForIndex(empty_index));
        break;
      }
      // Otherwise try to see if the next element can fill the current empty index.
      const size_t next_hash = hashfn_(next_element);
      // Calculate the ideal index; if it is within empty_index + 1 to next_index then there is
      // nothing we can do.
      size_t next_ideal_index = IndexForHash(next_hash);
      // Loop around if needed for our check.
      size_t unwrapped_next_index = next_index;
      if (unwrapped_next_index < empty_index) {
        unwrapped_next_index += NumBuckets();
      }
      // Loop around if needed for our check.
      size_t unwrapped_next_ideal_index = next_ideal_index;
      if (unwrapped_next_ideal_index < empty_index) {
        unwrapped_next_ideal_index += NumBuckets();
      }
      if (unwrapped_next_ideal_index <= empty_index ||
          unwrapped_next_ideal_index > unwrapped_next_index) {
        // If the target index isn't within our current range it must have been probed from before
        // the empty index.
        ElementForIndex(empty_index) = std::move(next_element);
        filled = true;  // TODO: Optimize
        empty_index = next_index;
      }
    }
    --num_elements_;
    // If we didn't fill the slot then we need to go to the next non-free slot.
    if (!filled) {
      ++it;
    }
    return it;
  }

  // Find an element, returns end() if not found.
  // Allows custom key (K) types; an example of when this is useful: a set of Class* indexed by
  // name, where we want to find a class by its name without having to allocate a dummy Class
  // object on the heap just for the lookup.
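  //
  // A minimal sketch of what enables this (hypothetical functors, not defined in this file): the
  // HashFn and Pred template parameters each provide overloads for both the element type and the
  // key type, e.g.
  //
  //   struct ClassNameHash {
  //     size_t operator()(const Class* klass) const;  // Hashes the class's name.
  //     size_t operator()(const char* name) const;    // Hashes a raw name the same way.
  //   };
  //
  // together with a predicate overloaded for (const Class*, const char*).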
  template <typename K>
  iterator Find(const K& key) {
    return FindWithHash(key, hashfn_(key));
  }

  template <typename K>
  const_iterator Find(const K& key) const {
    return FindWithHash(key, hashfn_(key));
  }

  template <typename K>
  iterator FindWithHash(const K& key, size_t hash) {
    return iterator(this, FindIndex(key, hash));
  }

  template <typename K>
  const_iterator FindWithHash(const K& key, size_t hash) const {
    return const_iterator(this, FindIndex(key, hash));
  }

  // Insert an element, allows duplicates.
  void Insert(const T& element) {
    InsertWithHash(element, hashfn_(element));
  }

  void InsertWithHash(const T& element, size_t hash) {
    DCHECK_EQ(hash, hashfn_(element));
    if (num_elements_ >= elements_until_expand_) {
      Expand();
      DCHECK_LT(num_elements_, elements_until_expand_);
    }
    const size_t index = FirstAvailableSlot(IndexForHash(hash));
    data_[index] = element;
    ++num_elements_;
  }

  size_t Size() const {
    return num_elements_;
  }

  void swap(HashSet& other) {
    // Use argument-dependent lookup with fall-back to std::swap() for function objects.
    using std::swap;
    swap(allocfn_, other.allocfn_);
    swap(hashfn_, other.hashfn_);
    swap(emptyfn_, other.emptyfn_);
    swap(pred_, other.pred_);
    std::swap(data_, other.data_);
    std::swap(num_buckets_, other.num_buckets_);
    std::swap(num_elements_, other.num_elements_);
    std::swap(elements_until_expand_, other.elements_until_expand_);
    std::swap(min_load_factor_, other.min_load_factor_);
    std::swap(max_load_factor_, other.max_load_factor_);
    std::swap(owns_data_, other.owns_data_);
  }

  allocator_type get_allocator() const {
    return allocfn_;
  }

  void ShrinkToMaximumLoad() {
    Resize(Size() / max_load_factor_);
  }

  // Reserve enough room to insert until Size() == num_elements without requiring the hash set
  // to grow. No-op if the hash set is already large enough to do this.
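  //
  // For example (a hypothetical trace with max_load_factor_ == 0.7): Reserve(100) starts at
  // num_buckets = 100 / 0.7 = 142, and the loop below bumps it to 146, the first count whose
  // truncated product 146 * 0.7 = 102 exceeds num_elements + 1 = 101.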
  void Reserve(size_t num_elements) {
    size_t num_buckets = num_elements / max_load_factor_;
    // Deal with rounding errors. Add one for rounding.
    while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
      ++num_buckets;
    }
    if (num_buckets > NumBuckets()) {
      Resize(num_buckets);
    }
  }

  // Total distance that inserted elements were probed. Used for measuring how good hash
  // functions are.
  size_t TotalProbeDistance() const {
    size_t total = 0;
    for (size_t i = 0; i < NumBuckets(); ++i) {
      const T& element = ElementForIndex(i);
      if (!emptyfn_.IsEmpty(element)) {
        size_t ideal_location = IndexForHash(hashfn_(element));
        if (ideal_location > i) {
          total += i + NumBuckets() - ideal_location;
        } else {
          total += i - ideal_location;
        }
      }
    }
    return total;
  }

  // Calculate the current load factor and return it.
  double CalculateLoadFactor() const {
    return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
  }

  // Make sure that everything reinserts in the right spot. Returns the number of errors.
  size_t Verify() NO_THREAD_SAFETY_ANALYSIS {
    size_t errors = 0;
    for (size_t i = 0; i < num_buckets_; ++i) {
      T& element = data_[i];
      if (!emptyfn_.IsEmpty(element)) {
        T temp;
        emptyfn_.MakeEmpty(temp);
        std::swap(temp, element);
        size_t first_slot = FirstAvailableSlot(IndexForHash(hashfn_(temp)));
        if (i != first_slot) {
          LOG(ERROR) << "Element " << i << " should be in slot " << first_slot;
          ++errors;
        }
        std::swap(temp, element);
      }
    }
    return errors;
  }

  double GetMinLoadFactor() const {
    return min_load_factor_;
  }

  double GetMaxLoadFactor() const {
    return max_load_factor_;
  }

  // Change the load factor of the hash set. If the current load factor is greater than the max
  // specified, then we resize the hash table storage.
  void SetLoadFactor(double min_load_factor, double max_load_factor) {
    DCHECK_LT(min_load_factor, max_load_factor);
    DCHECK_GT(min_load_factor, 0.0);
    DCHECK_LT(max_load_factor, 1.0);
    min_load_factor_ = min_load_factor;
    max_load_factor_ = max_load_factor;
    elements_until_expand_ = NumBuckets() * max_load_factor_;
    // If the current load factor isn't in the range, then resize to the mean of the minimum and
    // maximum load factor.
    const double load_factor = CalculateLoadFactor();
    if (load_factor > max_load_factor_) {
      Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
    }
  }

  // The hash set expands when Size() reaches ElementsUntilExpand().
  size_t ElementsUntilExpand() const {
    return elements_until_expand_;
  }

  size_t NumBuckets() const {
    return num_buckets_;
  }

 private:
  T& ElementForIndex(size_t index) {
    DCHECK_LT(index, NumBuckets());
    DCHECK(data_ != nullptr);
    return data_[index];
  }

  const T& ElementForIndex(size_t index) const {
    DCHECK_LT(index, NumBuckets());
    DCHECK(data_ != nullptr);
    return data_[index];
  }

  size_t IndexForHash(size_t hash) const {
    // Protect against undefined behavior (division by zero).
    if (UNLIKELY(num_buckets_ == 0)) {
      return 0;
    }
    return hash % num_buckets_;
  }

  size_t NextIndex(size_t index) const {
    if (UNLIKELY(++index >= num_buckets_)) {
      DCHECK_EQ(index, NumBuckets());
      return 0;
    }
    return index;
  }

  // Find the hash table slot for an element, or return NumBuckets() if not found.
  // This value for not found is important so that iterator(this, FindIndex(...)) == end().
  template <typename K>
  size_t FindIndex(const K& element, size_t hash) const {
    // Guard against failing to get an element for a non-existing index.
    if (UNLIKELY(NumBuckets() == 0)) {
      return 0;
    }
    DCHECK_EQ(hashfn_(element), hash);
    size_t index = IndexForHash(hash);
    while (true) {
      const T& slot = ElementForIndex(index);
      if (emptyfn_.IsEmpty(slot)) {
        return NumBuckets();
      }
      if (pred_(slot, element)) {
        return index;
      }
      index = NextIndex(index);
    }
  }

  bool IsFreeSlot(size_t index) const {
    return emptyfn_.IsEmpty(ElementForIndex(index));
  }

  // Allocate a number of buckets.
  void AllocateStorage(size_t num_buckets) {
    num_buckets_ = num_buckets;
    data_ = allocfn_.allocate(num_buckets_);
    owns_data_ = true;
    for (size_t i = 0; i < num_buckets_; ++i) {
      allocfn_.construct(allocfn_.address(data_[i]));
      emptyfn_.MakeEmpty(data_[i]);
    }
  }

  void DeallocateStorage() {
    if (owns_data_) {
      for (size_t i = 0; i < NumBuckets(); ++i) {
        allocfn_.destroy(allocfn_.address(data_[i]));
      }
      if (data_ != nullptr) {
        allocfn_.deallocate(data_, NumBuckets());
      }
      owns_data_ = false;
    }
    data_ = nullptr;
    num_buckets_ = 0;
  }

  // Expand the set based on the load factors.
  void Expand() {
    size_t min_index = static_cast<size_t>(Size() / min_load_factor_);
    // Resize based on the minimum load factor.
    Resize(min_index);
  }

  // Expand / shrink the table to the new specified size.
  void Resize(size_t new_size) {
    if (new_size < kMinBuckets) {
      new_size = kMinBuckets;
    }
    DCHECK_GE(new_size, Size());
    T* const old_data = data_;
    size_t old_num_buckets = num_buckets_;
    // Reinsert all of the old elements.
    const bool owned_data = owns_data_;
    AllocateStorage(new_size);
    for (size_t i = 0; i < old_num_buckets; ++i) {
      T& element = old_data[i];
      if (!emptyfn_.IsEmpty(element)) {
        data_[FirstAvailableSlot(IndexForHash(hashfn_(element)))] = std::move(element);
      }
      if (owned_data) {
        allocfn_.destroy(allocfn_.address(element));
      }
    }
    if (owned_data) {
      allocfn_.deallocate(old_data, old_num_buckets);
    }

    // When we hit elements_until_expand_, we are at the max load factor and must expand again.
    elements_until_expand_ = NumBuckets() * max_load_factor_;
  }

  ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
    DCHECK_LT(index, NumBuckets());  // Don't try to get a slot out of range.
    size_t non_empty_count = 0;
    while (!emptyfn_.IsEmpty(data_[index])) {
      index = NextIndex(index);
      non_empty_count++;
      DCHECK_LE(non_empty_count, NumBuckets());  // Don't loop forever.
    }
    return index;
  }

  // Return new offset.
  template <typename Elem>
  static size_t WriteToBytes(uint8_t* ptr, size_t offset, Elem n) {
    DCHECK_ALIGNED(ptr + offset, sizeof(n));
    if (ptr != nullptr) {
      *reinterpret_cast<Elem*>(ptr + offset) = n;
    }
    return offset + sizeof(n);
  }

  template <typename Elem>
  static size_t ReadFromBytes(const uint8_t* ptr, size_t offset, Elem* out) {
    DCHECK(ptr != nullptr);
    DCHECK_ALIGNED(ptr + offset, sizeof(*out));
    *out = *reinterpret_cast<const Elem*>(ptr + offset);
    return offset + sizeof(*out);
  }

  Alloc allocfn_;  // Allocator function.
  HashFn hashfn_;  // Hashing function.
  EmptyFn emptyfn_;  // MakeEmpty/IsEmpty function.
  Pred pred_;  // Equals function.
  size_t num_elements_;  // Number of inserted elements.
  size_t num_buckets_;  // Number of hash table buckets.
  size_t elements_until_expand_;  // Maximum number of elements until we expand the table.
  bool owns_data_;  // If we own data_ and are responsible for freeing it.
  T* data_;  // Backing storage.
  double min_load_factor_;  // Load factor the table is sized for right after expanding.
  double max_load_factor_;  // Load factor at which the table expands.

  ART_FRIEND_TEST(InternTableTest, CrossHash);
};

template <class T, class EmptyFn, class HashFn, class Pred, class Alloc>
void swap(HashSet<T, EmptyFn, HashFn, Pred, Alloc>& lhs,
          HashSet<T, EmptyFn, HashFn, Pred, Alloc>& rhs) {
  lhs.swap(rhs);
}

}  // namespace art

#endif  // ART_RUNTIME_BASE_HASH_SET_H_