1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 //      https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 //
15 // An open-addressing
16 // hashtable with quadratic probing.
17 //
18 // This is a low-level hashtable on top of which different interfaces can be
19 // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
20 //
21 // The table interface is similar to that of std::unordered_set. A notable
22 // difference is that most member functions support heterogeneous keys when
23 // BOTH the hash and eq functors are marked as transparent, i.e., when they
24 // provide a typedef called `is_transparent`.
25 //
26 // When heterogeneous lookup is enabled, functions that take key_type act as if
27 // they have an overload set like:
28 //
29 //   iterator find(const key_type& key);
30 //   template <class K>
31 //   iterator find(const K& key);
32 //
33 //   size_type erase(const key_type& key);
34 //   template <class K>
35 //   size_type erase(const K& key);
36 //
37 //   std::pair<iterator, iterator> equal_range(const key_type& key);
38 //   template <class K>
39 //   std::pair<iterator, iterator> equal_range(const K& key);
40 //
41 // When heterogeneous lookup is disabled, only the explicit `key_type` overloads
42 // exist.
43 //
44 // find() also supports passing the hash explicitly:
45 //
46 //   iterator find(const key_type& key, size_t hash);
47 //   template <class U>
48 //   iterator find(const U& key, size_t hash);
49 //
50 // In addition, the pointer-to-element and iterator stability guarantees are
51 // weaker: all iterators and pointers are invalidated after a new element is
52 // inserted.
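//
// As an illustration (not part of this file), a minimal sketch of a
// transparent hash/eq pair that enables heterogeneous lookup for a table
// keyed on std::string might look like:
//
//   struct StringHash {
//     using is_transparent = void;
//     size_t operator()(absl::string_view v) const {
//       return absl::Hash<absl::string_view>{}(v);
//     }
//   };
//   struct StringEq {
//     using is_transparent = void;
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return a == b;
//     }
//   };
//
// With such functors, `find(absl::string_view("key"))` performs a lookup
// without constructing a temporary std::string.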
53 //
54 // IMPLEMENTATION DETAILS
55 //
56 // # Table Layout
57 //
58 // A raw_hash_set's backing array consists of control bytes followed by slots
59 // that may or may not contain objects.
60 //
61 // The layout of the backing array, for `capacity` slots, is thus, as a
62 // pseudo-struct:
63 //
64 //   struct BackingArray {
65 //     // Control bytes for the "real" slots.
66 //     ctrl_t ctrl[capacity];
67 //     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
68 //     // stop and serves no other purpose.
69 //     ctrl_t sentinel;
70 //     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
71 //     // that if a probe sequence picks a value near the end of `ctrl`,
72 //     // `Group` will have valid control bytes to look at.
73 //     ctrl_t clones[kWidth - 1];
74 //     // The actual slot data.
75 //     slot_type slots[capacity];
76 //   };
77 //
78 // The length of this array is computed by `AllocSize()` below.
79 //
80 // Control bytes (`ctrl_t`) are bytes (collected into groups of a
81 // platform-specific size) that define the state of the corresponding slot in
82 // the slot array. Group manipulation is tightly optimized to be as efficient
83 // as possible: SSE and friends on x86, clever bit operations on other arches.
84 //
85 //      Group 1         Group 2        Group 3
86 // +---------------+---------------+---------------+
87 // | | | | | | | | | | | | | | | | | | | | | | | | |
88 // +---------------+---------------+---------------+
89 //
90 // Each control byte is either a special value (marking an empty slot, a
91 // deleted slot, sometimes called a *tombstone*, or the end-of-table sentinel
92 // used by iterators) or, for an occupied slot, seven bits (H2) from the hash
93 // of the value in the corresponding slot.
94 //
95 // Storing control bytes in a separate array also has beneficial cache effects,
96 // since more logical slots will fit into a cache line.
97 //
98 // # Hashing
99 //
100 // We compute two separate hashes, `H1` and `H2`, from the hash of an object.
101 // `H1(hash(x))` is an index into `slots`, and essentially the starting point
102 // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
103 // objects that cannot possibly be the one we are looking for.
104 //
105 // # Table Operations
106 //
107 // The key operations are `insert`, `find`, and `erase`.
108 //
109 // Since `insert` and `erase` are implemented in terms of `find`, we describe
110 // `find` first. To `find` a value `x`, we compute `hash(x)`. From
111 // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
112 // group of slots in some interesting order.
113 //
114 // We now walk through these indices. At each index, we select the entire group
115 // starting with that index and extract potential candidates: occupied slots
116 // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
117 // group, we stop: the element is not in the table. Each candidate slot `y` is
118 // compared with `x`; if `x == y`, we return `&y`; otherwise we continue to the
119 // next probe index. Tombstones effectively behave like full slots that never
120 // match the value we're looking for.
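//
// A simplified sketch of this search loop (eliding cloned-byte handling,
// prefetching, hardening checks, and SIMD details; `probe`, `Group`, `H2`,
// and the `slots` array are described elsewhere in this file):
//
//   auto seq = probe(common, hash(x));
//   while (true) {
//     Group g{ctrl + seq.offset()};
//     for (uint32_t i : g.Match(H2(hash(x)))) {
//       if (x == slots[seq.offset(i)]) return &slots[seq.offset(i)];
//     }
//     if (g.MaskEmpty()) return nullptr;  // definitely not present
//     seq.next();
//   }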
121 //
122 // The `H2` bits ensure that when we compare a slot to an object with `==`, we are
123 // likely to have actually found the object.  That is, the chance is low that
124 // `==` is called and returns `false`.  Thus, when we search for an object, we
125 // are unlikely to call `==` many times.  This likelihood can be analyzed as
126 // follows (assuming that H2 is a random enough hash function).
127 //
128 // Let's assume that there are `k` "wrong" objects that must be examined in a
129 // probe sequence.  For example, when doing a `find` on an object that is in the
130 // table, `k` is the number of objects between the start of the probe sequence
131 // and the final found object (not including the final found object).  The
132 // expected number of objects with an H2 match is then `k/128`.  Measurements
133 // and analysis indicate that even at high load factors, `k` is less than 32,
134 // meaning that the number of "false positive" comparisons we must perform is
135 // less than 1/8 per `find`.
136 //
137 // `insert` is implemented in terms of `unchecked_insert`, which inserts a
138 // value presumed to not be in the table (violating this requirement will cause
139 // the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
140 // it, we construct a `probe_seq` once again, and use it to find the first
141 // group with an unoccupied (empty *or* deleted) slot. We place `x` into the
142 // first such slot in the group and mark it as full with `x`'s H2.
143 //
144 // To `insert`, we compose `unchecked_insert` with `find`. We compute `hash(x)` and
145 // perform a `find` to see if it's already present; if it is, we're done. If
146 // it's not, we may decide the table is getting overcrowded (i.e. the load
147 // factor is greater than 7/8 for big tables; `is_small()` tables use a max load
148 // factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
149 // each element of the table into the new array (we know that no insertion here
150 // will insert an already-present value), and discard the old backing array. At
151 // this point, we may `unchecked_insert` the value `x`.
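//
// A simplified sketch of that flow (ignoring sampling, hardening, and the
// small-table special case):
//
//   if (find(x) != end()) return;           // already present
//   if (growth_left() == 0) resize(...);    // grow and re-insert all elements
//   target = find_first_non_full(common, hash(x));
//   SetCtrl(common, target.offset, H2(hash(x)), slot_size);
//   construct `x` in slots[target.offset];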
152 //
153 // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
154 // presents a viable, initialized slot pointee to the caller.
155 //
156 // `erase` is implemented in terms of `erase_at`, which takes an index to a
157 // slot. Given an offset, we simply create a tombstone and destroy its contents.
158 // If we can prove that the slot would not appear in a probe sequence, we can
159 // mark the slot as empty, instead. We can prove this by observing that if a
160 // group has any empty slots, it has never been full (assuming we never create
161 // an empty slot in a group with no empties, which this heuristic guarantees we
162 // never do) and `find` would stop at this group anyway (since it does not probe
163 // beyond groups with empties).
164 //
165 // `erase` is `erase_at` composed with `find`: if we
166 // have a value `x`, we can perform a `find`, and then `erase_at` the resulting
167 // slot.
168 //
169 // To iterate, we simply traverse the array, skipping empty and deleted slots
170 // and stopping when we hit a `kSentinel`.
171 
172 #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
173 #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
174 
175 #include <algorithm>
176 #include <cmath>
177 #include <cstdint>
178 #include <cstring>
179 #include <iterator>
180 #include <limits>
181 #include <memory>
182 #include <tuple>
183 #include <type_traits>
184 #include <utility>
185 
186 #include "absl/base/config.h"
187 #include "absl/base/internal/endian.h"
188 #include "absl/base/internal/prefetch.h"
189 #include "absl/base/internal/raw_logging.h"
190 #include "absl/base/optimization.h"
191 #include "absl/base/port.h"
192 #include "absl/container/internal/common.h"
193 #include "absl/container/internal/compressed_tuple.h"
194 #include "absl/container/internal/container_memory.h"
195 #include "absl/container/internal/hash_policy_traits.h"
196 #include "absl/container/internal/hashtable_debug_hooks.h"
197 #include "absl/container/internal/hashtablez_sampler.h"
198 #include "absl/memory/memory.h"
199 #include "absl/meta/type_traits.h"
200 #include "absl/numeric/bits.h"
201 #include "absl/utility/utility.h"
202 
203 #ifdef ABSL_INTERNAL_HAVE_SSE2
204 #include <emmintrin.h>
205 #endif
206 
207 #ifdef ABSL_INTERNAL_HAVE_SSSE3
208 #include <tmmintrin.h>
209 #endif
210 
211 #ifdef _MSC_VER
212 #include <intrin.h>
213 #endif
214 
215 #ifdef ABSL_INTERNAL_HAVE_ARM_NEON
216 #include <arm_neon.h>
217 #endif
218 
219 namespace absl {
220 ABSL_NAMESPACE_BEGIN
221 namespace container_internal {
222 
223 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
224 #error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
225 #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
226     defined(ABSL_HAVE_MEMORY_SANITIZER)
227 // When compiled in sanitizer mode, we add generation integers to the backing
228 // array and iterators. In the backing array, we store the generation between
229 // the control bytes and the slots. When iterators are dereferenced, we assert
230 // that the container has not been mutated in a way that could cause iterator
231 // invalidation since the iterator was initialized.
232 #define ABSL_SWISSTABLE_ENABLE_GENERATIONS
233 #endif
234 
235 // We use uint8_t so we don't need to worry about padding.
236 using GenerationType = uint8_t;
237 
238 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
239 constexpr bool SwisstableGenerationsEnabled() { return true; }
240 constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
241 #else
242 constexpr bool SwisstableGenerationsEnabled() { return false; }
243 constexpr size_t NumGenerationBytes() { return 0; }
244 #endif
245 
246 template <typename AllocType>
247 void SwapAlloc(AllocType& lhs, AllocType& rhs,
248                std::true_type /* propagate_on_container_swap */) {
249   using std::swap;
250   swap(lhs, rhs);
251 }
252 template <typename AllocType>
253 void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
254                std::false_type /* propagate_on_container_swap */) {}
255 
256 // The state for a probe sequence.
257 //
258 // Currently, the sequence is a triangular progression of the form
259 //
260 //   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
261 //
262 // The use of `Width` ensures that each probe step does not overlap groups;
263 // the sequence effectively outputs the addresses of *groups* (although not
264 // necessarily aligned to any boundary). The `Group` machinery allows us
265 // to check an entire group with minimal branching.
266 //
267 // Wrapping around at `mask + 1` is important, but not for the obvious reason.
268 // As described above, the first few entries of the control byte array
269 // are mirrored at the end of the array, which `Group` will find and use
270 // for selecting candidates. However, when those candidates' slots are
271 // actually inspected, there are no corresponding slots for the cloned bytes,
272 // so we need to make sure we've treated those offsets as "wrapping around".
273 //
274 // It turns out that this probe sequence visits every group exactly once if the
275 // number of groups is a power of two, since (i^2+i)/2 is a bijection in
276 // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
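//
// For example, with `Width == 16`, `mask == 63` (capacity 63, i.e. four
// groups of 16) and `hash == 3`, the sequence of offsets is 3, 19, 51, 35:
// one starting point in each 16-slot quarter of the table before the
// sequence wraps.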
277 template <size_t Width>
278 class probe_seq {
279  public:
280   // Creates a new probe sequence using `hash` as the initial value of the
281   // sequence and `mask` (usually the capacity of the table) as the mask to
282   // apply to each value in the progression.
283   probe_seq(size_t hash, size_t mask) {
284     assert(((mask + 1) & mask) == 0 && "not a mask");
285     mask_ = mask;
286     offset_ = hash & mask_;
287   }
288 
289   // The offset within the table, i.e., the value `p(i)` above.
290   size_t offset() const { return offset_; }
291   size_t offset(size_t i) const { return (offset_ + i) & mask_; }
292 
293   void next() {
294     index_ += Width;
295     offset_ += index_;
296     offset_ &= mask_;
297   }
298   // 0-based probe index, a multiple of `Width`.
299   size_t index() const { return index_; }
300 
301  private:
302   size_t mask_;
303   size_t offset_;
304   size_t index_ = 0;
305 };
306 
307 template <class ContainerKey, class Hash, class Eq>
308 struct RequireUsableKey {
309   template <class PassedKey, class... Args>
310   std::pair<
311       decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
312       decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
313                                          std::declval<const PassedKey&>()))>*
314   operator()(const PassedKey&, const Args&...) const;
315 };
316 
317 template <class E, class Policy, class Hash, class Eq, class... Ts>
318 struct IsDecomposable : std::false_type {};
319 
320 template <class Policy, class Hash, class Eq, class... Ts>
321 struct IsDecomposable<
322     absl::void_t<decltype(Policy::apply(
323         RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
324         std::declval<Ts>()...))>,
325     Policy, Hash, Eq, Ts...> : std::true_type {};
326 
327 // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
328 template <class T>
329 constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
330   using std::swap;
331   return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
332 }
333 template <class T>
334 constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
335   return false;
336 }
337 
338 template <typename T>
339 uint32_t TrailingZeros(T x) {
340   ABSL_ASSUME(x != 0);
341   return static_cast<uint32_t>(countr_zero(x));
342 }
343 
344 // An abstract bitmask, such as that emitted by a SIMD instruction.
345 //
346 // Specifically, this type implements a simple bitset whose representation is
347 // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
348 // of abstract bits in the bitset, while `Shift` is the log-base-two of the
349 // width of an abstract bit in the representation.
350 // This mask provides operations for any number of real bits set in an abstract
351 // bit. To add iteration on top of that, the implementation must guarantee that
352 // no more than one real bit is set in each abstract bit.
353 template <class T, int SignificantBits, int Shift = 0>
354 class NonIterableBitMask {
355  public:
356   explicit NonIterableBitMask(T mask) : mask_(mask) {}
357 
358   explicit operator bool() const { return this->mask_ != 0; }
359 
360   // Returns the index of the lowest *abstract* bit set in `self`.
361   uint32_t LowestBitSet() const {
362     return container_internal::TrailingZeros(mask_) >> Shift;
363   }
364 
365   // Returns the index of the highest *abstract* bit set in `self`.
366   uint32_t HighestBitSet() const {
367     return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
368   }
369 
370   // Return the number of trailing zero *abstract* bits.
371   uint32_t TrailingZeros() const {
372     return container_internal::TrailingZeros(mask_) >> Shift;
373   }
374 
375   // Return the number of leading zero *abstract* bits.
376   uint32_t LeadingZeros() const {
377     constexpr int total_significant_bits = SignificantBits << Shift;
378     constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
379     return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
380   }
381 
382   T mask_;
383 };
384 
385 // A bitmask that can be iterated over.
386 //
387 // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
388 // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
389 // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
390 // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
391 //
392 // For example:
393 //   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
394 //   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
395 template <class T, int SignificantBits, int Shift = 0>
396 class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
397   using Base = NonIterableBitMask<T, SignificantBits, Shift>;
398   static_assert(std::is_unsigned<T>::value, "");
399   static_assert(Shift == 0 || Shift == 3, "");
400 
401  public:
402   explicit BitMask(T mask) : Base(mask) {}
403   // BitMask is an iterator over the indices of its abstract bits.
404   using value_type = int;
405   using iterator = BitMask;
406   using const_iterator = BitMask;
407 
408   BitMask& operator++() {
409     this->mask_ &= (this->mask_ - 1);
410     return *this;
411   }
412 
413   uint32_t operator*() const { return Base::LowestBitSet(); }
414 
415   BitMask begin() const { return *this; }
416   BitMask end() const { return BitMask(0); }
417 
418  private:
419   friend bool operator==(const BitMask& a, const BitMask& b) {
420     return a.mask_ == b.mask_;
421   }
422   friend bool operator!=(const BitMask& a, const BitMask& b) {
423     return a.mask_ != b.mask_;
424   }
425 };
426 
427 using h2_t = uint8_t;
428 
429 // The values here are selected for maximum performance. See the static asserts
430 // below for details.
431 
432 // A `ctrl_t` is a single control byte, which can have one of four
433 // states: empty, deleted, full (which has an associated seven-bit h2_t value)
434 // and the sentinel. They have the following bit patterns:
435 //
436 //      empty: 1 0 0 0 0 0 0 0
437 //    deleted: 1 1 1 1 1 1 1 0
438 //       full: 0 h h h h h h h  // h represents the hash bits.
439 //   sentinel: 1 1 1 1 1 1 1 1
440 //
441 // These values are specifically tuned for SSE-flavored SIMD.
442 // The static_asserts below detail the source of these choices.
443 //
444 // We use an enum class so that when strict aliasing is enabled, the compiler
445 // knows ctrl_t doesn't alias other types.
446 enum class ctrl_t : int8_t {
447   kEmpty = -128,   // 0b10000000
448   kDeleted = -2,   // 0b11111110
449   kSentinel = -1,  // 0b11111111
450 };
451 static_assert(
452     (static_cast<int8_t>(ctrl_t::kEmpty) &
453      static_cast<int8_t>(ctrl_t::kDeleted) &
454      static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
455     "Special markers need to have the MSB to make checking for them efficient");
456 static_assert(
457     ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
458     "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
459     "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
460 static_assert(
461     ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
462     "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
463     "registers (pcmpeqd xmm, xmm)");
464 static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
465               "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
466               "existence efficient (psignb xmm, xmm)");
467 static_assert(
468     (~static_cast<int8_t>(ctrl_t::kEmpty) &
469      ~static_cast<int8_t>(ctrl_t::kDeleted) &
470      static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
471     "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
472     "shared by ctrl_t::kSentinel to make the scalar test for "
473     "MaskEmptyOrDeleted() efficient");
474 static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
475               "ctrl_t::kDeleted must be -2 to make the implementation of "
476               "ConvertSpecialToEmptyAndFullToDeleted efficient");
477 
478 ABSL_DLL extern const ctrl_t kEmptyGroup[17];
479 
480 // Returns a pointer to a control byte group that can be used by empty tables.
481 inline ctrl_t* EmptyGroup() {
482   // Const must be cast away here; no uses of this function will actually write
483   // to it, because it is only used for empty tables.
484   return const_cast<ctrl_t*>(kEmptyGroup);
485 }
486 
487 // Returns a pointer to the generation byte at the end of the empty group, if it
488 // exists.
489 inline GenerationType* EmptyGeneration() {
490   return reinterpret_cast<GenerationType*>(EmptyGroup() + 16);
491 }
492 
493 // Mixes a randomly generated per-process seed with `hash` and `ctrl` to
494 // randomize insertion order within groups.
495 bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
496 
497 // Returns a per-table hash salt, which changes on resize. This gets mixed into
498 // H1 to randomize iteration order per-table.
499 //
500 // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
501 // non-determinism of iteration order in most cases.
502 inline size_t PerTableSalt(const ctrl_t* ctrl) {
503   // The low bits of the pointer have little or no entropy because of
504   // alignment. We shift the pointer to try to use higher entropy bits. A
505   // good number seems to be 12 bits, because that aligns with page size.
506   return reinterpret_cast<uintptr_t>(ctrl) >> 12;
507 }
508 // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
509 inline size_t H1(size_t hash, const ctrl_t* ctrl) {
510   return (hash >> 7) ^ PerTableSalt(ctrl);
511 }
512 
513 // Extracts the H2 portion of a hash: the 7 bits not used for H1.
514 //
515 // These are used as an occupied control byte.
516 inline h2_t H2(size_t hash) { return hash & 0x7F; }
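
// For example, for `hash == 0x3fa5` (and ignoring the per-table salt),
// `H2(hash) == 0x25` (the low 7 bits) and `H1(hash) == 0x7f` (the remaining
// bits, i.e. `hash >> 7`).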
517 
518 // Helpers for checking the state of a control byte.
519 inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
520 inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
521 inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
522 inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
523 
524 #ifdef ABSL_INTERNAL_HAVE_SSE2
525 // Quick reference guide for intrinsics used below:
526 //
527 // * __m128i: An XMM (128-bit) word.
528 //
529 // * _mm_setzero_si128: Returns a zero vector.
530 // * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
531 //
532 // * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
533 // * _mm_and_si128:    Ands two i128s together.
534 // * _mm_or_si128:     Ors two i128s together.
535 // * _mm_andnot_si128: And-nots two i128s together.
536 //
537 // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
538 //                   filling each lane with 0x00 or 0xff.
539 // * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
540 //
541 // * _mm_loadu_si128:  Performs an unaligned load of an i128.
542 // * _mm_storeu_si128: Performs an unaligned store of an i128.
543 //
544 // * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
545 //                      argument if the corresponding lane of the second
546 //                      argument is positive, negative, or zero, respectively.
547 // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
548 //                      bitmask consisting of those bits.
549 // * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
550 //                      four bits of each i8 lane in the second argument as
551 //                      indices.
552 
553 // https://github.com/abseil/abseil-cpp/issues/209
554 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
555 // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
556 // Work around this by using the portable implementation of Group
557 // when using -funsigned-char under GCC.
558 inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
559 #if defined(__GNUC__) && !defined(__clang__)
560   if (std::is_unsigned<char>::value) {
561     const __m128i mask = _mm_set1_epi8(0x80);
562     const __m128i diff = _mm_subs_epi8(b, a);
563     return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
564   }
565 #endif
566   return _mm_cmpgt_epi8(a, b);
567 }
568 
569 struct GroupSse2Impl {
570   static constexpr size_t kWidth = 16;  // the number of slots per group
571 
572   explicit GroupSse2Impl(const ctrl_t* pos) {
573     ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
574   }
575 
576   // Returns a bitmask representing the positions of slots that match hash.
577   BitMask<uint32_t, kWidth> Match(h2_t hash) const {
578     auto match = _mm_set1_epi8(static_cast<char>(hash));
579     return BitMask<uint32_t, kWidth>(
580         static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
581   }
582 
583   // Returns a bitmask representing the positions of empty slots.
584   NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
585 #ifdef ABSL_INTERNAL_HAVE_SSSE3
586     // This only works because ctrl_t::kEmpty is -128.
587     return NonIterableBitMask<uint32_t, kWidth>(
588         static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
589 #else
590     auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
591     return NonIterableBitMask<uint32_t, kWidth>(
592         static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
593 #endif
594   }
595 
596   // Returns a bitmask representing the positions of empty or deleted slots.
597   NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
598     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
599     return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
600         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
601   }
602 
603   // Returns the number of trailing empty or deleted elements in the group.
604   uint32_t CountLeadingEmptyOrDeleted() const {
605     auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
606     return TrailingZeros(static_cast<uint32_t>(
607         _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
608   }
609 
610   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
611     auto msbs = _mm_set1_epi8(static_cast<char>(-128));
612     auto x126 = _mm_set1_epi8(126);
613 #ifdef ABSL_INTERNAL_HAVE_SSSE3
614     auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
615 #else
616     auto zero = _mm_setzero_si128();
617     auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
618     auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
619 #endif
620     _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
621   }
622 
623   __m128i ctrl;
624 };
625 #endif  // ABSL_INTERNAL_HAVE_SSE2
626 
627 #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
628 struct GroupAArch64Impl {
629   static constexpr size_t kWidth = 8;
630 
631   explicit GroupAArch64Impl(const ctrl_t* pos) {
632     ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
633   }
634 
635   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
636     uint8x8_t dup = vdup_n_u8(hash);
637     auto mask = vceq_u8(ctrl, dup);
638     constexpr uint64_t msbs = 0x8080808080808080ULL;
639     return BitMask<uint64_t, kWidth, 3>(
640         vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
641   }
642 
643   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
644     uint64_t mask =
645         vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
646                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
647                           vreinterpret_s8_u8(ctrl))),
648                       0);
649     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
650   }
651 
652   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
653     uint64_t mask =
654         vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
655                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
656                           vreinterpret_s8_u8(ctrl))),
657                       0);
658     return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
659   }
660 
661   uint32_t CountLeadingEmptyOrDeleted() const {
662     uint64_t mask =
663         vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
664                           vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
665                           vreinterpret_s8_u8(ctrl))),
666                       0);
667     // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
668     // produced bitfield. We then count number of trailing zeros.
669     // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
670     // so we should be fine.
671     return static_cast<uint32_t>(countr_zero(mask)) >> 3;
672   }
673 
674   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
675     uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
676     constexpr uint64_t msbs = 0x8080808080808080ULL;
677     constexpr uint64_t lsbs = 0x0101010101010101ULL;
678     auto x = mask & msbs;
679     auto res = (~x + (x >> 7)) & ~lsbs;
680     little_endian::Store64(dst, res);
681   }
682 
683   uint8x8_t ctrl;
684 };
685 #endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN
686 
687 struct GroupPortableImpl {
688   static constexpr size_t kWidth = 8;
689 
690   explicit GroupPortableImpl(const ctrl_t* pos)
691       : ctrl(little_endian::Load64(pos)) {}
692 
693   BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
694     // For the technique, see:
695     // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
696     // (Determine if a word has a byte equal to n).
697     //
698     // Caveat: there are false positives but:
699     // - they only occur if there is a real match
700     // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
701     // - they will be handled gracefully by subsequent checks in code
702     //
703     // Example:
704     //   v = 0x1716151413121110
705     //   hash = 0x12
706     //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
707     constexpr uint64_t msbs = 0x8080808080808080ULL;
708     constexpr uint64_t lsbs = 0x0101010101010101ULL;
709     auto x = ctrl ^ (lsbs * hash);
710     return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
711   }
712 
713   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
714     constexpr uint64_t msbs = 0x8080808080808080ULL;
715     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
716                                                    msbs);
717   }
718 
719   NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
720     constexpr uint64_t msbs = 0x8080808080808080ULL;
721     return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
722                                                    msbs);
723   }
724 
725   uint32_t CountLeadingEmptyOrDeleted() const {
726     // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and
727     // kDeleted. We lower all other bits and count number of trailing zeros.
728     constexpr uint64_t bits = 0x0101010101010101ULL;
729     return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
730                                  3);
731   }
732 
733   void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
734     constexpr uint64_t msbs = 0x8080808080808080ULL;
735     constexpr uint64_t lsbs = 0x0101010101010101ULL;
736     auto x = ctrl & msbs;
737     auto res = (~x + (x >> 7)) & ~lsbs;
738     little_endian::Store64(dst, res);
739   }
740 
741   uint64_t ctrl;
742 };
743 
744 #ifdef ABSL_INTERNAL_HAVE_SSE2
745 using Group = GroupSse2Impl;
746 #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
747 using Group = GroupAArch64Impl;
748 #else
749 using Group = GroupPortableImpl;
750 #endif
751 
752 class CommonFieldsGenerationInfoEnabled {
753   // A sentinel value for reserved_growth_ indicating that we just ran out of
754   // reserved growth on the last insertion. When reserve is called and then
755   // insertions take place, reserved_growth_'s state machine is N, ..., 1,
756   // kReservedGrowthJustRanOut, 0.
757   static constexpr size_t kReservedGrowthJustRanOut =
758       (std::numeric_limits<size_t>::max)();
759 
760  public:
761   CommonFieldsGenerationInfoEnabled() = default;
762   CommonFieldsGenerationInfoEnabled(CommonFieldsGenerationInfoEnabled&& that)
763       : reserved_growth_(that.reserved_growth_), generation_(that.generation_) {
764     that.reserved_growth_ = 0;
765     that.generation_ = EmptyGeneration();
766   }
767   CommonFieldsGenerationInfoEnabled& operator=(
768       CommonFieldsGenerationInfoEnabled&&) = default;
769 
770   // Whether we should rehash on insert in order to detect bugs of using invalid
771   // references. We rehash on the first insertion after reserved_growth_ reaches
772   // 0 after a call to reserve.
773   // TODO(b/254649633): we could potentially do a rehash with low probability
774   // whenever reserved_growth_ is zero.
775   bool should_rehash_for_bug_detection_on_insert() const {
776     return reserved_growth_ == kReservedGrowthJustRanOut;
777   }
778   void maybe_increment_generation_on_insert() {
779     if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
780 
781     if (reserved_growth_ > 0) {
782       if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
783     } else {
784       ++*generation_;
785     }
786   }
787   void reset_reserved_growth(size_t reservation, size_t size) {
788     reserved_growth_ = reservation - size;
789   }
790   size_t reserved_growth() const { return reserved_growth_; }
791   void set_reserved_growth(size_t r) { reserved_growth_ = r; }
792   GenerationType generation() const { return *generation_; }
793   void set_generation(GenerationType g) { *generation_ = g; }
794   GenerationType* generation_ptr() const { return generation_; }
795   void set_generation_ptr(GenerationType* g) { generation_ = g; }
796 
797  private:
798   // The number of insertions remaining that are guaranteed to not rehash due to
799   // a prior call to reserve. Note: we store reserved growth rather than
800   // reservation size because calls to erase() decrease size_ but don't decrease
801   // reserved growth.
802   size_t reserved_growth_ = 0;
803   // Pointer to the generation counter, which is used to validate iterators and
804   // is stored in the backing array between the control bytes and the slots.
805   // Note that we can't store the generation inside the container itself and
806   // keep a pointer to the container in the iterators because iterators must
807   // remain valid when the container is moved.
808   // Note: we could derive this pointer from the control pointer, but it makes
809   // the code more complicated, and there's a benefit in having the sizes of
810   // raw_hash_set in sanitizer mode and non-sanitizer mode a bit more different,
811   // which is that tests are less likely to rely on the size remaining the same.
812   GenerationType* generation_ = EmptyGeneration();
813 };
814 
815 class CommonFieldsGenerationInfoDisabled {
816  public:
817   CommonFieldsGenerationInfoDisabled() = default;
818   CommonFieldsGenerationInfoDisabled(CommonFieldsGenerationInfoDisabled&&) =
819       default;
820   CommonFieldsGenerationInfoDisabled& operator=(
821       CommonFieldsGenerationInfoDisabled&&) = default;
822 
823   bool should_rehash_for_bug_detection_on_insert() const { return false; }
824   void maybe_increment_generation_on_insert() {}
825   void reset_reserved_growth(size_t, size_t) {}
826   size_t reserved_growth() const { return 0; }
827   void set_reserved_growth(size_t) {}
828   GenerationType generation() const { return 0; }
829   void set_generation(GenerationType) {}
830   GenerationType* generation_ptr() const { return nullptr; }
831   void set_generation_ptr(GenerationType*) {}
832 };
833 
834 class HashSetIteratorGenerationInfoEnabled {
835  public:
836   HashSetIteratorGenerationInfoEnabled() = default;
837   explicit HashSetIteratorGenerationInfoEnabled(
838       const GenerationType* generation_ptr)
839       : generation_ptr_(generation_ptr), generation_(*generation_ptr) {}
840 
841   GenerationType generation() const { return generation_; }
842   void reset_generation() { generation_ = *generation_ptr_; }
843   const GenerationType* generation_ptr() const { return generation_ptr_; }
844   void set_generation_ptr(const GenerationType* ptr) { generation_ptr_ = ptr; }
845 
846  private:
847   const GenerationType* generation_ptr_ = EmptyGeneration();
848   GenerationType generation_ = *generation_ptr_;
849 };
850 
851 class HashSetIteratorGenerationInfoDisabled {
852  public:
853   HashSetIteratorGenerationInfoDisabled() = default;
854   explicit HashSetIteratorGenerationInfoDisabled(const GenerationType*) {}
855 
856   GenerationType generation() const { return 0; }
857   void reset_generation() {}
858   const GenerationType* generation_ptr() const { return nullptr; }
859   void set_generation_ptr(const GenerationType*) {}
860 };
861 
862 #ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
863 using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
864 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
865 #else
866 using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
867 using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
868 #endif
869 
870 // CommonFields holds the fields in raw_hash_set that do not depend
871 // on template parameters. This allows us to conveniently pass all
872 // of this state to helper functions as a single argument.
873 class CommonFields : public CommonFieldsGenerationInfo {
874  public:
875   CommonFields() = default;
876 
877   // Not copyable
878   CommonFields(const CommonFields&) = delete;
879   CommonFields& operator=(const CommonFields&) = delete;
880 
881   // Movable
882   CommonFields(CommonFields&& that)
883       : CommonFieldsGenerationInfo(
884             std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
885         // Explicitly copying fields into "this" and then resetting "that"
886         // fields generates less code than calling absl::exchange per field.
887         control_(that.control_),
888         slots_(that.slots_),
889         size_(that.size_),
890         capacity_(that.capacity_),
891         compressed_tuple_(that.growth_left(), std::move(that.infoz())) {
892     that.control_ = EmptyGroup();
893     that.slots_ = nullptr;
894     that.size_ = 0;
895     that.capacity_ = 0;
896     that.growth_left() = 0;
897   }
898   CommonFields& operator=(CommonFields&&) = default;
899 
900   // The number of slots we can still fill without needing to rehash.
901   size_t& growth_left() { return compressed_tuple_.template get<0>(); }
902 
903   HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
904   const HashtablezInfoHandle& infoz() const {
905     return compressed_tuple_.template get<1>();
906   }
907 
908   void reset_reserved_growth(size_t reservation) {
909     CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size_);
910   }
911 
912   // TODO(b/259599413): Investigate removing some of these fields:
913   // - control/slots can be derived from each other
914   // - size can be moved into the slot array
915 
916   // The control bytes (and, also, a pointer to the base of the backing array).
917   //
918   // This contains `capacity + 1 + NumClonedBytes()` entries, even
919   // when the table is empty (hence EmptyGroup).
920   ctrl_t* control_ = EmptyGroup();
921 
922   // The beginning of the slots, located at `SlotOffset()` bytes after
923   // `control`. May be null for empty tables.
924   void* slots_ = nullptr;
925 
926   // The number of filled slots.
927   size_t size_ = 0;
928 
929   // The total number of available slots.
930   size_t capacity_ = 0;
931 
932   // Bundle together growth_left and HashtablezInfoHandle to ensure EBO for
933   // HashtablezInfoHandle when sampling is turned off.
934   absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
935       compressed_tuple_{0u, HashtablezInfoHandle{}};
936 };
937 
938 // Returns the number of "cloned control bytes".
939 //
940 // This is the number of control bytes that are present both at the beginning
941 // of the control byte array and at the end, such that we can create a
942 // `Group::kWidth`-width probe window starting from any control byte.
943 constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
944 
945 template <class Policy, class Hash, class Eq, class Alloc>
946 class raw_hash_set;
947 
948 // Returns whether `n` is a valid capacity (i.e., number of slots).
949 //
950 // A valid capacity is a non-zero integer `2^m - 1`.
951 inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
952 
953 // Returns the next valid capacity after `n`.
954 inline size_t NextCapacity(size_t n) {
955   assert(IsValidCapacity(n) || n == 0);
956   return n * 2 + 1;
957 }
958 
959 // Applies the following mapping to every byte in the control array:
960 //   * kDeleted -> kEmpty
961 //   * kEmpty -> kEmpty
962 //   * _ -> kDeleted
963 // PRECONDITION:
964 //   IsValidCapacity(capacity)
965 //   ctrl[capacity] == ctrl_t::kSentinel
966 //   ctrl[i] != ctrl_t::kSentinel for all i < capacity
967 void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
968 
969 // Converts `n` into the next valid capacity, per `IsValidCapacity`.
970 inline size_t NormalizeCapacity(size_t n) {
971   return n ? ~size_t{} >> countl_zero(n) : 1;
972 }
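
// For example, NormalizeCapacity(5) == 7 and NormalizeCapacity(8) == 15; the
// result is always of the form 2^m - 1 (cf. `IsValidCapacity()` above).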
973 
974 // General notes on capacity/growth methods below:
975 // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
976 //   average of two empty slots per group.
977 // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
978 // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
979 //   never need to probe (the whole table fits in one group) so we don't need a
980 //   load factor less than 1.
981 
982 // Given `capacity`, applies the load factor; i.e., it returns the maximum
983 // number of values we should put into the table before a resizing rehash.
984 inline size_t CapacityToGrowth(size_t capacity) {
985   assert(IsValidCapacity(capacity));
986   // `capacity*7/8`
987   if (Group::kWidth == 8 && capacity == 7) {
988     // x-x/8 does not work when x==7.
989     return 6;
990   }
991   return capacity - capacity / 8;
992 }
993 
994 // Given `growth`, "unapplies" the load factor to find how large the capacity
995 // should be to stay within the load factor.
996 //
997 // This might not be a valid capacity and `NormalizeCapacity()` should be
998 // called on this.
999 inline size_t GrowthToLowerboundCapacity(size_t growth) {
1000   // `growth*8/7`
1001   if (Group::kWidth == 8 && growth == 7) {
1002     // x+(x-1)/7 does not work when x==7.
1003     return 8;
1004   }
1005   return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
1006 }
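
// For example, CapacityToGrowth(15) == 14 and
// GrowthToLowerboundCapacity(14) == 15, which NormalizeCapacity() leaves
// unchanged.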
1007 
1008 template <class InputIter>
1009 size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
1010                                      size_t bucket_count) {
1011   if (bucket_count != 0) {
1012     return bucket_count;
1013   }
1014   using InputIterCategory =
1015       typename std::iterator_traits<InputIter>::iterator_category;
1016   if (std::is_base_of<std::random_access_iterator_tag,
1017                       InputIterCategory>::value) {
1018     return GrowthToLowerboundCapacity(
1019         static_cast<size_t>(std::distance(first, last)));
1020   }
1021   return 0;
1022 }
1023 
1024 #define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, generation, generation_ptr,         \
1025                                      operation)                                \
1026   do {                                                                         \
1027     ABSL_HARDENING_ASSERT(                                                     \
1028         (ctrl != nullptr) && operation                                         \
1029         " called on invalid iterator. The iterator might be an end() "         \
1030         "iterator or may have been default constructed.");                     \
1031     if (SwisstableGenerationsEnabled() && generation != *generation_ptr)       \
1032       ABSL_INTERNAL_LOG(FATAL, operation                                       \
1033                         " called on invalidated iterator. The table could "    \
1034                         "have rehashed since this iterator was initialized."); \
1035     ABSL_HARDENING_ASSERT(                                                     \
1036         (IsFull(*ctrl)) && operation                                           \
1037         " called on invalid iterator. The element might have been erased or "  \
1038         "the table might have rehashed.");                                     \
1039   } while (0)
1040 
1041 // Note that for comparisons, null/end iterators are valid.
1042 inline void AssertIsValidForComparison(const ctrl_t* ctrl,
1043                                        GenerationType generation,
1044                                        const GenerationType* generation_ptr) {
1045   ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
1046                         "Invalid iterator comparison. The element might have "
1047                         "been erased or the table might have rehashed.");
1048   if (SwisstableGenerationsEnabled() && generation != *generation_ptr) {
1049     ABSL_INTERNAL_LOG(FATAL,
1050                       "Invalid iterator comparison. The table could have "
1051                       "rehashed since this iterator was initialized.");
1052   }
1053 }
1054 
1055 // If the two iterators come from the same container, then their pointers will
1056 // interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
1057 // Note: we take slots by reference so that it's not UB if they're uninitialized
1058 // as long as we don't read them (when ctrl is null).
1059 inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
1060                                       const ctrl_t* ctrl_b,
1061                                       const void* const& slot_a,
1062                                       const void* const& slot_b) {
1063   // If either control byte is null, then we can't tell.
1064   if (ctrl_a == nullptr || ctrl_b == nullptr) return true;
1065   const void* low_slot = slot_a;
1066   const void* hi_slot = slot_b;
1067   if (ctrl_a > ctrl_b) {
1068     std::swap(ctrl_a, ctrl_b);
1069     std::swap(low_slot, hi_slot);
1070   }
1071   return ctrl_b < low_slot && low_slot <= hi_slot;
1072 }
1073 
1074 // Asserts that two iterators come from the same container.
1075 // Note: we take slots by reference so that it's not UB if they're uninitialized
1076 // as long as we don't read them (when ctrl is null).
1077 // TODO(b/254649633): when generations are enabled, we can detect more cases of
1078 // different containers by comparing the pointers to the generations - this
1079 // can cover cases of end iterators that we would otherwise miss.
1080 inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
1081                                 const void* const& slot_a,
1082                                 const void* const& slot_b) {
1083   ABSL_HARDENING_ASSERT(
1084       AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
1085       "Invalid iterator comparison. The iterators may be from different "
1086       "containers or the container might have rehashed.");
1087 }
1088 
1089 struct FindInfo {
1090   size_t offset;
1091   size_t probe_length;
1092 };
1093 
1094 // Whether a table is "small". A small table fits entirely into a probing
1095 // group, i.e., has a capacity satisfying `capacity + 1 < Group::kWidth`.
1096 //
1097 // In small mode we are able to use the whole capacity. The extra control
1098 // bytes give us at least one "empty" control byte to stop the iteration.
1099 // This is important to make 1 a valid capacity.
1100 //
1101 // In small mode only the first `capacity` control bytes after the sentinel
1102 // are valid. The rest contain dummy ctrl_t::kEmpty values that do not
1103 // represent a real slot. This is important to take into account on
1104 // `find_first_non_full()`, where we never try
1105 // `ShouldInsertBackwards()` for small tables.
1106 inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
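
// For example, with Group::kWidth == 16, capacities 1, 3, and 7 are small,
// while capacities 15 and above are not.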
1107 
1108 // Begins a probing operation on `common.control`, using `hash`.
1109 inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {
1110   const ctrl_t* ctrl = common.control_;
1111   const size_t capacity = common.capacity_;
1112   return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
1113 }
1114 
1115 // Probes an array of control bytes using a probe sequence derived from `hash`,
1116 // and returns the offset corresponding to the first deleted or empty slot.
1117 //
1118 // Behavior when the entire table is full is undefined.
1119 //
1120 // NOTE: this function must work with tables having both empty and deleted
1121 // slots in the same group. Such tables appear during `erase()`.
1122 template <typename = void>
1123 inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
1124   auto seq = probe(common, hash);
1125   const ctrl_t* ctrl = common.control_;
1126   while (true) {
1127     Group g{ctrl + seq.offset()};
1128     auto mask = g.MaskEmptyOrDeleted();
1129     if (mask) {
1130 #if !defined(NDEBUG)
1131       // We want to add entropy even when ASLR is not enabled.
1132       // In debug build we will randomly insert in either the front or back of
1133       // the group.
1134       // TODO(kfm,sbenza): revisit after we do unconditional mixing
1135       if (!is_small(common.capacity_) && ShouldInsertBackwards(hash, ctrl)) {
1136         return {seq.offset(mask.HighestBitSet()), seq.index()};
1137       }
1138 #endif
1139       return {seq.offset(mask.LowestBitSet()), seq.index()};
1140     }
1141     seq.next();
1142     assert(seq.index() <= common.capacity_ && "full table!");
1143   }
1144 }
1145 
1146 // The extern template declaration for this inline function preserves the
1147 // possibility of inlining. When the compiler decides not to inline it, no
1148 // symbol is added to the corresponding translation unit.
1149 extern template FindInfo find_first_non_full(const CommonFields&, size_t);
1150 
1151 // Non-inlined version of find_first_non_full for use in less
1152 // performance critical routines.
1153 FindInfo find_first_non_full_outofline(const CommonFields&, size_t);
1154 
1155 inline void ResetGrowthLeft(CommonFields& common) {
1156   common.growth_left() = CapacityToGrowth(common.capacity_) - common.size_;
1157 }
1158 
1159 // Sets `ctrl` to `{kEmpty, ..., kEmpty, kSentinel, kEmpty, ..., kEmpty}`,
1160 // i.e., marks the entire array as empty, with the sentinel at index `capacity`.
1161 inline void ResetCtrl(CommonFields& common, size_t slot_size) {
1162   const size_t capacity = common.capacity_;
1163   ctrl_t* ctrl = common.control_;
1164   std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
1165               capacity + 1 + NumClonedBytes());
1166   ctrl[capacity] = ctrl_t::kSentinel;
1167   SanitizerPoisonMemoryRegion(common.slots_, slot_size * capacity);
1168   ResetGrowthLeft(common);
1169 }
1170 
1171 // Sets `ctrl[i]` to `h`.
1172 //
1173 // Unlike setting it directly, this function will perform bounds checks and
1174 // mirror the value to the cloned tail if necessary.
1175 inline void SetCtrl(const CommonFields& common, size_t i, ctrl_t h,
1176                     size_t slot_size) {
1177   const size_t capacity = common.capacity_;
1178   assert(i < capacity);
1179 
1180   auto* slot_i = static_cast<const char*>(common.slots_) + i * slot_size;
1181   if (IsFull(h)) {
1182     SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
1183   } else {
1184     SanitizerPoisonMemoryRegion(slot_i, slot_size);
1185   }
1186 
1187   ctrl_t* ctrl = common.control_;
1188   ctrl[i] = h;
1189   ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
1190 }
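
// For example, with `capacity == 15` (so `NumClonedBytes() == 15` when
// `Group::kWidth == 16`), `SetCtrl(common, 3, h, slot_size)` also writes `h`
// to index `((3 - 15) & 15) + (15 & 15) == 4 + 15 == 19`, i.e.
// `capacity + 1 + 3`, the cloned copy of byte 3. For `i >= NumClonedBytes()`
// (possible only when `capacity > NumClonedBytes()`), the mirror expression
// maps `i` to itself, so the second store is a harmless self-assignment.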
1191 
1192 // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
1193 inline void SetCtrl(const CommonFields& common, size_t i, h2_t h,
1194                     size_t slot_size) {
1195   SetCtrl(common, i, static_cast<ctrl_t>(h), slot_size);
1196 }
1197 
1198 // Given the capacity of a table, computes the offset (from the start of the
1199 // backing allocation) of the generation counter (if it exists).
1200 inline size_t GenerationOffset(size_t capacity) {
1201   assert(IsValidCapacity(capacity));
1202   const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
1203   return num_control_bytes;
1204 }
1205 
1206 // Given the capacity of a table, computes the offset (from the start of the
1207 // backing allocation) at which the slots begin.
1208 inline size_t SlotOffset(size_t capacity, size_t slot_align) {
1209   assert(IsValidCapacity(capacity));
1210   const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
1211   return (num_control_bytes + NumGenerationBytes() + slot_align - 1) &
1212          (~slot_align + 1);
1213 }
1214 
1215 // Given the capacity of a table, computes the total size of the backing
1216 // array.
1217 inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
1218   return SlotOffset(capacity, slot_align) + capacity * slot_size;
1219 }
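
// For example, with capacity == 15, Group::kWidth == 16, no generation bytes,
// slot_size == 8 and slot_align == 8: there are 15 + 1 + 15 == 31 control
// bytes, SlotOffset() rounds that up to 32, and AllocSize() == 32 + 15 * 8 ==
// 152 bytes.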
1220 
1221 template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
1222 ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
1223   assert(c.capacity_);
1224   // Folks with custom allocators often make unwarranted assumptions about the
1225   // behavior of their classes vis-a-vis trivial destructibility and what
1226   // calls they will or won't make.  Avoid sampling for people with custom
1227   // allocators to get us out of this mess.  This is not a hard guarantee but
1228   // a workaround while we plan the exact guarantee we want to provide.
1229   const size_t sample_size =
1230       (std::is_same<Alloc, std::allocator<char>>::value && c.slots_ == nullptr)
1231           ? SizeOfSlot
1232           : 0;
1233 
1234   const size_t cap = c.capacity_;
1235   char* mem = static_cast<char*>(
1236       Allocate<AlignOfSlot>(&alloc, AllocSize(cap, SizeOfSlot, AlignOfSlot)));
1237   const GenerationType old_generation = c.generation();
1238   c.set_generation_ptr(
1239       reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
1240   c.set_generation(old_generation + 1);
1241   c.control_ = reinterpret_cast<ctrl_t*>(mem);
1242   c.slots_ = mem + SlotOffset(cap, AlignOfSlot);
1243   ResetCtrl(c, SizeOfSlot);
1244   if (sample_size) {
1245     c.infoz() = Sample(sample_size);
1246   }
1247   c.infoz().RecordStorageChanged(c.size_, cap);
1248 }
1249 
1250 // PolicyFunctions bundles together some information for a particular
1251 // raw_hash_set<T, ...> instantiation. This information is passed to
1252 // type-erased functions that want to do small amounts of type-specific
1253 // work.
1254 struct PolicyFunctions {
1255   size_t slot_size;
1256 
1257   // Return the hash of the pointed-to slot.
1258   size_t (*hash_slot)(void* set, void* slot);
1259 
1260   // Transfer the contents of src_slot to dst_slot.
1261   void (*transfer)(void* set, void* dst_slot, void* src_slot);
1262 
1263   // Deallocate the specified backing store which is sized for n slots.
1264   void (*dealloc)(void* set, const PolicyFunctions& policy, ctrl_t* ctrl,
1265                   void* slot_array, size_t n);
1266 };
1267 
// ClearBackingArray clears the backing array, either modifying it in place or
// creating a new one, depending on the value of `reuse`.
1270 // REQUIRES: c.capacity > 0
1271 void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
1272                        bool reuse);
1273 
1274 // Type-erased version of raw_hash_set::erase_meta_only.
1275 void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
1276 
1277 // Function to place in PolicyFunctions::dealloc for raw_hash_sets
1278 // that are using std::allocator. This allows us to share the same
1279 // function body for raw_hash_set instantiations that have the
1280 // same slot alignment.
1281 template <size_t AlignOfSlot>
1282 ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(void*,
1283                                                 const PolicyFunctions& policy,
1284                                                 ctrl_t* ctrl, void* slot_array,
1285                                                 size_t n) {
1286   // Unpoison before returning the memory to the allocator.
1287   SanitizerUnpoisonMemoryRegion(slot_array, policy.slot_size * n);
1288 
1289   std::allocator<char> alloc;
1290   Deallocate<AlignOfSlot>(&alloc, ctrl,
1291                           AllocSize(n, policy.slot_size, AlignOfSlot));
1292 }
1293 
1294 // For trivially relocatable types we use memcpy directly. This allows us to
1295 // share the same function body for raw_hash_set instantiations that have the
1296 // same slot size as long as they are relocatable.
1297 template <size_t SizeOfSlot>
1298 ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {
1299   memcpy(dst, src, SizeOfSlot);
1300 }
1301 
1302 // Type-erased version of raw_hash_set::drop_deletes_without_resize.
1303 void DropDeletesWithoutResize(CommonFields& common,
1304                               const PolicyFunctions& policy, void* tmp_space);
1305 
1306 // A SwissTable.
1307 //
1308 // Policy: a policy defines how to perform different operations on
1309 // the slots of the hashtable (see hash_policy_traits.h for the full interface
1310 // of policy).
1311 //
1312 // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
1313 // functor should accept a key and return size_t as hash. For best performance
1314 // it is important that the hash function provides high entropy across all bits
1315 // of the hash.
1316 //
1317 // Eq: a (possibly polymorphic) functor that compares two keys for equality. It
1318 // should accept two (of possibly different type) keys and return a bool: true
1319 // if they are equal, false if they are not. If two keys compare equal, then
1320 // their hash values as defined by Hash MUST be equal.
1321 //
1322 // Allocator: an Allocator
1323 // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
1324 // the storage of the hashtable will be allocated and the elements will be
1325 // constructed and destroyed.
1326 template <class Policy, class Hash, class Eq, class Alloc>
1327 class raw_hash_set {
1328   using PolicyTraits = hash_policy_traits<Policy>;
1329   using KeyArgImpl =
1330       KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
1331 
1332  public:
1333   using init_type = typename PolicyTraits::init_type;
1334   using key_type = typename PolicyTraits::key_type;
1335   // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
1336   // code fixes!
1337   using slot_type = typename PolicyTraits::slot_type;
1338   using allocator_type = Alloc;
1339   using size_type = size_t;
1340   using difference_type = ptrdiff_t;
1341   using hasher = Hash;
1342   using key_equal = Eq;
1343   using policy_type = Policy;
1344   using value_type = typename PolicyTraits::value_type;
1345   using reference = value_type&;
1346   using const_reference = const value_type&;
1347   using pointer = typename absl::allocator_traits<
1348       allocator_type>::template rebind_traits<value_type>::pointer;
1349   using const_pointer = typename absl::allocator_traits<
1350       allocator_type>::template rebind_traits<value_type>::const_pointer;
1351 
1352   // Alias used for heterogeneous lookup functions.
1353   // `key_arg<K>` evaluates to `K` when the functors are transparent and to
1354   // `key_type` otherwise. It permits template argument deduction on `K` for the
1355   // transparent case.
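  //
  // For example (illustrative): in a string table with transparent functors,
  // `key_arg<absl::string_view>` is `absl::string_view`; with non-transparent
  // functors it is `key_type` (e.g. `std::string`).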
1356   template <class K>
1357   using key_arg = typename KeyArgImpl::template type<K, key_type>;
1358 
1359  private:
1360   // Give an early error when key_type is not hashable/eq.
1361   auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
1362   auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
1363 
1364   using AllocTraits = absl::allocator_traits<allocator_type>;
1365   using SlotAlloc = typename absl::allocator_traits<
1366       allocator_type>::template rebind_alloc<slot_type>;
1367   using SlotAllocTraits = typename absl::allocator_traits<
1368       allocator_type>::template rebind_traits<slot_type>;
1369 
1370   static_assert(std::is_lvalue_reference<reference>::value,
1371                 "Policy::element() must return a reference");
1372 
1373   template <typename T>
1374   struct SameAsElementReference
1375       : std::is_same<typename std::remove_cv<
1376                          typename std::remove_reference<reference>::type>::type,
1377                      typename std::remove_cv<
1378                          typename std::remove_reference<T>::type>::type> {};
1379 
1380   // An enabler for insert(T&&): T must be convertible to init_type or be the
1381   // same as [cv] value_type [ref].
1382   // Note: we separate SameAsElementReference into its own type to avoid using
1383   // reference unless we need to. MSVC doesn't seem to like it in some
1384   // cases.
1385   template <class T>
1386   using RequiresInsertable = typename std::enable_if<
1387       absl::disjunction<std::is_convertible<T, init_type>,
1388                         SameAsElementReference<T>>::value,
1389       int>::type;
1390 
1391   // RequiresNotInit is a workaround for gcc prior to 7.1.
1392   // See https://godbolt.org/g/Y4xsUh.
1393   template <class T>
1394   using RequiresNotInit =
1395       typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
1396 
1397   template <class... Ts>
1398   using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
1399 
1400  public:
1401   static_assert(std::is_same<pointer, value_type*>::value,
1402                 "Allocators with custom pointer types are not supported");
1403   static_assert(std::is_same<const_pointer, const value_type*>::value,
1404                 "Allocators with custom pointer types are not supported");
1405 
1406   class iterator : private HashSetIteratorGenerationInfo {
1407     friend class raw_hash_set;
1408 
1409    public:
1410     using iterator_category = std::forward_iterator_tag;
1411     using value_type = typename raw_hash_set::value_type;
1412     using reference =
1413         absl::conditional_t<PolicyTraits::constant_iterators::value,
1414                             const value_type&, value_type&>;
1415     using pointer = absl::remove_reference_t<reference>*;
1416     using difference_type = typename raw_hash_set::difference_type;
1417 
1418     iterator() {}
1419 
1420     // PRECONDITION: not an end() iterator.
1421     reference operator*() const {
1422       ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
1423                                    "operator*()");
1424       return PolicyTraits::element(slot_);
1425     }
1426 
1427     // PRECONDITION: not an end() iterator.
1428     pointer operator->() const {
1429       ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
1430                                    "operator->");
1431       return &operator*();
1432     }
1433 
1434     // PRECONDITION: not an end() iterator.
1435     iterator& operator++() {
1436       ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, generation(), generation_ptr(),
1437                                    "operator++");
1438       ++ctrl_;
1439       ++slot_;
1440       skip_empty_or_deleted();
1441       return *this;
1442     }
1443     // PRECONDITION: not an end() iterator.
1444     iterator operator++(int) {
1445       auto tmp = *this;
1446       ++*this;
1447       return tmp;
1448     }
1449 
1450     friend bool operator==(const iterator& a, const iterator& b) {
1451       AssertSameContainer(a.ctrl_, b.ctrl_, a.slot_, b.slot_);
1452       AssertIsValidForComparison(a.ctrl_, a.generation(), a.generation_ptr());
1453       AssertIsValidForComparison(b.ctrl_, b.generation(), b.generation_ptr());
1454       return a.ctrl_ == b.ctrl_;
1455     }
1456     friend bool operator!=(const iterator& a, const iterator& b) {
1457       return !(a == b);
1458     }
1459 
1460    private:
1461     iterator(ctrl_t* ctrl, slot_type* slot,
1462              const GenerationType* generation_ptr)
1463         : HashSetIteratorGenerationInfo(generation_ptr),
1464           ctrl_(ctrl),
1465           slot_(slot) {
1466       // This assumption helps the compiler know that any non-end iterator is
1467       // not equal to any end iterator.
1468       ABSL_ASSUME(ctrl != nullptr);
1469     }
1470     // For end() iterators.
1471     explicit iterator(const GenerationType* generation_ptr)
1472         : HashSetIteratorGenerationInfo(generation_ptr) {}
1473 
    // Fixes up `ctrl_` to point to a full slot by advancing it and `slot_`
    // until they reach one.
1476     //
1477     // If a sentinel is reached, we null `ctrl_` out instead.
1478     void skip_empty_or_deleted() {
1479       while (IsEmptyOrDeleted(*ctrl_)) {
1480         uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
1481         ctrl_ += shift;
1482         slot_ += shift;
1483       }
1484       if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
1485     }
1486 
1487     ctrl_t* ctrl_ = nullptr;
1488     // To avoid uninitialized member warnings, put slot_ in an anonymous union.
1489     // The member is not initialized on singleton and end iterators.
1490     union {
1491       slot_type* slot_;
1492     };
1493   };
1494 
1495   class const_iterator {
1496     friend class raw_hash_set;
1497 
1498    public:
1499     using iterator_category = typename iterator::iterator_category;
1500     using value_type = typename raw_hash_set::value_type;
1501     using reference = typename raw_hash_set::const_reference;
1502     using pointer = typename raw_hash_set::const_pointer;
1503     using difference_type = typename raw_hash_set::difference_type;
1504 
1505     const_iterator() = default;
1506     // Implicit construction from iterator.
1507     const_iterator(iterator i) : inner_(std::move(i)) {}  // NOLINT
1508 
1509     reference operator*() const { return *inner_; }
1510     pointer operator->() const { return inner_.operator->(); }
1511 
1512     const_iterator& operator++() {
1513       ++inner_;
1514       return *this;
1515     }
1516     const_iterator operator++(int) { return inner_++; }
1517 
1518     friend bool operator==(const const_iterator& a, const const_iterator& b) {
1519       return a.inner_ == b.inner_;
1520     }
1521     friend bool operator!=(const const_iterator& a, const const_iterator& b) {
1522       return !(a == b);
1523     }
1524 
1525    private:
1526     const_iterator(const ctrl_t* ctrl, const slot_type* slot,
1527                    const GenerationType* gen)
1528         : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
1529     }
1530 
1531     iterator inner_;
1532   };
1533 
1534   using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
1535   using insert_return_type = InsertReturnType<iterator, node_type>;
1536 
1537   // Note: can't use `= default` due to non-default noexcept (causes
1538   // problems for some compilers). NOLINTNEXTLINE
1539   raw_hash_set() noexcept(
1540       std::is_nothrow_default_constructible<hasher>::value&&
1541           std::is_nothrow_default_constructible<key_equal>::value&&
1542               std::is_nothrow_default_constructible<allocator_type>::value) {}
1543 
1544   ABSL_ATTRIBUTE_NOINLINE explicit raw_hash_set(
1545       size_t bucket_count, const hasher& hash = hasher(),
1546       const key_equal& eq = key_equal(),
1547       const allocator_type& alloc = allocator_type())
1548       : settings_(CommonFields{}, hash, eq, alloc) {
1549     if (bucket_count) {
1550       common().capacity_ = NormalizeCapacity(bucket_count);
1551       initialize_slots();
1552     }
1553   }
1554 
1555   raw_hash_set(size_t bucket_count, const hasher& hash,
1556                const allocator_type& alloc)
1557       : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
1558 
1559   raw_hash_set(size_t bucket_count, const allocator_type& alloc)
1560       : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
1561 
1562   explicit raw_hash_set(const allocator_type& alloc)
1563       : raw_hash_set(0, hasher(), key_equal(), alloc) {}
1564 
1565   template <class InputIter>
1566   raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
1567                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1568                const allocator_type& alloc = allocator_type())
1569       : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
1570                      hash, eq, alloc) {
1571     insert(first, last);
1572   }
1573 
1574   template <class InputIter>
1575   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
1576                const hasher& hash, const allocator_type& alloc)
1577       : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
1578 
1579   template <class InputIter>
1580   raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
1581                const allocator_type& alloc)
1582       : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
1583 
1584   template <class InputIter>
1585   raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
1586       : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
1587 
1588   // Instead of accepting std::initializer_list<value_type> as the first
1589   // argument like std::unordered_set<value_type> does, we have two overloads
1590   // that accept std::initializer_list<T> and std::initializer_list<init_type>.
1591   // This is advantageous for performance.
1592   //
1593   //   // Turns {"abc", "def"} into std::initializer_list<std::string>, then
1594   //   // copies the strings into the set.
1595   //   std::unordered_set<std::string> s = {"abc", "def"};
1596   //
1597   //   // Turns {"abc", "def"} into std::initializer_list<const char*>, then
1598   //   // copies the strings into the set.
1599   //   absl::flat_hash_set<std::string> s = {"abc", "def"};
1600   //
1601   // The same trick is used in insert().
1602   //
1603   // The enabler is necessary to prevent this constructor from triggering where
1604   // the copy constructor is meant to be called.
1605   //
1606   //   absl::flat_hash_set<int> a, b{a};
1607   //
1608   // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
1609   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1610   raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
1611                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1612                const allocator_type& alloc = allocator_type())
1613       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
1614 
1615   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
1616                const hasher& hash = hasher(), const key_equal& eq = key_equal(),
1617                const allocator_type& alloc = allocator_type())
1618       : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
1619 
1620   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1621   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
1622                const hasher& hash, const allocator_type& alloc)
1623       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
1624 
1625   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
1626                const hasher& hash, const allocator_type& alloc)
1627       : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
1628 
1629   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1630   raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
1631                const allocator_type& alloc)
1632       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
1633 
1634   raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
1635                const allocator_type& alloc)
1636       : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
1637 
1638   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
1639   raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
1640       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
1641 
1642   raw_hash_set(std::initializer_list<init_type> init,
1643                const allocator_type& alloc)
1644       : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
1645 
1646   raw_hash_set(const raw_hash_set& that)
1647       : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
1648                                that.alloc_ref())) {}
1649 
1650   raw_hash_set(const raw_hash_set& that, const allocator_type& a)
1651       : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
1652     reserve(that.size());
1653     // Because the table is guaranteed to be empty, we can do something faster
1654     // than a full `insert`.
1655     for (const auto& v : that) {
1656       const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
1657       auto target = find_first_non_full_outofline(common(), hash);
1658       SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
1659       emplace_at(target.offset, v);
1660       common().maybe_increment_generation_on_insert();
1661       infoz().RecordInsert(hash, target.probe_length);
1662     }
1663     common().size_ = that.size();
1664     growth_left() -= that.size();
1665   }
1666 
1667   ABSL_ATTRIBUTE_NOINLINE raw_hash_set(raw_hash_set&& that) noexcept(
1668       std::is_nothrow_copy_constructible<hasher>::value&&
1669           std::is_nothrow_copy_constructible<key_equal>::value&&
1670               std::is_nothrow_copy_constructible<allocator_type>::value)
      :  // Hash, equality and allocator are copied instead of moved because
         // `that` must be left valid. If Hash is std::function<size_t(Key)>,
         // moving it would create a nullptr functor that cannot be called.
1674         settings_(absl::exchange(that.common(), CommonFields{}),
1675                   that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
1676 
1677   raw_hash_set(raw_hash_set&& that, const allocator_type& a)
1678       : settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
1679     if (a == that.alloc_ref()) {
1680       std::swap(common(), that.common());
1681     } else {
1682       reserve(that.size());
1683       // Note: this will copy elements of dense_set and unordered_set instead of
1684       // moving them. This can be fixed if it ever becomes an issue.
1685       for (auto& elem : that) insert(std::move(elem));
1686     }
1687   }
1688 
1689   raw_hash_set& operator=(const raw_hash_set& that) {
1690     raw_hash_set tmp(that,
1691                      AllocTraits::propagate_on_container_copy_assignment::value
1692                          ? that.alloc_ref()
1693                          : alloc_ref());
1694     swap(tmp);
1695     return *this;
1696   }
1697 
1698   raw_hash_set& operator=(raw_hash_set&& that) noexcept(
1699       absl::allocator_traits<allocator_type>::is_always_equal::value&&
1700           std::is_nothrow_move_assignable<hasher>::value&&
1701               std::is_nothrow_move_assignable<key_equal>::value) {
1702     // TODO(sbenza): We should only use the operations from the noexcept clause
1703     // to make sure we actually adhere to that contract.
1704     // NOLINTNEXTLINE: not returning *this for performance.
1705     return move_assign(
1706         std::move(that),
1707         typename AllocTraits::propagate_on_container_move_assignment());
1708   }
1709 
1710   ~raw_hash_set() {
1711     const size_t cap = capacity();
1712     if (!cap) return;
1713     destroy_slots();
1714 
1715     // Unpoison before returning the memory to the allocator.
1716     SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
1717     Deallocate<alignof(slot_type)>(
1718         &alloc_ref(), control(),
1719         AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
1720 
1721     infoz().Unregister();
1722   }
1723 
1724   iterator begin() {
1725     auto it = iterator_at(0);
1726     it.skip_empty_or_deleted();
1727     return it;
1728   }
1729   iterator end() { return iterator(common().generation_ptr()); }
1730 
1731   const_iterator begin() const {
1732     return const_cast<raw_hash_set*>(this)->begin();
1733   }
1734   const_iterator end() const { return iterator(common().generation_ptr()); }
1735   const_iterator cbegin() const { return begin(); }
1736   const_iterator cend() const { return end(); }
1737 
1738   bool empty() const { return !size(); }
1739   size_t size() const { return common().size_; }
1740   size_t capacity() const { return common().capacity_; }
1741   size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
1742 
1743   ABSL_ATTRIBUTE_REINITIALIZES void clear() {
    // Iterating over this container is O(bucket_count()). When bucket_count()
    // is much greater than size(), iteration becomes prohibitively expensive.
    // For clear() it is more important to reuse the allocated array when the
    // container is small, because allocation takes a long time compared to
    // destroying the container's elements. So we pick the largest
    // bucket_count() threshold for which iteration is still fast, and past
    // that we simply deallocate the array.
1751     const size_t cap = capacity();
1752     if (cap == 0) {
1753       // Already guaranteed to be empty; so nothing to do.
1754     } else {
1755       destroy_slots();
1756       ClearBackingArray(common(), GetPolicyFunctions(),
1757                         /*reuse=*/cap < 128);
1758     }
1759     common().set_reserved_growth(0);
1760   }
1761 
1762   inline void destroy_slots() {
1763     const size_t cap = capacity();
1764     const ctrl_t* ctrl = control();
1765     slot_type* slot = slot_array();
1766     for (size_t i = 0; i != cap; ++i) {
1767       if (IsFull(ctrl[i])) {
1768         PolicyTraits::destroy(&alloc_ref(), slot + i);
1769       }
1770     }
1771   }
1772 
1773   // This overload kicks in when the argument is an rvalue of insertable and
1774   // decomposable type other than init_type.
1775   //
1776   //   flat_hash_map<std::string, int> m;
1777   //   m.insert(std::make_pair("abc", 42));
1778   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
1779   // bug.
1780   template <class T, RequiresInsertable<T> = 0, class T2 = T,
1781             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
1782             T* = nullptr>
1783   std::pair<iterator, bool> insert(T&& value) {
1784     return emplace(std::forward<T>(value));
1785   }
1786 
1787   // This overload kicks in when the argument is a bitfield or an lvalue of
1788   // insertable and decomposable type.
1789   //
1790   //   union { int n : 1; };
1791   //   flat_hash_set<int> s;
1792   //   s.insert(n);
1793   //
1794   //   flat_hash_set<std::string> s;
1795   //   const char* p = "hello";
1796   //   s.insert(p);
1797   //
1798   // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
1799   // RequiresInsertable<T> with RequiresInsertable<const T&>.
1800   // We are hitting this bug: https://godbolt.org/g/1Vht4f.
1801   template <
1802       class T, RequiresInsertable<T> = 0,
1803       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
1804   std::pair<iterator, bool> insert(const T& value) {
1805     return emplace(value);
1806   }
1807 
1808   // This overload kicks in when the argument is an rvalue of init_type. Its
1809   // purpose is to handle brace-init-list arguments.
1810   //
1811   //   flat_hash_map<std::string, int> s;
1812   //   s.insert({"abc", 42});
1813   std::pair<iterator, bool> insert(init_type&& value) {
1814     return emplace(std::move(value));
1815   }
1816 
1817   // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
1818   // bug.
1819   template <class T, RequiresInsertable<T> = 0, class T2 = T,
1820             typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
1821             T* = nullptr>
1822   iterator insert(const_iterator, T&& value) {
1823     return insert(std::forward<T>(value)).first;
1824   }
1825 
1826   // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
1827   // RequiresInsertable<T> with RequiresInsertable<const T&>.
1828   // We are hitting this bug: https://godbolt.org/g/1Vht4f.
1829   template <
1830       class T, RequiresInsertable<T> = 0,
1831       typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
1832   iterator insert(const_iterator, const T& value) {
1833     return insert(value).first;
1834   }
1835 
1836   iterator insert(const_iterator, init_type&& value) {
1837     return insert(std::move(value)).first;
1838   }
1839 
1840   template <class InputIt>
1841   void insert(InputIt first, InputIt last) {
1842     for (; first != last; ++first) emplace(*first);
1843   }
1844 
1845   template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
1846   void insert(std::initializer_list<T> ilist) {
1847     insert(ilist.begin(), ilist.end());
1848   }
1849 
1850   void insert(std::initializer_list<init_type> ilist) {
1851     insert(ilist.begin(), ilist.end());
1852   }
1853 
1854   insert_return_type insert(node_type&& node) {
1855     if (!node) return {end(), false, node_type()};
1856     const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
1857     auto res = PolicyTraits::apply(
1858         InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
1859         elem);
1860     if (res.second) {
1861       CommonAccess::Reset(&node);
1862       return {res.first, true, node_type()};
1863     } else {
1864       return {res.first, false, std::move(node)};
1865     }
1866   }
1867 
1868   iterator insert(const_iterator, node_type&& node) {
1869     auto res = insert(std::move(node));
1870     node = std::move(res.node);
1871     return res.position;
1872   }
1873 
1874   // This overload kicks in if we can deduce the key from args. This enables us
1875   // to avoid constructing value_type if an entry with the same key already
1876   // exists.
1877   //
1878   // For example:
1879   //
1880   //   flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
1881   //   // Creates no std::string copies and makes no heap allocations.
1882   //   m.emplace("abc", "xyz");
1883   template <class... Args, typename std::enable_if<
1884                                IsDecomposable<Args...>::value, int>::type = 0>
1885   std::pair<iterator, bool> emplace(Args&&... args) {
1886     return PolicyTraits::apply(EmplaceDecomposable{*this},
1887                                std::forward<Args>(args)...);
1888   }
1889 
  // This overload kicks in if we cannot deduce the key from args. It
  // constructs value_type unconditionally and then either moves it into the
  // table or destroys it.
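  //
  // For example (illustrative), given `flat_hash_set<std::string> s`:
  //
  //   // The key cannot be deduced from (5, 'x'), so std::string(5, 'x') is
  //   // constructed first and then either inserted or destroyed.
  //   s.emplace(5, 'x');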
1893   template <class... Args, typename std::enable_if<
1894                                !IsDecomposable<Args...>::value, int>::type = 0>
1895   std::pair<iterator, bool> emplace(Args&&... args) {
1896     alignas(slot_type) unsigned char raw[sizeof(slot_type)];
1897     slot_type* slot = reinterpret_cast<slot_type*>(&raw);
1898 
1899     PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
1900     const auto& elem = PolicyTraits::element(slot);
1901     return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
1902   }
1903 
1904   template <class... Args>
1905   iterator emplace_hint(const_iterator, Args&&... args) {
1906     return emplace(std::forward<Args>(args)...).first;
1907   }
1908 
1909   // Extension API: support for lazy emplace.
1910   //
1911   // Looks up key in the table. If found, returns the iterator to the element.
1912   // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
1913   //
1914   // `f` must abide by several restrictions:
1915   //  - it MUST call `raw_hash_set::constructor` with arguments as if a
1916   //    `raw_hash_set::value_type` is constructed,
1917   //  - it MUST NOT access the container before the call to
1918   //    `raw_hash_set::constructor`, and
1919   //  - it MUST NOT erase the lazily emplaced element.
1920   // Doing any of these is undefined behavior.
1921   //
1922   // For example:
1923   //
1924   //   std::unordered_set<ArenaString> s;
  //   // Makes ArenaString even if "abc" is in the set.
1926   //   s.insert(ArenaString(&arena, "abc"));
1927   //
  //   flat_hash_set<ArenaString> s;
  //   // Makes ArenaString only if "abc" is not in the set.
1930   //   s.lazy_emplace("abc", [&](const constructor& ctor) {
1931   //     ctor(&arena, "abc");
1932   //   });
1933   //
1934   // WARNING: This API is currently experimental. If there is a way to implement
1935   // the same thing with the rest of the API, prefer that.
1936   class constructor {
1937     friend class raw_hash_set;
1938 
1939    public:
1940     template <class... Args>
1941     void operator()(Args&&... args) const {
1942       assert(*slot_);
1943       PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
1944       *slot_ = nullptr;
1945     }
1946 
1947    private:
1948     constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
1949 
1950     allocator_type* alloc_;
1951     slot_type** slot_;
1952   };
1953 
1954   template <class K = key_type, class F>
1955   iterator lazy_emplace(const key_arg<K>& key, F&& f) {
1956     auto res = find_or_prepare_insert(key);
1957     if (res.second) {
1958       slot_type* slot = slot_array() + res.first;
1959       std::forward<F>(f)(constructor(&alloc_ref(), &slot));
1960       assert(!slot);
1961     }
1962     return iterator_at(res.first);
1963   }
1964 
1965   // Extension API: support for heterogeneous keys.
1966   //
1967   //   std::unordered_set<std::string> s;
1968   //   // Turns "abc" into std::string.
1969   //   s.erase("abc");
1970   //
1971   //   flat_hash_set<std::string> s;
1972   //   // Uses "abc" directly without copying it into std::string.
1973   //   s.erase("abc");
1974   template <class K = key_type>
1975   size_type erase(const key_arg<K>& key) {
1976     auto it = find(key);
1977     if (it == end()) return 0;
1978     erase(it);
1979     return 1;
1980   }
1981 
1982   // Erases the element pointed to by `it`.  Unlike `std::unordered_set::erase`,
1983   // this method returns void to reduce algorithmic complexity to O(1).  The
1984   // iterator is invalidated, so any increment should be done before calling
1985   // erase.  In order to erase while iterating across a map, use the following
1986   // idiom (which also works for standard containers):
1987   //
1988   // for (auto it = m.begin(), end = m.end(); it != end;) {
1989   //   // `erase()` will invalidate `it`, so advance `it` first.
1990   //   auto copy_it = it++;
1991   //   if (<pred>) {
1992   //     m.erase(copy_it);
1993   //   }
1994   // }
1995   void erase(const_iterator cit) { erase(cit.inner_); }
1996 
1997   // This overload is necessary because otherwise erase<K>(const K&) would be
1998   // a better match if non-const iterator is passed as an argument.
1999   void erase(iterator it) {
2000     ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, it.generation(), it.generation_ptr(),
2001                                  "erase()");
2002     PolicyTraits::destroy(&alloc_ref(), it.slot_);
2003     erase_meta_only(it);
2004   }
2005 
2006   iterator erase(const_iterator first, const_iterator last) {
2007     while (first != last) {
2008       erase(first++);
2009     }
2010     return last.inner_;
2011   }
2012 
2013   // Moves elements from `src` into `this`.
2014   // If the element already exists in `this`, it is left unmodified in `src`.
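  //
  // For example (illustrative):
  //
  //   flat_hash_set<std::string> src = {"a", "b"}, dst = {"b", "c"};
  //   dst.merge(src);  // dst == {"a", "b", "c"}, src == {"b"}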
2015   template <typename H, typename E>
2016   void merge(raw_hash_set<Policy, H, E, Alloc>& src) {  // NOLINT
2017     assert(this != &src);
2018     for (auto it = src.begin(), e = src.end(); it != e;) {
2019       auto next = std::next(it);
2020       if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
2021                               PolicyTraits::element(it.slot_))
2022               .second) {
2023         src.erase_meta_only(it);
2024       }
2025       it = next;
2026     }
2027   }
2028 
2029   template <typename H, typename E>
2030   void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
2031     merge(src);
2032   }
2033 
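  // Extracts an element as a node handle that can be inserted into another
  // compatible container. For example (illustrative):
  //
  //   flat_hash_set<std::string> s = {"abc"}, other;
  //   auto node = s.extract("abc");
  //   if (!node.empty()) other.insert(std::move(node));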
2034   node_type extract(const_iterator position) {
2035     ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_,
2036                                  position.inner_.generation(),
2037                                  position.inner_.generation_ptr(), "extract()");
2038     auto node =
2039         CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
2040     erase_meta_only(position);
2041     return node;
2042   }
2043 
2044   template <
2045       class K = key_type,
2046       typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
2047   node_type extract(const key_arg<K>& key) {
2048     auto it = find(key);
2049     return it == end() ? node_type() : extract(const_iterator{it});
2050   }
2051 
2052   void swap(raw_hash_set& that) noexcept(
2053       IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
2054       IsNoThrowSwappable<allocator_type>(
2055           typename AllocTraits::propagate_on_container_swap{})) {
2056     using std::swap;
2057     swap(common(), that.common());
2058     swap(hash_ref(), that.hash_ref());
2059     swap(eq_ref(), that.eq_ref());
2060     SwapAlloc(alloc_ref(), that.alloc_ref(),
2061               typename AllocTraits::propagate_on_container_swap{});
2062   }
2063 
2064   void rehash(size_t n) {
2065     if (n == 0 && capacity() == 0) return;
2066     if (n == 0 && size() == 0) {
2067       ClearBackingArray(common(), GetPolicyFunctions(),
2068                         /*reuse=*/false);
2069       return;
2070     }
2071 
2072     // bitor is a faster way of doing `max` here. We will round up to the next
2073     // power-of-2-minus-1, so bitor is good enough.
2074     auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
2075     // n == 0 unconditionally rehashes as per the standard.
2076     if (n == 0 || m > capacity()) {
2077       resize(m);
2078 
2079       // This is after resize, to ensure that we have completed the allocation
2080       // and have potentially sampled the hashtable.
2081       infoz().RecordReservation(n);
2082     }
2083   }
2084 
2085   void reserve(size_t n) {
2086     if (n > size() + growth_left()) {
2087       size_t m = GrowthToLowerboundCapacity(n);
2088       resize(NormalizeCapacity(m));
2089 
2090       // This is after resize, to ensure that we have completed the allocation
2091       // and have potentially sampled the hashtable.
2092       infoz().RecordReservation(n);
2093     }
2094     common().reset_reserved_growth(n);
2095   }
2096 
2097   // Extension API: support for heterogeneous keys.
2098   //
2099   //   std::unordered_set<std::string> s;
2100   //   // Turns "abc" into std::string.
2101   //   s.count("abc");
2102   //
2103   //   ch_set<std::string> s;
2104   //   // Uses "abc" directly without copying it into std::string.
2105   //   s.count("abc");
2106   template <class K = key_type>
2107   size_t count(const key_arg<K>& key) const {
2108     return find(key) == end() ? 0 : 1;
2109   }
2110 
  // Issues CPU prefetch instructions for the memory needed to find or insert
  // a key.  Like all lookup functions, this supports heterogeneous keys.
2113   //
2114   // NOTE: This is a very low level operation and should not be used without
2115   // specific benchmarks indicating its importance.
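  //
  // A hypothetical usage pattern (the helper below is a placeholder),
  // interleaving the prefetch with unrelated work before the actual lookup:
  //
  //   s.prefetch(key);
  //   DoSomeUnrelatedWork();
  //   auto it = s.find(key);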
2116   template <class K = key_type>
2117   void prefetch(const key_arg<K>& key) const {
2118     (void)key;
2119     // Avoid probing if we won't be able to prefetch the addresses received.
2120 #ifdef ABSL_INTERNAL_HAVE_PREFETCH
2121     prefetch_heap_block();
2122     auto seq = probe(common(), hash_ref()(key));
2123     base_internal::PrefetchT0(control() + seq.offset());
2124     base_internal::PrefetchT0(slot_array() + seq.offset());
2125 #endif  // ABSL_INTERNAL_HAVE_PREFETCH
2126   }
2127 
2128   // The API of find() has two extensions.
2129   //
2130   // 1. The hash can be passed by the user. It must be equal to the hash of the
2131   // key.
2132   //
  // 2. The type of the key argument doesn't have to be key_type. This is the
  // so-called heterogeneous key support.
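  //
  // For example (illustrative, assuming the default transparent string
  // functors):
  //
  //   flat_hash_set<std::string> s = {"abc"};
  //   s.find("abc");                            // Heterogeneous lookup.
  //   s.find("abc", s.hash_function()("abc"));  // Caller-supplied hash.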
2135   template <class K = key_type>
2136   iterator find(const key_arg<K>& key, size_t hash) {
2137     auto seq = probe(common(), hash);
2138     slot_type* slot_ptr = slot_array();
2139     const ctrl_t* ctrl = control();
2140     while (true) {
2141       Group g{ctrl + seq.offset()};
2142       for (uint32_t i : g.Match(H2(hash))) {
2143         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2144                 EqualElement<K>{key, eq_ref()},
2145                 PolicyTraits::element(slot_ptr + seq.offset(i)))))
2146           return iterator_at(seq.offset(i));
2147       }
2148       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end();
2149       seq.next();
2150       assert(seq.index() <= capacity() && "full table!");
2151     }
2152   }
2153   template <class K = key_type>
2154   iterator find(const key_arg<K>& key) {
2155     prefetch_heap_block();
2156     return find(key, hash_ref()(key));
2157   }
2158 
2159   template <class K = key_type>
2160   const_iterator find(const key_arg<K>& key, size_t hash) const {
2161     return const_cast<raw_hash_set*>(this)->find(key, hash);
2162   }
2163   template <class K = key_type>
2164   const_iterator find(const key_arg<K>& key) const {
2165     prefetch_heap_block();
2166     return find(key, hash_ref()(key));
2167   }
2168 
2169   template <class K = key_type>
2170   bool contains(const key_arg<K>& key) const {
2171     return find(key) != end();
2172   }
2173 
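  // Since the table holds unique keys, the range returned by equal_range()
  // contains at most one element.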
2174   template <class K = key_type>
2175   std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
2176     auto it = find(key);
2177     if (it != end()) return {it, std::next(it)};
2178     return {it, it};
2179   }
2180   template <class K = key_type>
2181   std::pair<const_iterator, const_iterator> equal_range(
2182       const key_arg<K>& key) const {
2183     auto it = find(key);
2184     if (it != end()) return {it, std::next(it)};
2185     return {it, it};
2186   }
2187 
2188   size_t bucket_count() const { return capacity(); }
2189   float load_factor() const {
2190     return capacity() ? static_cast<double>(size()) / capacity() : 0.0;
2191   }
2192   float max_load_factor() const { return 1.0f; }
2193   void max_load_factor(float) {
2194     // Does nothing.
2195   }
2196 
2197   hasher hash_function() const { return hash_ref(); }
2198   key_equal key_eq() const { return eq_ref(); }
2199   allocator_type get_allocator() const { return alloc_ref(); }
2200 
2201   friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
2202     if (a.size() != b.size()) return false;
2203     const raw_hash_set* outer = &a;
2204     const raw_hash_set* inner = &b;
2205     if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
2206     for (const value_type& elem : *outer)
2207       if (!inner->has_element(elem)) return false;
2208     return true;
2209   }
2210 
2211   friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
2212     return !(a == b);
2213   }
2214 
2215   template <typename H>
2216   friend typename std::enable_if<H::template is_hashable<value_type>::value,
2217                                  H>::type
2218   AbslHashValue(H h, const raw_hash_set& s) {
2219     return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()),
2220                       s.size());
2221   }
2222 
2223   friend void swap(raw_hash_set& a,
2224                    raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
2225     a.swap(b);
2226   }
2227 
2228  private:
2229   template <class Container, typename Enabler>
2230   friend struct absl::container_internal::hashtable_debug_internal::
2231       HashtableDebugAccess;
2232 
2233   struct FindElement {
2234     template <class K, class... Args>
2235     const_iterator operator()(const K& key, Args&&...) const {
2236       return s.find(key);
2237     }
2238     const raw_hash_set& s;
2239   };
2240 
2241   struct HashElement {
2242     template <class K, class... Args>
2243     size_t operator()(const K& key, Args&&...) const {
2244       return h(key);
2245     }
2246     const hasher& h;
2247   };
2248 
2249   template <class K1>
2250   struct EqualElement {
2251     template <class K2, class... Args>
2252     bool operator()(const K2& lhs, Args&&...) const {
2253       return eq(lhs, rhs);
2254     }
2255     const K1& rhs;
2256     const key_equal& eq;
2257   };
2258 
2259   struct EmplaceDecomposable {
2260     template <class K, class... Args>
2261     std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
2262       auto res = s.find_or_prepare_insert(key);
2263       if (res.second) {
2264         s.emplace_at(res.first, std::forward<Args>(args)...);
2265       }
2266       return {s.iterator_at(res.first), res.second};
2267     }
2268     raw_hash_set& s;
2269   };
2270 
2271   template <bool do_destroy>
2272   struct InsertSlot {
2273     template <class K, class... Args>
2274     std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
2275       auto res = s.find_or_prepare_insert(key);
2276       if (res.second) {
2277         PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
2278                                &slot);
2279       } else if (do_destroy) {
2280         PolicyTraits::destroy(&s.alloc_ref(), &slot);
2281       }
2282       return {s.iterator_at(res.first), res.second};
2283     }
2284     raw_hash_set& s;
2285     // Constructed slot. Either moved into place or destroyed.
2286     slot_type&& slot;
2287   };
2288 
2289   // Erases, but does not destroy, the value pointed to by `it`.
2290   //
2291   // This merely updates the pertinent control byte. This can be used in
2292   // conjunction with Policy::transfer to move the object to another place.
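  //
  // For example, `extract()` transfers the slot's contents into a node handle
  // and then calls this function so that the destructor is not run on the
  // moved-from slot.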
2293   void erase_meta_only(const_iterator it) {
2294     EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
2295   }
2296 
2297   // Allocates a backing array for `self` and initializes its control bytes.
2298   // This reads `capacity` and updates all other fields based on the result of
2299   // the allocation.
2300   //
2301   // This does not free the currently held array; `capacity` must be nonzero.
2302   inline void initialize_slots() {
2303     // People are often sloppy with the exact type of their allocator (sometimes
2304     // it has an extra const or is missing the pair, but rebinds made it work
2305     // anyway).
2306     using CharAlloc =
2307         typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
2308     InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
2309         common(), CharAlloc(alloc_ref()));
2310   }
2311 
2312   ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
2313     assert(IsValidCapacity(new_capacity));
2314     auto* old_ctrl = control();
2315     auto* old_slots = slot_array();
2316     const size_t old_capacity = common().capacity_;
2317     common().capacity_ = new_capacity;
2318     initialize_slots();
2319 
2320     auto* new_slots = slot_array();
2321     size_t total_probe_length = 0;
2322     for (size_t i = 0; i != old_capacity; ++i) {
2323       if (IsFull(old_ctrl[i])) {
2324         size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
2325                                           PolicyTraits::element(old_slots + i));
2326         auto target = find_first_non_full(common(), hash);
2327         size_t new_i = target.offset;
2328         total_probe_length += target.probe_length;
2329         SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
2330         PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
2331       }
2332     }
2333     if (old_capacity) {
2334       SanitizerUnpoisonMemoryRegion(old_slots,
2335                                     sizeof(slot_type) * old_capacity);
2336       Deallocate<alignof(slot_type)>(
2337           &alloc_ref(), old_ctrl,
2338           AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
2339     }
2340     infoz().RecordRehash(total_probe_length);
2341   }
2342 
2343   // Prunes control bytes to remove as many tombstones as possible.
2344   //
2345   // See the comment on `rehash_and_grow_if_necessary()`.
2346   inline void drop_deletes_without_resize() {
2347     // Stack-allocate space for swapping elements.
2348     alignas(slot_type) unsigned char tmp[sizeof(slot_type)];
2349     DropDeletesWithoutResize(common(), GetPolicyFunctions(), tmp);
2350   }
2351 
2352   // Called whenever the table *might* need to conditionally grow.
2353   //
2354   // This function is an optimization opportunity to perform a rehash even when
2355   // growth is unnecessary, because vacating tombstones is beneficial for
  // performance in the long run.
2357   void rehash_and_grow_if_necessary() {
2358     const size_t cap = capacity();
2359     if (cap > Group::kWidth &&
        // Do these calculations in 64-bit to avoid overflow.
        size() * uint64_t{32} <= cap * uint64_t{25}) {
2362       // Squash DELETED without growing if there is enough capacity.
2363       //
2364       // Rehash in place if the current size is <= 25/32 of capacity.
2365       // Rationale for such a high factor: 1) drop_deletes_without_resize() is
2366       // faster than resize, and 2) it takes quite a bit of work to add
2367       // tombstones.  In the worst case, seems to take approximately 4
2368       // insert/erase pairs to create a single tombstone and so if we are
2369       // rehashing because of tombstones, we can afford to rehash-in-place as
2370       // long as we are reclaiming at least 1/8 the capacity without doing more
2371       // than 2X the work.  (Where "work" is defined to be size() for rehashing
2372       // or rehashing in place, and 1 for an insert or erase.)  But rehashing in
2373       // place is faster per operation than inserting or even doubling the size
      // of the table, so we can actually afford to reclaim even less space
      // from a resize-in-place.  The decision is to rehash in place if we can
      // reclaim about 1/8th of the usable capacity (specifically 3/28 of the
      // usable capacity, i.e. 3/32 of the total capacity), which means that
      // the total cost of rehashing will be a small fraction of the total
      // work.
2379       //
2380       // Here is output of an experiment using the BM_CacheInSteadyState
2381       // benchmark running the old case (where we rehash-in-place only if we can
2382       // reclaim at least 7/16*capacity) vs. this code (which rehashes in place
2383       // if we can recover 3/32*capacity).
2384       //
      // Note that although the worst-case number of rehashes jumped up from
      // 15 to 190, the number of operations per second is almost the same.
2387       //
2388       // Abridged output of running BM_CacheInSteadyState benchmark from
2389       // raw_hash_set_benchmark.   N is the number of insert/erase operations.
2390       //
2391       //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
2392       // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
2393       //  448 | 145284       0.44        18 | 140118       0.44        19
2394       //  493 | 152546       0.24        11 | 151417       0.48        28
2395       //  538 | 151439       0.26        11 | 151152       0.53        38
2396       //  583 | 151765       0.28        11 | 150572       0.57        50
2397       //  628 | 150241       0.31        11 | 150853       0.61        66
2398       //  672 | 149602       0.33        12 | 150110       0.66        90
2399       //  717 | 149998       0.35        12 | 149531       0.70       129
2400       //  762 | 149836       0.37        13 | 148559       0.74       190
2401       //  807 | 149736       0.39        14 | 151107       0.39        14
2402       //  852 | 150204       0.42        15 | 151019       0.42        15
2403       drop_deletes_without_resize();
2404     } else {
2405       // Otherwise grow the container.
2406       resize(NextCapacity(cap));
2407     }
2408   }
2409 
2410   bool has_element(const value_type& elem) const {
2411     size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
2412     auto seq = probe(common(), hash);
2413     const ctrl_t* ctrl = control();
2414     while (true) {
2415       Group g{ctrl + seq.offset()};
2416       for (uint32_t i : g.Match(H2(hash))) {
2417         if (ABSL_PREDICT_TRUE(
2418                 PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
2419           return true;
2420       }
2421       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
2422       seq.next();
2423       assert(seq.index() <= capacity() && "full table!");
2424     }
2425     return false;
2426   }
2427 
2428   // TODO(alkis): Optimize this assuming *this and that don't overlap.
2429   raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
2430     raw_hash_set tmp(std::move(that));
2431     swap(tmp);
2432     return *this;
2433   }
2434   raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
2435     raw_hash_set tmp(std::move(that), alloc_ref());
2436     swap(tmp);
2437     return *this;
2438   }
2439 
2440  protected:
2441   // Attempts to find `key` in the table; if it isn't found, returns a slot that
2442   // the value can be inserted into, with the control byte already set to
2443   // `key`'s H2.
2444   template <class K>
2445   std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
2446     prefetch_heap_block();
2447     auto hash = hash_ref()(key);
2448     auto seq = probe(common(), hash);
2449     const ctrl_t* ctrl = control();
2450     while (true) {
2451       Group g{ctrl + seq.offset()};
2452       for (uint32_t i : g.Match(H2(hash))) {
2453         if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
2454                 EqualElement<K>{key, eq_ref()},
2455                 PolicyTraits::element(slot_array() + seq.offset(i)))))
2456           return {seq.offset(i), false};
2457       }
2458       if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break;
2459       seq.next();
2460       assert(seq.index() <= capacity() && "full table!");
2461     }
2462     return {prepare_insert(hash), true};
2463   }
2464 
2465   // Given the hash of a value not currently in the table, finds the next
2466   // viable slot index to insert it at.
2467   //
2468   // REQUIRES: At least one non-full slot available.
2469   size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
2470     const bool rehash_for_bug_detection =
2471         common().should_rehash_for_bug_detection_on_insert();
2472     if (rehash_for_bug_detection) {
2473       // Move to a different heap allocation in order to detect bugs.
2474       const size_t cap = capacity();
2475       resize(growth_left() > 0 ? cap : NextCapacity(cap));
2476     }
2477     auto target = find_first_non_full(common(), hash);
2478     if (!rehash_for_bug_detection &&
2479         ABSL_PREDICT_FALSE(growth_left() == 0 &&
2480                            !IsDeleted(control()[target.offset]))) {
2481       rehash_and_grow_if_necessary();
2482       target = find_first_non_full(common(), hash);
2483     }
2484     ++common().size_;
2485     growth_left() -= IsEmpty(control()[target.offset]);
2486     SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
2487     common().maybe_increment_generation_on_insert();
2488     infoz().RecordInsert(hash, target.probe_length);
2489     return target.offset;
2490   }
2491 
  // Constructs the value in the space pointed to by the iterator. This only
  // works after an unsuccessful find_or_prepare_insert() and before any other
2494   // modifications happen in the raw_hash_set.
2495   //
2496   // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
2497   // k is the key decomposed from `forward<Args>(args)...`, and the bool
2498   // returned by find_or_prepare_insert(k) was true.
2499   // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
2500   template <class... Args>
2501   void emplace_at(size_t i, Args&&... args) {
2502     PolicyTraits::construct(&alloc_ref(), slot_array() + i,
2503                             std::forward<Args>(args)...);
2504 
2505     assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
2506                iterator_at(i) &&
2507            "constructed value does not match the lookup key");
2508   }
2509 
2510   iterator iterator_at(size_t i) {
2511     return {control() + i, slot_array() + i, common().generation_ptr()};
2512   }
2513   const_iterator iterator_at(size_t i) const {
2514     return {control() + i, slot_array() + i, common().generation_ptr()};
2515   }
2516 
2517  private:
2518   friend struct RawHashSetTestOnlyAccess;
2519 
2520   // The number of slots we can still fill without needing to rehash.
2521   //
2522   // This is stored separately due to tombstones: we do not include tombstones
2523   // in the growth capacity, because we'd like to rehash when the table is
2524   // otherwise filled with tombstones: otherwise, probe sequences might get
2525   // unacceptably long without triggering a rehash. Callers can also force a
2526   // rehash via the standard `rehash(0)`, which will recompute this value as a
2527   // side-effect.
2528   //
2529   // See `CapacityToGrowth()`.
2530   size_t& growth_left() { return common().growth_left(); }
2531 
2532   // Prefetch the heap-allocated memory region to resolve potential TLB misses.
  // This is intended to overlap with the computation of the key's hash.
2535   void prefetch_heap_block() const { base_internal::PrefetchT2(control()); }

  CommonFields& common() { return settings_.template get<0>(); }
  const CommonFields& common() const { return settings_.template get<0>(); }

  ctrl_t* control() const { return common().control_; }
  slot_type* slot_array() const {
    return static_cast<slot_type*>(common().slots_);
  }
  HashtablezInfoHandle& infoz() { return common().infoz(); }

  hasher& hash_ref() { return settings_.template get<1>(); }
  const hasher& hash_ref() const { return settings_.template get<1>(); }
  key_equal& eq_ref() { return settings_.template get<2>(); }
  const key_equal& eq_ref() const { return settings_.template get<2>(); }
  allocator_type& alloc_ref() { return settings_.template get<3>(); }
  const allocator_type& alloc_ref() const {
    return settings_.template get<3>();
  }

  // Make type-specific functions for this type's PolicyFunctions struct.
  static size_t hash_slot_fn(void* set, void* slot) {
    auto* h = static_cast<raw_hash_set*>(set);
    return PolicyTraits::apply(
        HashElement{h->hash_ref()},
        PolicyTraits::element(static_cast<slot_type*>(slot)));
  }
  static void transfer_slot_fn(void* set, void* dst, void* src) {
    auto* h = static_cast<raw_hash_set*>(set);
    PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
                           static_cast<slot_type*>(src));
  }
  // Note: dealloc_fn will only be used if we have a non-standard allocator.
  static void dealloc_fn(void* set, const PolicyFunctions&, ctrl_t* ctrl,
                         void* slot_mem, size_t n) {
    auto* h = static_cast<raw_hash_set*>(set);

    // Unpoison before returning the memory to the allocator.
    SanitizerUnpoisonMemoryRegion(slot_mem, sizeof(slot_type) * n);

    Deallocate<alignof(slot_type)>(
        &h->alloc_ref(), ctrl,
        AllocSize(n, sizeof(slot_type), alignof(slot_type)));
  }

  static const PolicyFunctions& GetPolicyFunctions() {
    static constexpr PolicyFunctions value = {
        sizeof(slot_type),
        &raw_hash_set::hash_slot_fn,
        PolicyTraits::transfer_uses_memcpy()
            ? TransferRelocatable<sizeof(slot_type)>
            : &raw_hash_set::transfer_slot_fn,
        (std::is_same<SlotAlloc, std::allocator<slot_type>>::value
             ? &DeallocateStandard<alignof(slot_type)>
             : &raw_hash_set::dealloc_fn),
    };
    return value;
  }
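  // A sketch of the type-erased PolicyFunctions layout that the initializer
  // above fills in, in aggregate order (field names here are illustrative, not
  // the authoritative definition):
  //
  //   struct PolicyFunctions {
  //     size_t slot_size;                            // sizeof(slot_type)
  //     size_t (*hash_slot)(void* set, void* slot);  // hash_slot_fn
  //     void (*transfer)(void* set, void* dst, void* src);
  //     void (*dealloc)(void* set, const PolicyFunctions&, ctrl_t* ctrl,
  //                     void* slot_mem, size_t n);
  //   };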

  // Bundle together CommonFields plus other objects which might be empty.
  // CompressedTuple will ensure that sizeof is not affected by any of the empty
  // fields that occur after CommonFields.
  absl::container_internal::CompressedTuple<CommonFields, hasher, key_equal,
                                            allocator_type>
      settings_{CommonFields{}, hasher{}, key_equal{}, allocator_type{}};
};

// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {
  const auto initial_size = c->size();
  for (auto it = c->begin(), last = c->end(); it != last;) {
    if (pred(*it)) {
      c->erase(it++);
    } else {
      ++it;
    }
  }
  return initial_size - c->size();
}
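
// For example (an illustrative sketch; the public `erase_if()` overloads for
// the containers built on raw_hash_set forward to this function):
//
//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
//   auto is_odd = [](int v) { return v % 2 != 0; };
//   size_t removed = container_internal::EraseIf(is_odd, &s);  // removed == 2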

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
  using Traits = typename Set::PolicyTraits;
  using Slot = typename Traits::slot_type;

  static size_t GetNumProbes(const Set& set,
                             const typename Set::key_type& key) {
    size_t num_probes = 0;
    size_t hash = set.hash_ref()(key);
    auto seq = probe(set.common(), hash);
    const ctrl_t* ctrl = set.control();
    while (true) {
      container_internal::Group g{ctrl + seq.offset()};
      for (uint32_t i : g.Match(container_internal::H2(hash))) {
        if (Traits::apply(
                typename Set::template EqualElement<typename Set::key_type>{
                    key, set.eq_ref()},
                Traits::element(set.slot_array() + seq.offset(i))))
          return num_probes;
        ++num_probes;
      }
      if (g.MaskEmpty()) return num_probes;
      seq.next();
      ++num_probes;
    }
  }
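  // A small worked reading of the counting above (illustrative): a key whose
  // very first H2 candidate in the first probed group is the matching element
  // reports 0; each rejected candidate and each additional group visited adds
  // one; for an absent key, probing stops at the first group containing an
  // empty slot.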

  static size_t AllocatedByteSize(const Set& c) {
    size_t capacity = c.capacity();
    if (capacity == 0) return 0;
    size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));

    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * c.size();
    } else {
      const ctrl_t* ctrl = c.control();
      for (size_t i = 0; i != capacity; ++i) {
        if (container_internal::IsFull(ctrl[i])) {
          m += Traits::space_used(c.slot_array() + i);
        }
      }
    }
    return m;
  }

  static size_t LowerBoundAllocatedByteSize(size_t size) {
    size_t capacity = GrowthToLowerboundCapacity(size);
    if (capacity == 0) return 0;
    size_t m =
        AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
    size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
    if (per_slot != ~size_t{}) {
      m += per_slot * size;
    }
    return m;
  }
};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#undef ABSL_INTERNAL_ASSERT_IS_FULL

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_