1 // Copyright 2018 The Abseil Authors.
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "absl/container/internal/raw_hash_set.h"
16
17 #include <algorithm>
18 #include <atomic>
19 #include <cmath>
20 #include <cstdint>
21 #include <deque>
22 #include <functional>
23 #include <iostream>
24 #include <iterator>
25 #include <list>
26 #include <map>
27 #include <memory>
28 #include <numeric>
29 #include <ostream>
30 #include <random>
31 #include <string>
32 #include <type_traits>
33 #include <unordered_map>
34 #include <unordered_set>
35 #include <utility>
36 #include <vector>
37
38 #include "gmock/gmock.h"
39 #include "gtest/gtest.h"
40 #include "absl/base/attributes.h"
41 #include "absl/base/config.h"
42 #include "absl/base/internal/cycleclock.h"
43 #include "absl/base/internal/prefetch.h"
44 #include "absl/base/internal/raw_logging.h"
45 #include "absl/container/flat_hash_map.h"
46 #include "absl/container/flat_hash_set.h"
47 #include "absl/container/internal/container_memory.h"
48 #include "absl/container/internal/hash_function_defaults.h"
49 #include "absl/container/internal/hash_policy_testing.h"
50 #include "absl/container/internal/hashtable_debug.h"
51 #include "absl/log/log.h"
52 #include "absl/strings/string_view.h"
53
54 namespace absl {
55 ABSL_NAMESPACE_BEGIN
56 namespace container_internal {
57
58 struct RawHashSetTestOnlyAccess {
59 template <typename C>
60 static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
61 return c.slot_array();
62 }
63 };
64
65 namespace {
66
67 using ::testing::ElementsAre;
68 using ::testing::Eq;
69 using ::testing::Ge;
70 using ::testing::Lt;
71 using ::testing::Pair;
72 using ::testing::UnorderedElementsAre;
73
74 // Convenience function to static cast to ctrl_t.
75 ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
76
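// NormalizeCapacity rounds a requested capacity up to the next value of the
// form 2^k - 1 (one less than a power of two); the cases below exercise the
// boundaries around each power of two.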
77 TEST(Util, NormalizeCapacity) {
78 EXPECT_EQ(1, NormalizeCapacity(0));
79 EXPECT_EQ(1, NormalizeCapacity(1));
80 EXPECT_EQ(3, NormalizeCapacity(2));
81 EXPECT_EQ(3, NormalizeCapacity(3));
82 EXPECT_EQ(7, NormalizeCapacity(4));
83 EXPECT_EQ(7, NormalizeCapacity(7));
84 EXPECT_EQ(15, NormalizeCapacity(8));
85 EXPECT_EQ(15, NormalizeCapacity(15));
86 EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
87 EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
88 }
89
90 TEST(Util, GrowthAndCapacity) {
91 // Verify that GrowthToCapacity gives the minimum capacity that has enough
92 // growth.
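// (Rough sketch of the relationship, assuming SwissTable's usual 7/8 maximum
// load factor: for capacities of at least Group::kWidth,
// CapacityToGrowth(capacity) is approximately capacity - capacity / 8.)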
93 for (size_t growth = 0; growth < 10000; ++growth) {
94 SCOPED_TRACE(growth);
95 size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
96 // The capacity is large enough for `growth`.
97 EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
98 // For (capacity+1) < kWidth, growth should equal capacity.
99 if (capacity + 1 < Group::kWidth) {
100 EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
101 } else {
102 EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
103 }
104 if (growth != 0 && capacity > 1) {
105 // There is no smaller capacity that works.
106 EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
107 }
108 }
109
110 for (size_t capacity = Group::kWidth - 1; capacity < 10000;
111 capacity = 2 * capacity + 1) {
112 SCOPED_TRACE(capacity);
113 size_t growth = CapacityToGrowth(capacity);
114 EXPECT_THAT(growth, Lt(capacity));
115 EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
116 EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
117 }
118 }
119
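// probe_seq implements quadratic probing in strides of Group::kWidth: each
// step advances by one more group than the last (offsets 0, 16, 48, 96, ...
// for a width of 16), all taken modulo the capacity mask. With mask 127,
// starting hashes 0 and 128 are congruent, so both sequences below match.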
120 TEST(Util, probe_seq) {
121 probe_seq<16> seq(0, 127);
122 auto gen = [&]() {
123 size_t res = seq.offset();
124 seq.next();
125 return res;
126 };
127 std::vector<size_t> offsets(8);
128 std::generate_n(offsets.begin(), 8, gen);
129 EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
130 seq = probe_seq<16>(128, 127);
131 std::generate_n(offsets.begin(), 8, gen);
132 EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
133 }
134
135 TEST(BitMask, Smoke) {
136 EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
137 EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
138
139 EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
140 EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
141 EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
142 EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
143 EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
144 EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
145 EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
146 EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
147 }
148
149 TEST(BitMask, WithShift) {
150 // See the non-SSE version of Group for details on what this math is for.
151 uint64_t ctrl = 0x1716151413121110;
152 uint64_t hash = 0x12;
153 constexpr uint64_t msbs = 0x8080808080808080ULL;
154 constexpr uint64_t lsbs = 0x0101010101010101ULL;
155 auto x = ctrl ^ (lsbs * hash);
156 uint64_t mask = (x - lsbs) & ~x & msbs;
157 EXPECT_EQ(0x0000000080800000, mask);
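// Worked through byte by byte (least significant first): ctrl holds
// 0x10..0x17, so x = ctrl ^ (lsbs * hash) has a zero byte exactly where a
// control byte equals the hash (byte 2). (x - lsbs) & ~x & msbs then sets the
// top bit of each such byte; the borrow out of byte 2's subtraction also sets
// byte 3's bit, a false positive the non-SSE Group is allowed to report.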
158
159 BitMask<uint64_t, 8, 3> b(mask);
160 EXPECT_EQ(*b, 2);
161 }
162
163 TEST(BitMask, LeadingTrailing) {
164 EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
165 EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
166
167 EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
168 EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
169
170 EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
171 EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
172
173 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
174 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
175
176 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
177 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
178
179 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
180 EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
181 }
182
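// Full control bytes store only the 7-bit H2 hash (values 0..127); the special
// values kEmpty, kDeleted, and kSentinel all have the sign bit set, so no H2
// value can ever match a slot in an empty group.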
183 TEST(Group, EmptyGroup) {
184 for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
185 }
186
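// Group::Match(h) yields the positions within the group whose control byte
// equals the 7-bit hash h; empty, deleted, and sentinel bytes never match.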
187 TEST(Group, Match) {
188 if (Group::kWidth == 16) {
189 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
190 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
191 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
192 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
193 EXPECT_THAT(Group{group}.Match(0), ElementsAre());
194 EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
195 EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
196 EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
197 EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
198 } else if (Group::kWidth == 8) {
199 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
200 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
201 ctrl_t::kSentinel, CtrlT(1)};
202 EXPECT_THAT(Group{group}.Match(0), ElementsAre());
203 EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
204 EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
205 } else {
206 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
207 }
208 }
209
210 TEST(Group, MaskEmpty) {
211 if (Group::kWidth == 16) {
212 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
213 ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
214 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
215 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
216 EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
217 EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4);
218 } else if (Group::kWidth == 8) {
219 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
220 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
221 ctrl_t::kSentinel, CtrlT(1)};
222 EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0);
223 EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0);
224 } else {
225 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
226 }
227 }
228
229 TEST(Group, MaskEmptyOrDeleted) {
230 if (Group::kWidth == 16) {
231 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
232 ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
233 CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
234 CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
235 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
236 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4);
237 } else if (Group::kWidth == 8) {
238 ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
239 ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
240 ctrl_t::kSentinel, CtrlT(1)};
241 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0);
242 EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3);
243 } else {
244 FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
245 }
246 }
247
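// ConvertDeletedToEmptyAndFullToDeleted is the first phase of an in-place
// rehash: kDeleted slots become kEmpty, full slots become kDeleted (they still
// need to be moved), and the cloned control bytes that follow the sentinel are
// kept in sync, which is why the expectations below wrap modulo kCapacity + 1.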
248 TEST(Batch, DropDeletes) {
249 constexpr size_t kCapacity = 63;
250 constexpr size_t kGroupWidth = container_internal::Group::kWidth;
251 std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
252 ctrl[kCapacity] = ctrl_t::kSentinel;
253 std::vector<ctrl_t> pattern = {
254 ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
255 ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
256 for (size_t i = 0; i != kCapacity; ++i) {
257 ctrl[i] = pattern[i % pattern.size()];
258 if (i < kGroupWidth - 1)
259 ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
260 }
261 ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
262 ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
263 for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
264 ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
265 if (i == kCapacity) expected = ctrl_t::kSentinel;
266 if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
267 if (IsFull(expected)) expected = ctrl_t::kDeleted;
268 EXPECT_EQ(ctrl[i], expected)
269 << i << " " << static_cast<int>(pattern[i % pattern.size()]);
270 }
271 }
272
273 TEST(Group, CountLeadingEmptyOrDeleted) {
274 const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
275 const std::vector<ctrl_t> full_examples = {
276 CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3),
277 CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
278
279 for (ctrl_t empty : empty_examples) {
280 std::vector<ctrl_t> e(Group::kWidth, empty);
281 EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
282 for (ctrl_t full : full_examples) {
283 for (size_t i = 0; i != Group::kWidth; ++i) {
284 std::vector<ctrl_t> f(Group::kWidth, empty);
285 f[i] = full;
286 EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
287 }
288 std::vector<ctrl_t> f(Group::kWidth, empty);
289 f[Group::kWidth * 2 / 3] = full;
290 f[Group::kWidth / 2] = full;
291 EXPECT_EQ(
292 Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
293 }
294 }
295 }
296
297 template <class T>
298 struct ValuePolicy {
299 using slot_type = T;
300 using key_type = T;
301 using init_type = T;
302
303 template <class Allocator, class... Args>
304 static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
305 absl::allocator_traits<Allocator>::construct(*alloc, slot,
306 std::forward<Args>(args)...);
307 }
308
309 template <class Allocator>
310 static void destroy(Allocator* alloc, slot_type* slot) {
311 absl::allocator_traits<Allocator>::destroy(*alloc, slot);
312 }
313
314 template <class Allocator>
315 static void transfer(Allocator* alloc, slot_type* new_slot,
316 slot_type* old_slot) {
317 construct(alloc, new_slot, std::move(*old_slot));
318 destroy(alloc, old_slot);
319 }
320
321 static T& element(slot_type* slot) { return *slot; }
322
323 template <class F, class... Args>
324 static decltype(absl::container_internal::DecomposeValue(
325 std::declval<F>(), std::declval<Args>()...))
326 apply(F&& f, Args&&... args) {
327 return absl::container_internal::DecomposeValue(
328 std::forward<F>(f), std::forward<Args>(args)...);
329 }
330 };
331
332 using IntPolicy = ValuePolicy<int64_t>;
333 using Uint8Policy = ValuePolicy<uint8_t>;
334
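// A map-like policy used by StringTable below: each slot owns a
// std::pair<std::string, std::string>, and apply() decomposes its arguments
// via PairArgs so that hashing and comparison can be done on the key alone
// (heterogeneously, through absl::string_view).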
335 class StringPolicy {
336 template <class F, class K, class V,
337 class = typename std::enable_if<
338 std::is_convertible<const K&, absl::string_view>::value>::type>
339 decltype(std::declval<F>()(
340 std::declval<const absl::string_view&>(), std::piecewise_construct,
341 std::declval<std::tuple<K>>(),
342 std::declval<V>())) static apply_impl(F&& f,
343 std::pair<std::tuple<K>, V> p) {
344 const absl::string_view& key = std::get<0>(p.first);
345 return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
346 std::move(p.second));
347 }
348
349 public:
350 struct slot_type {
351 struct ctor {};
352
353 template <class... Ts>
354 explicit slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
355
356 std::pair<std::string, std::string> pair;
357 };
358
359 using key_type = std::string;
360 using init_type = std::pair<std::string, std::string>;
361
362 template <class allocator_type, class... Args>
363 static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
364 std::allocator_traits<allocator_type>::construct(
365 *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
366 }
367
368 template <class allocator_type>
369 static void destroy(allocator_type* alloc, slot_type* slot) {
370 std::allocator_traits<allocator_type>::destroy(*alloc, slot);
371 }
372
373 template <class allocator_type>
374 static void transfer(allocator_type* alloc, slot_type* new_slot,
375 slot_type* old_slot) {
376 construct(alloc, new_slot, std::move(old_slot->pair));
377 destroy(alloc, old_slot);
378 }
379
380 static std::pair<std::string, std::string>& element(slot_type* slot) {
381 return slot->pair;
382 }
383
384 template <class F, class... Args>
385 static auto apply(F&& f, Args&&... args)
386 -> decltype(apply_impl(std::forward<F>(f),
387 PairArgs(std::forward<Args>(args)...))) {
388 return apply_impl(std::forward<F>(f),
389 PairArgs(std::forward<Args>(args)...));
390 }
391 };
392
393 struct StringHash : absl::Hash<absl::string_view> {
394 using is_transparent = void;
395 };
396 struct StringEq : std::equal_to<absl::string_view> {
397 using is_transparent = void;
398 };
399
400 struct StringTable
401 : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
402 using Base = typename StringTable::raw_hash_set;
403 StringTable() = default;
404 using Base::Base;
405 };
406
407 struct IntTable
408 : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
409 std::equal_to<int64_t>, std::allocator<int64_t>> {
410 using Base = typename IntTable::raw_hash_set;
411 using Base::Base;
412 };
413
414 struct Uint8Table
415 : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
416 std::equal_to<uint8_t>, std::allocator<uint8_t>> {
417 using Base = typename Uint8Table::raw_hash_set;
418 using Base::Base;
419 };
420
421 template <typename T>
422 struct CustomAlloc : std::allocator<T> {
423 CustomAlloc() = default;
424
425 template <typename U>
426 explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
427
428 template<class U> struct rebind {
429 using other = CustomAlloc<U>;
430 };
431 };
432
433 struct CustomAllocIntTable
434 : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
435 std::equal_to<int64_t>, CustomAlloc<int64_t>> {
436 using Base = typename CustomAllocIntTable::raw_hash_set;
437 using Base::Base;
438 };
439
440 struct BadFastHash {
441 template <class T>
442 size_t operator()(const T&) const {
443 return 0;
444 }
445 };
446
447 struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
448 std::allocator<int>> {
449 using Base = typename BadTable::raw_hash_set;
450 BadTable() = default;
451 using Base::Base;
452 };
453
454 TEST(Table, EmptyFunctorOptimization) {
455 static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
456 static_assert(std::is_empty<std::allocator<int>>::value, "");
457
458 struct MockTable {
459 void* infoz;
460 void* ctrl;
461 void* slots;
462 size_t size;
463 size_t capacity;
464 size_t growth_left;
465 };
466 struct MockTableInfozDisabled {
467 void* ctrl;
468 void* slots;
469 size_t size;
470 size_t capacity;
471 size_t growth_left;
472 };
473 struct StatelessHash {
474 size_t operator()(absl::string_view) const { return 0; }
475 };
476 struct StatefulHash : StatelessHash {
477 size_t dummy;
478 };
479
480 struct GenerationData {
481 size_t reserved_growth;
482 GenerationType* generation;
483 };
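// With stateless hash, equality, and allocator types, the empty-base
// optimization should make the table exactly the size of the matching mock
// struct above (plus the generation data when that feature is compiled in);
// a stateful hash should add exactly sizeof(StatefulHash).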
484
485 // Ignore unreachable-code warning. Compiler thinks one branch of each ternary
486 // conditional is unreachable.
487 #if defined(__clang__)
488 #pragma clang diagnostic push
489 #pragma clang diagnostic ignored "-Wunreachable-code"
490 #endif
491 constexpr size_t mock_size = std::is_empty<HashtablezInfoHandle>()
492 ? sizeof(MockTableInfozDisabled)
493 : sizeof(MockTable);
494 constexpr size_t generation_size =
495 SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
496 #if defined(__clang__)
497 #pragma clang diagnostic pop
498 #endif
499
500 EXPECT_EQ(
501 mock_size + generation_size,
502 sizeof(
503 raw_hash_set<StringPolicy, StatelessHash,
504 std::equal_to<absl::string_view>, std::allocator<int>>));
505
506 EXPECT_EQ(
507 mock_size + sizeof(StatefulHash) + generation_size,
508 sizeof(
509 raw_hash_set<StringPolicy, StatefulHash,
510 std::equal_to<absl::string_view>, std::allocator<int>>));
511 }
512
513 TEST(Table, Empty) {
514 IntTable t;
515 EXPECT_EQ(0, t.size());
516 EXPECT_TRUE(t.empty());
517 }
518
519 TEST(Table, LookupEmpty) {
520 IntTable t;
521 auto it = t.find(0);
522 EXPECT_TRUE(it == t.end());
523 }
524
525 TEST(Table, Insert1) {
526 IntTable t;
527 EXPECT_TRUE(t.find(0) == t.end());
528 auto res = t.emplace(0);
529 EXPECT_TRUE(res.second);
530 EXPECT_THAT(*res.first, 0);
531 EXPECT_EQ(1, t.size());
532 EXPECT_THAT(*t.find(0), 0);
533 }
534
535 TEST(Table, Insert2) {
536 IntTable t;
537 EXPECT_TRUE(t.find(0) == t.end());
538 auto res = t.emplace(0);
539 EXPECT_TRUE(res.second);
540 EXPECT_THAT(*res.first, 0);
541 EXPECT_EQ(1, t.size());
542 EXPECT_TRUE(t.find(1) == t.end());
543 res = t.emplace(1);
544 EXPECT_TRUE(res.second);
545 EXPECT_THAT(*res.first, 1);
546 EXPECT_EQ(2, t.size());
547 EXPECT_THAT(*t.find(0), 0);
548 EXPECT_THAT(*t.find(1), 1);
549 }
550
551 TEST(Table, InsertCollision) {
552 BadTable t;
553 EXPECT_TRUE(t.find(1) == t.end());
554 auto res = t.emplace(1);
555 EXPECT_TRUE(res.second);
556 EXPECT_THAT(*res.first, 1);
557 EXPECT_EQ(1, t.size());
558
559 EXPECT_TRUE(t.find(2) == t.end());
560 res = t.emplace(2);
561 EXPECT_THAT(*res.first, 2);
562 EXPECT_TRUE(res.second);
563 EXPECT_EQ(2, t.size());
564
565 EXPECT_THAT(*t.find(1), 1);
566 EXPECT_THAT(*t.find(2), 2);
567 }
568
569 // Test that we do not add an existing element when we have to search through
570 // many groups with deleted elements.
571 TEST(Table, InsertCollisionAndFindAfterDelete) {
572 BadTable t; // all elements go to the same group.
573 // Have at least 2 groups with Group::kWidth collisions
574 // plus some extra collisions in the last group.
575 constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
576 for (size_t i = 0; i < kNumInserts; ++i) {
577 auto res = t.emplace(i);
578 EXPECT_TRUE(res.second);
579 EXPECT_THAT(*res.first, i);
580 EXPECT_EQ(i + 1, t.size());
581 }
582
583 // Remove elements one by one and check
584 // that we can still find all the other elements.
585 for (size_t i = 0; i < kNumInserts; ++i) {
586 EXPECT_EQ(1, t.erase(i)) << i;
587 for (size_t j = i + 1; j < kNumInserts; ++j) {
588 EXPECT_THAT(*t.find(j), j);
589 auto res = t.emplace(j);
590 EXPECT_FALSE(res.second) << i << " " << j;
591 EXPECT_THAT(*res.first, j);
592 EXPECT_EQ(kNumInserts - i - 1, t.size());
593 }
594 }
595 EXPECT_TRUE(t.empty());
596 }
597
598 TEST(Table, InsertWithinCapacity) {
599 IntTable t;
600 t.reserve(10);
601 const size_t original_capacity = t.capacity();
602 const auto addr = [&](int i) {
603 return reinterpret_cast<uintptr_t>(&*t.find(i));
604 };
605 // Inserting an element does not change capacity.
606 t.insert(0);
607 EXPECT_THAT(t.capacity(), original_capacity);
608 const uintptr_t original_addr_0 = addr(0);
609 // Inserting another element does not rehash.
610 t.insert(1);
611 EXPECT_THAT(t.capacity(), original_capacity);
612 EXPECT_THAT(addr(0), original_addr_0);
613 // Inserting lots of duplicate elements does not rehash.
614 for (int i = 0; i < 100; ++i) {
615 t.insert(i % 10);
616 }
617 EXPECT_THAT(t.capacity(), original_capacity);
618 EXPECT_THAT(addr(0), original_addr_0);
619 // Inserting a range of duplicate elements does not rehash.
620 std::vector<int> dup_range;
621 for (int i = 0; i < 100; ++i) {
622 dup_range.push_back(i % 10);
623 }
624 t.insert(dup_range.begin(), dup_range.end());
625 EXPECT_THAT(t.capacity(), original_capacity);
626 EXPECT_THAT(addr(0), original_addr_0);
627 }
628
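// lazy_emplace(key, f) invokes f only when the key is absent; f must construct
// a value whose key equals the lookup key. On a hit the existing element is
// returned untouched, which is what the second call below verifies.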
629 TEST(Table, LazyEmplace) {
630 StringTable t;
631 bool called = false;
632 auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
633 called = true;
634 f("abc", "ABC");
635 });
636 EXPECT_TRUE(called);
637 EXPECT_THAT(*it, Pair("abc", "ABC"));
638 called = false;
639 it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
640 called = true;
641 f("abc", "DEF");
642 });
643 EXPECT_FALSE(called);
644 EXPECT_THAT(*it, Pair("abc", "ABC"));
645 }
646
647 TEST(Table, ContainsEmpty) {
648 IntTable t;
649
650 EXPECT_FALSE(t.contains(0));
651 }
652
653 TEST(Table, Contains1) {
654 IntTable t;
655
656 EXPECT_TRUE(t.insert(0).second);
657 EXPECT_TRUE(t.contains(0));
658 EXPECT_FALSE(t.contains(1));
659
660 EXPECT_EQ(1, t.erase(0));
661 EXPECT_FALSE(t.contains(0));
662 }
663
664 TEST(Table, Contains2) {
665 IntTable t;
666
667 EXPECT_TRUE(t.insert(0).second);
668 EXPECT_TRUE(t.contains(0));
669 EXPECT_FALSE(t.contains(1));
670
671 t.clear();
672 EXPECT_FALSE(t.contains(0));
673 }
674
675 int decompose_constructed;
676 int decompose_copy_constructed;
677 int decompose_copy_assigned;
678 int decompose_move_constructed;
679 int decompose_move_assigned;
680 struct DecomposeType {
681 DecomposeType(int i = 0) : i(i) { // NOLINT
682 ++decompose_constructed;
683 }
684
685 explicit DecomposeType(const char* d) : DecomposeType(*d) {}
686
687 DecomposeType(const DecomposeType& other) : i(other.i) {
688 ++decompose_copy_constructed;
689 }
690 DecomposeType& operator=(const DecomposeType& other) {
691 ++decompose_copy_assigned;
692 i = other.i;
693 return *this;
694 }
695 DecomposeType(DecomposeType&& other) : i(other.i) {
696 ++decompose_move_constructed;
697 }
698 DecomposeType& operator=(DecomposeType&& other) {
699 ++decompose_move_assigned;
700 i = other.i;
701 return *this;
702 }
703
704 int i;
705 };
706
707 struct DecomposeHash {
708 using is_transparent = void;
709 size_t operator()(const DecomposeType& a) const { return a.i; }
710 size_t operator()(int a) const { return a; }
711 size_t operator()(const char* a) const { return *a; }
712 };
713
714 struct DecomposeEq {
715 using is_transparent = void;
716 bool operator()(const DecomposeType& a, const DecomposeType& b) const {
717 return a.i == b.i;
718 }
719 bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
720 bool operator()(const DecomposeType& a, const char* b) const {
721 return a.i == *b;
722 }
723 };
724
725 struct DecomposePolicy {
726 using slot_type = DecomposeType;
727 using key_type = DecomposeType;
728 using init_type = DecomposeType;
729
730 template <typename T>
731 static void construct(void*, DecomposeType* slot, T&& v) {
732 ::new (slot) DecomposeType(std::forward<T>(v));
733 }
734 static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
735 static DecomposeType& element(slot_type* slot) { return *slot; }
736
737 template <class F, class T>
738 static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
739 return std::forward<F>(f)(x, x);
740 }
741 };
742
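// TestDecompose verifies that heterogeneous insert/emplace only constructs a
// DecomposeType when the hash and equality functors cannot operate on the
// argument directly; construct_three says whether emplacing an already-present
// C-string key ("3") is still expected to construct a temporary for the lookup.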
743 template <typename Hash, typename Eq>
744 void TestDecompose(bool construct_three) {
745 DecomposeType elem{0};
746 const int one = 1;
747 const char* three_p = "3";
748 const auto& three = three_p;
749 const int elem_vector_count = 256;
750 std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
751 std::iota(elem_vector.begin(), elem_vector.end(), 0);
752
753 using DecomposeSet =
754 raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
755 DecomposeSet set1;
756
757 decompose_constructed = 0;
758 int expected_constructed = 0;
759 EXPECT_EQ(expected_constructed, decompose_constructed);
760 set1.insert(elem);
761 EXPECT_EQ(expected_constructed, decompose_constructed);
762 set1.insert(1);
763 EXPECT_EQ(++expected_constructed, decompose_constructed);
764 set1.emplace("3");
765 EXPECT_EQ(++expected_constructed, decompose_constructed);
766 EXPECT_EQ(expected_constructed, decompose_constructed);
767
768 { // insert(T&&)
769 set1.insert(1);
770 EXPECT_EQ(expected_constructed, decompose_constructed);
771 }
772
773 { // insert(const T&)
774 set1.insert(one);
775 EXPECT_EQ(expected_constructed, decompose_constructed);
776 }
777
778 { // insert(hint, T&&)
779 set1.insert(set1.begin(), 1);
780 EXPECT_EQ(expected_constructed, decompose_constructed);
781 }
782
783 { // insert(hint, const T&)
784 set1.insert(set1.begin(), one);
785 EXPECT_EQ(expected_constructed, decompose_constructed);
786 }
787
788 { // emplace(...)
789 set1.emplace(1);
790 EXPECT_EQ(expected_constructed, decompose_constructed);
791 set1.emplace("3");
792 expected_constructed += construct_three;
793 EXPECT_EQ(expected_constructed, decompose_constructed);
794 set1.emplace(one);
795 EXPECT_EQ(expected_constructed, decompose_constructed);
796 set1.emplace(three);
797 expected_constructed += construct_three;
798 EXPECT_EQ(expected_constructed, decompose_constructed);
799 }
800
801 { // emplace_hint(...)
802 set1.emplace_hint(set1.begin(), 1);
803 EXPECT_EQ(expected_constructed, decompose_constructed);
804 set1.emplace_hint(set1.begin(), "3");
805 expected_constructed += construct_three;
806 EXPECT_EQ(expected_constructed, decompose_constructed);
807 set1.emplace_hint(set1.begin(), one);
808 EXPECT_EQ(expected_constructed, decompose_constructed);
809 set1.emplace_hint(set1.begin(), three);
810 expected_constructed += construct_three;
811 EXPECT_EQ(expected_constructed, decompose_constructed);
812 }
813
814 decompose_copy_constructed = 0;
815 decompose_copy_assigned = 0;
816 decompose_move_constructed = 0;
817 decompose_move_assigned = 0;
818 int expected_copy_constructed = 0;
819 int expected_move_constructed = 0;
820 { // raw_hash_set(first, last) with random-access iterators
821 DecomposeSet set2(elem_vector.begin(), elem_vector.end());
822 // Expect exactly one copy-constructor call for each element if no
823 // rehashing is done.
824 expected_copy_constructed += elem_vector_count;
825 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
826 EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
827 EXPECT_EQ(0, decompose_move_assigned);
828 EXPECT_EQ(0, decompose_copy_assigned);
829 }
830
831 { // raw_hash_set(first, last) with forward iterators
832 std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
833 expected_copy_constructed = decompose_copy_constructed;
834 DecomposeSet set2(elem_list.begin(), elem_list.end());
835 // Expect exactly N elements copied into the set, and at most 2*N elements
836 // moved internally for all the resizing needed (with a growth factor of 2).
837 expected_copy_constructed += elem_vector_count;
838 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
839 expected_move_constructed += elem_vector_count;
840 EXPECT_LT(expected_move_constructed, decompose_move_constructed);
841 expected_move_constructed += elem_vector_count;
842 EXPECT_GE(expected_move_constructed, decompose_move_constructed);
843 EXPECT_EQ(0, decompose_move_assigned);
844 EXPECT_EQ(0, decompose_copy_assigned);
845 expected_copy_constructed = decompose_copy_constructed;
846 expected_move_constructed = decompose_move_constructed;
847 }
848
849 { // insert(first, last)
850 DecomposeSet set2;
851 set2.insert(elem_vector.begin(), elem_vector.end());
852 // Expect exactly N elements copied into the set, and at most 2*N elements
853 // moved internally for all the resizing needed (with a growth factor of 2).
854 const int expected_new_elements = elem_vector_count;
855 const int expected_max_element_moves = 2 * elem_vector_count;
856 expected_copy_constructed += expected_new_elements;
857 EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
858 expected_move_constructed += expected_max_element_moves;
859 EXPECT_GE(expected_move_constructed, decompose_move_constructed);
860 EXPECT_EQ(0, decompose_move_assigned);
861 EXPECT_EQ(0, decompose_copy_assigned);
862 expected_copy_constructed = decompose_copy_constructed;
863 expected_move_constructed = decompose_move_constructed;
864 }
865 }
866
867 TEST(Table, Decompose) {
868 TestDecompose<DecomposeHash, DecomposeEq>(false);
869
870 struct TransparentHashIntOverload {
871 size_t operator()(const DecomposeType& a) const { return a.i; }
872 size_t operator()(int a) const { return a; }
873 };
874 struct TransparentEqIntOverload {
875 bool operator()(const DecomposeType& a, const DecomposeType& b) const {
876 return a.i == b.i;
877 }
878 bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
879 };
880 TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
881 TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
882 TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
883 }
884
885 // Returns the largest m such that a table with m elements has the same number
886 // of buckets as a table with n elements.
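// (That is, roughly the maximum load the chosen bucket count can hold before
// the next growth, about 7/8 of the capacity under the default load factor.)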
887 size_t MaxDensitySize(size_t n) {
888 IntTable t;
889 t.reserve(n);
890 for (size_t i = 0; i != n; ++i) t.emplace(i);
891 const size_t c = t.bucket_count();
892 while (c == t.bucket_count()) t.emplace(n++);
893 return t.size() - 1;
894 }
895
896 struct Modulo1000Hash {
897 size_t operator()(int x) const { return x % 1000; }
898 };
899
900 struct Modulo1000HashTable
901 : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
902 std::allocator<int>> {};
903
904 // Test that a rehash with no resize happens when there are many deleted slots.
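// An in-place rehash (drop deletes) keeps the same backing array and only
// rewrites control bytes and relocates elements, so capacity() must stay
// unchanged while the probe length for the stressed key drops.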
905 TEST(Table, RehashWithNoResize) {
906 Modulo1000HashTable t;
907 // Add keys that all have the same hash so that we get at least
908 // kMinFullGroups groups with Group::kWidth collisions each, then fill the
909 // table up to MaxDensitySize.
910 const size_t kMinFullGroups = 7;
911 std::vector<int> keys;
912 for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
913 int k = i * 1000;
914 t.emplace(k);
915 keys.push_back(k);
916 }
917 const size_t capacity = t.capacity();
918
919 // Remove elements from all groups except the first and the last one.
920 // All elements removed from full groups will be marked as ctrl_t::kDeleted.
921 const size_t erase_begin = Group::kWidth / 2;
922 const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
923 for (size_t i = erase_begin; i < erase_end; ++i) {
924 EXPECT_EQ(1, t.erase(keys[i])) << i;
925 }
926 keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
927
928 auto last_key = keys.back();
929 size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
930
931 // Make sure that we have to make a lot of probes for the last key.
932 ASSERT_GT(last_key_num_probes, kMinFullGroups);
933
934 int x = 1;
935 // Insert and erase one element at a time until an in-place rehash happens.
936 while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
937 t.emplace(x);
938 ASSERT_EQ(capacity, t.capacity());
939 // All elements should be there.
940 ASSERT_TRUE(t.find(x) != t.end()) << x;
941 for (const auto& k : keys) {
942 ASSERT_TRUE(t.find(k) != t.end()) << k;
943 }
944 t.erase(x);
945 ++x;
946 }
947 }
948
949 TEST(Table, InsertEraseStressTest) {
950 IntTable t;
951 const size_t kMinElementCount = 250;
952 std::deque<int> keys;
953 size_t i = 0;
954 for (; i < MaxDensitySize(kMinElementCount); ++i) {
955 t.emplace(i);
956 keys.push_back(i);
957 }
958 const size_t kNumIterations = 1000000;
959 for (; i < kNumIterations; ++i) {
960 ASSERT_EQ(1, t.erase(keys.front()));
961 keys.pop_front();
962 t.emplace(i);
963 keys.push_back(i);
964 }
965 }
966
967 TEST(Table, InsertOverloads) {
968 StringTable t;
969 // These should all trigger the insert(init_type) overload.
970 t.insert({{}, {}});
971 t.insert({"ABC", {}});
972 t.insert({"DEF", "!!!"});
973
974 EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
975 Pair("DEF", "!!!")));
976 }
977
978 TEST(Table, LargeTable) {
979 IntTable t;
980 for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
981 for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
982 }
983
984 // This test will time out if copying is quadratic, as it once was in Rust.
985 TEST(Table, EnsureNonQuadraticAsInRust) {
986 static const size_t kLargeSize = 1 << 15;
987
988 IntTable t;
989 for (size_t i = 0; i != kLargeSize; ++i) {
990 t.insert(i);
991 }
992
993 // If this is quadratic, the test will timeout.
994 IntTable t2;
995 for (const auto& entry : t) t2.insert(entry);
996 }
997
998 TEST(Table, ClearBug) {
999 IntTable t;
1000 constexpr size_t capacity = container_internal::Group::kWidth - 1;
1001 constexpr size_t max_size = capacity / 2 + 1;
1002 for (size_t i = 0; i < max_size; ++i) {
1003 t.insert(i);
1004 }
1005 ASSERT_EQ(capacity, t.capacity());
1006 intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
1007 t.clear();
1008 ASSERT_EQ(capacity, t.capacity());
1009 for (size_t i = 0; i < max_size; ++i) {
1010 t.insert(i);
1011 }
1012 ASSERT_EQ(capacity, t.capacity());
1013 intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
1014 // We are checking that original and second are close enough to each other
1015 // that they are probably still in the same group. This is not strictly
1016 // guaranteed.
1017 EXPECT_LT(static_cast<size_t>(std::abs(original - second)),
1018 capacity * sizeof(IntTable::value_type));
1019 }
1020
1021 TEST(Table, Erase) {
1022 IntTable t;
1023 EXPECT_TRUE(t.find(0) == t.end());
1024 auto res = t.emplace(0);
1025 EXPECT_TRUE(res.second);
1026 EXPECT_EQ(1, t.size());
1027 t.erase(res.first);
1028 EXPECT_EQ(0, t.size());
1029 EXPECT_TRUE(t.find(0) == t.end());
1030 }
1031
1032 TEST(Table, EraseMaintainsValidIterator) {
1033 IntTable t;
1034 const int kNumElements = 100;
1035 for (int i = 0; i < kNumElements; i ++) {
1036 EXPECT_TRUE(t.emplace(i).second);
1037 }
1038 EXPECT_EQ(t.size(), kNumElements);
1039
1040 int num_erase_calls = 0;
1041 auto it = t.begin();
1042 while (it != t.end()) {
1043 t.erase(it++);
1044 num_erase_calls++;
1045 }
1046
1047 EXPECT_TRUE(t.empty());
1048 EXPECT_EQ(num_erase_calls, kNumElements);
1049 }
1050
1051 // Collect N bad keys by the following algorithm:
1052 // 1. Create an empty table and reserve it to 2 * N.
1053 // 2. Insert N new elements.
1054 // 3. Take the first Group::kWidth - 1 keys into the bad_keys array.
1055 // 4. Clear the table without resizing it.
1056 // 5. Repeat from step 2 until N keys have been collected.
1057 std::vector<int64_t> CollectBadMergeKeys(size_t N) {
1058 static constexpr int kGroupSize = Group::kWidth - 1;
1059
1060 auto topk_range = [](size_t b, size_t e,
1061 IntTable* t) -> std::vector<int64_t> {
1062 for (size_t i = b; i != e; ++i) {
1063 t->emplace(i);
1064 }
1065 std::vector<int64_t> res;
1066 res.reserve(kGroupSize);
1067 auto it = t->begin();
1068 for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
1069 res.push_back(*it);
1070 }
1071 return res;
1072 };
1073
1074 std::vector<int64_t> bad_keys;
1075 bad_keys.reserve(N);
1076 IntTable t;
1077 t.reserve(N * 2);
1078
1079 for (size_t b = 0; bad_keys.size() < N; b += N) {
1080 auto keys = topk_range(b, b + N, &t);
1081 bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
1082 t.erase(t.begin(), t.end());
1083 EXPECT_TRUE(t.empty());
1084 }
1085 return bad_keys;
1086 }
1087
1088 struct ProbeStats {
1089 // Number of elements with specific probe length over all tested tables.
1090 std::vector<size_t> all_probes_histogram;
1091 // Ratios total_probe_length/size for every tested table.
1092 std::vector<double> single_table_ratios;
1093
1094 // Average ratio total_probe_length/size over tables.
1095 double AvgRatio() const {
1096 return std::accumulate(single_table_ratios.begin(),
1097 single_table_ratios.end(), 0.0) /
1098 single_table_ratios.size();
1099 }
1100
1101 // Maximum ratio total_probe_length/size over tables.
1102 double MaxRatio() const {
1103 return *std::max_element(single_table_ratios.begin(),
1104 single_table_ratios.end());
1105 }
1106
1107 // Percentile ratio total_probe_length/size over tables.
1108 double PercentileRatio(double Percentile = 0.95) const {
1109 auto r = single_table_ratios;
1110 auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
1111 if (mid != r.end()) {
1112 std::nth_element(r.begin(), mid, r.end());
1113 return *mid;
1114 } else {
1115 return MaxRatio();
1116 }
1117 }
1118
1119 // Maximum probe length over all elements and all tables.
1120 size_t MaxProbe() const { return all_probes_histogram.size(); }
1121
1122 // Fraction of elements with specified probe length.
1123 std::vector<double> ProbeNormalizedHistogram() const {
1124 double total_elements = std::accumulate(all_probes_histogram.begin(),
1125 all_probes_histogram.end(), 0ull);
1126 std::vector<double> res;
1127 for (size_t p : all_probes_histogram) {
1128 res.push_back(p / total_elements);
1129 }
1130 return res;
1131 }
1132
1133 size_t PercentileProbe(double Percentile = 0.99) const {
1134 size_t idx = 0;
1135 for (double p : ProbeNormalizedHistogram()) {
1136 if (Percentile > p) {
1137 Percentile -= p;
1138 ++idx;
1139 } else {
1140 return idx;
1141 }
1142 }
1143 return idx;
1144 }
1145
1146 friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
1147 out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
1148 << ", PercentileRatio:" << s.PercentileRatio()
1149 << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
1150 for (double p : s.ProbeNormalizedHistogram()) {
1151 out << p << ",";
1152 }
1153 out << "]}";
1154
1155 return out;
1156 }
1157 };
1158
1159 struct ExpectedStats {
1160 double avg_ratio;
1161 double max_ratio;
1162 std::vector<std::pair<double, double>> pecentile_ratios;
1163 std::vector<std::pair<double, double>> pecentile_probes;
1164
1165 friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
1166 out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
1167 << ", PercentileRatios: [";
1168 for (auto el : s.pecentile_ratios) {
1169 out << el.first << ":" << el.second << ", ";
1170 }
1171 out << "], PercentileProbes: [";
1172 for (auto el : s.pecentile_probes) {
1173 out << el.first << ":" << el.second << ", ";
1174 }
1175 out << "]}";
1176
1177 return out;
1178 }
1179 };
1180
1181 void VerifyStats(size_t size, const ExpectedStats& exp,
1182 const ProbeStats& stats) {
1183 EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
1184 EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
1185 for (auto pr : exp.pecentile_ratios) {
1186 EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
1187 << size << " " << pr.first << " " << stats;
1188 }
1189
1190 for (auto pr : exp.pecentile_probes) {
1191 EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
1192 << size << " " << pr.first << " " << stats;
1193 }
1194 }
1195
1196 using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
1197
1198 // Collect total ProbeStats over num_iters iterations of the following algorithm:
1199 // 1. Create a new table and reserve it to keys.size() * 2.
1200 // 2. Insert all keys, each XORed with a per-iteration seed.
1201 // 3. Collect ProbeStats from the final table.
1202 ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
1203 const std::vector<int64_t>& keys, size_t num_iters) {
1204 const size_t reserve_size = keys.size() * 2;
1205
1206 ProbeStats stats;
1207
1208 int64_t seed = 0x71b1a19b907d6e33;
1209 while (num_iters--) {
1210 seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
1211 IntTable t1;
1212 t1.reserve(reserve_size);
1213 for (const auto& key : keys) {
1214 t1.emplace(key ^ seed);
1215 }
1216
1217 auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1218 stats.all_probes_histogram.resize(
1219 std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1220 std::transform(probe_histogram.begin(), probe_histogram.end(),
1221 stats.all_probes_histogram.begin(),
1222 stats.all_probes_histogram.begin(), std::plus<size_t>());
1223
1224 size_t total_probe_seq_length = 0;
1225 for (size_t i = 0; i < probe_histogram.size(); ++i) {
1226 total_probe_seq_length += i * probe_histogram[i];
1227 }
1228 stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1229 keys.size());
1230 t1.erase(t1.begin(), t1.end());
1231 }
1232 return stats;
1233 }
1234
1235 ExpectedStats XorSeedExpectedStats() {
1236 constexpr bool kRandomizesInserts =
1237 #ifdef NDEBUG
1238 false;
1239 #else // NDEBUG
1240 true;
1241 #endif // NDEBUG
1242
1243 // The effective load factor is larger in non-opt mode because we insert
1244 // elements out of order.
1245 switch (container_internal::Group::kWidth) {
1246 case 8:
1247 if (kRandomizesInserts) {
1248 return {0.05,
1249 1.0,
1250 {{0.95, 0.5}},
1251 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1252 } else {
1253 return {0.05,
1254 2.0,
1255 {{0.95, 0.1}},
1256 {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
1257 }
1258 case 16:
1259 if (kRandomizesInserts) {
1260 return {0.1,
1261 2.0,
1262 {{0.95, 0.1}},
1263 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1264 } else {
1265 return {0.05,
1266 1.0,
1267 {{0.95, 0.05}},
1268 {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
1269 }
1270 }
1271 ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
1272 return {};
1273 }
1274
1275 // TODO(b/80415403): Figure out why this test is so flaky, esp. on MSVC
1276 TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
1277 ProbeStatsPerSize stats;
1278 std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1279 for (size_t size : sizes) {
1280 stats[size] =
1281 CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
1282 }
1283 auto expected = XorSeedExpectedStats();
1284 for (size_t size : sizes) {
1285 auto& stat = stats[size];
1286 VerifyStats(size, expected, stat);
1287 LOG(INFO) << size << " " << stat;
1288 }
1289 }
1290
1291 // Collect total ProbeStats over num_iters iterations of the following algorithm:
1292 // 1. Create a new table.
1293 // 2. Select 10% of the keys and, for each, insert the 10 elements key * 17 + j * 13.
1294 // 3. Collect ProbeStats from the final table.
1295 ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
1296 const std::vector<int64_t>& keys, size_t num_iters) {
1297 ProbeStats stats;
1298
1299 std::random_device rd;
1300 std::mt19937 rng(rd());
1301 auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
1302 std::uniform_int_distribution<size_t> dist(0, keys.size()-1);
1303 while (num_iters--) {
1304 IntTable t1;
1305 size_t num_keys = keys.size() / 10;
1306 size_t start = dist(rng);
1307 for (size_t i = 0; i != num_keys; ++i) {
1308 for (size_t j = 0; j != 10; ++j) {
1309 t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
1310 }
1311 }
1312
1313 auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
1314 stats.all_probes_histogram.resize(
1315 std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
1316 std::transform(probe_histogram.begin(), probe_histogram.end(),
1317 stats.all_probes_histogram.begin(),
1318 stats.all_probes_histogram.begin(), std::plus<size_t>());
1319
1320 size_t total_probe_seq_length = 0;
1321 for (size_t i = 0; i < probe_histogram.size(); ++i) {
1322 total_probe_seq_length += i * probe_histogram[i];
1323 }
1324 stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
1325 t1.size());
1326 t1.erase(t1.begin(), t1.end());
1327 }
1328 return stats;
1329 }
1330
1331 ExpectedStats LinearTransformExpectedStats() {
1332 constexpr bool kRandomizesInserts =
1333 #ifdef NDEBUG
1334 false;
1335 #else // NDEBUG
1336 true;
1337 #endif // NDEBUG
1338
1339 // The effective load factor is larger in non-opt mode because we insert
1340 // elements out of order.
1341 switch (container_internal::Group::kWidth) {
1342 case 8:
1343 if (kRandomizesInserts) {
1344 return {0.1,
1345 0.5,
1346 {{0.95, 0.3}},
1347 {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
1348 } else {
1349 return {0.4,
1350 0.6,
1351 {{0.95, 0.5}},
1352 {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}};
1353 }
1354 case 16:
1355 if (kRandomizesInserts) {
1356 return {0.1,
1357 0.4,
1358 {{0.95, 0.3}},
1359 {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}};
1360 } else {
1361 return {0.05,
1362 0.2,
1363 {{0.95, 0.1}},
1364 {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
1365 }
1366 }
1367 ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
1368 return {};
1369 }
1370
1371 // TODO(b/80415403): Figure out why this test is so flaky.
1372 TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
1373 ProbeStatsPerSize stats;
1374 std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
1375 for (size_t size : sizes) {
1376 stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
1377 CollectBadMergeKeys(size), 300);
1378 }
1379 auto expected = LinearTransformExpectedStats();
1380 for (size_t size : sizes) {
1381 auto& stat = stats[size];
1382 VerifyStats(size, expected, stat);
1383 LOG(INFO) << size << " " << stat;
1384 }
1385 }
1386
1387 TEST(Table, EraseCollision) {
1388 BadTable t;
1389
1390 // 1 2 3
1391 t.emplace(1);
1392 t.emplace(2);
1393 t.emplace(3);
1394 EXPECT_THAT(*t.find(1), 1);
1395 EXPECT_THAT(*t.find(2), 2);
1396 EXPECT_THAT(*t.find(3), 3);
1397 EXPECT_EQ(3, t.size());
1398
1399 // 1 DELETED 3
1400 t.erase(t.find(2));
1401 EXPECT_THAT(*t.find(1), 1);
1402 EXPECT_TRUE(t.find(2) == t.end());
1403 EXPECT_THAT(*t.find(3), 3);
1404 EXPECT_EQ(2, t.size());
1405
1406 // DELETED DELETED 3
1407 t.erase(t.find(1));
1408 EXPECT_TRUE(t.find(1) == t.end());
1409 EXPECT_TRUE(t.find(2) == t.end());
1410 EXPECT_THAT(*t.find(3), 3);
1411 EXPECT_EQ(1, t.size());
1412
1413 // DELETED DELETED DELETED
1414 t.erase(t.find(3));
1415 EXPECT_TRUE(t.find(1) == t.end());
1416 EXPECT_TRUE(t.find(2) == t.end());
1417 EXPECT_TRUE(t.find(3) == t.end());
1418 EXPECT_EQ(0, t.size());
1419 }
1420
1421 TEST(Table, EraseInsertProbing) {
1422 BadTable t(100);
1423
1424 // 1 2 3 4
1425 t.emplace(1);
1426 t.emplace(2);
1427 t.emplace(3);
1428 t.emplace(4);
1429
1430 // 1 DELETED 3 DELETED
1431 t.erase(t.find(2));
1432 t.erase(t.find(4));
1433
1434 // 1 10 3 11 12
1435 t.emplace(10);
1436 t.emplace(11);
1437 t.emplace(12);
1438
1439 EXPECT_EQ(5, t.size());
1440 EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
1441 }
1442
1443 TEST(Table, Clear) {
1444 IntTable t;
1445 EXPECT_TRUE(t.find(0) == t.end());
1446 t.clear();
1447 EXPECT_TRUE(t.find(0) == t.end());
1448 auto res = t.emplace(0);
1449 EXPECT_TRUE(res.second);
1450 EXPECT_EQ(1, t.size());
1451 t.clear();
1452 EXPECT_EQ(0, t.size());
1453 EXPECT_TRUE(t.find(0) == t.end());
1454 }
1455
1456 TEST(Table, Swap) {
1457 IntTable t;
1458 EXPECT_TRUE(t.find(0) == t.end());
1459 auto res = t.emplace(0);
1460 EXPECT_TRUE(res.second);
1461 EXPECT_EQ(1, t.size());
1462 IntTable u;
1463 t.swap(u);
1464 EXPECT_EQ(0, t.size());
1465 EXPECT_EQ(1, u.size());
1466 EXPECT_TRUE(t.find(0) == t.end());
1467 EXPECT_THAT(*u.find(0), 0);
1468 }
1469
1470 TEST(Table, Rehash) {
1471 IntTable t;
1472 EXPECT_TRUE(t.find(0) == t.end());
1473 t.emplace(0);
1474 t.emplace(1);
1475 EXPECT_EQ(2, t.size());
1476 t.rehash(128);
1477 EXPECT_EQ(2, t.size());
1478 EXPECT_THAT(*t.find(0), 0);
1479 EXPECT_THAT(*t.find(1), 1);
1480 }
1481
1482 TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
1483 IntTable t;
1484 t.emplace(0);
1485 t.emplace(1);
1486 auto* p = &*t.find(0);
1487 t.rehash(1);
1488 EXPECT_EQ(p, &*t.find(0));
1489 }
1490
1491 TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
1492 IntTable t;
1493 t.rehash(0);
1494 EXPECT_EQ(0, t.bucket_count());
1495 }
1496
1497 TEST(Table, RehashZeroDeallocatesEmptyTable) {
1498 IntTable t;
1499 t.emplace(0);
1500 t.clear();
1501 EXPECT_NE(0, t.bucket_count());
1502 t.rehash(0);
1503 EXPECT_EQ(0, t.bucket_count());
1504 }
1505
1506 TEST(Table, RehashZeroForcesRehash) {
1507 IntTable t;
1508 t.emplace(0);
1509 t.emplace(1);
1510 auto* p = &*t.find(0);
1511 t.rehash(0);
1512 EXPECT_NE(p, &*t.find(0));
1513 }
1514
1515 TEST(Table, ConstructFromInitList) {
1516 using P = std::pair<std::string, std::string>;
1517 struct Q {
1518 operator P() const { return {}; } // NOLINT
1519 };
1520 StringTable t = {P(), Q(), {}, {{}, {}}};
1521 }
1522
1523 TEST(Table, CopyConstruct) {
1524 IntTable t;
1525 t.emplace(0);
1526 EXPECT_EQ(1, t.size());
1527 {
1528 IntTable u(t);
1529 EXPECT_EQ(1, u.size());
1530 EXPECT_THAT(*u.find(0), 0);
1531 }
1532 {
1533 IntTable u{t};
1534 EXPECT_EQ(1, u.size());
1535 EXPECT_THAT(*u.find(0), 0);
1536 }
1537 {
1538 IntTable u = t;
1539 EXPECT_EQ(1, u.size());
1540 EXPECT_THAT(*u.find(0), 0);
1541 }
1542 }
1543
1544 TEST(Table, CopyConstructWithAlloc) {
1545 StringTable t;
1546 t.emplace("a", "b");
1547 EXPECT_EQ(1, t.size());
1548 StringTable u(t, Alloc<std::pair<std::string, std::string>>());
1549 EXPECT_EQ(1, u.size());
1550 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1551 }
1552
1553 struct ExplicitAllocIntTable
1554 : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
1555 std::equal_to<int64_t>, Alloc<int64_t>> {
1556 ExplicitAllocIntTable() = default;
1557 };
1558
1559 TEST(Table, AllocWithExplicitCtor) {
1560 ExplicitAllocIntTable t;
1561 EXPECT_EQ(0, t.size());
1562 }
1563
1564 TEST(Table, MoveConstruct) {
1565 {
1566 StringTable t;
1567 t.emplace("a", "b");
1568 EXPECT_EQ(1, t.size());
1569
1570 StringTable u(std::move(t));
1571 EXPECT_EQ(1, u.size());
1572 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1573 }
1574 {
1575 StringTable t;
1576 t.emplace("a", "b");
1577 EXPECT_EQ(1, t.size());
1578
1579 StringTable u{std::move(t)};
1580 EXPECT_EQ(1, u.size());
1581 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1582 }
1583 {
1584 StringTable t;
1585 t.emplace("a", "b");
1586 EXPECT_EQ(1, t.size());
1587
1588 StringTable u = std::move(t);
1589 EXPECT_EQ(1, u.size());
1590 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1591 }
1592 }
1593
1594 TEST(Table, MoveConstructWithAlloc) {
1595 StringTable t;
1596 t.emplace("a", "b");
1597 EXPECT_EQ(1, t.size());
1598 StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
1599 EXPECT_EQ(1, u.size());
1600 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1601 }
1602
1603 TEST(Table, CopyAssign) {
1604 StringTable t;
1605 t.emplace("a", "b");
1606 EXPECT_EQ(1, t.size());
1607 StringTable u;
1608 u = t;
1609 EXPECT_EQ(1, u.size());
1610 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1611 }
1612
1613 TEST(Table, CopySelfAssign) {
1614 StringTable t;
1615 t.emplace("a", "b");
1616 EXPECT_EQ(1, t.size());
1617 t = *&t;
1618 EXPECT_EQ(1, t.size());
1619 EXPECT_THAT(*t.find("a"), Pair("a", "b"));
1620 }
1621
1622 TEST(Table, MoveAssign) {
1623 StringTable t;
1624 t.emplace("a", "b");
1625 EXPECT_EQ(1, t.size());
1626 StringTable u;
1627 u = std::move(t);
1628 EXPECT_EQ(1, u.size());
1629 EXPECT_THAT(*u.find("a"), Pair("a", "b"));
1630 }
1631
1632 TEST(Table, Equality) {
1633 StringTable t;
1634 std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
1635 {"aa", "bb"}};
1636 t.insert(std::begin(v), std::end(v));
1637 StringTable u = t;
1638 EXPECT_EQ(u, t);
1639 }
1640
TEST(Table,Equality2)1641 TEST(Table, Equality2) {
1642 StringTable t;
1643 std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
1644 {"aa", "bb"}};
1645 t.insert(std::begin(v1), std::end(v1));
1646 StringTable u;
1647 std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
1648 {"aa", "aa"}};
1649 u.insert(std::begin(v2), std::end(v2));
1650 EXPECT_NE(u, t);
1651 }
1652
TEST(Table,Equality3)1653 TEST(Table, Equality3) {
1654 StringTable t;
1655 std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
1656 {"bb", "bb"}};
1657 t.insert(std::begin(v1), std::end(v1));
1658 StringTable u;
1659 std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
1660 {"aa", "aa"}};
1661 u.insert(std::begin(v2), std::end(v2));
1662 EXPECT_NE(u, t);
1663 }
1664
TEST(Table,NumDeletedRegression)1665 TEST(Table, NumDeletedRegression) {
1666 IntTable t;
1667 t.emplace(0);
1668 t.erase(t.find(0));
1669 // construct over a deleted slot.
1670 t.emplace(0);
1671 t.clear();
1672 }
1673
TEST(Table,FindFullDeletedRegression)1674 TEST(Table, FindFullDeletedRegression) {
1675 IntTable t;
1676 for (int i = 0; i < 1000; ++i) {
1677 t.emplace(i);
1678 t.erase(t.find(i));
1679 }
1680 EXPECT_EQ(0, t.size());
1681 }
1682
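// Erasing an element and re-inserting it should reuse the deleted slot rather
// than trigger growth, even when the table is at its growth threshold.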
TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
  size_t n;
  {
    // Compute n such that n is the maximum number of elements before rehash.
    IntTable t;
    t.emplace(0);
    size_t c = t.bucket_count();
    for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
    --n;
  }
  IntTable t;
  t.rehash(n);
  const size_t c = t.bucket_count();
  for (size_t i = 0; i != n; ++i) t.emplace(i);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
  t.erase(0);
  t.emplace(0);
  EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
}

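// The table's nothrow guarantees depend on the hasher, comparator, and
// allocator; the component guarantees are asserted first so a failure points
// at the right type.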
TEST(Table, NoThrowMoveConstruct) {
  ASSERT_TRUE(
      std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<
              std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
  EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
}

TEST(Table, NoThrowMoveAssign) {
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
  ASSERT_TRUE(
      std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
  ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
  ASSERT_TRUE(
      absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
  EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
}

TEST(Table, NoThrowSwappable) {
  ASSERT_TRUE(
      container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<
              std::equal_to<absl::string_view>>());
  ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
  EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
}

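// Without transparent functors the lookup argument is converted to the key
// type before hashing/comparison; with transparent functors the argument is
// used as-is, so 1.1 does not match the stored 1.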
TEST(Table, HeterogeneousLookup) {
  struct Hash {
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const {
      ADD_FAILURE();
      return i;
    }
  };
  struct Eq {
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(int64_t a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
    bool operator()(double a, double b) const {
      ADD_FAILURE();
      return a == b;
    }
  };

  struct THash {
    using is_transparent = void;
    size_t operator()(int64_t i) const { return i; }
    size_t operator()(double i) const { return i; }
  };
  struct TEq {
    using is_transparent = void;
    bool operator()(int64_t a, int64_t b) const { return a == b; }
    bool operator()(double a, int64_t b) const { return a == b; }
    bool operator()(int64_t a, double b) const { return a == b; }
    bool operator()(double a, double b) const { return a == b; }
  };

  raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
  // It will convert to int64_t before the query.
  EXPECT_EQ(1, *s.find(double{1.1}));

  raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
  // It will try to use the double, and fail to find the object.
  EXPECT_TRUE(ts.find(1.1) == ts.end());
}

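// Detection aliases: each is well-formed only if the corresponding member
// accepts a heterogeneous (non-key-type) argument; VerifyResultOf turns that
// into a compile-time boolean.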
template <class Table>
using CallFind = decltype(std::declval<Table&>().find(17));

template <class Table>
using CallErase = decltype(std::declval<Table&>().erase(17));

template <class Table>
using CallExtract = decltype(std::declval<Table&>().extract(17));

template <class Table>
using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));

template <class Table>
using CallCount = decltype(std::declval<Table&>().count(17));

template <template <typename> class C, class Table, class = void>
struct VerifyResultOf : std::false_type {};

template <template <typename> class C, class Table>
struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};

TEST(Table, HeterogeneousLookupOverloads) {
  using NonTransparentTable =
      raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
                   std::equal_to<absl::string_view>, std::allocator<int>>;

  EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
  EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));

  using TransparentTable = raw_hash_set<
      StringPolicy,
      absl::container_internal::hash_default_hash<absl::string_view>,
      absl::container_internal::hash_default_eq<absl::string_view>,
      std::allocator<int>>;

  EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
  EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
}

TEST(Iterator, IsDefaultConstructible) {
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::iterator());
}

TEST(ConstIterator, IsDefaultConstructible) {
  StringTable::const_iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}

TEST(Iterator, ConvertsToConstIterator) {
  StringTable::iterator i;
  EXPECT_TRUE(i == StringTable::const_iterator());
}

TEST(Iterator, Iterates) {
  IntTable t;
  for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
  EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
}

TEST(Table, Merge) {
  StringTable t1, t2;
  t1.emplace("0", "-0");
  t1.emplace("1", "-1");
  t2.emplace("0", "~0");
  t2.emplace("2", "~2");

  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));

  t1.merge(t2);
  EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
                                       Pair("2", "~2")));
  EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}

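// Range insertion only requires the element type to be emplace-constructible
// from the iterator's reference type; here Value is constructible from the
// input strings only via its explicit constructor.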
TEST(Table, IteratorEmplaceConstructibleRequirement) {
  struct Value {
    explicit Value(absl::string_view view) : value(view) {}
    std::string value;

    bool operator==(const Value& other) const { return value == other.value; }
  };
  struct H {
    size_t operator()(const Value& v) const {
      return absl::Hash<std::string>{}(v.value);
    }
  };

  struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
                              std::allocator<Value>> {
    using Base = typename Table::raw_hash_set;
    using Base::Base;
  };

  std::string input[3]{"A", "B", "C"};

  Table t(std::begin(input), std::end(input));
  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));

  input[0] = "D";
  input[1] = "E";
  input[2] = "F";
  t.insert(std::begin(input), std::end(input));
  EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
                                      Value{"D"}, Value{"E"}, Value{"F"}));
}

TEST(Nodes, EmptyNodeType) {
  using node_type = StringTable::node_type;
  node_type n;
  EXPECT_FALSE(n);
  EXPECT_TRUE(n.empty());

  EXPECT_TRUE((std::is_same<node_type::allocator_type,
                            StringTable::allocator_type>::value));
}

TEST(Nodes, ExtractInsert) {
  constexpr char k0[] = "Very long string zero.";
  constexpr char k1[] = "Very long string one.";
  constexpr char k2[] = "Very long string two.";
  StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
  EXPECT_THAT(t,
              UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));

  auto node = t.extract(k0);
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_TRUE(node);
  EXPECT_FALSE(node.empty());

  StringTable t2;
  StringTable::insert_return_type res = t2.insert(std::move(node));
  EXPECT_TRUE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  // Not there.
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  node = t.extract("Not there!");
  EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
  EXPECT_FALSE(node);

  // Inserting nothing.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_EQ(res.position, t2.end());
  EXPECT_FALSE(res.node);
  EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));

  t.emplace(k0, "1");
  node = t.extract(k0);

  // Insert duplicate.
  res = t2.insert(std::move(node));
  EXPECT_FALSE(res.inserted);
  EXPECT_THAT(*res.position, Pair(k0, ""));
  EXPECT_TRUE(res.node);
  EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)
}

TEST(Nodes, HintInsert) {
  IntTable t = {1, 2, 3};
  auto node = t.extract(1);
  EXPECT_THAT(t, UnorderedElementsAre(2, 3));
  auto it = t.insert(t.begin(), std::move(node));
  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
  EXPECT_EQ(*it, 1);
  EXPECT_FALSE(node);  // NOLINT(bugprone-use-after-move)

  node = t.extract(2);
  EXPECT_THAT(t, UnorderedElementsAre(1, 3));
  // reinsert 2 to make the next insert fail.
  t.insert(2);
  EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
  it = t.insert(t.begin(), std::move(node));
  EXPECT_EQ(*it, 2);
  // The node was not emptied by the insert call.
  EXPECT_TRUE(node);  // NOLINT(bugprone-use-after-move)
}

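// Helpers for the iteration-order tests: build a table of consecutive integers
// and record the order in which its elements are visited.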
IntTable MakeSimpleTable(size_t size) {
  IntTable t;
  while (t.size() < size) t.insert(t.size());
  return t;
}

std::vector<int> OrderOfIteration(const IntTable& t) {
  return {t.begin(), t.end()};
}

// These IterationOrderChanges tests depend on non-deterministic behavior.
// We are injecting non-determinism from the pointer of the table, but do so in
// a way that only the page matters. We have to retry enough times to make sure
// we are touching different memory pages to cause the ordering to change.
// We also need to keep the old tables around to avoid getting the same memory
// blocks over and over.
TEST(Table, IterationOrderChangesByInstance) {
  for (size_t size : {2, 6, 12, 20}) {
    const auto reference_table = MakeSimpleTable(size);
    const auto reference = OrderOfIteration(reference_table);

    std::vector<IntTable> tables;
    bool found_difference = false;
    for (int i = 0; !found_difference && i < 5000; ++i) {
      tables.push_back(MakeSimpleTable(size));
      found_difference = OrderOfIteration(tables.back()) != reference;
    }
    if (!found_difference) {
      FAIL()
          << "Iteration order remained the same across many attempts with size "
          << size;
    }
  }
}

TEST(Table, IterationOrderChangesOnRehash) {
  std::vector<IntTable> garbage;
  for (int i = 0; i < 5000; ++i) {
    auto t = MakeSimpleTable(20);
    const auto reference = OrderOfIteration(t);
    // Force rehash to the same size.
    t.rehash(0);
    auto trial = OrderOfIteration(t);
    if (trial != reference) {
      // We are done.
      return;
    }
    garbage.push_back(std::move(t));
  }
  FAIL() << "Iteration order remained the same across many attempts.";
}

// Verify that pointers are invalidated as soon as a second element is inserted.
// This prevents dependency on pointer stability on small tables.
TEST(Table, UnstablePointers) {
  IntTable table;

  const auto addr = [&](int i) {
    return reinterpret_cast<uintptr_t>(&*table.find(i));
  };

  table.insert(0);
  const uintptr_t old_ptr = addr(0);

  // This causes a rehash.
  table.insert(1);

  EXPECT_NE(old_ptr, addr(0));
}

bool IsAssertEnabled() {
  // Use an assert with side-effects to figure out if they are actually enabled.
  bool assert_enabled = false;
  assert([&]() {  // NOLINT
    assert_enabled = true;
    return true;
  }());
  return assert_enabled;
}

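// The death tests below rely on debug assertions inside the table, so they are
// skipped in builds where assert() is compiled out.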
TEST(TableDeathTest, InvalidIteratorAsserts) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  IntTable t;
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  EXPECT_DEATH_IF_SUPPORTED(
      t.erase(t.end()),
      "erase.* called on invalid iterator. The iterator might be an "
      "end.*iterator or may have been default constructed.");
  typename IntTable::iterator iter;
  EXPECT_DEATH_IF_SUPPORTED(
      ++iter,
      "operator.* called on invalid iterator. The iterator might be an "
      "end.*iterator or may have been default constructed.");
  t.insert(0);
  iter = t.begin();
  t.erase(iter);
  EXPECT_DEATH_IF_SUPPORTED(
      ++iter,
      "operator.* called on invalid iterator. The element might have been "
      "erased or .*the table might have rehashed.");
}

TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
  if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";

  IntTable t;
  t.insert(1);
  t.insert(2);
  t.insert(3);
  auto iter1 = t.begin();
  auto iter2 = std::next(iter1);
  ASSERT_NE(iter1, t.end());
  ASSERT_NE(iter2, t.end());
  t.erase(iter1);
  // Extra simple "regexp" as regexp support is highly varied across platforms.
  const char* const kErasedDeathMessage =
      "Invalid iterator comparison. The element might have .*been erased or "
      "the table might have rehashed.";
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
  t.erase(iter2);
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);

  IntTable t1, t2;
  t1.insert(0);
  t2.insert(0);
  iter1 = t1.begin();
  iter2 = t2.begin();
  const char* const kContainerDiffDeathMessage =
      "Invalid iterator comparison. The iterators may be from different "
      ".*containers or the container might have rehashed.";
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);

  for (int i = 0; i < 10; ++i) t1.insert(i);
  // There should have been a rehash in t1.
  EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()),
                            kContainerDiffDeathMessage);
}

#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
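// With the sample parameter set to 100, roughly 1 in 100 tables should be
// sampled; the checks below verify that rate and that the recorded checksums
// and reservations fall into the expected buckets.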
TEST(RawHashSamplerTest, Sample) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;
  absl::flat_hash_set<const HashtablezInfo*> preexisting_info;
  start_size += sampler.Iterate([&](const HashtablezInfo& info) {
    preexisting_info.insert(&info);
    ++start_size;
  });

  std::vector<IntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();

    const bool do_reserve = (i % 10 > 5);
    const bool do_rehash = !do_reserve && (i % 10 > 0);

    if (do_reserve) {
      // Don't reserve on all tables.
      tables.back().reserve(10 * (i % 10));
    }

    tables.back().insert(1);
    tables.back().insert(i % 5);

    if (do_rehash) {
      // Rehash some other tables.
      tables.back().rehash(10 * (i % 10));
    }
  }
  size_t end_size = 0;
  absl::flat_hash_map<size_t, int> observed_checksums;
  absl::flat_hash_map<ssize_t, int> reservations;
  end_size += sampler.Iterate([&](const HashtablezInfo& info) {
    if (preexisting_info.count(&info) == 0) {
      observed_checksums[info.hashes_bitwise_xor.load(
          std::memory_order_relaxed)]++;
      reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
    }
    EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
    ++end_size;
  });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.01, 0.005);
  EXPECT_EQ(observed_checksums.size(), 5);
  for (const auto& [_, count] : observed_checksums) {
    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
  }

  EXPECT_EQ(reservations.size(), 10);
  for (const auto& [reservation, count] : reservations) {
    EXPECT_GE(reservation, 0);
    EXPECT_LT(reservation, 100);

    EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
        << reservation;
  }
}
#endif  // ABSL_INTERNAL_HASHTABLEZ_SAMPLE

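// Tables that use a custom allocator should be excluded from hashtablez
// sampling entirely.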
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
  // Enable the feature even if the prod default is off.
  SetHashtablezEnabled(true);
  SetHashtablezSampleParameter(100);

  auto& sampler = GlobalHashtablezSampler();
  size_t start_size = 0;
  start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });

  std::vector<CustomAllocIntTable> tables;
  for (int i = 0; i < 1000000; ++i) {
    tables.emplace_back();
    tables.back().insert(1);
  }
  size_t end_size = 0;
  end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });

  EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
              0.00, 0.001);
}

#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
  IntTable t;
  t.reserve(5);
  // Insert something to force an allocation.
  int64_t& v1 = *t.insert(0).first;

  // Make sure there is something to test.
  ASSERT_GT(t.capacity(), 1);

  int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
  for (size_t i = 0; i < t.capacity(); ++i) {
    EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
  }
}

TEST(Sanitizer, PoisoningOnErase) {
  IntTable t;
  int64_t& v = *t.insert(0).first;

  EXPECT_FALSE(__asan_address_is_poisoned(&v));
  t.erase(0);
  EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
#endif  // ABSL_HAVE_ADDRESS_SANITIZER

TEST(Table, AlignOne) {
  // We previously had a bug in which we were copying a control byte over the
  // first slot when alignof(value_type) is 1. We test repeated
  // insertions/erases and verify that the behavior is correct.
  Uint8Table t;
  std::unordered_set<uint8_t> verifier;  // NOLINT

  // Do repeated insertions/erases from the table.
  for (int64_t i = 0; i < 100000; ++i) {
    SCOPED_TRACE(i);
    const uint8_t u = (i * -i) & 0xFF;
    auto it = t.find(u);
    auto verifier_it = verifier.find(u);
    if (it == t.end()) {
      ASSERT_EQ(verifier_it, verifier.end());
      t.insert(u);
      verifier.insert(u);
    } else {
      ASSERT_NE(verifier_it, verifier.end());
      t.erase(it);
      verifier.erase(verifier_it);
    }
  }

  EXPECT_EQ(t.size(), verifier.size());
  for (uint8_t u : t) {
    EXPECT_EQ(verifier.count(u), 1);
  }
}

// Invalid iterator use can trigger heap-use-after-free in asan,
// use-of-uninitialized-value in msan, or invalidated iterator assertions.
constexpr const char* kInvalidIteratorDeathMessage =
    "heap-use-after-free|use-of-uninitialized-value|invalidated "
    "iterator|Invalid iterator";

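// kLexan is true for clang in MSVC-compatibility mode (__clang__ and _MSC_VER
// both defined); its death-test regexp support cannot handle the '|' patterns
// used below, so those tests are skipped.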
#if defined(__clang__) && defined(_MSC_VER)
constexpr bool kLexan = true;
#else
constexpr bool kLexan = false;
#endif

TEST(Iterator, InvalidUseCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";

  IntTable t;
  // Start with 1 element so that `it` is never an end iterator.
  t.insert(-1);
  for (int i = 0; i < 10; ++i) {
    auto it = t.begin();
    t.insert(i);
    EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
    EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
                              kInvalidIteratorDeathMessage);
  }
}

TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
  if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
  if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";

  IntTable t;
  t.reserve(10);
  t.insert(0);
  auto it = t.begin();
  // Reserved growth can't rehash.
  for (int i = 1; i < 10; ++i) {
    t.insert(i);
    EXPECT_EQ(*it, 0);
  }
  // ptr will become invalidated on rehash.
  const int64_t* ptr = &*it;

  // erase decreases size but does not decrease reserved growth so the next
  // insertion still invalidates iterators.
  t.erase(0);
  // The first insert after reserved growth is 0 is guaranteed to rehash when
  // generations are enabled.
  t.insert(10);
  EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
                            kInvalidIteratorDeathMessage);
  EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr,
                            "heap-use-after-free|use-of-uninitialized-value");
}

TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
  IntTable t;
  for (int i = 0; i < 8; ++i) t.insert(i);
  // Want to insert twice without invalidating iterators so reserve.
  const size_t cap = t.capacity();
  t.reserve(t.size() + 2);
  // We want to be testing the case in which the reserve doesn't grow the table.
  ASSERT_EQ(cap, t.capacity());
  auto it = t.find(0);
  t.insert(100);
  t.insert(200);
  // `it` shouldn't have been invalidated.
  EXPECT_EQ(*it, 0);
}

}  // namespace
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl