// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/container/internal/raw_hash_set.h"

#include <atomic>
#include <cstddef>
#include <cstring>

#include "absl/base/config.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// A single block of empty control bytes for tables without any slots
// allocated. This enables removing a branch in the hot path of find(). We
// have 17 bytes because there may be a generation counter. Any constant is
// fine for the generation counter.
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[17] = {
    ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    static_cast<ctrl_t>(0)};
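
// For an empty table, EmptyGroup() (see ClearBackingArray() below) points the
// control array here: ctrl[capacity] == kSentinel then holds for capacity 0,
// so the regular probe loop terminates with no empty-table special case, and
// the final byte stands in for the generation counter.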

#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif

// Returns "random" seed.
inline size_t RandomSeed() {
#ifdef ABSL_HAVE_THREAD_LOCAL
  static thread_local size_t counter = 0;
  size_t value = ++counter;
#else   // ABSL_HAVE_THREAD_LOCAL
  static std::atomic<size_t> counter(0);
  size_t value = counter.fetch_add(1, std::memory_order_relaxed);
#endif  // ABSL_HAVE_THREAD_LOCAL
  return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
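
// Note on RandomSeed(): the counter makes consecutive seeds differ, and
// XOR-ing in the counter's address mixes in per-process entropy, since with
// ASLR the address varies from run to run. This is cheap probe-order
// variation, not cryptographic randomness.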

bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
  // To avoid problems with weak hashes and single-bit tests, we use % 13.
  // TODO(kfm,sbenza): revisit after we do unconditional mixing
  return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
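
// The test above is a near-even coin flip (true for 6 of the 13 residues)
// that varies by thread and by call; the caller (in raw_hash_set.h) uses it
// to choose which end of a group's empty-slot mask an insertion takes first.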

void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
  assert(ctrl[capacity] == ctrl_t::kSentinel);
  assert(IsValidCapacity(capacity));
  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
  }
  // Copy the cloned ctrl bytes: the first NumClonedBytes() control bytes are
  // mirrored past the sentinel so that group loads which wrap around the end
  // of the table read the correct values.
  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
  ctrl[capacity] = ctrl_t::kSentinel;
}
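
// Post-condition of the copy above, for capacity C (a sketch; the exact clone
// count is NumClonedBytes() from raw_hash_set.h):
//
//   ctrl[0] .. ctrl[C-1]   the real control bytes
//   ctrl[C]                kSentinel
//   ctrl[C+1] ..           clones of ctrl[0] .. ctrl[NumClonedBytes()-1]
//
// The clones let a Group load that starts near the end of the table see the
// wrapped-around bytes without extra masking.
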
// Extern template instantiation for inline function.
template FindInfo find_first_non_full(const CommonFields&, size_t);

FindInfo find_first_non_full_outofline(const CommonFields& common,
                                       size_t hash) {
  return find_first_non_full(common, hash);
}

// Returns the address of the ith slot in `slot_array`, where each slot
// occupies `slot_size` bytes.
static inline void* SlotAddress(void* slot_array, size_t slot,
                                size_t slot_size) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
                                 (slot * slot_size));
}

// Returns the address of the slot just after `slot`, assuming each slot has
// the specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) + slot_size);
}

// Returns the address of the slot just before `slot`, assuming each slot has
// the specified size.
static inline void* PrevSlot(void* slot, size_t slot_size) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}

void DropDeletesWithoutResize(CommonFields& common,
                              const PolicyFunctions& policy, void* tmp_space) {
  void* set = &common;
  void* slot_array = common.slots_;
  const size_t capacity = common.capacity_;
  assert(IsValidCapacity(capacity));
  assert(!is_small(capacity));
  // Algorithm:
  // - mark all DELETED slots as EMPTY
  // - mark all FULL slots as DELETED
  // - for each slot marked as DELETED
  //     hash = Hash(element)
  //     target = find_first_non_full(hash)
  //     if target is in the same group
  //       mark slot as FULL
  //     else if target is EMPTY
  //       transfer element to target
  //       mark slot as EMPTY
  //       mark target as FULL
  //     else if target is DELETED
  //       swap current element with target element
  //       mark target as FULL
  //       repeat procedure for current slot with moved from element (target)
  ctrl_t* ctrl = common.control_;
  ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
  auto hasher = policy.hash_slot;
  auto transfer = policy.transfer;
  const size_t slot_size = policy.slot_size;

  size_t total_probe_length = 0;
  void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
  for (size_t i = 0; i != capacity;
       ++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
    assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
    if (!IsDeleted(ctrl[i])) continue;
    const size_t hash = (*hasher)(set, slot_ptr);
    const FindInfo target = find_first_non_full(common, hash);
    const size_t new_i = target.offset;
    total_probe_length += target.probe_length;

    // Check whether the old position and the new one fall within the same
    // group with respect to the hash. If they do, we don't need to move the
    // object, as it already sits in the best probe group we can place it in.
    const size_t probe_offset = probe(common, hash).offset();
    const auto probe_index = [probe_offset, capacity](size_t pos) {
      return ((pos - probe_offset) & capacity) / Group::kWidth;
    };
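    // IsValidCapacity() guarantees `capacity` has the form 2^k - 1, so it
    // doubles as a bit mask: `(pos - probe_offset) & capacity` is the probe
    // distance modulo the table size, and dividing by Group::kWidth buckets
    // that distance into whole groups.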

    // Element doesn't move.
    if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
      SetCtrl(common, i, H2(hash), slot_size);
      continue;
    }

    void* new_slot_ptr = SlotAddress(slot_array, new_i, slot_size);
    if (IsEmpty(ctrl[new_i])) {
      // Transfer element to the empty spot.
      // SetCtrl poisons/unpoisons the slots so we have to call it at the
      // right time.
      SetCtrl(common, new_i, H2(hash), slot_size);
      (*transfer)(set, new_slot_ptr, slot_ptr);
      SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
    } else {
      assert(IsDeleted(ctrl[new_i]));
      SetCtrl(common, new_i, H2(hash), slot_size);
      // Until we are done rehashing, DELETED marks previously FULL slots.

      // Swap i and new_i elements.
      (*transfer)(set, tmp_space, new_slot_ptr);
      (*transfer)(set, new_slot_ptr, slot_ptr);
      (*transfer)(set, slot_ptr, tmp_space);
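      // Net effect: tmp = new, new = old, old = tmp -- a three-move swap
      // through the caller-provided scratch slot, using the policy's transfer
      // function since slot types need not be trivially copyable.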

      // Repeat the processing of the i-th slot with the moved-from element.
      --i;
      slot_ptr = PrevSlot(slot_ptr, slot_size);
    }
  }
  ResetGrowthLeft(common);
  common.infoz().RecordRehash(total_probe_length);
}

void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
  assert(IsFull(*it) && "erasing a dangling iterator");
  --c.size_;
  const auto index = static_cast<size_t>(it - c.control_);
  const size_t index_before = (index - Group::kWidth) & c.capacity_;
  const auto empty_after = Group(it).MaskEmpty();
  const auto empty_before = Group(c.control_ + index_before).MaskEmpty();

  // We count how many consecutive non-empty slots we have to the right and to
  // the left of `it`. If the sum is >= kWidth, then there is at least one
  // probe window that might have seen a full group.
  bool was_never_full = empty_before && empty_after &&
                        static_cast<size_t>(empty_after.TrailingZeros()) +
                                empty_before.LeadingZeros() <
                            Group::kWidth;
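  // For example, with Group::kWidth == 16: if the run of full slots around
  // `it` spans fewer than 16 positions, every 16-byte probe window that
  // covers `it` also contains an empty byte, so no probe sequence can ever
  // have continued past a completely full group here; the slot can safely
  // become kEmpty rather than a kDeleted tombstone.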

  SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
          slot_size);
  c.growth_left() += (was_never_full ? 1 : 0);
  c.infoz().RecordErase();
}

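// Resets the table's metadata. With `reuse` set, the backing array is kept
// and only the control state is reset; otherwise the backing array is freed
// via the policy's dealloc and the table returns to the shared empty-group
// state.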
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
                       bool reuse) {
  c.size_ = 0;
  if (reuse) {
    ResetCtrl(c, policy.slot_size);
    c.infoz().RecordStorageChanged(0, c.capacity_);
  } else {
    void* set = &c;
    (*policy.dealloc)(set, policy, c.control_, c.slots_, c.capacity_);
    c.control_ = EmptyGroup();
    c.set_generation_ptr(EmptyGeneration());
    c.slots_ = nullptr;
    c.capacity_ = 0;
    c.growth_left() = 0;
    c.infoz().RecordClearedReservation();
    assert(c.size_ == 0);
    c.infoz().RecordStorageChanged(0, 0);
  }
}

}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl