//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <new>
#include <utility>

namespace llvm {

namespace detail {
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
} // end namespace detail

template <
    typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,
    typename Bucket = detail::DenseMapPair<KeyT, ValueT>, bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
public:
  typedef unsigned size_type;
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
      const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    return empty() ? end()
                   : const_iterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }

  bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const {
    return getNumEntries() == 0;
  }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
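  ///
  /// A usage sketch (the element type here is purely illustrative):
  /// \code
  ///   DenseMap<unsigned, unsigned> M;
  ///   M.reserve(100); // At least 100 insertions before any rehash.
  ///   for (unsigned i = 0; i != 100; ++i)
  ///     M[i] = i * i;
  /// \endcode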
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }

  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    unsigned NumEntries = getNumEntries();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
        if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          P->getSecond().~ValueT();
          --NumEntries;
        }
        P->getFirst() = EmptyKey;
      }
    }
    assert(NumEntries == 0 && "Node count imbalance!");
    setNumEntries(0);
    setNumTombstones(0);
  }

  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket) ? 1 : 0;
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
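  ///
  /// A sketch of the intended use, assuming a hypothetical KeyInfoT
  /// (StringKeyInfo below) that also accepts StringRef lookup keys for a
  /// std::string-keyed map:
  /// \code
  ///   DenseMap<std::string, unsigned, StringKeyInfo> M;
  ///   StringRef Name = getName(); // getName() is a placeholder.
  ///   auto It = M.find_as(Name);  // No temporary std::string is built.
  ///   if (It != M.end())
  ///     return It->second;
  /// \endcode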
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
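  ///
  /// Unlike operator[], lookup() never inserts anything. For example
  /// (illustrative):
  /// \code
  ///   DenseMap<unsigned, unsigned> M;
  ///   unsigned V = M.lookup(42); // V == 0 and M is still empty.
  /// \endcode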
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->getSecond();
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
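  //
  // For example (illustrative):
  //   DenseMap<unsigned, unsigned> M;
  //   auto R = M.insert(std::make_pair(1u, 10u)); // R.second == true
  //   R = M.insert(std::make_pair(1u, 20u));      // R.second == false,
  //                                               // M[1] is still 10.
  //   // R.first is an iterator to the entry for key 1 in both cases.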
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(std::move(KV.first),
                                 std::move(KV.second),
                                 TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(std::move(KV.first), std::move(KV.second), Val,
                                 TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), *this, true),
                          true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }


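  /// erase - If \p Val is present, destroy its value, mark the bucket as a
  /// tombstone, and return true; otherwise return false and leave the map
  /// untouched. For example (illustrative):
  /// \code
  ///   DenseMap<unsigned, unsigned> M;
  ///   M[1] = 10;
  ///   bool Erased = M.erase(1); // true, M is now empty.
  ///   Erased = M.erase(2);      // false.
  /// \endcode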
  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

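  /// operator[] - Return a reference to the value for \p Key, default
  /// constructing and inserting one first if \p Key is not already present.
  /// For example (illustrative):
  /// \code
  ///   DenseMap<unsigned, unsigned> M;
  ///   ++M[5]; // Inserts {5, 0}, then increments the value to 1.
  /// \endcode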
  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }

  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(std::move(Key), ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(std::move(Key)).second;
  }

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array. In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
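  ///
  /// A sketch of that idiom (illustrative; K and V are placeholders):
  /// \code
  ///   const void *OldArray = M.getPointerIntoBucketsArray();
  ///   M.insert(std::make_pair(K, V));
  ///   if (!M.isPointerIntoBucketsArray(OldArray)) {
  ///     // The insertion grew the table and invalidated bucket pointers.
  ///   }
  /// \endcode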
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }

  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without needing to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example, if NumEntries is 48, 48 * 4 / 3 + 1 == 65 and
    // NextPowerOf2(65) == 128; 64 buckets would not satisfy 192 < 192.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }

  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        ::new (&getBuckets()[i].getFirst())
            KeyT(other.getBuckets()[i].getFirst());
        if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
          ::new (&getBuckets()[i].getSecond())
              ValueT(other.getBuckets()[i].getSecond());
      }
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = Key;
    ::new (&TheBucket->getSecond()) ValueT(Value);
    return TheBucket;
  }

  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = Key;
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, LookupKeyT &Lookup,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }

  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
                                BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky. For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket. If the
    // table were completely filled with tombstones, no lookup would ever
    // succeed, causing infinite loops in lookup.
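    //
    // For example, with 64 buckets the table is grown when the 48th entry is
    // inserted, since 48 * 4 == 192 >= 64 * 3 == 192, keeping the load factor
    // below 3/4.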
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=
                             NumBuckets/8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket. If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    const BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket? If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the map.
      // Report the bucket an insertion should use and return false.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it. If Val ends up not in the map,
      // we prefer to return it rather than something that would require more
      // probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
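      // The cumulative probe offsets are the triangular numbers 1, 3, 6, 10,
      // ..., which visit every bucket exactly once when the bucket count is a
      // power of two, so the loop terminates as long as some bucket is empty.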
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
                      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the sizes of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

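/// DenseMap is a quadratically probed hash table keyed by \p KeyT. The entry,
/// tombstone, and bucket bookkeeping lives here; the probing logic is
/// inherited from DenseMapBase above.
///
/// A minimal usage sketch (illustrative; process() is a placeholder):
/// \code
///   DenseMap<unsigned, StringRef> M;
///   M[1] = "one";
///   M.insert(std::make_pair(2u, StringRef("two")));
///   for (const auto &KV : M)
///     process(KV.first, KV.second);
/// \endcode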
template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
    return true;
  }
};

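/// A variant of DenseMap that keeps up to \p InlineBuckets buckets of storage
/// inside the map object itself, only switching to a heap-allocated bucket
/// array once it grows past that, analogous to SmallVector.
///
/// For example (illustrative), a map expected to stay small most of the time:
/// \code
///   SmallDenseMap<unsigned, unsigned, 8> M; // 8 inline buckets.
///   M[3] = 9;                               // No heap allocation yet.
/// \endcode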
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast >= InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));

    if (Small) {
      if (AtLeast < InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};

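/// Forward iterator over the occupied buckets of a DenseMap or SmallDenseMap;
/// empty and tombstone buckets are skipped by AdvancePastEmptyBuckets(). When
/// epoch tracking is enabled, the inherited handle is used to assert that the
/// map has not been mutated (e.g. by insert, erase, clear, or grow) since the
/// iterator was obtained.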
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
      value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for any other combination so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const ConstIterator &RHS) const {
    assert((!Ptr || isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return Ptr != RHS.Ptr;
  }

  inline DenseMapIterator& operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) { // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }
};

template<typename KeyT, typename ValueT, typename KeyInfoT>
static inline size_t
capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}

} // end namespace llvm

#endif