//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//
//
// Taken from clang-1100.247.11.10.9
16 #ifndef LLVM_ADT_DENSEMAP_H
17 #define LLVM_ADT_DENSEMAP_H
19 #include "llvm-type_traits.h"
20 #include "llvm-MathExtras.h"
21 #include "llvm-AlignOf.h"
22 #include "llvm-DenseMapInfo.h"
29 #include <type_traits>
31 #include <TargetConditionals.h>
33 #include "objc-private.h"
36 #define MIN_COMPACT 1024
38 #define LLVM_UNLIKELY slowpath
39 #define LLVM_LIKELY fastpath
45 // We extend a pair to allow users to override the bucket type with their own
46 // implementation without requiring two members.
47 template <typename KeyT
, typename ValueT
>
48 struct DenseMapPair
: public std::pair
<KeyT
, ValueT
> {
50 // FIXME: Switch to inheriting constructors when we drop support for older
52 // NOTE: This default constructor is declared with '{}' rather than
53 // '= default' to work around a separate bug in clang-3.8. This can
54 // also go when we switch to inheriting constructors.
57 DenseMapPair(const KeyT
&Key
, const ValueT
&Value
)
58 : std::pair
<KeyT
, ValueT
>(Key
, Value
) {}
60 DenseMapPair(KeyT
&&Key
, ValueT
&&Value
)
61 : std::pair
<KeyT
, ValueT
>(std::move(Key
), std::move(Value
)) {}
63 template <typename AltKeyT
, typename AltValueT
>
64 DenseMapPair(AltKeyT
&&AltKey
, AltValueT
&&AltValue
,
65 typename
std::enable_if
<
66 std::is_convertible
<AltKeyT
, KeyT
>::value
&&
67 std::is_convertible
<AltValueT
, ValueT
>::value
>::type
* = 0)
68 : std::pair
<KeyT
, ValueT
>(std::forward
<AltKeyT
>(AltKey
),
69 std::forward
<AltValueT
>(AltValue
)) {}
71 template <typename AltPairT
>
72 DenseMapPair(AltPairT
&&AltPair
,
73 typename
std::enable_if
<std::is_convertible
<
74 AltPairT
, std::pair
<KeyT
, ValueT
>>::value
>::type
* = 0)
75 : std::pair
<KeyT
, ValueT
>(std::forward
<AltPairT
>(AltPair
)) {}
77 KeyT
&getFirst() { return std::pair
<KeyT
, ValueT
>::first
; }
78 const KeyT
&getFirst() const { return std::pair
<KeyT
, ValueT
>::first
; }
79 ValueT
&getSecond() { return std::pair
<KeyT
, ValueT
>::second
; }
80 const ValueT
&getSecond() const { return std::pair
<KeyT
, ValueT
>::second
; }
83 } // end namespace detail
86 typename KeyT
, typename ValueT
,
87 typename ValueInfoT
= DenseMapValueInfo
<ValueT
>,
88 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
89 typename Bucket
= detail::DenseMapPair
<KeyT
, ValueT
>,
91 class DenseMapIterator
;
93 // ValueInfoT is used by the refcount table.
94 // A key/value pair with value==0 is not required to be stored
95 // in the refcount table; it could correctly be erased instead.
96 // For performance, we do keep zero values in the table when the
97 // true refcount decreases to 1: this makes any future retain faster.
98 // For memory size, we allow rehashes and table insertions to
99 // remove a zero value as if it were a tombstone.
100 template <typename DerivedT
, typename KeyT
, typename ValueT
,
101 typename ValueInfoT
, typename KeyInfoT
, typename BucketT
>
103 template <typename T
>
104 using const_arg_type_t
= typename const_pointer_or_const_ref
<T
>::type
;
107 using size_type
= unsigned;
108 using key_type
= KeyT
;
109 using mapped_type
= ValueT
;
110 using value_type
= BucketT
;
112 using iterator
= DenseMapIterator
<KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
>;
113 using const_iterator
=
114 DenseMapIterator
<KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
, true>;
116 inline iterator
begin() {
117 // When the map is empty, avoid the overhead of advancing/retreating past
121 return makeIterator(getBuckets(), getBucketsEnd());
123 inline iterator
end() {
124 return makeIterator(getBucketsEnd(), getBucketsEnd(), true);
126 inline const_iterator
begin() const {
129 return makeConstIterator(getBuckets(), getBucketsEnd());
131 inline const_iterator
end() const {
132 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), true);
136 return getNumEntries() == 0;
138 unsigned size() const { return getNumEntries(); }
140 /// Grow the densemap so that it can contain at least \p NumEntries items
141 /// before resizing again.
142 void reserve(size_type NumEntries
) {
143 auto NumBuckets
= getMinBucketToReserveForEntries(NumEntries
);
144 if (NumBuckets
> getNumBuckets())
149 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
151 // If the capacity of the array is huge, and the # elements used is small,
153 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > MIN_BUCKETS
) {
158 const KeyT EmptyKey
= getEmptyKey(), TombstoneKey
= getTombstoneKey();
159 if (is_trivially_copyable
<KeyT
>::value
&&
160 is_trivially_copyable
<ValueT
>::value
) {
161 // Use a simpler loop when these are trivial types.
162 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
)
163 P
->getFirst() = EmptyKey
;
165 unsigned NumEntries
= getNumEntries();
166 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
) {
167 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
)) {
168 if (!KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
)) {
169 P
->getSecond().~ValueT();
172 P
->getFirst() = EmptyKey
;
175 ASSERT(NumEntries
== 0 && "Node count imbalance!");
181 /// Return 1 if the specified key is in the map, 0 otherwise.
182 size_type
count(const_arg_type_t
<KeyT
> Val
) const {
183 const BucketT
*TheBucket
;
184 return LookupBucketFor(Val
, TheBucket
) ? 1 : 0;
187 iterator
find(const_arg_type_t
<KeyT
> Val
) {
189 if (LookupBucketFor(Val
, TheBucket
))
190 return makeIterator(TheBucket
, getBucketsEnd(), true);
193 const_iterator
find(const_arg_type_t
<KeyT
> Val
) const {
194 const BucketT
*TheBucket
;
195 if (LookupBucketFor(Val
, TheBucket
))
196 return makeConstIterator(TheBucket
, getBucketsEnd(), true);
200 /// Alternate version of find() which allows a different, and possibly
201 /// less expensive, key type.
202 /// The DenseMapInfo is responsible for supplying methods
203 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
205 template<class LookupKeyT
>
206 iterator
find_as(const LookupKeyT
&Val
) {
208 if (LookupBucketFor(Val
, TheBucket
))
209 return makeIterator(TheBucket
, getBucketsEnd(), true);
212 template<class LookupKeyT
>
213 const_iterator
find_as(const LookupKeyT
&Val
) const {
214 const BucketT
*TheBucket
;
215 if (LookupBucketFor(Val
, TheBucket
))
216 return makeConstIterator(TheBucket
, getBucketsEnd(), true);
220 /// lookup - Return the entry for the specified key, or a default
221 /// constructed value if no such entry exists.
222 ValueT
lookup(const_arg_type_t
<KeyT
> Val
) const {
223 const BucketT
*TheBucket
;
224 if (LookupBucketFor(Val
, TheBucket
))
225 return TheBucket
->getSecond();
229 // Inserts key,value pair into the map if the key isn't already in the map.
230 // If the key is already in the map, it returns false and doesn't update the
232 std::pair
<iterator
, bool> insert(const std::pair
<KeyT
, ValueT
> &KV
) {
233 return try_emplace(KV
.first
, KV
.second
);
236 // Inserts key,value pair into the map if the key isn't already in the map.
237 // If the key is already in the map, it returns false and doesn't update the
239 std::pair
<iterator
, bool> insert(std::pair
<KeyT
, ValueT
> &&KV
) {
240 return try_emplace(std::move(KV
.first
), std::move(KV
.second
));
243 // Inserts key,value pair into the map if the key isn't already in the map.
244 // The value is constructed in-place if the key is not in the map, otherwise
246 template <typename
... Ts
>
247 std::pair
<iterator
, bool> try_emplace(KeyT
&&Key
, Ts
&&... Args
) {
249 if (LookupBucketFor(Key
, TheBucket
))
250 return std::make_pair(
251 makeIterator(TheBucket
, getBucketsEnd(), true),
252 false); // Already in map.
254 // Otherwise, insert the new element.
256 InsertIntoBucket(TheBucket
, std::move(Key
), std::forward
<Ts
>(Args
)...);
257 return std::make_pair(
258 makeIterator(TheBucket
, getBucketsEnd(), true),
262 // Inserts key,value pair into the map if the key isn't already in the map.
263 // The value is constructed in-place if the key is not in the map, otherwise
265 template <typename
... Ts
>
266 std::pair
<iterator
, bool> try_emplace(const KeyT
&Key
, Ts
&&... Args
) {
268 if (LookupBucketFor(Key
, TheBucket
))
269 return std::make_pair(
270 makeIterator(TheBucket
, getBucketsEnd(), true),
271 false); // Already in map.
273 // Otherwise, insert the new element.
274 TheBucket
= InsertIntoBucket(TheBucket
, Key
, std::forward
<Ts
>(Args
)...);
275 return std::make_pair(
276 makeIterator(TheBucket
, getBucketsEnd(), true),
280 /// Alternate version of insert() which allows a different, and possibly
281 /// less expensive, key type.
282 /// The DenseMapInfo is responsible for supplying methods
283 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
285 template <typename LookupKeyT
>
286 std::pair
<iterator
, bool> insert_as(std::pair
<KeyT
, ValueT
> &&KV
,
287 const LookupKeyT
&Val
) {
289 if (LookupBucketFor(Val
, TheBucket
))
290 return std::make_pair(
291 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
292 false); // Already in map.
294 // Otherwise, insert the new element.
295 TheBucket
= InsertIntoBucketWithLookup(TheBucket
, std::move(KV
.first
),
296 std::move(KV
.second
), Val
);
297 return std::make_pair(
298 makeIterator(TheBucket
, getBucketsEnd(), *this, true),
302 /// insert - Range insertion of pairs.
303 template<typename InputIt
>
304 void insert(InputIt I
, InputIt E
) {
310 // Shrink if at least 15/16 empty and larger than MIN_COMPACT.
312 if (getNumEntries() == 0) {
315 else if (getNumBuckets() / 16 > getNumEntries() &&
316 getNumBuckets() > MIN_COMPACT
)
318 grow(getNumEntries() * 2);
322 bool erase(const KeyT
&Val
) {
324 if (!LookupBucketFor(Val
, TheBucket
))
325 return false; // not in map.
327 TheBucket
->getSecond().~ValueT();
328 TheBucket
->getFirst() = getTombstoneKey();
329 decrementNumEntries();
330 incrementNumTombstones();
334 void erase(iterator I
) {
335 BucketT
*TheBucket
= &*I
;
336 TheBucket
->getSecond().~ValueT();
337 TheBucket
->getFirst() = getTombstoneKey();
338 decrementNumEntries();
339 incrementNumTombstones();
343 value_type
& FindAndConstruct(const KeyT
&Key
) {
345 if (LookupBucketFor(Key
, TheBucket
))
348 return *InsertIntoBucket(TheBucket
, Key
);
351 ValueT
&operator[](const KeyT
&Key
) {
352 return FindAndConstruct(Key
).second
;
355 value_type
& FindAndConstruct(KeyT
&&Key
) {
357 if (LookupBucketFor(Key
, TheBucket
))
360 return *InsertIntoBucket(TheBucket
, std::move(Key
));
363 ValueT
&operator[](KeyT
&&Key
) {
364 return FindAndConstruct(std::move(Key
)).second
;
367 /// isPointerIntoBucketsArray - Return true if the specified pointer points
368 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
369 /// value in the DenseMap).
370 bool isPointerIntoBucketsArray(const void *Ptr
) const {
371 return Ptr
>= getBuckets() && Ptr
< getBucketsEnd();
374 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
375 /// array. In conjunction with the previous method, this can be used to
376 /// determine whether an insertion caused the DenseMap to reallocate.
377 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
380 DenseMapBase() = default;
383 if (getNumBuckets() == 0) // Nothing to do.
386 const KeyT EmptyKey
= getEmptyKey(), TombstoneKey
= getTombstoneKey();
387 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
) {
388 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
) &&
389 !KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
))
390 P
->getSecond().~ValueT();
391 P
->getFirst().~KeyT();
399 ASSERT((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
400 "# initial buckets must be a power of two!");
401 const KeyT EmptyKey
= getEmptyKey();
402 for (BucketT
*B
= getBuckets(), *E
= getBucketsEnd(); B
!= E
; ++B
)
403 ::new (&B
->getFirst()) KeyT(EmptyKey
);
406 /// Returns the number of buckets to allocate to ensure that the DenseMap can
407 /// accommodate \p NumEntries without need to grow().
408 unsigned getMinBucketToReserveForEntries(unsigned NumEntries
) {
409 // Ensure that "NumEntries * 4 < NumBuckets * 3"
412 // +1 is required because of the strict equality.
413 // For example if NumEntries is 48, we need to return 401.
414 return NextPowerOf2(NumEntries
* 4 / 3 + 1);
417 void moveFromOldBuckets(BucketT
*OldBucketsBegin
, BucketT
*OldBucketsEnd
) {
420 // Insert all the old elements.
421 const KeyT EmptyKey
= getEmptyKey();
422 const KeyT TombstoneKey
= getTombstoneKey();
423 for (BucketT
*B
= OldBucketsBegin
, *E
= OldBucketsEnd
; B
!= E
; ++B
) {
424 if (ValueInfoT::isPurgeable(B
->getSecond())) {
426 B
->getSecond().~ValueT();
427 } else if (!KeyInfoT::isEqual(B
->getFirst(), EmptyKey
) &&
428 !KeyInfoT::isEqual(B
->getFirst(), TombstoneKey
)) {
429 // Insert the key/value into the new table.
431 bool FoundVal
= LookupBucketFor(B
->getFirst(), DestBucket
);
432 (void)FoundVal
; // silence warning.
433 ASSERT(!FoundVal
&& "Key already in new map?");
434 DestBucket
->getFirst() = std::move(B
->getFirst());
435 ::new (&DestBucket
->getSecond()) ValueT(std::move(B
->getSecond()));
436 incrementNumEntries();
439 B
->getSecond().~ValueT();
441 B
->getFirst().~KeyT();
445 template <typename OtherBaseT
>
447 const DenseMapBase
<OtherBaseT
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> &other
) {
448 ASSERT(&other
!= this);
449 ASSERT(getNumBuckets() == other
.getNumBuckets());
451 setNumEntries(other
.getNumEntries());
452 setNumTombstones(other
.getNumTombstones());
454 if (is_trivially_copyable
<KeyT
>::value
&&
455 is_trivially_copyable
<ValueT
>::value
)
456 memcpy(reinterpret_cast<void *>(getBuckets()), other
.getBuckets(),
457 getNumBuckets() * sizeof(BucketT
));
459 for (size_t i
= 0; i
< getNumBuckets(); ++i
) {
460 ::new (&getBuckets()[i
].getFirst())
461 KeyT(other
.getBuckets()[i
].getFirst());
462 if (!KeyInfoT::isEqual(getBuckets()[i
].getFirst(), getEmptyKey()) &&
463 !KeyInfoT::isEqual(getBuckets()[i
].getFirst(), getTombstoneKey()))
464 ::new (&getBuckets()[i
].getSecond())
465 ValueT(other
.getBuckets()[i
].getSecond());
469 static unsigned getHashValue(const KeyT
&Val
) {
470 return KeyInfoT::getHashValue(Val
);
473 template<typename LookupKeyT
>
474 static unsigned getHashValue(const LookupKeyT
&Val
) {
475 return KeyInfoT::getHashValue(Val
);
478 static const KeyT
getEmptyKey() {
479 static_assert(std::is_base_of
<DenseMapBase
, DerivedT
>::value
,
480 "Must pass the derived type to this template!");
481 return KeyInfoT::getEmptyKey();
484 static const KeyT
getTombstoneKey() {
485 return KeyInfoT::getTombstoneKey();
489 iterator
makeIterator(BucketT
*P
, BucketT
*E
,
490 bool NoAdvance
=false) {
491 return iterator(P
, E
, NoAdvance
);
494 const_iterator
makeConstIterator(const BucketT
*P
, const BucketT
*E
,
495 const bool NoAdvance
=false) const {
496 return const_iterator(P
, E
, NoAdvance
);
499 unsigned getNumEntries() const {
500 return static_cast<const DerivedT
*>(this)->getNumEntries();
503 void setNumEntries(unsigned Num
) {
504 static_cast<DerivedT
*>(this)->setNumEntries(Num
);
507 void incrementNumEntries() {
508 setNumEntries(getNumEntries() + 1);
511 void decrementNumEntries() {
512 setNumEntries(getNumEntries() - 1);
515 unsigned getNumTombstones() const {
516 return static_cast<const DerivedT
*>(this)->getNumTombstones();
519 void setNumTombstones(unsigned Num
) {
520 static_cast<DerivedT
*>(this)->setNumTombstones(Num
);
523 void incrementNumTombstones() {
524 setNumTombstones(getNumTombstones() + 1);
527 void decrementNumTombstones() {
528 setNumTombstones(getNumTombstones() - 1);
531 const BucketT
*getBuckets() const {
532 return static_cast<const DerivedT
*>(this)->getBuckets();
535 BucketT
*getBuckets() {
536 return static_cast<DerivedT
*>(this)->getBuckets();
539 unsigned getNumBuckets() const {
540 return static_cast<const DerivedT
*>(this)->getNumBuckets();
543 BucketT
*getBucketsEnd() {
544 return getBuckets() + getNumBuckets();
547 const BucketT
*getBucketsEnd() const {
548 return getBuckets() + getNumBuckets();
551 void grow(unsigned AtLeast
) {
552 static_cast<DerivedT
*>(this)->grow(AtLeast
);
555 void shrink_and_clear() {
556 static_cast<DerivedT
*>(this)->shrink_and_clear();
559 template <typename KeyArg
, typename
... ValueArgs
>
560 BucketT
*InsertIntoBucket(BucketT
*TheBucket
, KeyArg
&&Key
,
561 ValueArgs
&&... Values
) {
562 TheBucket
= InsertIntoBucketImpl(Key
, Key
, TheBucket
);
564 TheBucket
->getFirst() = std::forward
<KeyArg
>(Key
);
565 ::new (&TheBucket
->getSecond()) ValueT(std::forward
<ValueArgs
>(Values
)...);
569 template <typename LookupKeyT
>
570 BucketT
*InsertIntoBucketWithLookup(BucketT
*TheBucket
, KeyT
&&Key
,
571 ValueT
&&Value
, LookupKeyT
&Lookup
) {
572 TheBucket
= InsertIntoBucketImpl(Key
, Lookup
, TheBucket
);
574 TheBucket
->getFirst() = std::move(Key
);
575 ::new (&TheBucket
->getSecond()) ValueT(std::move(Value
));
579 template <typename LookupKeyT
>
580 BucketT
*InsertIntoBucketImpl(const KeyT
&Key
, const LookupKeyT
&Lookup
,
581 BucketT
*TheBucket
) {
582 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
583 // the buckets are empty (meaning that many are filled with tombstones),
586 // The later case is tricky. For example, if we had one empty bucket with
587 // tons of tombstones, failing lookups (e.g. for insertion) would have to
588 // probe almost the entire table until it found the empty bucket. If the
589 // table completely filled with tombstones, no lookup would ever succeed,
590 // causing infinite loops in lookup.
591 unsigned NewNumEntries
= getNumEntries() + 1;
592 unsigned NumBuckets
= getNumBuckets();
593 if (LLVM_UNLIKELY(NewNumEntries
* 4 >= NumBuckets
* 3)) {
594 this->grow(NumBuckets
* 2);
595 LookupBucketFor(Lookup
, TheBucket
);
596 NumBuckets
= getNumBuckets();
597 } else if (LLVM_UNLIKELY(NumBuckets
-(NewNumEntries
+getNumTombstones()) <=
599 this->grow(NumBuckets
);
600 LookupBucketFor(Lookup
, TheBucket
);
604 // Only update the state after we've grown our bucket space appropriately
605 // so that when growing buckets we have self-consistent entry count.
606 // If we are writing over a tombstone or zero value, remember this.
607 if (KeyInfoT::isEqual(TheBucket
->getFirst(), getEmptyKey())) {
608 // Replacing an empty bucket.
609 incrementNumEntries();
610 } else if (KeyInfoT::isEqual(TheBucket
->getFirst(), getTombstoneKey())) {
611 // Replacing a tombstone.
612 incrementNumEntries();
613 decrementNumTombstones();
615 // we should be purging a zero. No accounting changes.
616 ASSERT(ValueInfoT::isPurgeable(TheBucket
->getSecond()));
617 TheBucket
->getSecond().~ValueT();
623 __attribute__((noinline
, noreturn
, cold
))
624 void FatalCorruptHashTables(const BucketT
*BucketsPtr
, unsigned NumBuckets
) const
626 _objc_fatal("Hash table corrupted. This is probably a memory error "
627 "somewhere. (table at %p, buckets at %p (%zu bytes), "
628 "%u buckets, %u entries, %u tombstones, "
630 this, BucketsPtr
, malloc_size(BucketsPtr
),
631 NumBuckets
, getNumEntries(), getNumTombstones(),
632 ((void**)BucketsPtr
)[0], ((void**)BucketsPtr
)[1],
633 ((void**)BucketsPtr
)[2], ((void**)BucketsPtr
)[3]);
636 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
637 /// FoundBucket. If the bucket contains the key and a value, this returns
638 /// true, otherwise it returns a bucket with an empty marker or tombstone and
640 template<typename LookupKeyT
>
641 bool LookupBucketFor(const LookupKeyT
&Val
,
642 const BucketT
*&FoundBucket
) const {
643 const BucketT
*BucketsPtr
= getBuckets();
644 const unsigned NumBuckets
= getNumBuckets();
646 if (NumBuckets
== 0) {
647 FoundBucket
= nullptr;
651 // FoundTombstone - Keep track of whether we find a tombstone while probing.
652 const BucketT
*FoundTombstone
= nullptr;
653 const KeyT EmptyKey
= getEmptyKey();
654 const KeyT TombstoneKey
= getTombstoneKey();
655 assert(!KeyInfoT::isEqual(Val
, EmptyKey
) &&
656 !KeyInfoT::isEqual(Val
, TombstoneKey
) &&
657 "Empty/Tombstone value shouldn't be inserted into map!");
659 unsigned BucketNo
= getHashValue(Val
) & (NumBuckets
-1);
660 unsigned ProbeAmt
= 1;
662 const BucketT
*ThisBucket
= BucketsPtr
+ BucketNo
;
663 // Found Val's bucket? If so, return it.
664 if (LLVM_LIKELY(KeyInfoT::isEqual(Val
, ThisBucket
->getFirst()))) {
665 FoundBucket
= ThisBucket
;
669 // If we found an empty bucket, the key doesn't exist in the set.
670 // Insert it and return the default value.
671 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket
->getFirst(), EmptyKey
))) {
672 // If we've already seen a tombstone while probing, fill it in instead
673 // of the empty bucket we eventually probed to.
674 FoundBucket
= FoundTombstone
? FoundTombstone
: ThisBucket
;
678 // If this is a tombstone, remember it. If Val ends up not in the map, we
679 // prefer to return it than something that would require more probing.
680 // Ditto for zero values.
681 if (KeyInfoT::isEqual(ThisBucket
->getFirst(), TombstoneKey
) &&
683 FoundTombstone
= ThisBucket
; // Remember the first tombstone found.
684 if (ValueInfoT::isPurgeable(ThisBucket
->getSecond()) && !FoundTombstone
)
685 FoundTombstone
= ThisBucket
;
687 // Otherwise, it's a hash collision or a tombstone, continue quadratic
689 if (ProbeAmt
> NumBuckets
) {
690 FatalCorruptHashTables(BucketsPtr
, NumBuckets
);
692 BucketNo
+= ProbeAmt
++;
693 BucketNo
&= (NumBuckets
-1);
697 template <typename LookupKeyT
>
698 bool LookupBucketFor(const LookupKeyT
&Val
, BucketT
*&FoundBucket
) {
699 const BucketT
*ConstFoundBucket
;
700 bool Result
= const_cast<const DenseMapBase
*>(this)
701 ->LookupBucketFor(Val
, ConstFoundBucket
);
702 FoundBucket
= const_cast<BucketT
*>(ConstFoundBucket
);
707 /// Return the approximate size (in bytes) of the actual map.
708 /// This is just the raw memory used by DenseMap.
709 /// If entries are pointers to objects, the size of the referenced objects
710 /// are not included.
711 size_t getMemorySize() const {
712 return getNumBuckets() * sizeof(BucketT
);
716 /// Equality comparison for DenseMap.
718 /// Iterates over elements of LHS confirming that each (key, value) pair in LHS
719 /// is also in RHS, and that no additional pairs are in RHS.
720 /// Equivalent to N calls to RHS.find and N value comparisons. Amortized
721 /// complexity is linear, worst case is O(N^2) (if every hash collides).
722 template <typename DerivedT
, typename KeyT
, typename ValueT
,
723 typename ValueInfoT
, typename KeyInfoT
, typename BucketT
>
725 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> &LHS
,
726 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> &RHS
) {
727 if (LHS
.size() != RHS
.size())
730 for (auto &KV
: LHS
) {
731 auto I
= RHS
.find(KV
.first
);
732 if (I
== RHS
.end() || I
->second
!= KV
.second
)
739 /// Inequality comparison for DenseMap.
741 /// Equivalent to !(LHS == RHS). See operator== for performance notes.
742 template <typename DerivedT
, typename KeyT
, typename ValueT
,
743 typename ValueInfoT
, typename KeyInfoT
, typename BucketT
>
745 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> &LHS
,
746 const DenseMapBase
<DerivedT
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> &RHS
) {
747 return !(LHS
== RHS
);
750 template <typename KeyT
, typename ValueT
,
751 typename ValueInfoT
= DenseMapValueInfo
<ValueT
>,
752 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
753 typename BucketT
= detail::DenseMapPair
<KeyT
, ValueT
>>
754 class DenseMap
: public DenseMapBase
<DenseMap
<KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
>,
755 KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> {
756 friend class DenseMapBase
<DenseMap
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
>;
758 // Lift some types from the dependent base class into this class for
759 // simplicity of referring to them.
760 using BaseT
= DenseMapBase
<DenseMap
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
>;
764 unsigned NumTombstones
;
768 /// Create a DenseMap wth an optional \p InitialReserve that guarantee that
769 /// this number of elements can be inserted in the map without grow()
770 explicit DenseMap(unsigned InitialReserve
= 0) { init(InitialReserve
); }
772 DenseMap(const DenseMap
&other
) : BaseT() {
777 DenseMap(DenseMap
&&other
) : BaseT() {
782 template<typename InputIt
>
783 DenseMap(const InputIt
&I
, const InputIt
&E
) {
784 init(std::distance(I
, E
));
788 DenseMap(std::initializer_list
<typename
BaseT::value_type
> Vals
) {
790 this->insert(Vals
.begin(), Vals
.end());
795 operator delete(Buckets
);
798 void swap(DenseMap
& RHS
) {
799 std::swap(Buckets
, RHS
.Buckets
);
800 std::swap(NumEntries
, RHS
.NumEntries
);
801 std::swap(NumTombstones
, RHS
.NumTombstones
);
802 std::swap(NumBuckets
, RHS
.NumBuckets
);
805 DenseMap
& operator=(const DenseMap
& other
) {
811 DenseMap
& operator=(DenseMap
&&other
) {
813 operator delete(Buckets
);
819 void copyFrom(const DenseMap
& other
) {
821 operator delete(Buckets
);
822 if (allocateBuckets(other
.NumBuckets
)) {
823 this->BaseT::copyFrom(other
);
830 void init(unsigned InitNumEntries
) {
831 auto InitBuckets
= BaseT::getMinBucketToReserveForEntries(InitNumEntries
);
832 if (allocateBuckets(InitBuckets
)) {
833 this->BaseT::initEmpty();
840 void grow(unsigned AtLeast
) {
841 unsigned OldNumBuckets
= NumBuckets
;
842 BucketT
*OldBuckets
= Buckets
;
844 allocateBuckets(std::max
<unsigned>(MIN_BUCKETS
, static_cast<unsigned>(NextPowerOf2(AtLeast
-1))));
847 this->BaseT::initEmpty();
851 this->moveFromOldBuckets(OldBuckets
, OldBuckets
+OldNumBuckets
);
853 // Free the old table.
854 operator delete(OldBuckets
);
857 void shrink_and_clear() {
858 unsigned OldNumEntries
= NumEntries
;
861 // Reduce the number of buckets.
862 unsigned NewNumBuckets
= 0;
864 NewNumBuckets
= std::max(MIN_BUCKETS
, 1 << (Log2_32_Ceil(OldNumEntries
) + 1));
865 if (NewNumBuckets
== NumBuckets
) {
866 this->BaseT::initEmpty();
870 operator delete(Buckets
);
875 unsigned getNumEntries() const {
879 void setNumEntries(unsigned Num
) {
883 unsigned getNumTombstones() const {
884 return NumTombstones
;
887 void setNumTombstones(unsigned Num
) {
891 BucketT
*getBuckets() const {
895 unsigned getNumBuckets() const {
899 bool allocateBuckets(unsigned Num
) {
901 if (NumBuckets
== 0) {
906 Buckets
= static_cast<BucketT
*>(operator new(sizeof(BucketT
) * NumBuckets
));
911 template <typename KeyT
, typename ValueT
, unsigned InlineBuckets
= 4,
912 typename ValueInfoT
= DenseMapValueInfo
<ValueT
>,
913 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
914 typename BucketT
= detail::DenseMapPair
<KeyT
, ValueT
>>
916 : public DenseMapBase
<
917 SmallDenseMap
<KeyT
, ValueT
, InlineBuckets
, ValueInfoT
, KeyInfoT
, BucketT
>, KeyT
,
918 ValueT
, ValueInfoT
, KeyInfoT
, BucketT
> {
919 friend class DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
>;
921 // Lift some types from the dependent base class into this class for
922 // simplicity of referring to them.
923 using BaseT
= DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, ValueInfoT
, KeyInfoT
, BucketT
>;
925 static_assert(powerof2(InlineBuckets
),
926 "InlineBuckets must be a power of 2.");
929 unsigned NumEntries
: 31;
930 unsigned NumTombstones
;
937 /// A "union" of an inline bucket array and the struct representing
938 /// a large bucket. This union will be discriminated by the 'Small' bit.
939 AlignedCharArrayUnion
<BucketT
[InlineBuckets
], LargeRep
> storage
;
942 explicit SmallDenseMap(unsigned NumInitBuckets
= 0) {
943 init(NumInitBuckets
);
946 SmallDenseMap(const SmallDenseMap
&other
) : BaseT() {
951 SmallDenseMap(SmallDenseMap
&&other
) : BaseT() {
956 template<typename InputIt
>
957 SmallDenseMap(const InputIt
&I
, const InputIt
&E
) {
958 init(NextPowerOf2(std::distance(I
, E
)));
967 void swap(SmallDenseMap
& RHS
) {
968 unsigned TmpNumEntries
= RHS
.NumEntries
;
969 RHS
.NumEntries
= NumEntries
;
970 NumEntries
= TmpNumEntries
;
971 std::swap(NumTombstones
, RHS
.NumTombstones
);
973 const KeyT EmptyKey
= this->getEmptyKey();
974 const KeyT TombstoneKey
= this->getTombstoneKey();
975 if (Small
&& RHS
.Small
) {
976 // If we're swapping inline bucket arrays, we have to cope with some of
977 // the tricky bits of DenseMap's storage system: the buckets are not
978 // fully initialized. Thus we swap every key, but we may have
979 // a one-directional move of the value.
980 for (unsigned i
= 0, e
= InlineBuckets
; i
!= e
; ++i
) {
981 BucketT
*LHSB
= &getInlineBuckets()[i
],
982 *RHSB
= &RHS
.getInlineBuckets()[i
];
983 bool hasLHSValue
= (!KeyInfoT::isEqual(LHSB
->getFirst(), EmptyKey
) &&
984 !KeyInfoT::isEqual(LHSB
->getFirst(), TombstoneKey
));
985 bool hasRHSValue
= (!KeyInfoT::isEqual(RHSB
->getFirst(), EmptyKey
) &&
986 !KeyInfoT::isEqual(RHSB
->getFirst(), TombstoneKey
));
987 if (hasLHSValue
&& hasRHSValue
) {
988 // Swap together if we can...
989 std::swap(*LHSB
, *RHSB
);
992 // Swap separately and handle any assymetry.
993 std::swap(LHSB
->getFirst(), RHSB
->getFirst());
995 ::new (&RHSB
->getSecond()) ValueT(std::move(LHSB
->getSecond()));
996 LHSB
->getSecond().~ValueT();
997 } else if (hasRHSValue
) {
998 ::new (&LHSB
->getSecond()) ValueT(std::move(RHSB
->getSecond()));
999 RHSB
->getSecond().~ValueT();
1004 if (!Small
&& !RHS
.Small
) {
1005 std::swap(getLargeRep()->Buckets
, RHS
.getLargeRep()->Buckets
);
1006 std::swap(getLargeRep()->NumBuckets
, RHS
.getLargeRep()->NumBuckets
);
1010 SmallDenseMap
&SmallSide
= Small
? *this : RHS
;
1011 SmallDenseMap
&LargeSide
= Small
? RHS
: *this;
1013 // First stash the large side's rep and move the small side across.
1014 LargeRep TmpRep
= std::move(*LargeSide
.getLargeRep());
1015 LargeSide
.getLargeRep()->~LargeRep();
1016 LargeSide
.Small
= true;
1017 // This is similar to the standard move-from-old-buckets, but the bucket
1018 // count hasn't actually rotated in this case. So we have to carefully
1019 // move construct the keys and values into their new locations, but there
1020 // is no need to re-hash things.
1021 for (unsigned i
= 0, e
= InlineBuckets
; i
!= e
; ++i
) {
1022 BucketT
*NewB
= &LargeSide
.getInlineBuckets()[i
],
1023 *OldB
= &SmallSide
.getInlineBuckets()[i
];
1024 ::new (&NewB
->getFirst()) KeyT(std::move(OldB
->getFirst()));
1025 OldB
->getFirst().~KeyT();
1026 if (!KeyInfoT::isEqual(NewB
->getFirst(), EmptyKey
) &&
1027 !KeyInfoT::isEqual(NewB
->getFirst(), TombstoneKey
)) {
1028 ::new (&NewB
->getSecond()) ValueT(std::move(OldB
->getSecond()));
1029 OldB
->getSecond().~ValueT();
1033 // The hard part of moving the small buckets across is done, just move
1034 // the TmpRep into its new home.
1035 SmallSide
.Small
= false;
1036 new (SmallSide
.getLargeRep()) LargeRep(std::move(TmpRep
));
1039 SmallDenseMap
& operator=(const SmallDenseMap
& other
) {
1045 SmallDenseMap
& operator=(SmallDenseMap
&&other
) {
1047 deallocateBuckets();
1053 void copyFrom(const SmallDenseMap
& other
) {
1055 deallocateBuckets();
1057 if (other
.getNumBuckets() > InlineBuckets
) {
1059 new (getLargeRep()) LargeRep(allocateBuckets(other
.getNumBuckets()));
1061 this->BaseT::copyFrom(other
);
1064 void init(unsigned InitBuckets
) {
1066 if (InitBuckets
> InlineBuckets
) {
1068 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets
));
1070 this->BaseT::initEmpty();
1073 void grow(unsigned AtLeast
) {
1074 if (AtLeast
>= InlineBuckets
)
1075 AtLeast
= std::max
<unsigned>(MIN_BUCKETS
, NextPowerOf2(AtLeast
));
1078 if (AtLeast
< InlineBuckets
)
1079 return; // Nothing to do.
1081 // First move the inline buckets into a temporary storage.
1082 AlignedCharArrayUnion
<BucketT
[InlineBuckets
]> TmpStorage
;
1083 BucketT
*TmpBegin
= reinterpret_cast<BucketT
*>(TmpStorage
.buffer
);
1084 BucketT
*TmpEnd
= TmpBegin
;
1086 // Loop over the buckets, moving non-empty, non-tombstones into the
1087 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1088 const KeyT EmptyKey
= this->getEmptyKey();
1089 const KeyT TombstoneKey
= this->getTombstoneKey();
1090 for (BucketT
*P
= getBuckets(), *E
= P
+ InlineBuckets
; P
!= E
; ++P
) {
1091 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
) &&
1092 !KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
)) {
1093 assert(size_t(TmpEnd
- TmpBegin
) < InlineBuckets
&&
1094 "Too many inline buckets!");
1095 ::new (&TmpEnd
->getFirst()) KeyT(std::move(P
->getFirst()));
1096 ::new (&TmpEnd
->getSecond()) ValueT(std::move(P
->getSecond()));
1098 P
->getSecond().~ValueT();
1100 P
->getFirst().~KeyT();
1103 // Now make this map use the large rep, and move all the entries back
1106 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
));
1107 this->moveFromOldBuckets(TmpBegin
, TmpEnd
);
1111 LargeRep OldRep
= std::move(*getLargeRep());
1112 getLargeRep()->~LargeRep();
1113 if (AtLeast
<= InlineBuckets
) {
1116 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
));
1119 this->moveFromOldBuckets(OldRep
.Buckets
, OldRep
.Buckets
+OldRep
.NumBuckets
);
1121 // Free the old table.
1122 operator delete(OldRep
.Buckets
);
1125 void shrink_and_clear() {
1126 unsigned OldSize
= this->size();
1129 // Reduce the number of buckets.
1130 unsigned NewNumBuckets
= 0;
1132 NewNumBuckets
= 1 << (Log2_32_Ceil(OldSize
) + 1);
1133 if (NewNumBuckets
> InlineBuckets
&& NewNumBuckets
< MIN_BUCKETS
)
1134 NewNumBuckets
= MIN_BUCKETS
;
1136 if ((Small
&& NewNumBuckets
<= InlineBuckets
) ||
1137 (!Small
&& NewNumBuckets
== getLargeRep()->NumBuckets
)) {
1138 this->BaseT::initEmpty();
1142 deallocateBuckets();
1143 init(NewNumBuckets
);
1147 unsigned getNumEntries() const {
1151 void setNumEntries(unsigned Num
) {
1152 // NumEntries is hardcoded to be 31 bits wide.
1153 ASSERT(Num
< (1U << 31) && "Cannot support more than 1<<31 entries");
1157 unsigned getNumTombstones() const {
1158 return NumTombstones
;
1161 void setNumTombstones(unsigned Num
) {
1162 NumTombstones
= Num
;
1165 const BucketT
*getInlineBuckets() const {
1167 // Note that this cast does not violate aliasing rules as we assert that
1168 // the memory's dynamic type is the small, inline bucket buffer, and the
1169 // 'storage.buffer' static type is 'char *'.
1170 return reinterpret_cast<const BucketT
*>(storage
.buffer
);
1173 BucketT
*getInlineBuckets() {
1174 return const_cast<BucketT
*>(
1175 const_cast<const SmallDenseMap
*>(this)->getInlineBuckets());
1178 const LargeRep
*getLargeRep() const {
1180 // Note, same rule about aliasing as with getInlineBuckets.
1181 return reinterpret_cast<const LargeRep
*>(storage
.buffer
);
1184 LargeRep
*getLargeRep() {
1185 return const_cast<LargeRep
*>(
1186 const_cast<const SmallDenseMap
*>(this)->getLargeRep());
1189 const BucketT
*getBuckets() const {
1190 return Small
? getInlineBuckets() : getLargeRep()->Buckets
;
1193 BucketT
*getBuckets() {
1194 return const_cast<BucketT
*>(
1195 const_cast<const SmallDenseMap
*>(this)->getBuckets());
1198 unsigned getNumBuckets() const {
1199 return Small
? InlineBuckets
: getLargeRep()->NumBuckets
;
1202 void deallocateBuckets() {
1206 operator delete(getLargeRep()->Buckets
);
1207 getLargeRep()->~LargeRep();
1210 LargeRep
allocateBuckets(unsigned Num
) {
1211 ASSERT(Num
> InlineBuckets
&& "Must allocate more buckets than are inline");
1213 static_cast<BucketT
*>(operator new(sizeof(BucketT
) * Num
)), Num
/// Forward iterator over a DenseMap/SmallDenseMap bucket array. On
/// construction and increment it skips empty and tombstone buckets, so
/// dereferencing always yields a live entry. IsConst selects const vs.
/// mutable bucket access; a non-const iterator implicitly converts to the
/// const one.
template <typename KeyT, typename ValueT, typename ValueInfoT,
          typename KeyInfoT, typename Bucket, bool IsConst>
class DenseMapIterator {
  friend class DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, false>;

  using ConstIterator =
      DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket, true>;

public:
  using difference_type = ptrdiff_t;
  using value_type =
      typename std::conditional<IsConst, const Bucket, Bucket>::type;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  pointer Ptr = nullptr; // Current bucket.
  pointer End = nullptr; // One past the last bucket.

public:
  DenseMapIterator() = default;

  /// Position the iterator at \p Pos within [Pos, E), advancing past
  /// empty/tombstone buckets unless \p NoAdvance is set (used when the
  /// caller already knows Pos is the end or a live bucket).
  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
      : Ptr(Pos), End(E) {
    if (NoAdvance) return;
    AdvancePastEmptyBuckets();
  }

  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = typename std::enable_if<!IsConstSrc && IsConst>::type>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, ValueInfoT, KeyInfoT, Bucket,
                             IsConstSrc> &I)
      : Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    return *Ptr;
  }
  pointer operator->() const {
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    return Ptr == RHS.Ptr;
  }
  bool operator!=(const ConstIterator &RHS) const {
    return Ptr != RHS.Ptr;
  }

  inline DenseMapIterator& operator++() { // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) { // Postincrement
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  // Move Ptr forward until it reaches End or a live (non-empty,
  // non-tombstone) bucket.
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  // Backward counterpart of AdvancePastEmptyBuckets; here End is the lower
  // bound of the walk and Ptr[-1] is the bucket under test.
  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};
1303 template <typename KeyT
, typename ValueT
, typename KeyInfoT
>
1304 inline size_t capacity_in_bytes(const DenseMap
<KeyT
, ValueT
, KeyInfoT
> &X
) {
1305 return X
.getMemorySize();
1308 } // end namespace objc
1310 #endif // LLVM_ADT_DENSEMAP_H