1 //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// 
   3 //                     The LLVM Compiler Infrastructure 
   5 // This file is distributed under the University of Illinois Open Source 
   6 // License. See LICENSE.TXT for details. 
   8 //===----------------------------------------------------------------------===// 
  10 // This file defines the DenseMap class. 
  12 //===----------------------------------------------------------------------===// 
  14 // Taken from llvmCore-3425.0.31. 
  16 #ifndef LLVM_ADT_DENSEMAP_H 
  17 #define LLVM_ADT_DENSEMAP_H 
  19 #include "llvm-type_traits.h" 
  20 #include "llvm-MathExtras.h" 
  21 #include "llvm-AlignOf.h" 
  22 #include "llvm-DenseMapInfo.h" 
  31 #include <TargetConditionals.h> 
  33 #include "objc-private.h" 
  35 // From llvm/Support/Compiler.h 
  36 #define LLVM_USE_RVALUE_REFERENCES 1 
  37 #define llvm_move(value) (::std::move(value)) 
  40 #define MIN_COMPACT 1024 
  45 template<typename KeyT
, typename ValueT
, 
  46          typename KeyInfoT 
= DenseMapInfo
<KeyT
>, 
  48 class DenseMapIterator
; 
  50 // ZeroValuesArePurgeable=true is used by the refcount table. 
  51 // A key/value pair with value==0 is not required to be stored  
  52 //   in the refcount table; it could correctly be erased instead. 
  53 // For performance, we do keep zero values in the table when the  
  54 //   true refcount decreases to 1: this makes any future retain faster. 
  55 // For memory size, we allow rehashes and table insertions to  
  56 //   remove a zero value as if it were a tombstone. 
  58 template<typename DerivedT
,  
  59          typename KeyT
, typename ValueT
, typename KeyInfoT
,  
  60          bool ZeroValuesArePurgeable 
= false> 
  63   typedef std::pair
<KeyT
, ValueT
> BucketT
; 
  66   typedef KeyT key_type
; 
  67   typedef ValueT mapped_type
; 
  68   typedef BucketT value_type
; 
  70   typedef DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
> iterator
; 
  71   typedef DenseMapIterator
<KeyT
, ValueT
, 
  72                            KeyInfoT
, true> const_iterator
; 
  73   inline iterator 
begin() { 
  74     // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets(). 
  75     return empty() ? end() : iterator(getBuckets(), getBucketsEnd()); 
  77   inline iterator 
end() { 
  78     return iterator(getBucketsEnd(), getBucketsEnd(), true); 
  80   inline const_iterator 
begin() const { 
  81     return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd()); 
  83   inline const_iterator 
end() const { 
  84     return const_iterator(getBucketsEnd(), getBucketsEnd(), true); 
  87   bool empty() const { return getNumEntries() == 0; } 
  88   unsigned size() const { return getNumEntries(); } 
  90   /// Grow the densemap so that it has at least Size buckets. Does not shrink 
  91   void resize(size_t Size
) { 
  92     if (Size 
> getNumBuckets()) 
  97     if (getNumEntries() == 0 && getNumTombstones() == 0) return; 
  99     // If the capacity of the array is huge, and the # elements used is small, 
 101     if (getNumEntries() * 4 < getNumBuckets() &&  
 102         getNumBuckets() > MIN_BUCKETS
) { 
 107     const KeyT EmptyKey 
= getEmptyKey(), TombstoneKey 
= getTombstoneKey(); 
 108     for (BucketT 
*P 
= getBuckets(), *E 
= getBucketsEnd(); P 
!= E
; ++P
) { 
 109       if (!KeyInfoT::isEqual(P
->first
, EmptyKey
)) { 
 110         if (!KeyInfoT::isEqual(P
->first
, TombstoneKey
)) { 
 112           decrementNumEntries(); 
 117     assert(getNumEntries() == 0 && "Node count imbalance!"); 
 121   /// count - Return true if the specified key is in the map. 
 122   bool count(const KeyT 
&Val
) const { 
 123     const BucketT 
*TheBucket
; 
 124     return LookupBucketFor(Val
, TheBucket
); 
 127   iterator 
find(const KeyT 
&Val
) { 
 129     if (LookupBucketFor(Val
, TheBucket
)) 
 130       return iterator(TheBucket
, getBucketsEnd(), true); 
 133   const_iterator 
find(const KeyT 
&Val
) const { 
 134     const BucketT 
*TheBucket
; 
 135     if (LookupBucketFor(Val
, TheBucket
)) 
 136       return const_iterator(TheBucket
, getBucketsEnd(), true); 
 140   /// Alternate version of find() which allows a different, and possibly 
 141   /// less expensive, key type. 
 142   /// The DenseMapInfo is responsible for supplying methods 
 143   /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key 
 145   template<class LookupKeyT
> 
 146   iterator 
find_as(const LookupKeyT 
&Val
) { 
 148     if (LookupBucketFor(Val
, TheBucket
)) 
 149       return iterator(TheBucket
, getBucketsEnd(), true); 
 152   template<class LookupKeyT
> 
 153   const_iterator 
find_as(const LookupKeyT 
&Val
) const { 
 154     const BucketT 
*TheBucket
; 
 155     if (LookupBucketFor(Val
, TheBucket
)) 
 156       return const_iterator(TheBucket
, getBucketsEnd(), true); 
 160   /// lookup - Return the entry for the specified key, or a default 
 161   /// constructed value if no such entry exists. 
 162   ValueT 
lookup(const KeyT 
&Val
) const { 
 163     const BucketT 
*TheBucket
; 
 164     if (LookupBucketFor(Val
, TheBucket
)) 
 165       return TheBucket
->second
; 
 169   // Inserts key,value pair into the map if the key isn't already in the map. 
 170   // If the key is already in the map, it returns false and doesn't update the 
 172   std::pair
<iterator
, bool> insert(const std::pair
<KeyT
, ValueT
> &KV
) { 
 174     if (LookupBucketFor(KV
.first
, TheBucket
)) 
 175       return std::make_pair(iterator(TheBucket
, getBucketsEnd(), true), 
 176                             false); // Already in map. 
 178     // Otherwise, insert the new element. 
 179     TheBucket 
= InsertIntoBucket(KV
.first
, KV
.second
, TheBucket
); 
 180     return std::make_pair(iterator(TheBucket
, getBucketsEnd(), true), true); 
 183   /// insert - Range insertion of pairs. 
 184   template<typename InputIt
> 
 185   void insert(InputIt I
, InputIt E
) { 
 191   // Shrink if at least 15/16 empty and larger than MIN_COMPACT. 
 193     if (getNumEntries() == 0) { 
 196     else if (getNumBuckets() / 16 > getNumEntries()  &&   
 197              getNumBuckets() > MIN_COMPACT
)  
 199       grow(getNumEntries() * 2); 
 203   bool erase(const KeyT 
&Val
) { 
 205     if (!LookupBucketFor(Val
, TheBucket
)) 
 206       return false; // not in map. 
 208     TheBucket
->second
.~ValueT(); 
 209     TheBucket
->first 
= getTombstoneKey(); 
 210     decrementNumEntries(); 
 211     incrementNumTombstones(); 
 215   void erase(iterator I
) { 
 216     BucketT 
*TheBucket 
= &*I
; 
 217     TheBucket
->second
.~ValueT(); 
 218     TheBucket
->first 
= getTombstoneKey(); 
 219     decrementNumEntries(); 
 220     incrementNumTombstones(); 
 224   value_type
& FindAndConstruct(const KeyT 
&Key
) { 
 226     if (LookupBucketFor(Key
, TheBucket
)) 
 229     return *InsertIntoBucket(Key
, ValueT(), TheBucket
); 
 232   ValueT 
&operator[](const KeyT 
&Key
) { 
 233     return FindAndConstruct(Key
).second
; 
 236 #if LLVM_USE_RVALUE_REFERENCES 
 237   value_type
& FindAndConstruct(KeyT 
&&Key
) { 
 239     if (LookupBucketFor(Key
, TheBucket
)) 
 242     return *InsertIntoBucket(Key
, ValueT(), TheBucket
); 
 245   ValueT 
&operator[](KeyT 
&&Key
) { 
 246     return FindAndConstruct(Key
).second
; 
 250   /// isPointerIntoBucketsArray - Return true if the specified pointer points 
 251   /// somewhere into the DenseMap's array of buckets (i.e. either to a key or 
 252   /// value in the DenseMap). 
 253   bool isPointerIntoBucketsArray(const void *Ptr
) const { 
 254     return Ptr 
>= getBuckets() && Ptr 
< getBucketsEnd(); 
 257   /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets 
 258   /// array.  In conjunction with the previous method, this can be used to 
 259   /// determine whether an insertion caused the DenseMap to reallocate. 
 260   const void *getPointerIntoBucketsArray() const { return getBuckets(); } 
 266     if (getNumBuckets() == 0) // Nothing to do. 
 269     const KeyT EmptyKey 
= getEmptyKey(), TombstoneKey 
= getTombstoneKey(); 
 270     for (BucketT 
*P 
= getBuckets(), *E 
= getBucketsEnd(); P 
!= E
; ++P
) { 
 271       if (!KeyInfoT::isEqual(P
->first
, EmptyKey
) && 
 272           !KeyInfoT::isEqual(P
->first
, TombstoneKey
)) 
 278       memset((void*)getBuckets(), 0x5a, sizeof(BucketT
)*getNumBuckets()); 
 286     assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && 
 287            "# initial buckets must be a power of two!"); 
 288     const KeyT EmptyKey 
= getEmptyKey(); 
 289     for (BucketT 
*B 
= getBuckets(), *E 
= getBucketsEnd(); B 
!= E
; ++B
) 
 290       new (&B
->first
) KeyT(EmptyKey
); 
 293   void moveFromOldBuckets(BucketT 
*OldBucketsBegin
, BucketT 
*OldBucketsEnd
) { 
 296     // Insert all the old elements. 
 297     const KeyT EmptyKey 
= getEmptyKey(); 
 298     const KeyT TombstoneKey 
= getTombstoneKey(); 
 299     for (BucketT 
*B 
= OldBucketsBegin
, *E 
= OldBucketsEnd
; B 
!= E
; ++B
) { 
 300       if (!KeyInfoT::isEqual(B
->first
, EmptyKey
) && 
 301           !KeyInfoT::isEqual(B
->first
, TombstoneKey
) &&  
 302           !(ZeroValuesArePurgeable 
&& B
->second 
== 0)) { 
 303         // Insert the key/value into the new table. 
 305         bool FoundVal 
= LookupBucketFor(B
->first
, DestBucket
); 
 306         (void)FoundVal
; // silence warning. 
 307         assert(!FoundVal 
&& "Key already in new map?"); 
 308         DestBucket
->first 
= llvm_move(B
->first
); 
 309         new (&DestBucket
->second
) ValueT(llvm_move(B
->second
)); 
 310         incrementNumEntries(); 
 319     if (OldBucketsBegin 
!= OldBucketsEnd
) 
 320       memset((void*)OldBucketsBegin
, 0x5a, 
 321              sizeof(BucketT
) * (OldBucketsEnd 
- OldBucketsBegin
)); 
 325   template <typename OtherBaseT
> 
 326   void copyFrom(const DenseMapBase
<OtherBaseT
, KeyT
, ValueT
, KeyInfoT
>& other
) { 
 327     assert(getNumBuckets() == other
.getNumBuckets()); 
 329     setNumEntries(other
.getNumEntries()); 
 330     setNumTombstones(other
.getNumTombstones()); 
 332     if (isPodLike
<KeyT
>::value 
&& isPodLike
<ValueT
>::value
) 
 333       memcpy(getBuckets(), other
.getBuckets(), 
 334              getNumBuckets() * sizeof(BucketT
)); 
 336       for (size_t i 
= 0; i 
< getNumBuckets(); ++i
) { 
 337         new (&getBuckets()[i
].first
) KeyT(other
.getBuckets()[i
].first
); 
 338         if (!KeyInfoT::isEqual(getBuckets()[i
].first
, getEmptyKey()) && 
 339             !KeyInfoT::isEqual(getBuckets()[i
].first
, getTombstoneKey())) 
 340           new (&getBuckets()[i
].second
) ValueT(other
.getBuckets()[i
].second
); 
 344   void swap(DenseMapBase
& RHS
) { 
 345     std::swap(getNumEntries(), RHS
.getNumEntries()); 
 346     std::swap(getNumTombstones(), RHS
.getNumTombstones()); 
 349   static unsigned getHashValue(const KeyT 
&Val
) { 
 350     return KeyInfoT::getHashValue(Val
); 
 352   template<typename LookupKeyT
> 
 353   static unsigned getHashValue(const LookupKeyT 
&Val
) { 
 354     return KeyInfoT::getHashValue(Val
); 
 356   static const KeyT 
getEmptyKey() { 
 357     return KeyInfoT::getEmptyKey(); 
 359   static const KeyT 
getTombstoneKey() { 
 360     return KeyInfoT::getTombstoneKey(); 
 364   unsigned getNumEntries() const { 
 365     return static_cast<const DerivedT 
*>(this)->getNumEntries(); 
 367   void setNumEntries(unsigned Num
) { 
 368     static_cast<DerivedT 
*>(this)->setNumEntries(Num
); 
 370   void incrementNumEntries() { 
 371     setNumEntries(getNumEntries() + 1); 
 373   void decrementNumEntries() { 
 374     setNumEntries(getNumEntries() - 1); 
 376   unsigned getNumTombstones() const { 
 377     return static_cast<const DerivedT 
*>(this)->getNumTombstones(); 
 379   void setNumTombstones(unsigned Num
) { 
 380     static_cast<DerivedT 
*>(this)->setNumTombstones(Num
); 
 382   void incrementNumTombstones() { 
 383     setNumTombstones(getNumTombstones() + 1); 
 385   void decrementNumTombstones() { 
 386     setNumTombstones(getNumTombstones() - 1); 
 388   const BucketT 
*getBuckets() const { 
 389     return static_cast<const DerivedT 
*>(this)->getBuckets(); 
 391   BucketT 
*getBuckets() { 
 392     return static_cast<DerivedT 
*>(this)->getBuckets(); 
 394   unsigned getNumBuckets() const { 
 395     return static_cast<const DerivedT 
*>(this)->getNumBuckets(); 
 397   BucketT 
*getBucketsEnd() { 
 398     return getBuckets() + getNumBuckets(); 
 400   const BucketT 
*getBucketsEnd() const { 
 401     return getBuckets() + getNumBuckets(); 
 404   void grow(unsigned AtLeast
) { 
 405     static_cast<DerivedT 
*>(this)->grow(AtLeast
); 
 408   void shrink_and_clear() { 
 409     static_cast<DerivedT 
*>(this)->shrink_and_clear(); 
 413   BucketT 
*InsertIntoBucket(const KeyT 
&Key
, const ValueT 
&Value
, 
 414                             BucketT 
*TheBucket
) { 
 415     TheBucket 
= InsertIntoBucketImpl(Key
, TheBucket
); 
 417     TheBucket
->first 
= Key
; 
 418     new (&TheBucket
->second
) ValueT(Value
); 
 422 #if LLVM_USE_RVALUE_REFERENCES 
 423   BucketT 
*InsertIntoBucket(const KeyT 
&Key
, ValueT 
&&Value
, 
 424                             BucketT 
*TheBucket
) { 
 425     TheBucket 
= InsertIntoBucketImpl(Key
, TheBucket
); 
 427     TheBucket
->first 
= Key
; 
 428     new (&TheBucket
->second
) ValueT(std::move(Value
)); 
 432   BucketT 
*InsertIntoBucket(KeyT 
&&Key
, ValueT 
&&Value
, BucketT 
*TheBucket
) { 
 433     TheBucket 
= InsertIntoBucketImpl(Key
, TheBucket
); 
 435     TheBucket
->first 
= std::move(Key
); 
 436     new (&TheBucket
->second
) ValueT(std::move(Value
)); 
 441   BucketT 
*InsertIntoBucketImpl(const KeyT 
&Key
, BucketT 
*TheBucket
) { 
 442     // If the load of the hash table is more than 3/4, grow the table.  
 443     // If fewer than 1/8 of the buckets are empty (meaning that many are  
 444     // filled with tombstones), rehash the table without growing. 
 446     // The latter case is tricky.  For example, if we had one empty bucket with 
 447     // tons of tombstones, failing lookups (e.g. for insertion) would have to 
 448     // probe almost the entire table until it found the empty bucket.  If the 
 449     // table completely filled with tombstones, no lookup would ever succeed, 
 450     // causing infinite loops in lookup. 
 451     unsigned NewNumEntries 
= getNumEntries() + 1; 
 452     unsigned NumBuckets 
= getNumBuckets(); 
 453     if (NewNumEntries
*4 >= NumBuckets
*3) { 
 454       this->grow(NumBuckets 
* 2); 
 455       LookupBucketFor(Key
, TheBucket
); 
 456       NumBuckets 
= getNumBuckets(); 
 458     if (NumBuckets
-(NewNumEntries
+getNumTombstones()) <= NumBuckets
/8) { 
 459       this->grow(NumBuckets
); 
 460       LookupBucketFor(Key
, TheBucket
); 
 464     // Only update the state after we've grown our bucket space appropriately 
 465     // so that when growing buckets we have self-consistent entry count. 
 466     // If we are writing over a tombstone or zero value, remember this. 
 467     if (KeyInfoT::isEqual(TheBucket
->first
, getEmptyKey())) { 
 468       // Replacing an empty bucket. 
 469       incrementNumEntries();       
 471     else if (KeyInfoT::isEqual(TheBucket
->first
, getTombstoneKey())) { 
 472       // Replacing a tombstone. 
 473       incrementNumEntries(); 
 474       decrementNumTombstones(); 
 476     else if (ZeroValuesArePurgeable  
&&  TheBucket
->second 
== 0) { 
 477       // Purging a zero. No accounting changes. 
 478       TheBucket
->second
.~ValueT(); 
 480       // Updating an existing entry. No accounting changes. 
 486   /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in 
 487   /// FoundBucket.  If the bucket contains the key and a value, this returns 
 488   /// true, otherwise it returns a bucket with an empty marker or tombstone  
 489   /// or zero value and returns false. 
 490   template<typename LookupKeyT
> 
 491   bool LookupBucketFor(const LookupKeyT 
&Val
, 
 492                        const BucketT 
*&FoundBucket
) const { 
 493     const BucketT 
*BucketsPtr 
= getBuckets(); 
 494     const unsigned NumBuckets 
= getNumBuckets(); 
 496     if (NumBuckets 
== 0) { 
 501     // FoundTombstone - Keep track of whether we find a tombstone or zero value while probing. 
 502     const BucketT 
*FoundTombstone 
= 0; 
 503     const KeyT EmptyKey 
= getEmptyKey(); 
 504     const KeyT TombstoneKey 
= getTombstoneKey(); 
 505     assert(!KeyInfoT::isEqual(Val
, EmptyKey
) && 
 506            !KeyInfoT::isEqual(Val
, TombstoneKey
) && 
 507            "Empty/Tombstone value shouldn't be inserted into map!"); 
 509     unsigned BucketNo 
= getHashValue(Val
) & (NumBuckets
-1); 
 510     unsigned ProbeAmt 
= 1; 
 512       const BucketT 
*ThisBucket 
= BucketsPtr 
+ BucketNo
; 
 513       // Found Val's bucket?  If so, return it. 
 514       if (KeyInfoT::isEqual(Val
, ThisBucket
->first
)) { 
 515         FoundBucket 
= ThisBucket
; 
 519       // If we found an empty bucket, the key doesn't exist in the set. 
 520       // Insert it and return the default value. 
 521       if (KeyInfoT::isEqual(ThisBucket
->first
, EmptyKey
)) { 
 522         // If we've already seen a tombstone while probing, fill it in instead 
 523         // of the empty bucket we eventually probed to. 
 524         if (FoundTombstone
) ThisBucket 
= FoundTombstone
; 
 525         FoundBucket 
= FoundTombstone 
? FoundTombstone 
: ThisBucket
; 
 529       // If this is a tombstone, remember it.  If Val ends up not in the map, we 
 530       // prefer to return it rather than something that would require more probing. 
 531       // Ditto for zero values. 
 532       if (KeyInfoT::isEqual(ThisBucket
->first
, TombstoneKey
) && !FoundTombstone
) 
 533         FoundTombstone 
= ThisBucket
;  // Remember the first tombstone found. 
 534       if (ZeroValuesArePurgeable  
&&  
 535           ThisBucket
->second 
== 0  &&  !FoundTombstone
)  
 536         FoundTombstone 
= ThisBucket
; 
 538       // Otherwise, it's a hash collision or a tombstone, continue quadratic 
 540       if (ProbeAmt 
> NumBuckets
) { 
 541           // No empty buckets in table. Die. 
 542           _objc_fatal("Hash table corrupted. This is probably a memory error " 
 543                       "somewhere. (table at %p, buckets at %p (%zu bytes), " 
 544                       "%u buckets, %u entries, %u tombstones, " 
 546                       this, BucketsPtr
, malloc_size(BucketsPtr
),  
 547                       NumBuckets
, getNumEntries(), getNumTombstones(),  
 548                       ((void**)BucketsPtr
)[0], ((void**)BucketsPtr
)[1],  
 549                       ((void**)BucketsPtr
)[2], ((void**)BucketsPtr
)[3]); 
 551       BucketNo 
+= ProbeAmt
++; 
 552       BucketNo
&= (NumBuckets
-1); 
 556   template <typename LookupKeyT
> 
 557   bool LookupBucketFor(const LookupKeyT 
&Val
, BucketT 
*&FoundBucket
) { 
 558     const BucketT 
*ConstFoundBucket
; 
 559     bool Result 
= const_cast<const DenseMapBase 
*>(this) 
 560       ->LookupBucketFor(Val
, ConstFoundBucket
); 
 561     FoundBucket 
= const_cast<BucketT 
*>(ConstFoundBucket
); 
 566   /// Return the approximate size (in bytes) of the actual map. 
 567   /// This is just the raw memory used by DenseMap. 
 568   /// If entries are pointers to objects, the size of the referenced objects 
 569   /// are not included. 
 570   size_t getMemorySize() const { 
 571     return getNumBuckets() * sizeof(BucketT
); 
 575 template<typename KeyT
, typename ValueT
, 
 576          bool ZeroValuesArePurgeable 
= false,  
 577          typename KeyInfoT 
= DenseMapInfo
<KeyT
> > 
 579     : public DenseMapBase
<DenseMap
<KeyT
, ValueT
, ZeroValuesArePurgeable
, KeyInfoT
>, 
 580                           KeyT
, ValueT
, KeyInfoT
, ZeroValuesArePurgeable
> { 
 581   // Lift some types from the dependent base class into this class for 
 582   // simplicity of referring to them. 
 583   typedef DenseMapBase
<DenseMap
, KeyT
, ValueT
, KeyInfoT
, ZeroValuesArePurgeable
> BaseT
; 
 584   typedef typename 
BaseT::BucketT BucketT
; 
 585   friend class DenseMapBase
<DenseMap
, KeyT
, ValueT
, KeyInfoT
, ZeroValuesArePurgeable
>; 
 589   unsigned NumTombstones
; 
 593   explicit DenseMap(unsigned NumInitBuckets 
= 0) { 
 594     init(NumInitBuckets
); 
 597   DenseMap(const DenseMap 
&other
) { 
 602 #if LLVM_USE_RVALUE_REFERENCES 
 603   DenseMap(DenseMap 
&&other
) { 
 609   template<typename InputIt
> 
 610   DenseMap(const InputIt 
&I
, const InputIt 
&E
) { 
 611     init(NextPowerOf2(std::distance(I
, E
))); 
 617     operator delete(Buckets
); 
 620   void swap(DenseMap
& RHS
) { 
 621     std::swap(Buckets
, RHS
.Buckets
); 
 622     std::swap(NumEntries
, RHS
.NumEntries
); 
 623     std::swap(NumTombstones
, RHS
.NumTombstones
); 
 624     std::swap(NumBuckets
, RHS
.NumBuckets
); 
 627   DenseMap
& operator=(const DenseMap
& other
) { 
 632 #if LLVM_USE_RVALUE_REFERENCES 
 633   DenseMap
& operator=(DenseMap 
&&other
) { 
 635     operator delete(Buckets
); 
 642   void copyFrom(const DenseMap
& other
) { 
 644     operator delete(Buckets
); 
 645     if (allocateBuckets(other
.NumBuckets
)) { 
 646       this->BaseT::copyFrom(other
); 
 653   void init(unsigned InitBuckets
) { 
 654     if (allocateBuckets(InitBuckets
)) { 
 655       this->BaseT::initEmpty(); 
 662   void grow(unsigned AtLeast
) { 
 663     unsigned OldNumBuckets 
= NumBuckets
; 
 664     BucketT 
*OldBuckets 
= Buckets
; 
 666     allocateBuckets(std::max
<unsigned>(MIN_BUCKETS
, NextPowerOf2(AtLeast
))); 
 669       this->BaseT::initEmpty(); 
 673     this->moveFromOldBuckets(OldBuckets
, OldBuckets
+OldNumBuckets
); 
 675     // Free the old table. 
 676     operator delete(OldBuckets
); 
 679   void shrink_and_clear() { 
 680     unsigned OldNumEntries 
= NumEntries
; 
 683     // Reduce the number of buckets. 
 684     unsigned NewNumBuckets 
= 0; 
 686       NewNumBuckets 
= std::max(MIN_BUCKETS
, 1 << (Log2_32_Ceil(OldNumEntries
) + 1)); 
 687     if (NewNumBuckets 
== NumBuckets
) { 
 688       this->BaseT::initEmpty(); 
 692     operator delete(Buckets
); 
 697   unsigned getNumEntries() const { 
 700   void setNumEntries(unsigned Num
) { 
 704   unsigned getNumTombstones() const { 
 705     return NumTombstones
; 
 707   void setNumTombstones(unsigned Num
) { 
 711   BucketT 
*getBuckets() const { 
 715   unsigned getNumBuckets() const { 
 719   bool allocateBuckets(unsigned Num
) { 
 721     if (NumBuckets 
== 0) { 
 726     Buckets 
= static_cast<BucketT
*>(operator new(sizeof(BucketT
)*NumBuckets
)); 
 731 template<typename KeyT
, typename ValueT
, 
 732          unsigned InlineBuckets 
= 4, 
 733          bool ZeroValuesArePurgeable 
= false,  
 734          typename KeyInfoT 
= DenseMapInfo
<KeyT
> > 
 736     : public DenseMapBase
<SmallDenseMap
<KeyT
, ValueT
, InlineBuckets
, ZeroValuesArePurgeable
, KeyInfoT
>, 
 737                           KeyT
, ValueT
, KeyInfoT
, ZeroValuesArePurgeable
> { 
 738   // Lift some types from the dependent base class into this class for 
 739   // simplicity of referring to them. 
 740   typedef DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, KeyInfoT
, ZeroValuesArePurgeable
> BaseT
; 
 741   typedef typename 
BaseT::BucketT BucketT
; 
 742   friend class DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, KeyInfoT
, ZeroValuesArePurgeable
>; 
 745   unsigned NumEntries 
: 31; 
 746   unsigned NumTombstones
; 
 753   /// A "union" of an inline bucket array and the struct representing 
 754   /// a large bucket. This union will be discriminated by the 'Small' bit. 
 755   AlignedCharArrayUnion
<BucketT
[InlineBuckets
], LargeRep
> storage
; 
 758   explicit SmallDenseMap(unsigned NumInitBuckets 
= 0) { 
 759     init(NumInitBuckets
); 
 762   SmallDenseMap(const SmallDenseMap 
&other
) { 
 767 #if LLVM_USE_RVALUE_REFERENCES 
 768   SmallDenseMap(SmallDenseMap 
&&other
) { 
 774   template<typename InputIt
> 
 775   SmallDenseMap(const InputIt 
&I
, const InputIt 
&E
) { 
 776     init(NextPowerOf2(std::distance(I
, E
))); 
 785   void swap(SmallDenseMap
& RHS
) { 
 786     unsigned TmpNumEntries 
= RHS
.NumEntries
; 
 787     RHS
.NumEntries 
= NumEntries
; 
 788     NumEntries 
= TmpNumEntries
; 
 789     std::swap(NumTombstones
, RHS
.NumTombstones
); 
 791     const KeyT EmptyKey 
= this->getEmptyKey(); 
 792     const KeyT TombstoneKey 
= this->getTombstoneKey(); 
 793     if (Small 
&& RHS
.Small
) { 
 794       // If we're swapping inline bucket arrays, we have to cope with some of 
 795       // the tricky bits of DenseMap's storage system: the buckets are not 
 796       // fully initialized. Thus we swap every key, but we may have 
 797       // a one-directional move of the value. 
 798       for (unsigned i 
= 0, e 
= InlineBuckets
; i 
!= e
; ++i
) { 
 799         BucketT 
*LHSB 
= &getInlineBuckets()[i
], 
 800                 *RHSB 
= &RHS
.getInlineBuckets()[i
]; 
 801         bool hasLHSValue 
= (!KeyInfoT::isEqual(LHSB
->first
, EmptyKey
) && 
 802                             !KeyInfoT::isEqual(LHSB
->first
, TombstoneKey
)); 
 803         bool hasRHSValue 
= (!KeyInfoT::isEqual(RHSB
->first
, EmptyKey
) && 
 804                             !KeyInfoT::isEqual(RHSB
->first
, TombstoneKey
)); 
 805         if (hasLHSValue 
&& hasRHSValue
) { 
 806           // Swap together if we can... 
 807           std::swap(*LHSB
, *RHSB
); 
 810         // Swap separately and handle any asymmetry. 
 811         std::swap(LHSB
->first
, RHSB
->first
); 
 813           new (&RHSB
->second
) ValueT(llvm_move(LHSB
->second
)); 
 814           LHSB
->second
.~ValueT(); 
 815         } else if (hasRHSValue
) { 
 816           new (&LHSB
->second
) ValueT(llvm_move(RHSB
->second
)); 
 817           RHSB
->second
.~ValueT(); 
 822     if (!Small 
&& !RHS
.Small
) { 
 823       std::swap(getLargeRep()->Buckets
, RHS
.getLargeRep()->Buckets
); 
 824       std::swap(getLargeRep()->NumBuckets
, RHS
.getLargeRep()->NumBuckets
); 
 828     SmallDenseMap 
&SmallSide 
= Small 
? *this : RHS
; 
 829     SmallDenseMap 
&LargeSide 
= Small 
? RHS 
: *this; 
 831     // First stash the large side's rep and move the small side across. 
 832     LargeRep TmpRep 
= llvm_move(*LargeSide
.getLargeRep()); 
 833     LargeSide
.getLargeRep()->~LargeRep(); 
 834     LargeSide
.Small 
= true; 
 835     // This is similar to the standard move-from-old-buckets, but the bucket 
 836     // count hasn't actually rotated in this case. So we have to carefully 
 837     // move construct the keys and values into their new locations, but there 
 838     // is no need to re-hash things. 
 839     for (unsigned i 
= 0, e 
= InlineBuckets
; i 
!= e
; ++i
) { 
 840       BucketT 
*NewB 
= &LargeSide
.getInlineBuckets()[i
], 
 841               *OldB 
= &SmallSide
.getInlineBuckets()[i
]; 
 842       new (&NewB
->first
) KeyT(llvm_move(OldB
->first
)); 
 844       if (!KeyInfoT::isEqual(NewB
->first
, EmptyKey
) && 
 845           !KeyInfoT::isEqual(NewB
->first
, TombstoneKey
)) { 
 846         new (&NewB
->second
) ValueT(llvm_move(OldB
->second
)); 
 847         OldB
->second
.~ValueT(); 
 851     // The hard part of moving the small buckets across is done, just move 
 852     // the TmpRep into its new home. 
 853     SmallSide
.Small 
= false; 
 854     new (SmallSide
.getLargeRep()) LargeRep(llvm_move(TmpRep
)); 
 857   SmallDenseMap
& operator=(const SmallDenseMap
& other
) { 
 862 #if LLVM_USE_RVALUE_REFERENCES 
 863   SmallDenseMap
& operator=(SmallDenseMap 
&&other
) { 
 872   void copyFrom(const SmallDenseMap
& other
) { 
 876     if (other
.getNumBuckets() > InlineBuckets
) { 
 878       allocateBuckets(other
.getNumBuckets()); 
 880     this->BaseT::copyFrom(other
); 
 883   void init(unsigned InitBuckets
) { 
 885     if (InitBuckets 
> InlineBuckets
) { 
 887       new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets
)); 
 889     this->BaseT::initEmpty(); 
 892   void grow(unsigned AtLeast
) { 
 893     if (AtLeast 
> InlineBuckets
) 
 894       AtLeast 
= std::max
<unsigned>(MIN_BUCKETS
, NextPowerOf2(AtLeast
)); 
 897       if (AtLeast 
<= InlineBuckets
) 
 898         return; // Nothing to do. 
 900       // First move the inline buckets into a temporary storage. 
 901       AlignedCharArrayUnion
<BucketT
[InlineBuckets
]> TmpStorage
; 
 902       BucketT 
*TmpBegin 
= reinterpret_cast<BucketT 
*>(TmpStorage
.buffer
); 
 903       BucketT 
*TmpEnd 
= TmpBegin
; 
 905       // Loop over the buckets, moving non-empty, non-tombstones into the 
 906       // temporary storage. Have the loop move the TmpEnd forward as it goes. 
 907       const KeyT EmptyKey 
= this->getEmptyKey(); 
 908       const KeyT TombstoneKey 
= this->getTombstoneKey(); 
 909       for (BucketT 
*P 
= getBuckets(), *E 
= P 
+ InlineBuckets
; P 
!= E
; ++P
) { 
 910         if (!KeyInfoT::isEqual(P
->first
, EmptyKey
) && 
 911             !KeyInfoT::isEqual(P
->first
, TombstoneKey
)) { 
 912           assert(size_t(TmpEnd 
- TmpBegin
) < InlineBuckets 
&& 
 913                  "Too many inline buckets!"); 
 914           new (&TmpEnd
->first
) KeyT(llvm_move(P
->first
)); 
 915           new (&TmpEnd
->second
) ValueT(llvm_move(P
->second
)); 
 922       // Now make this map use the large rep, and move all the entries back 
 925       new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
)); 
 926       this->moveFromOldBuckets(TmpBegin
, TmpEnd
); 
 930     LargeRep OldRep 
= llvm_move(*getLargeRep()); 
 931     getLargeRep()->~LargeRep(); 
 932     if (AtLeast 
<= InlineBuckets
) { 
 935       new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
)); 
 938     this->moveFromOldBuckets(OldRep
.Buckets
, OldRep
.Buckets
+OldRep
.NumBuckets
); 
 940     // Free the old table. 
 941     operator delete(OldRep
.Buckets
); 
 944   void shrink_and_clear() { 
 945     unsigned OldSize 
= this->size(); 
 948     // Reduce the number of buckets. 
 949     unsigned NewNumBuckets 
= 0; 
 951       NewNumBuckets 
= 1 << (Log2_32_Ceil(OldSize
) + 1); 
 952       if (NewNumBuckets 
> InlineBuckets 
&& NewNumBuckets 
< MIN_BUCKETS
) 
 953         NewNumBuckets 
= MIN_BUCKETS
; 
 955     if ((Small 
&& NewNumBuckets 
<= InlineBuckets
) || 
 956         (!Small 
&& NewNumBuckets 
== getLargeRep()->NumBuckets
)) { 
 957       this->BaseT::initEmpty(); 
 966   unsigned getNumEntries() const { 
 969   void setNumEntries(unsigned Num
) { 
 970     assert(Num 
< INT_MAX 
&& "Cannot support more than INT_MAX entries"); 
 974   unsigned getNumTombstones() const { 
 975     return NumTombstones
; 
 977   void setNumTombstones(unsigned Num
) { 
 981   const BucketT 
*getInlineBuckets() const { 
 983     // Note that this cast does not violate aliasing rules as we assert that 
 984     // the memory's dynamic type is the small, inline bucket buffer, and the 
 985     // 'storage.buffer' static type is 'char *'. 
 986     return reinterpret_cast<const BucketT 
*>(storage
.buffer
); 
 988   BucketT 
*getInlineBuckets() { 
 989     return const_cast<BucketT 
*>( 
 990       const_cast<const SmallDenseMap 
*>(this)->getInlineBuckets()); 
 992   const LargeRep 
*getLargeRep() const { 
 994     // Note, same rule about aliasing as with getInlineBuckets. 
 995     return reinterpret_cast<const LargeRep 
*>(storage
.buffer
); 
 997   LargeRep 
*getLargeRep() { 
 998     return const_cast<LargeRep 
*>( 
 999       const_cast<const SmallDenseMap 
*>(this)->getLargeRep()); 
1002   const BucketT 
*getBuckets() const { 
1003     return Small 
? getInlineBuckets() : getLargeRep()->Buckets
; 
1005   BucketT 
*getBuckets() { 
1006     return const_cast<BucketT 
*>( 
1007       const_cast<const SmallDenseMap 
*>(this)->getBuckets()); 
1009   unsigned getNumBuckets() const { 
1010     return Small 
? InlineBuckets 
: getLargeRep()->NumBuckets
; 
1013   void deallocateBuckets() { 
1017     operator delete(getLargeRep()->Buckets
); 
1018     getLargeRep()->~LargeRep(); 
1021   LargeRep 
allocateBuckets(unsigned Num
) { 
1022     assert(Num 
> InlineBuckets 
&& "Must allocate more buckets than are inline"); 
1024       static_cast<BucketT
*>(operator new(sizeof(BucketT
) * Num
)), Num
 
1030 template<typename KeyT
, typename ValueT
, 
1031          typename KeyInfoT
, bool IsConst
> 
1032 class DenseMapIterator 
{ 
1033   typedef std::pair
<KeyT
, ValueT
> Bucket
; 
1034   typedef DenseMapIterator
<KeyT
, ValueT
, 
1035                            KeyInfoT
, true> ConstIterator
; 
1036   friend class DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, true>; 
1038   typedef ptrdiff_t difference_type
; 
1039   typedef typename conditional
<IsConst
, const Bucket
, Bucket
>::type value_type
; 
1040   typedef value_type 
*pointer
; 
1041   typedef value_type 
&reference
; 
1042   typedef std::forward_iterator_tag iterator_category
; 
1046   DenseMapIterator() : Ptr(0), End(0) {} 
1048   DenseMapIterator(pointer Pos
, pointer E
, bool NoAdvance 
= false) 
1049     : Ptr(Pos
), End(E
) { 
1050     if (!NoAdvance
) AdvancePastEmptyBuckets(); 
1053   // If IsConst is true this is a converting constructor from iterator to 
1054   // const_iterator and the default copy constructor is used. 
1055   // Otherwise this is a copy constructor for iterator. 
1056   DenseMapIterator(const DenseMapIterator
<KeyT
, ValueT
, 
1057                                           KeyInfoT
, false>& I
) 
1058     : Ptr(I
.Ptr
), End(I
.End
) {} 
1060   reference 
operator*() const { 
1063   pointer 
operator->() const { 
1067   bool operator==(const ConstIterator 
&RHS
) const { 
1068     return Ptr 
== RHS
.operator->(); 
1070   bool operator!=(const ConstIterator 
&RHS
) const { 
1071     return Ptr 
!= RHS
.operator->(); 
1074   inline DenseMapIterator
& operator++() {  // Preincrement 
1076     AdvancePastEmptyBuckets(); 
1079   DenseMapIterator 
operator++(int) {  // Postincrement 
1080     DenseMapIterator tmp 
= *this; ++*this; return tmp
; 
1084   void AdvancePastEmptyBuckets() { 
1085     const KeyT Empty 
= KeyInfoT::getEmptyKey(); 
1086     const KeyT Tombstone 
= KeyInfoT::getTombstoneKey(); 
1088     while (Ptr 
!= End 
&& 
1089            (KeyInfoT::isEqual(Ptr
->first
, Empty
) || 
1090             KeyInfoT::isEqual(Ptr
->first
, Tombstone
))) 
1095 } // end namespace objc