//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

// Taken from llvmCore-3425.0.31.

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm-type_traits.h"
#include "llvm-MathExtras.h"
#include "llvm-AlignOf.h"
#include "llvm-DenseMapInfo.h"
#include <algorithm>
#include <iterator>
#include <new>
#include <utility>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>
#include <TargetConditionals.h>

#include "objc-private.h"

// From llvm/Support/Compiler.h
#define LLVM_USE_RVALUE_REFERENCES 1
#define llvm_move(value) (::std::move(value))

// Minimum number of buckets for a non-empty table.
#define MIN_BUCKETS 4
// compact() shrinks mostly-empty tables only above this bucket count.
#define MIN_COMPACT 1024


namespace objc {

template<typename KeyT, typename ValueT,
         typename KeyInfoT = DenseMapInfo<KeyT>,
         bool IsConst = false>
class DenseMapIterator;

// ZeroValuesArePurgeable=true is used by the refcount table.
// A key/value pair with value==0 is not required to be stored
// in the refcount table; it could correctly be erased instead.
// For performance, we do keep zero values in the table when the
// true refcount decreases to 1: this makes any future retain faster.
// For memory size, we allow rehashes and table insertions to
// remove a zero value as if it were a tombstone.
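//
// Illustrative sketch (not part of this header): with purgeable zeros, a
// zero-valued entry may silently vanish across a rehash, so callers must
// treat "missing" and "stored zero" as equivalent.
//
//   objc::DenseMap<void *, size_t, /*ZeroValuesArePurgeable*/true> refcnts;
//   refcnts[obj] = 0;                 // kept for now; a grow() may drop it
//   size_t rc = refcnts.lookup(obj);  // 0 either way, stored or purged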

template<typename DerivedT,
         typename KeyT, typename ValueT, typename KeyInfoT,
         bool ZeroValuesArePurgeable = false>
class DenseMapBase {
protected:
  typedef std::pair<KeyT, ValueT> BucketT;

public:
  typedef KeyT key_type;
  typedef ValueT mapped_type;
  typedef BucketT value_type;

  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> const_iterator;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
    return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
  }
  inline iterator end() {
    return iterator(getBucketsEnd(), getBucketsEnd(), true);
  }
  inline const_iterator begin() const {
    return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
  }
  inline const_iterator end() const {
    return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
  }

  bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }

  /// Grow the densemap so that it has at least Size buckets. Does not shrink
  void resize(size_t Size) {
    if (Size > getNumBuckets())
      grow(Size);
  }

  void clear() {
    if (getNumEntries() == 0 && getNumTombstones() == 0) return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
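    // (For example, 4 entries in a 256-bucket table: 4*4 < 256 and
    // 256 > MIN_BUCKETS, so clear() reallocates a small table instead of
    // resetting all 256 buckets.)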
    if (getNumEntries() * 4 < getNumBuckets() &&
        getNumBuckets() > MIN_BUCKETS) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
        if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
          P->second.~ValueT();
          decrementNumEntries();
        }
        P->first = EmptyKey;
      }
    }
    assert(getNumEntries() == 0 && "Node count imbalance!");
    setNumTombstones(0);
  }

  /// count - Return true if the specified key is in the map.
  bool count(const KeyT &Val) const {
    const BucketT *TheBucket;
    return LookupBucketFor(Val, TheBucket);
  }

  iterator find(const KeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  const_iterator find(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }

  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template<class LookupKeyT>
  iterator find_as(const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
  template<class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return const_iterator(TheBucket, getBucketsEnd(), true);
    return end();
  }
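
  // Illustrative sketch (not part of this header): a KeyInfoT can provide
  // overloads for a cheaper lookup key. For a map keyed by heap-allocated
  // strings, a raw const char* could probe without allocating (all names
  // below are hypothetical):
  //
  //   struct MyKeyInfo : DenseMapInfo<MyString *> {
  //     static unsigned getHashValue(const char *S);     // hash the chars
  //     static bool isEqual(const char *S, MyString *K); // content compare
  //   };
  //   // ... then map.find_as("name") avoids constructing a MyString.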

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const KeyT &Val) const {
    const BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return TheBucket->second;
    return ValueT();
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    BucketT *TheBucket;
    if (LookupBucketFor(KV.first, TheBucket))
      return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
    return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  }

  /// insert - Range insertion of pairs.
  template<typename InputIt>
  void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }

  // Clear if empty.
  // Shrink if at least 15/16 empty and larger than MIN_COMPACT.
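  // (For example, 2048 buckets and 127 entries: 2048/16 = 128 > 127 and
  // 2048 > MIN_COMPACT, so grow(254) rehashes into 256 buckets.)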
  void compact() {
    if (getNumEntries() == 0) {
      shrink_and_clear();
    }
    else if (getNumBuckets() / 16 > getNumEntries() &&
             getNumBuckets() > MIN_COMPACT)
    {
      grow(getNumEntries() * 2);
    }
  }

  bool erase(const KeyT &Val) {
    BucketT *TheBucket;
    if (!LookupBucketFor(Val, TheBucket))
      return false; // not in map.

    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    compact();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->second.~ValueT();
    TheBucket->first = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    compact();
  }

  value_type& FindAndConstruct(const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](const KeyT &Key) {
    return FindAndConstruct(Key).second;
  }
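
  // Illustrative sketch (not part of this header): like std::map,
  // operator[] default-constructs the value for a missing key.
  //
  //   objc::DenseMap<void *, unsigned> counts;
  //   ++counts[p];                  // inserts {p, 0}, then increments to 1
  //   bool seen = counts.count(p);  // lookups never create entries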

#if LLVM_USE_RVALUE_REFERENCES
  value_type& FindAndConstruct(KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return *TheBucket;

    return *InsertIntoBucket(Key, ValueT(), TheBucket);
  }

  ValueT &operator[](KeyT &&Key) {
    return FindAndConstruct(Key).second;
  }
#endif

  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() {}

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
          !KeyInfoT::isEqual(P->first, TombstoneKey))
        P->second.~ValueT();
      P->first.~KeyT();
    }

#ifndef NDEBUG
    memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
#endif
  }

  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      new (&B->first) KeyT(EmptyKey);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
          !KeyInfoT::isEqual(B->first, TombstoneKey) &&
          !(ZeroValuesArePurgeable && B->second == 0)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->first, DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->first = llvm_move(B->first);
        new (&DestBucket->second) ValueT(llvm_move(B->second));
        incrementNumEntries();

        // Free the value.
        B->second.~ValueT();
      }
      B->first.~KeyT();
    }

#ifndef NDEBUG
    if (OldBucketsBegin != OldBucketsEnd)
      memset((void*)OldBucketsBegin, 0x5a,
             sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
#endif
  }

  template <typename OtherBaseT>
  void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
      memcpy(getBuckets(), other.getBuckets(),
             getNumBuckets() * sizeof(BucketT));
    else
      for (size_t i = 0; i < getNumBuckets(); ++i) {
        new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
        if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
            !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
          new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
      }
  }

  void swap(DenseMapBase& RHS) {
    std::swap(getNumEntries(), RHS.getNumEntries());
    std::swap(getNumTombstones(), RHS.getNumTombstones());
  }

  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  template<typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }
  static const KeyT getEmptyKey() {
    return KeyInfoT::getEmptyKey();
  }
  static const KeyT getTombstoneKey() {
    return KeyInfoT::getTombstoneKey();
  }

private:
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }
  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }
  void incrementNumEntries() {
    setNumEntries(getNumEntries() + 1);
  }
  void decrementNumEntries() {
    setNumEntries(getNumEntries() - 1);
  }
  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }
  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }
  void incrementNumTombstones() {
    setNumTombstones(getNumTombstones() + 1);
  }
  void decrementNumTombstones() {
    setNumTombstones(getNumTombstones() - 1);
  }
  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }
  BucketT *getBuckets() {
    return static_cast<DerivedT *>(this)->getBuckets();
  }
  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }
  BucketT *getBucketsEnd() {
    return getBuckets() + getNumBuckets();
  }
  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) {
    static_cast<DerivedT *>(this)->grow(AtLeast);
  }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }


  BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(Value);
    return TheBucket;
  }

#if LLVM_USE_RVALUE_REFERENCES
  BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
                            BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = Key;
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }

  BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->first = std::move(Key);
    new (&TheBucket->second) ValueT(std::move(Value));
    return TheBucket;
  }
#endif

  BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
    // If the load of the hash table is more than 3/4, grow the table.
    // If fewer than 1/8 of the buckets are empty (meaning that many are
    // filled with tombstones), rehash the table without growing.
    //
    // The latter case is tricky.  For example, if we had one empty bucket
    // with tons of tombstones, failing lookups (e.g. for insertion) would
    // have to probe almost the entire table until they found the empty
    // bucket.  If the table were completely filled with tombstones, no
    // lookup would ever succeed, causing infinite loops in lookup.
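    // (Concretely: with 64 buckets, inserting the 48th entry triggers the
    // grow to 128, since 48*4 >= 64*3; and a same-size rehash flushes
    // tombstones once at most 64/8 = 8 empty buckets remain.)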
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (NewNumEntries*4 >= NumBuckets*3) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Key, TheBucket);
      NumBuckets = getNumBuckets();
    }
    if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
      this->grow(NumBuckets);
      LookupBucketFor(Key, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    // If we are writing over a tombstone or zero value, remember this.
    if (KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) {
      // Replacing an empty bucket.
      incrementNumEntries();
    }
    else if (KeyInfoT::isEqual(TheBucket->first, getTombstoneKey())) {
      // Replacing a tombstone.
      incrementNumEntries();
      decrementNumTombstones();
    }
    else if (ZeroValuesArePurgeable && TheBucket->second == 0) {
      // Purging a zero. No accounting changes.
      TheBucket->second.~ValueT();
    } else {
      // Updating an existing entry. No accounting changes.
    }

    return TheBucket;
  }

  /// LookupBucketFor - Look up the appropriate bucket for Val, returning it
  /// in FoundBucket.  If the bucket already contains the key, this returns
  /// true.  Otherwise it returns false and sets FoundBucket to the bucket
  /// that should be filled: one holding an empty marker, a tombstone, or a
  /// purgeable zero value.
  template<typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val,
                       const BucketT *&FoundBucket) const {
    const BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = 0;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone or zero
    // value while probing.
    const BucketT *FoundTombstone = 0;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
    unsigned ProbeAmt = 1;
    while (1) {
      const BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        if (FoundTombstone) ThisBucket = FoundTombstone;
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map,
      // we prefer to return it than something that would require more probing.
      // Ditto for zero values.
      if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
        FoundTombstone = ThisBucket;  // Remember the first tombstone found.
      if (ZeroValuesArePurgeable &&
          ThisBucket->second == 0 && !FoundTombstone)
        FoundTombstone = ThisBucket;

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
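      // (ProbeAmt increases by one each step, so from initial index h the
      // probed slots are h, h+1, h+3, h+6, h+10, ... mod NumBuckets. These
      // triangular-number offsets visit every bucket when the bucket count
      // is a power of two.)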
      if (ProbeAmt > NumBuckets) {
        // No empty buckets in table. Die.
        _objc_fatal("Hash table corrupted. This is probably a memory error "
                    "somewhere. (table at %p, buckets at %p (%zu bytes), "
                    "%u buckets, %u entries, %u tombstones, "
                    "data %p %p %p %p)",
                    this, BucketsPtr, malloc_size(BucketsPtr),
                    NumBuckets, getNumEntries(), getNumTombstones(),
                    ((void**)BucketsPtr)[0], ((void**)BucketsPtr)[1],
                    ((void**)BucketsPtr)[2], ((void**)BucketsPtr)[3]);
      }
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets-1);
    }
  }

  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    const BucketT *ConstFoundBucket;
    bool Result = const_cast<const DenseMapBase *>(this)
      ->LookupBucketFor(Val, ConstFoundBucket);
    FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
    return Result;
  }

public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const {
    return getNumBuckets() * sizeof(BucketT);
  }
};

template<typename KeyT, typename ValueT,
         bool ZeroValuesArePurgeable = false,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class DenseMap
  : public DenseMapBase<DenseMap<KeyT, ValueT, ZeroValuesArePurgeable, KeyInfoT>,
                        KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> BaseT;
  typedef typename BaseT::BucketT BucketT;
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;

public:
  explicit DenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  DenseMap(const DenseMap &other) {
    init(0);
    copyFrom(other);
  }

#if LLVM_USE_RVALUE_REFERENCES
  DenseMap(DenseMap &&other) {
    init(0);
    swap(other);
  }
#endif

  template<typename InputIt>
  DenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~DenseMap() {
    this->destroyAll();
    operator delete(Buckets);
  }

  void swap(DenseMap& RHS) {
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap& operator=(const DenseMap& other) {
    copyFrom(other);
    return *this;
  }

#if LLVM_USE_RVALUE_REFERENCES
  DenseMap& operator=(DenseMap &&other) {
    this->destroyAll();
    operator delete(Buckets);
    init(0);
    swap(other);
    return *this;
  }
#endif

  void copyFrom(const DenseMap& other) {
    this->destroyAll();
    operator delete(Buckets);
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void init(unsigned InitBuckets) {
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }

  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(MIN_BUCKETS, NextPowerOf2(AtLeast)));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);

    // Free the old table.
    operator delete(OldBuckets);
  }

  void shrink_and_clear() {
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(MIN_BUCKETS, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    operator delete(Buckets);
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  BucketT *getBuckets() const {
    return Buckets;
  }

  unsigned getNumBuckets() const {
    return NumBuckets;
  }

  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = 0;
      return false;
    }

    Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT)*NumBuckets));
    return true;
  }
};
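
// Illustrative sketch (not part of this header): basic DenseMap usage.
// Keys and values live inline in the open-addressed bucket array, so the
// map works best with pointer-sized, cheaply-copied key and value types.
//
//   objc::DenseMap<void *, int> depths;
//   depths[obj] = 1;                               // insert or overwrite
//   objc::DenseMap<void *, int>::iterator it = depths.find(obj);
//   if (it != depths.end()) depths.erase(it);      // leaves a tombstone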

template<typename KeyT, typename ValueT,
         unsigned InlineBuckets = 4,
         bool ZeroValuesArePurgeable = false,
         typename KeyInfoT = DenseMapInfo<KeyT> >
class SmallDenseMap
  : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, ZeroValuesArePurgeable, KeyInfoT>,
                        KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> {
  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> BaseT;
  typedef typename BaseT::BucketT BucketT;
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable>;

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    init(NumInitBuckets);
  }

  SmallDenseMap(const SmallDenseMap &other) {
    init(0);
    copyFrom(other);
  }

#if LLVM_USE_RVALUE_REFERENCES
  SmallDenseMap(SmallDenseMap &&other) {
    init(0);
    swap(other);
  }
#endif

  template<typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }

  void swap(SmallDenseMap& RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->first, RHSB->first);
        if (hasLHSValue) {
          new (&RHSB->second) ValueT(llvm_move(LHSB->second));
          LHSB->second.~ValueT();
        } else if (hasRHSValue) {
          new (&LHSB->second) ValueT(llvm_move(RHSB->second));
          RHSB->second.~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      new (&NewB->first) KeyT(llvm_move(OldB->first));
      OldB->first.~KeyT();
      if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
          !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
        new (&NewB->second) ValueT(llvm_move(OldB->second));
        OldB->second.~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
  }

  SmallDenseMap& operator=(const SmallDenseMap& other) {
    copyFrom(other);
    return *this;
  }

#if LLVM_USE_RVALUE_REFERENCES
  SmallDenseMap& operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }
#endif

  void copyFrom(const SmallDenseMap& other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      allocateBuckets(other.getNumBuckets());
    }
    this->BaseT::copyFrom(other);
  }

  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }

  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(MIN_BUCKETS, NextPowerOf2(AtLeast));

    if (Small) {
      if (AtLeast <= InlineBuckets)
        return; // Nothing to do.

      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
            !KeyInfoT::isEqual(P->first, TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          new (&TmpEnd->first) KeyT(llvm_move(P->first));
          new (&TmpEnd->second) ValueT(llvm_move(P->second));
          ++TmpEnd;
          P->second.~ValueT();
        }
        P->first.~KeyT();
      }

      // Now make this map use the large rep, and move all the entries back
      // into it.
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = llvm_move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);

    // Free the old table.
    operator delete(OldRep.Buckets);
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < MIN_BUCKETS)
        NewNumBuckets = MIN_BUCKETS;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }

private:
  unsigned getNumEntries() const {
    return NumEntries;
  }
  void setNumEntries(unsigned Num) {
    assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const {
    return NumTombstones;
  }
  void setNumTombstones(unsigned Num) {
    NumTombstones = Num;
  }

  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage.buffer' static type is 'char *'.
    return reinterpret_cast<const BucketT *>(storage.buffer);
  }
  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }
  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(storage.buffer);
  }
  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
      const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }
  BucketT *getBuckets() {
    return const_cast<BucketT *>(
      const_cast<const SmallDenseMap *>(this)->getBuckets());
  }
  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    operator delete(getLargeRep()->Buckets);
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {
      static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
    };
    return Rep;
  }
};
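
// Illustrative sketch (not part of this header): SmallDenseMap stores up to
// InlineBuckets buckets inside the map object itself, so small tables never
// touch the heap.
//
//   objc::SmallDenseMap<void *, void *, 8> m;  // 8 inline buckets, no malloc
//   m[k] = v;                                  // still inline
//   // The 6th insertion crosses the 3/4 load factor (6*4 >= 8*3) and
//   // moves the table into a heap allocation.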

template<typename KeyT, typename ValueT,
         typename KeyInfoT, bool IsConst>
class DenseMapIterator {
  typedef std::pair<KeyT, ValueT> Bucket;
  typedef DenseMapIterator<KeyT, ValueT,
                           KeyInfoT, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
public:
  typedef ptrdiff_t difference_type;
  typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;
private:
  pointer Ptr, End;
public:
  DenseMapIterator() : Ptr(0), End(0) {}

  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
    : Ptr(Pos), End(E) {
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // If IsConst is true this is a converting constructor from iterator to
  // const_iterator and the default copy constructor is used.
  // Otherwise this is a copy constructor for iterator.
  DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
                                          KeyInfoT, false>& I)
    : Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    return *Ptr;
  }
  pointer operator->() const {
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    return Ptr == RHS.operator->();
  }
  bool operator!=(const ConstIterator &RHS) const {
    return Ptr != RHS.operator->();
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End &&
           (KeyInfoT::isEqual(Ptr->first, Empty) ||
            KeyInfoT::isEqual(Ptr->first, Tombstone)))
      ++Ptr;
  }
};
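
// Illustrative sketch (not part of this header): iterators walk the bucket
// array in memory order and skip empty/tombstone slots, so iteration order
// is unspecified and changes whenever the table rehashes.
//
//   for (objc::DenseMap<void *, int>::iterator i = m.begin(), e = m.end();
//        i != e; ++i) {
//     // i->first is the key, i->second is the value
//   }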

} // end namespace objc

#endif