/* deps/jemalloc/src/ckh.c (redis.git, commit 742a950bea200293f12a0197581b3a5b3c6998e6) */

/*
 *******************************************************************************
 * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
 * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
 * functions are employed. The original cuckoo hashing algorithm was described
 * in:
 *
 *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
 *     51(2):122-144.
 *
 * Generalization of cuckoo hashing was discussed in:
 *
 *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
 *     alternative to traditional hash tables. In Proceedings of the 7th
 *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
 *     January 2006.
 *
 * This implementation uses precisely two hash functions because that is the
 * fewest that can work, and supporting multiple hashes is an implementation
 * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
 * that shows approximate expected maximum load factors for various
 * configurations:
 *
 *           |        #cells/bucket          |
 *   #hashes |   1   |   2   |   4   |   8   |
 *   --------+-------+-------+-------+-------+
 *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
 *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
 *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
 *         4 | 0.97  | 0.99  | 0.999 |       |
 *
 * The number of cells per bucket is chosen such that a bucket fits in one cache
 * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
 * respectively.
 ******************************************************************************/
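/*
 * A worked illustration of the cache-line sizing above, assuming a 64-byte
 * cache line and a cell (ckhc_t) that holds two pointers (key and data):
 *
 *   64-bit system: 16-byte cells, 64/16 = 4 = 2^2 cells/bucket -> (4,2),
 *                  i.e. LG_CKH_BUCKET_CELLS == 2.
 *   32-bit system:  8-byte cells, 64/8  = 8 = 2^3 cells/bucket -> (8,2),
 *                  i.e. LG_CKH_BUCKET_CELLS == 3.
 *
 * With two hash functions and four cells per bucket, the table above predicts
 * an expected maximum load factor of roughly 0.93.
 */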
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool ckh_grow(ckh_t *ckh);
static void ckh_shrink(ckh_t *ckh);

/******************************************************************************/

/*
 * Search bucket for key and return the cell number if found; SIZE_T_MAX
 * otherwise.
 */
JEMALLOC_INLINE size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
    ckhc_t *cell;
    unsigned i;

    for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
        if (cell->key != NULL && ckh->keycomp(key, cell->key))
            return ((bucket << LG_CKH_BUCKET_CELLS) + i);
    }

    return (SIZE_T_MAX);
}

/*
 * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
 */
JEMALLOC_INLINE size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
    size_t hash1, hash2, bucket, cell;

    assert(ckh != NULL);

    ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);

    /* Search primary bucket. */
    bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    if (cell != SIZE_T_MAX)
        return (cell);

    /* Search secondary bucket. */
    bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    cell = ckh_bucket_search(ckh, bucket, key);
    return (cell);
}

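/*
 * Note on lookup cost: a key can only reside in one of the two buckets derived
 * from its hashes above, so ckh_isearch() probes at most
 * 2 * (1 << LG_CKH_BUCKET_CELLS) cells -- e.g. 2 * 4 = 8 key comparisons on a
 * 64-bit system -- regardless of table size.
 */
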
JEMALLOC_INLINE bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
    const void *data)
{
    ckhc_t *cell;
    unsigned offset, i;

    /*
     * Cycle through the cells in the bucket, starting at a random position.
     * The randomness avoids worst-case search overhead as buckets fill up.
     */
    prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
    for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
            ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
        if (cell->key == NULL) {
            cell->key = key;
            cell->data = data;
            ckh->count++;
            return (false);
        }
    }

    return (true);
}

/*
 * No space is available in bucket. Randomly evict an item, then try to find an
 * alternate location for that item. Iteratively repeat this
 * eviction/relocation procedure until either success or detection of an
 * eviction/relocation bucket cycle.
 */
JEMALLOC_INLINE bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
    void const **argdata)
{
    const void *key, *data, *tkey, *tdata;
    ckhc_t *cell;
    size_t hash1, hash2, bucket, tbucket;
    unsigned i;

    bucket = argbucket;
    key = *argkey;
    data = *argdata;
    while (true) {
        /*
         * Choose a random item within the bucket to evict. This is
         * critical to correct function, because without (eventually)
         * evicting all items within a bucket during iteration, it
         * would be possible to get stuck in an infinite loop if there
         * were an item for which both hashes indicated the same
         * bucket.
         */
        prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
        cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
        assert(cell->key != NULL);

        /* Swap cell->{key,data} and {key,data} (evict). */
        tkey = cell->key; tdata = cell->data;
        cell->key = key; cell->data = data;
        key = tkey; data = tdata;

        /* Find the alternate bucket for the evicted item. */
        ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
        tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
        if (tbucket == bucket) {
            tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
            /*
             * It may be that (tbucket == bucket) still, if the
             * item's hashes both indicate this bucket. However,
             * we are guaranteed to eventually escape this bucket
             * during iteration, assuming pseudo-random item
             * selection (true randomness would make infinite
             * looping a remote possibility). The reason we can
             * never get trapped forever is that there are two
             * cases:
             *
             * 1) This bucket == argbucket, so we will quickly
             *    detect an eviction cycle and terminate.
             * 2) An item was evicted to this bucket from another,
             *    which means that at least one item in this bucket
             *    has hashes that indicate distinct buckets.
             */
        }

        /* Check for a cycle. */
        if (tbucket == argbucket) {
            *argkey = key;
            *argdata = data;
            return (true);
        }

        bucket = tbucket;
        if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
            return (false);
    }
}

JEMALLOC_INLINE bool
ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata)
{
    size_t hash1, hash2, bucket;
    const void *key = *argkey;
    const void *data = *argdata;

    ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);

    /* Try to insert in primary bucket. */
    bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
        return (false);

    /* Try to insert in secondary bucket. */
    bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
    if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
        return (false);

    /*
     * Try to find a place for this item via iterative eviction/relocation.
     */
    return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}

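/*
 * Taken together, an insertion attempt proceeds: primary bucket, then
 * secondary bucket, then the eviction/relocation loop above. If that loop
 * detects a cycle, ckh_try_insert() reports failure with the displaced
 * key/data written back through argkey/argdata, and ckh_insert() responds by
 * growing the table and retrying.
 */
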
/*
 * Try to rebuild the hash table from scratch by inserting all items from the
 * old table into the new.
 */
JEMALLOC_INLINE bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
    size_t count, i, nins;
    const void *key, *data;

    count = ckh->count;
    ckh->count = 0;
    for (i = nins = 0; nins < count; i++) {
        if (aTab[i].key != NULL) {
            key = aTab[i].key;
            data = aTab[i].data;
            if (ckh_try_insert(ckh, &key, &data)) {
                ckh->count = count;
                return (true);
            }
            nins++;
        }
    }

    return (false);
}

static bool
ckh_grow(ckh_t *ckh)
{
    ckhc_t *tab, *ttab;
    size_t lg_curcells, usize;
    unsigned lg_prevbuckets;

    /*
     * It is possible (though unlikely, given well behaved hashes) that the
     * table will have to be doubled more than once in order to create a
     * usable table.
     */
    lg_prevbuckets = ckh->lg_curbuckets;
    lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
    while (true) {
        lg_curcells++;
        usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
        if (usize == 0)
            return (true);
        tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
        if (tab == NULL)
            return (true);

        /* Swap in new table. */
        ttab = ckh->tab;
        ckh->tab = tab;
        tab = ttab;
        ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

        if (ckh_rebuild(ckh, tab) == false) {
            /* Rebuild succeeded; free the old (evacuated) table. */
            idalloc(tab);
            break;
        }

        /* Rebuilding failed, so back out partially rebuilt table. */
        idalloc(ckh->tab);
        ckh->tab = tab;
        ckh->lg_curbuckets = lg_prevbuckets;
    }

    return (false);
}

static void
ckh_shrink(ckh_t *ckh)
{
    ckhc_t *tab, *ttab;
    size_t lg_curcells, usize;
    unsigned lg_prevbuckets;

    /*
     * It is possible (though unlikely, given well behaved hashes) that the
     * table rebuild will fail.
     */
    lg_prevbuckets = ckh->lg_curbuckets;
    lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
    usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
    if (usize == 0)
        return;
    tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
    if (tab == NULL) {
        /*
         * An OOM error isn't worth propagating, since it doesn't
         * prevent this or future operations from proceeding.
         */
        return;
    }
    /* Swap in new table. */
    ttab = ckh->tab;
    ckh->tab = tab;
    tab = ttab;
    ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

    if (ckh_rebuild(ckh, tab) == false) {
        /* Rebuild succeeded; free the old (evacuated) table. */
        idalloc(tab);
        return;
    }

    /* Rebuilding failed, so back out partially rebuilt table. */
    idalloc(ckh->tab);
    ckh->tab = tab;
    ckh->lg_curbuckets = lg_prevbuckets;
}

bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
    size_t mincells, usize;
    unsigned lg_mincells;

    assert(minitems > 0);
    assert(hash != NULL);
    assert(keycomp != NULL);

    ckh->ngrows = 0;
    ckh->nshrinks = 0;
    ckh->nshrinkfails = 0;
    ckh->ninserts = 0;
    ckh->nrelocs = 0;
    ckh->prng_state = 42; /* Value doesn't really matter. */
    ckh->count = 0;

    /*
     * Find the minimum power of 2 that is large enough to fit minitems
     * entries. We are using (2+,2) cuckoo hashing, which has an expected
     * maximum load factor of at least ~0.86, so 0.75 is a conservative load
     * factor that will typically allow minitems items to fit without ever
     * growing the table.
     */
    assert(LG_CKH_BUCKET_CELLS > 0);
    mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
    for (lg_mincells = LG_CKH_BUCKET_CELLS;
        (ZU(1) << lg_mincells) < mincells;
        lg_mincells++)
        ; /* Do nothing. */
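    /*
     * A worked example of the sizing math above, assuming minitems == 100 and
     * LG_CKH_BUCKET_CELLS == 2 (64-bit):
     *
     *   mincells    = ((100 + (3 - 100 % 3)) / 3) << 2 = (102 / 3) * 4 = 136
     *                 (a ~0.74 load factor for 100 items),
     *   lg_mincells = 8 (smallest power of 2 with 2^lg_mincells >= 136),
     *
     * giving 2^8 = 256 cells arranged as 64 buckets of 4 cells each.
     */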
    ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
    ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
    ckh->hash = hash;
    ckh->keycomp = keycomp;

    usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
    if (usize == 0)
        return (true);
    ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
    if (ckh->tab == NULL) {
        return (true);
    }

    return (false);
}

void
ckh_delete(ckh_t *ckh)
{

    assert(ckh != NULL);

#ifdef CKH_VERBOSE
    malloc_printf(
        "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
        " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
        " nrelocs: %"PRIu64"\n", __func__, ckh,
        (unsigned long long)ckh->ngrows,
        (unsigned long long)ckh->nshrinks,
        (unsigned long long)ckh->nshrinkfails,
        (unsigned long long)ckh->ninserts,
        (unsigned long long)ckh->nrelocs);
#endif

    idalloc(ckh->tab);
#ifdef JEMALLOC_DEBUG
    memset(ckh, 0x5a, sizeof(ckh_t));
#endif
}

size_t
ckh_count(ckh_t *ckh)
{

    assert(ckh != NULL);

    return (ckh->count);
}

bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
    size_t i, ncells;

    for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
        LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
        if (ckh->tab[i].key != NULL) {
            if (key != NULL)
                *key = (void *)ckh->tab[i].key;
            if (data != NULL)
                *data = (void *)ckh->tab[i].data;
            *tabind = i + 1;
            return (false);
        }
    }

    return (true);
}

bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{

    assert(ckh != NULL);
    assert(ckh_search(ckh, key, NULL, NULL));

    while (ckh_try_insert(ckh, &key, &data)) {
        if (ckh_grow(ckh)) {
            /* Growing failed (OOM); report insertion failure. */
            return (true);
        }
    }

    return (false);
}

bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
    size_t cell;

    assert(ckh != NULL);

    cell = ckh_isearch(ckh, searchkey);
    if (cell != SIZE_T_MAX) {
        if (key != NULL)
            *key = (void *)ckh->tab[cell].key;
        if (data != NULL)
            *data = (void *)ckh->tab[cell].data;
        ckh->tab[cell].key = NULL;
        ckh->tab[cell].data = NULL; /* Not necessary. */

        ckh->count--;
        /* Try to halve the table if it is less than 1/4 full. */
        if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
            + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
            > ckh->lg_minbuckets) {
            /* Ignore error due to OOM. */
            ckh_shrink(ckh);
        }

        return (false);
    }

    return (true);
}

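/*
 * A worked example of the shrink threshold above, assuming lg_curbuckets == 6
 * and LG_CKH_BUCKET_CELLS == 2: the table holds 1 << (6 + 2) = 256 cells, and
 * ckh_shrink() is attempted once the count drops below 1 << (6 + 2 - 2) = 64,
 * i.e. below 1/4 of the cells, provided the table is still above its minimum
 * size.
 */
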
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
    size_t cell;

    assert(ckh != NULL);

    cell = ckh_isearch(ckh, searchkey);
    if (cell != SIZE_T_MAX) {
        if (key != NULL)
            *key = (void *)ckh->tab[cell].key;
        if (data != NULL)
            *data = (void *)ckh->tab[cell].data;
        return (false);
    }

    return (true);
}

void
ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
{
    size_t ret1, ret2;
    uint64_t h;

    assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
    assert(hash1 != NULL);
    assert(hash2 != NULL);

    h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
         * enough bits.
         */
        ret1 = h & ZU(0xffffffffU);
        ret2 = h >> 32;
    } else {
        ret1 = h;
        ret2 = hash(key, strlen((const char *)key),
            UINT64_C(0x8432a476666bbc13));
    }

    *hash1 = ret1;
    *hash2 = ret2;
}

bool
ckh_string_keycomp(const void *k1, const void *k2)
{

    assert(k1 != NULL);
    assert(k2 != NULL);

    return (strcmp((char *)k1, (char *)k2) ? false : true);
}

void
ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2)
{
    size_t ret1, ret2;
    uint64_t h;
    union {
        const void *v;
        uint64_t i;
    } u;

    assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
    assert(hash1 != NULL);
    assert(hash2 != NULL);

    assert(sizeof(u.v) == sizeof(u.i));
#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
    u.i = 0;
#endif
    u.v = key;
    h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
    if (minbits <= 32) {
        /*
         * Avoid doing multiple hashes, since a single hash provides
         * enough bits.
         */
        ret1 = h & ZU(0xffffffffU);
        ret2 = h >> 32;
    } else {
        assert(SIZEOF_PTR == 8);
        ret1 = h;
        ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
    }

    *hash1 = ret1;
    *hash2 = ret2;
}

bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{

    return ((k1 == k2) ? true : false);
}
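
/*
 * A minimal usage sketch of the ckh API defined in this file, using the string
 * hash and key comparison helpers above. It follows the file's convention that
 * false indicates success and true indicates failure; compiled out (#if 0)
 * because it is illustrative only.
 */
#if 0
static void
ckh_usage_example(void)
{
    ckh_t ckh;
    void *data;

    /* Create a table sized for at least 16 items. */
    if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
        return; /* OOM. */

    /* Insert a key/value pair, then look it up. */
    if (ckh_insert(&ckh, "pi", "3.14159") == false &&
        ckh_search(&ckh, "pi", NULL, &data) == false)
        assert(strcmp((const char *)data, "3.14159") == 0);

    /* ckh_remove() returns false if the key was found and removed. */
    ckh_remove(&ckh, "pi", NULL, NULL);

    ckh_delete(&ckh);
}
#endif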