/*
 *******************************************************************************
 * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
 * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
 * functions are employed.  The original cuckoo hashing algorithm was described
 * in:
 *
 *   Pagh, R., F.F. Rodler (2004) Cuckoo Hashing.  Journal of Algorithms
 *     51(2):122-144.
 *
 * Generalization of cuckoo hashing was discussed in:
 *
 *   Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
 *     alternative to traditional hash tables.  In Proceedings of the 7th
 *     Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
 *     January 2006.
 *
 * This implementation uses precisely two hash functions because that is the
 * fewest that can work, and supporting multiple hashes is an implementation
 * burden.  Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
 * that shows approximate expected maximum load factors for various
 * configurations:
 *
 *           |         #cells/bucket         |
 *   #hashes |   1   |   2   |   4   |   8   |
 *   --------+-------+-------+-------+-------+
 *         1 | 0.006 | 0.006 | 0.03  | 0.12  |
 *         2 | 0.49  | 0.86  |>0.93< |>0.96< |
 *         3 | 0.91  | 0.97  | 0.98  | 0.999 |
 *         4 | 0.97  | 0.99  | 0.999 |       |
 *
 * The number of cells per bucket is chosen such that a bucket fits in one cache
 * line.  So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
 * respectively.
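 *
 * (Illustrative arithmetic, assuming the common 64-byte cache line: each cell
 * holds a key pointer and a data pointer, i.e. 16 bytes on a 64-bit system and
 * 8 bytes on a 32-bit system, so one cache line holds 64/16 = 4 or 64/8 = 8
 * cells per bucket, respectively.)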
 *
 ******************************************************************************/
#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	ckh_grow(ckh_t *ckh);
static void	ckh_shrink(ckh_t *ckh);

/******************************************************************************/

/*
 * Search bucket for key and return the cell number if found; SIZE_T_MAX
 * otherwise.
 */
JEMALLOC_INLINE size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
	ckhc_t *cell;
	unsigned i;

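	/* Cells for bucket b occupy ckh->tab[b << LG_CKH_BUCKET_CELLS] onward. */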
	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
		if (cell->key != NULL && ckh->keycomp(key, cell->key))
			return ((bucket << LG_CKH_BUCKET_CELLS) + i);
	}

	return (SIZE_T_MAX);
}

/*
 * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
 */
JEMALLOC_INLINE size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
	size_t hash1, hash2, bucket, cell;

	assert(ckh != NULL);

	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);

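	/*
	 * lg_curbuckets is the lg of the current bucket count, so masking a
	 * hash with ((1 << lg_curbuckets) - 1) reduces it to a bucket index.
	 */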
	/* Search primary bucket. */
	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
	cell = ckh_bucket_search(ckh, bucket, key);
	if (cell != SIZE_T_MAX)
		return (cell);

	/* Search secondary bucket. */
	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
	cell = ckh_bucket_search(ckh, bucket, key);
	return (cell);
}

JEMALLOC_INLINE bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
    const void *data)
{
	ckhc_t *cell;
	unsigned offset, i;

	/*
	 * Cycle through the cells in the bucket, starting at a random position.
	 * The randomness avoids worst-case search overhead as buckets fill up.
	 */
	prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
	for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
		    ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
		if (cell->key == NULL) {
			cell->key = key;
			cell->data = data;
			ckh->count++;
			return (false);
		}
	}

	return (true);
}

/*
 * No space is available in the bucket.  Randomly evict an item, then try to
 * find an alternate location for that item.  Iteratively repeat this
 * eviction/relocation procedure until either success or detection of an
 * eviction/relocation bucket cycle.
 */
JEMALLOC_INLINE bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
    void const **argdata)
{
	const void *key, *data, *tkey, *tdata;
	ckhc_t *cell;
	size_t hash1, hash2, bucket, tbucket;
	unsigned i;

	bucket = argbucket;
	key = *argkey;
	data = *argdata;
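	/*
	 * Loop invariant: key/data always refer to the item that is currently
	 * without a home; each pass evicts an occupant to make room for it and
	 * then tries to re-home the evictee in its alternate bucket.
	 */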
	while (true) {
		/*
		 * Choose a random item within the bucket to evict.  This is
		 * critical to correct function, because without (eventually)
		 * evicting all items within a bucket during iteration, it
		 * would be possible to get stuck in an infinite loop if there
		 * were an item for which both hashes indicated the same
		 * bucket.
		 */
		prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
		cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
		assert(cell->key != NULL);

		/* Swap cell->{key,data} and {key,data} (evict). */
		tkey = cell->key; tdata = cell->data;
		cell->key = key; cell->data = data;
		key = tkey; data = tdata;

#ifdef CKH_COUNT
		ckh->nrelocs++;
#endif

		/* Find the alternate bucket for the evicted item. */
		ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
		tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
		if (tbucket == bucket) {
			tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
			/*
			 * It may be that (tbucket == bucket) still, if the
			 * item's hashes both indicate this bucket.  However,
			 * we are guaranteed to eventually escape this bucket
			 * during iteration, assuming pseudo-random item
			 * selection (true randomness would make infinite
			 * looping a remote possibility).  The reason we can
			 * never get trapped forever is that there are two
			 * cases:
			 *
			 * 1) This bucket == argbucket, so we will quickly
			 *    detect an eviction cycle and terminate.
			 * 2) An item was evicted to this bucket from another,
			 *    which means that at least one item in this bucket
			 *    has hashes that indicate distinct buckets.
			 */
		}
		/* Check for a cycle. */
		if (tbucket == argbucket) {
			*argkey = key;
			*argdata = data;
			return (true);
		}

		bucket = tbucket;
		if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
			return (false);
	}
}

JEMALLOC_INLINE bool
ckh_try_insert(ckh_t *ckh, void const **argkey, void const **argdata)
{
	size_t hash1, hash2, bucket;
	const void *key = *argkey;
	const void *data = *argdata;

	ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);

	/* Try to insert in primary bucket. */
	bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
		return (false);

	/* Try to insert in secondary bucket. */
	bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
	if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
		return (false);

	/*
	 * Try to find a place for this item via iterative eviction/relocation.
	 */
	return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
}

/*
 * Try to rebuild the hash table from scratch by inserting all items from the
 * old table into the new.
 */
JEMALLOC_INLINE bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
	size_t count, i, nins;
	const void *key, *data;

	count = ckh->count;
	ckh->count = 0;
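	/*
	 * Scan the old table until all count items have been reinserted; nins
	 * tracks successful reinsertions so the loop can stop early rather
	 * than walking every cell of the old table.
	 */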
	for (i = nins = 0; nins < count; i++) {
		if (aTab[i].key != NULL) {
			key = aTab[i].key;
			data = aTab[i].data;
			if (ckh_try_insert(ckh, &key, &data)) {
				ckh->count = count;
				return (true);
			}
			nins++;
		}
	}

	return (false);
}

static bool
ckh_grow(ckh_t *ckh)
{
	bool ret;
	ckhc_t *tab, *ttab;
	size_t lg_curcells;
	unsigned lg_prevbuckets;

#ifdef CKH_COUNT
	ckh->ngrows++;
#endif

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table will have to be doubled more than once in order to create a
	 * usable table.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
	while (true) {
		size_t usize;

		lg_curcells++;
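		/*
		 * sa2u() reports the usable size for the requested size and
		 * alignment; a return of 0 indicates the request cannot be
		 * satisfied (e.g. size overflow), so this grow attempt fails.
		 */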
		usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
		if (usize == 0) {
			ret = true;
			goto label_return;
		}
		tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
		if (tab == NULL) {
			ret = true;
			goto label_return;
		}
		/* Swap in new table. */
		ttab = ckh->tab;
		ckh->tab = tab;
		tab = ttab;
		ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

		if (ckh_rebuild(ckh, tab) == false) {
			idalloc(tab);
			break;
		}

		/* Rebuilding failed, so back out partially rebuilt table. */
		idalloc(ckh->tab);
		ckh->tab = tab;
		ckh->lg_curbuckets = lg_prevbuckets;
	}

	ret = false;
label_return:
	return (ret);
}

static void
ckh_shrink(ckh_t *ckh)
{
	ckhc_t *tab, *ttab;
	size_t lg_curcells, usize;
	unsigned lg_prevbuckets;

	/*
	 * It is possible (though unlikely, given well behaved hashes) that the
	 * table rebuild will fail.
	 */
	lg_prevbuckets = ckh->lg_curbuckets;
	lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
	usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
	if (usize == 0)
		return;
	tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
	if (tab == NULL) {
		/*
		 * An OOM error isn't worth propagating, since it doesn't
		 * prevent this or future operations from proceeding.
		 */
		return;
	}
	/* Swap in new table. */
	ttab = ckh->tab;
	ckh->tab = tab;
	tab = ttab;
	ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;

	if (ckh_rebuild(ckh, tab) == false) {
		idalloc(tab);
#ifdef CKH_COUNT
		ckh->nshrinks++;
#endif
		return;
	}

	/* Rebuilding failed, so back out partially rebuilt table. */
	idalloc(ckh->tab);
	ckh->tab = tab;
	ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
	ckh->nshrinkfails++;
#endif
}

bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
	bool ret;
	size_t mincells, usize;
	unsigned lg_mincells;

	assert(minitems > 0);
	assert(hash != NULL);
	assert(keycomp != NULL);

#ifdef CKH_COUNT
	ckh->ngrows = 0;
	ckh->nshrinks = 0;
	ckh->nshrinkfails = 0;
	ckh->ninserts = 0;
	ckh->nrelocs = 0;
#endif
	ckh->prng_state = 42; /* Value doesn't really matter. */
	ckh->count = 0;

	/*
	 * Find the minimum power of 2 that is large enough to fit minitems
	 * entries.  We are using (2+,2) cuckoo hashing, which has an expected
	 * maximum load factor of at least ~0.86, so 0.75 is a conservative load
	 * factor that will typically allow minitems to fit without ever
	 * growing the table.
	 */
	assert(LG_CKH_BUCKET_CELLS > 0);
	mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
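	/*
	 * Illustrative example: minitems == 64 gives mincells == 88 here, and
	 * the loop below rounds that up to the next power of 2, 128 cells, so
	 * the initial load factor is at most 0.5.
	 */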
	for (lg_mincells = LG_CKH_BUCKET_CELLS;
	    (ZU(1) << lg_mincells) < mincells;
	    lg_mincells++)
		; /* Do nothing. */
	ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
	ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
	ckh->hash = hash;
	ckh->keycomp = keycomp;

	usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
	if (usize == 0) {
		ret = true;
		goto label_return;
	}
	ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
	if (ckh->tab == NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return (ret);
}

void
ckh_delete(ckh_t *ckh)
{

	assert(ckh != NULL);

#ifdef CKH_VERBOSE
	malloc_printf(
	    "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
	    " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
	    " nrelocs: %"PRIu64"\n", __func__, ckh,
	    (uint64_t)ckh->ngrows,
	    (uint64_t)ckh->nshrinks,
	    (uint64_t)ckh->nshrinkfails,
	    (uint64_t)ckh->ninserts,
	    (uint64_t)ckh->nrelocs);
#endif

	idalloc(ckh->tab);
#ifdef JEMALLOC_DEBUG
	memset(ckh, 0x5a, sizeof(ckh_t));
#endif
}

size_t
ckh_count(ckh_t *ckh)
{

	assert(ckh != NULL);

	return (ckh->count);
}

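/*
 * Iterate over the table.  *tabind is an opaque cursor that the caller
 * initializes to 0; each successful call reports one key/data pair, advances
 * the cursor, and returns false.  A return of true means iteration is
 * complete.
 */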
bool
ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
{
	size_t i, ncells;

	for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
	    LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
		if (ckh->tab[i].key != NULL) {
			if (key != NULL)
				*key = (void *)ckh->tab[i].key;
			if (data != NULL)
				*data = (void *)ckh->tab[i].data;
			*tabind = i + 1;
			return (false);
		}
	}

	return (true);
}

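/*
 * Insert key/data into the table.  The key must not already be present (the
 * assertion below relies on ckh_search() returning true for a missing key).
 * Returns false on success, true on error (e.g. allocation failure while
 * growing the table).
 */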
bool
ckh_insert(ckh_t *ckh, const void *key, const void *data)
{
	bool ret;

	assert(ckh != NULL);
	assert(ckh_search(ckh, key, NULL, NULL));

#ifdef CKH_COUNT
	ckh->ninserts++;
#endif

	while (ckh_try_insert(ckh, &key, &data)) {
		if (ckh_grow(ckh)) {
			ret = true;
			goto label_return;
		}
	}

	ret = false;
label_return:
	return (ret);
}

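/*
 * Remove searchkey from the table, optionally reporting the stored key/data
 * via *key/*data.  Returns false on success, true if the key was not found.
 */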
bool
ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
	size_t cell;

	assert(ckh != NULL);

	cell = ckh_isearch(ckh, searchkey);
	if (cell != SIZE_T_MAX) {
		if (key != NULL)
			*key = (void *)ckh->tab[cell].key;
		if (data != NULL)
			*data = (void *)ckh->tab[cell].data;
		ckh->tab[cell].key = NULL;
		ckh->tab[cell].data = NULL; /* Not necessary. */

		ckh->count--;
		/* Try to halve the table if it is less than 1/4 full. */
		if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
		    + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
		    > ckh->lg_minbuckets) {
			/* Ignore error due to OOM. */
			ckh_shrink(ckh);
		}

		return (false);
	}

	return (true);
}

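/*
 * Search for searchkey, optionally reporting the stored key/data via
 * *key/*data.  Returns false if the key was found, true otherwise.
 */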
bool
ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
{
	size_t cell;

	assert(ckh != NULL);

	cell = ckh_isearch(ckh, searchkey);
	if (cell != SIZE_T_MAX) {
		if (key != NULL)
			*key = (void *)ckh->tab[cell].key;
		if (data != NULL)
			*data = (void *)ckh->tab[cell].data;
		return (false);
	}

	return (true);
}

void
ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
{
	size_t ret1, ret2;
	uint64_t h;

	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
	assert(hash1 != NULL);
	assert(hash2 != NULL);

	h = hash(key, strlen((const char *)key), UINT64_C(0x94122f335b332aea));
	if (minbits <= 32) {
		/*
		 * Avoid doing multiple hashes, since a single hash provides
		 * enough bits.
		 */
		ret1 = h & ZU(0xffffffffU);
		ret2 = h >> 32;
	} else {
		ret1 = h;
		ret2 = hash(key, strlen((const char *)key),
		    UINT64_C(0x8432a476666bbc13));
	}

	*hash1 = ret1;
	*hash2 = ret2;
}

bool
ckh_string_keycomp(const void *k1, const void *k2)
{

	assert(k1 != NULL);
	assert(k2 != NULL);

	return (strcmp((char *)k1, (char *)k2) ? false : true);
}

void
ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
    size_t *hash2)
{
	size_t ret1, ret2;
	uint64_t h;
	union {
		const void	*v;
		uint64_t	i;
	} u;

	assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
	assert(hash1 != NULL);
	assert(hash2 != NULL);

	assert(sizeof(u.v) == sizeof(u.i));
#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
	u.i = 0;
#endif
	u.v = key;
	h = hash(&u.i, sizeof(u.i), UINT64_C(0xd983396e68886082));
	if (minbits <= 32) {
		/*
		 * Avoid doing multiple hashes, since a single hash provides
		 * enough bits.
		 */
		ret1 = h & ZU(0xffffffffU);
		ret2 = h >> 32;
	} else {
		assert(SIZEOF_PTR == 8);
		ret1 = h;
		ret2 = hash(&u.i, sizeof(u.i), UINT64_C(0x5e2be9aff8709a5d));
	}

	*hash1 = ret1;
	*hash2 = ret2;
}

bool
ckh_pointer_keycomp(const void *k1, const void *k2)
{

	return ((k1 == k2) ? true : false);
}
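
/*
 * Illustrative usage sketch (not part of the library; compiled out).  It uses
 * only the functions defined above and shows the intended calling sequence
 * for a string-keyed table.
 */
#if 0
static void
ckh_usage_example(void)
{
	ckh_t ckh;
	void *data;

	/* Size the table for roughly 16 items up front. */
	if (ckh_new(&ckh, 16, ckh_string_hash, ckh_string_keycomp))
		return; /* Allocation failed. */

	/* ckh_insert() returns false on success; the key must not be present. */
	if (ckh_insert(&ckh, "answer", (void *)(uintptr_t)42) == false) {
		/* ckh_search() also returns false on success. */
		if (ckh_search(&ckh, "answer", NULL, &data) == false)
			assert((uintptr_t)data == 42);
		/* Remove the item; the table may shrink as items are removed. */
		ckh_remove(&ckh, "answer", NULL, NULL);
	}

	assert(ckh_count(&ckh) == 0);
	ckh_delete(&ckh);
}
#endif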