/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * http://code.google.com/p/smhasher/
 *
 * Copyright (c) 2009-2011 Austin Appleby.
 *
 * MurmurHash3 was written by Austin Appleby, and is placed in the public
 * domain. The author hereby disclaims copyright to this source code.
 */

/*
 * http://burtleburtle.net/bob/hash/
 *
 * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
 *
 * You can use this free for any purpose. It's in the public domain.
 * It has no warranty.
 */

#include <stdbool.h>
#include <sys/types.h>
#include <machine/endian.h>
#include <net/flowhash.h>

static inline u_int32_t getblock32(const u_int32_t *, int);
static inline u_int64_t getblock64(const u_int64_t *, int);
static inline u_int32_t mh3_fmix32(u_int32_t);
static inline u_int64_t mh3_fmix64(u_int64_t);

#define ALIGNED16(v)	((((uintptr_t)(v)) & 1) == 0)
#define ALIGNED32(v)	((((uintptr_t)(v)) & 3) == 0)
#define ALIGNED64(v)	((((uintptr_t)(v)) & 7) == 0)

#define ROTL32(x, r)	(((x) << (r)) | ((x) >> (32 - (r))))
#define ROTL64(x, r)	(((x) << (r)) | ((x) >> (64 - (r))))

/*
 * The following hash algorithms are selected based on performance:
 *
 * Intel 32-bit:	MurmurHash3_x86_32
 * Intel 64-bit:	MurmurHash3_x64_128
 * ARM, et al:		JHash
 */
#if defined(__i386__)
net_flowhash_fn_t *net_flowhash = net_flowhash_mh3_x86_32;
#elif defined(__x86_64__)
net_flowhash_fn_t *net_flowhash = net_flowhash_mh3_x64_128;
#else /* !__i386__ && !__x86_64__ */
net_flowhash_fn_t *net_flowhash = net_flowhash_jhash;
#endif /* !__i386__ && !__x86_64__ */
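
/*
 * Illustrative usage (not part of the original file): callers hash an
 * arbitrary key through the selected function pointer.  The flow tuple
 * layout and seed below are hypothetical; any caller-chosen key layout
 * works, as long as the same seed is reused whenever hashes need to be
 * comparable.
 *
 *	struct flow_tuple { u_int32_t src, dst; u_int16_t sport, dport; };
 *
 *	struct flow_tuple ft = { src, dst, sport, dport };
 *	u_int32_t hash = net_flowhash(&ft, sizeof (ft), seed);
 */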

#if defined(__i386__) || defined(__x86_64__)
static inline u_int32_t
getblock32(const u_int32_t *p, int i)
{
	return (p[i]);
}

static inline u_int64_t
getblock64(const u_int64_t *p, int i)
{
	return (p[i]);
}
#else /* !__i386__ && !__x86_64__ */
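/*
 * On architectures other than i386/x86_64, the key may not be suitably
 * aligned for word-sized loads, so these helpers fall back to assembling
 * each block from individual bytes in host byte order.
 */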
static inline u_int32_t
getblock32(const u_int32_t *p, int i)
{
	const u_int8_t *bytes = (const u_int8_t *)(void *)(uintptr_t)(p + i);
	u_int32_t value;

	if (ALIGNED32(p)) {
		value = p[i];
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		value =
		    (((u_int32_t)bytes[0]) << 24) |
		    (((u_int32_t)bytes[1]) << 16) |
		    (((u_int32_t)bytes[2]) << 8) |
		    ((u_int32_t)bytes[3]);
#else /* LITTLE_ENDIAN */
		value =
		    (((u_int32_t)bytes[3]) << 24) |
		    (((u_int32_t)bytes[2]) << 16) |
		    (((u_int32_t)bytes[1]) << 8) |
		    ((u_int32_t)bytes[0]);
#endif /* LITTLE_ENDIAN */
	}
	return (value);
}

static inline u_int64_t
getblock64(const u_int64_t *p, int i)
{
	const u_int8_t *bytes = (const u_int8_t *)(void *)(uintptr_t)(p + i);
	u_int64_t value;

	if (ALIGNED64(p)) {
		value = p[i];
	} else {
#if BYTE_ORDER == BIG_ENDIAN
		value =
		    (((u_int64_t)bytes[0]) << 56) |
		    (((u_int64_t)bytes[1]) << 48) |
		    (((u_int64_t)bytes[2]) << 40) |
		    (((u_int64_t)bytes[3]) << 32) |
		    (((u_int64_t)bytes[4]) << 24) |
		    (((u_int64_t)bytes[5]) << 16) |
		    (((u_int64_t)bytes[6]) << 8) |
		    ((u_int64_t)bytes[7]);
#else /* LITTLE_ENDIAN */
		value =
		    (((u_int64_t)bytes[7]) << 56) |
		    (((u_int64_t)bytes[6]) << 48) |
		    (((u_int64_t)bytes[5]) << 40) |
		    (((u_int64_t)bytes[4]) << 32) |
		    (((u_int64_t)bytes[3]) << 24) |
		    (((u_int64_t)bytes[2]) << 16) |
		    (((u_int64_t)bytes[1]) << 8) |
		    ((u_int64_t)bytes[0]);
#endif /* LITTLE_ENDIAN */
	}
	return (value);
}
#endif /* !__i386__ && !__x86_64__ */

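/*
 * MurmurHash3 finalization mix (32- and 64-bit variants): forces all
 * bits of a hash block to avalanche.
 */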
static inline u_int32_t
mh3_fmix32(u_int32_t h)
{
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;

	return (h);
}

static inline u_int64_t
mh3_fmix64(u_int64_t k)
{
	k ^= k >> 33;
	k *= 0xff51afd7ed558ccdLLU;
	k ^= k >> 33;
	k *= 0xc4ceb9fe1a85ec53LLU;
	k ^= k >> 33;

	return (k);
}

/*
 * MurmurHash3_x86_32
 */
#define MH3_X86_32_C1	0xcc9e2d51
#define MH3_X86_32_C2	0x1b873593

u_int32_t
net_flowhash_mh3_x86_32(const void *key, u_int32_t len, const u_int32_t seed)
{
	const u_int8_t *data = (const u_int8_t *)key;
	const u_int32_t nblocks = len / 4;
	const u_int32_t *blocks;
	const u_int8_t *tail;
	u_int32_t h1 = seed, k1;
	int i;

	/* body */
	blocks = (const u_int32_t *)(const void *)(data + nblocks * 4);

	for (i = -nblocks; i; i++) {
		k1 = getblock32(blocks, i);

		k1 *= MH3_X86_32_C1;
		k1 = ROTL32(k1, 15);
		k1 *= MH3_X86_32_C2;

		h1 ^= k1;
		h1 = ROTL32(h1, 13);
		h1 = h1 * 5 + 0xe6546b64;
	}

	/* tail */
	tail = (const u_int8_t *)(const void *)(data + nblocks * 4);
	k1 = 0;

	switch (len & 3) {
	case 3:
		k1 ^= tail[2] << 16;
		/* FALLTHRU */
	case 2:
		k1 ^= tail[1] << 8;
		/* FALLTHRU */
	case 1:
		k1 ^= tail[0];
		k1 *= MH3_X86_32_C1;
		k1 = ROTL32(k1, 15);
		k1 *= MH3_X86_32_C2;
		h1 ^= k1;
	};

	/* finalization */
	h1 ^= len;

	h1 = mh3_fmix32(h1);

	return (h1);
}

/*
 * MurmurHash3_x64_128
 */
#define MH3_X64_128_C1	0x87c37b91114253d5LLU
#define MH3_X64_128_C2	0x4cf5ad432745937fLLU

u_int32_t
net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed)
{
	const u_int8_t *data = (const u_int8_t *)key;
	const u_int32_t nblocks = len / 16;
	const u_int64_t *blocks;
	const u_int8_t *tail;
	u_int64_t h1 = seed, k1;
	u_int64_t h2 = seed, k2;
	u_int32_t i;

	/* body */
	blocks = (const u_int64_t *)(const void *)data;

	for (i = 0; i < nblocks; i++) {
		k1 = getblock64(blocks, i * 2 + 0);
		k2 = getblock64(blocks, i * 2 + 1);

		k1 *= MH3_X64_128_C1;
		k1 = ROTL64(k1, 31);
		k1 *= MH3_X64_128_C2;
		h1 ^= k1;

		h1 = ROTL64(h1, 27);
		h1 += h2;
		h1 = h1 * 5 + 0x52dce729;

		k2 *= MH3_X64_128_C2;
		k2 = ROTL64(k2, 33);
		k2 *= MH3_X64_128_C1;
		h2 ^= k2;

		h2 = ROTL64(h2, 31);
		h2 += h1;
		h2 = h2 * 5 + 0x38495ab5;
	}

	/* tail */
	tail = (const u_int8_t *)(const void *)(data + nblocks * 16);
	k1 = 0;
	k2 = 0;

	switch (len & 15) {
	case 15:
		k2 ^= ((u_int64_t)tail[14]) << 48;
		/* FALLTHRU */
	case 14:
		k2 ^= ((u_int64_t)tail[13]) << 40;
		/* FALLTHRU */
	case 13:
		k2 ^= ((u_int64_t)tail[12]) << 32;
		/* FALLTHRU */
	case 12:
		k2 ^= ((u_int64_t)tail[11]) << 24;
		/* FALLTHRU */
	case 11:
		k2 ^= ((u_int64_t)tail[10]) << 16;
		/* FALLTHRU */
	case 10:
		k2 ^= ((u_int64_t)tail[9]) << 8;
		/* FALLTHRU */
	case 9:
		k2 ^= ((u_int64_t)tail[8]) << 0;
		k2 *= MH3_X64_128_C2;
		k2 = ROTL64(k2, 33);
		k2 *= MH3_X64_128_C1;
		h2 ^= k2;
		/* FALLTHRU */
	case 8:
		k1 ^= ((u_int64_t)tail[7]) << 56;
		/* FALLTHRU */
	case 7:
		k1 ^= ((u_int64_t)tail[6]) << 48;
		/* FALLTHRU */
	case 6:
		k1 ^= ((u_int64_t)tail[5]) << 40;
		/* FALLTHRU */
	case 5:
		k1 ^= ((u_int64_t)tail[4]) << 32;
		/* FALLTHRU */
	case 4:
		k1 ^= ((u_int64_t)tail[3]) << 24;
		/* FALLTHRU */
	case 3:
		k1 ^= ((u_int64_t)tail[2]) << 16;
		/* FALLTHRU */
	case 2:
		k1 ^= ((u_int64_t)tail[1]) << 8;
		/* FALLTHRU */
	case 1:
		k1 ^= ((u_int64_t)tail[0]) << 0;
		k1 *= MH3_X64_128_C1;
		k1 = ROTL64(k1, 31);
		k1 *= MH3_X64_128_C2;
		h1 ^= k1;
	};

	/* finalization */
	h1 ^= len;
	h2 ^= len;

	h1 += h2;
	h2 += h1;

	h1 = mh3_fmix64(h1);
	h2 = mh3_fmix64(h2);

	h1 += h2;
	h2 += h1;

	/* discard all but the lowest 32 bits */
	return (h1 & 0xffffffff);
}

#define JHASH_INIT	0xdeadbeef

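/* lookup3 mix(): mix three 32-bit values reversibly */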
#define JHASH_MIX(a, b, c) {			\
	a -= c; a ^= ROTL32(c, 4); c += b;	\
	b -= a; b ^= ROTL32(a, 6); a += c;	\
	c -= b; c ^= ROTL32(b, 8); b += a;	\
	a -= c; a ^= ROTL32(c, 16); c += b;	\
	b -= a; b ^= ROTL32(a, 19); a += c;	\
	c -= b; c ^= ROTL32(b, 4); b += a;	\
}

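/* lookup3 final(): final mixing of three 32-bit values (a, b, c) into c */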
#define JHASH_FINAL(a, b, c) {			\
	c ^= b; c -= ROTL32(b, 14);		\
	a ^= c; a -= ROTL32(c, 11);		\
	b ^= a; b -= ROTL32(a, 25);		\
	c ^= b; c -= ROTL32(b, 16);		\
	a ^= c; a -= ROTL32(c, 4);		\
	b ^= a; b -= ROTL32(a, 14);		\
	c ^= b; c -= ROTL32(b, 24);		\
}

#if BYTE_ORDER == BIG_ENDIAN
/*
 * hashbig()
 */
u_int32_t
net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed)
{
	u_int32_t a, b, c;

	/* Set up the internal state */
	a = b = c = JHASH_INIT + len + seed;

	if (ALIGNED32(key)) {
		/* read 32-bit chunks */
		const u_int32_t *k = (const u_int32_t *)key;

		/*
		 * all but last block:
		 * aligned reads and affect 32 bits of (a,b,c)
		 */
		while (len > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			JHASH_MIX(a, b, c);
			len -= 12;
			k += 3;
		}

		/*
		 * handle the last (probably partial) block
		 *
		 * "k[2] << 8" actually reads beyond the end of the string,
		 * but then shifts out the part it's not allowed to read.
		 * Because the string is aligned, the illegal read is in
		 * the same word as the rest of the string. The masking
		 * trick does make the hash noticeably faster for short
		 * strings (like English words).
		 */
		switch (len) {
		case 12:
			c += k[2];
			b += k[1];
			a += k[0];
			break;

		case 11:
			c += k[2] & 0xffffff00;
			b += k[1];
			a += k[0];
			break;

		case 10:
			c += k[2] & 0xffff0000;
			b += k[1];
			a += k[0];
			break;

		case 9:
			c += k[2] & 0xff000000;
			b += k[1];
			a += k[0];
			break;

		case 8:
			b += k[1];
			a += k[0];
			break;

		case 7:
			b += k[1] & 0xffffff00;
			a += k[0];
			break;

		case 6:
			b += k[1] & 0xffff0000;
			a += k[0];
			break;

		case 5:
			b += k[1] & 0xff000000;
			a += k[0];
			break;

		case 4:
			a += k[0];
			break;

		case 3:
			a += k[0] & 0xffffff00;
			break;

		case 2:
			a += k[0] & 0xffff0000;
			break;

		case 1:
			a += k[0] & 0xff000000;
			break;

		case 0:
			/* zero length requires no mixing */
			return (c);
		}

		JHASH_FINAL(a, b, c);

		return (c);
	}

	/* need to read the key one byte at a time */
	const u_int8_t *k = (const u_int8_t *)key;

	/* all but the last block: affect some 32 bits of (a,b,c) */
	while (len > 12) {
		a += ((u_int32_t)k[0]) << 24;
		a += ((u_int32_t)k[1]) << 16;
		a += ((u_int32_t)k[2]) << 8;
		a += ((u_int32_t)k[3]);
		b += ((u_int32_t)k[4]) << 24;
		b += ((u_int32_t)k[5]) << 16;
		b += ((u_int32_t)k[6]) << 8;
		b += ((u_int32_t)k[7]);
		c += ((u_int32_t)k[8]) << 24;
		c += ((u_int32_t)k[9]) << 16;
		c += ((u_int32_t)k[10]) << 8;
		c += ((u_int32_t)k[11]);
		JHASH_MIX(a, b, c);
		len -= 12;
		k += 12;
	}

	/* last block: affect all 32 bits of (c) */
	switch (len) {
	case 12:
		c += k[11];
		/* FALLTHRU */
	case 11:
		c += ((u_int32_t)k[10]) << 8;
		/* FALLTHRU */
	case 10:
		c += ((u_int32_t)k[9]) << 16;
		/* FALLTHRU */
	case 9:
		c += ((u_int32_t)k[8]) << 24;
		/* FALLTHRU */
	case 8:
		b += k[7];
		/* FALLTHRU */
	case 7:
		b += ((u_int32_t)k[6]) << 8;
		/* FALLTHRU */
	case 6:
		b += ((u_int32_t)k[5]) << 16;
		/* FALLTHRU */
	case 5:
		b += ((u_int32_t)k[4]) << 24;
		/* FALLTHRU */
	case 4:
		a += k[3];
		/* FALLTHRU */
	case 3:
		a += ((u_int32_t)k[2]) << 8;
		/* FALLTHRU */
	case 2:
		a += ((u_int32_t)k[1]) << 16;
		/* FALLTHRU */
	case 1:
		a += ((u_int32_t)k[0]) << 24;
		break;

	case 0:
		/* zero length requires no mixing */
		return (c);
	}

	JHASH_FINAL(a, b, c);

	return (c);
}
#else /* LITTLE_ENDIAN */
/*
 * hashlittle()
 */
u_int32_t
net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed)
{
	u_int32_t a, b, c;

	/* Set up the internal state */
	a = b = c = JHASH_INIT + len + seed;

#if defined(__i386__) || defined(__x86_64__)
	/*
	 * On i386/x86_64, it is faster to read 32-bit chunks if the key
	 * is aligned 32-bit OR not 16-bit, and perform 16-bit reads if it
	 * is aligned 16-bit.
	 */
	if (ALIGNED32(key) || !ALIGNED16(key)) {
#else /* !defined(__i386__) && !defined(__x86_64__) */
	if (ALIGNED32(key)) {
#endif /* !defined(__i386__) && !defined(__x86_64__) */
		/* read 32-bit chunks */
		const u_int32_t *k = (const u_int32_t *)key;

		/*
		 * all but last block:
		 * aligned reads and affect 32 bits of (a,b,c)
		 */
		while (len > 12) {
			a += k[0];
			b += k[1];
			c += k[2];
			JHASH_MIX(a, b, c);
			len -= 12;
			k += 3;
		}

		/*
		 * handle the last (probably partial) block
		 *
		 * "k[2] & 0xffffff" actually reads beyond the end of the
		 * string, but then masks off the part it's not allowed
		 * to read. Because the string is aligned, the masked-off
		 * tail is in the same word as the rest of the string.
		 * The masking trick does make the hash noticeably faster
		 * for short strings (like English words).
		 */
		switch (len) {
		case 12:
			c += k[2];
			b += k[1];
			a += k[0];
			break;

		case 11:
			c += k[2] & 0xffffff;
			b += k[1];
			a += k[0];
			break;

		case 10:
			c += k[2] & 0xffff;
			b += k[1];
			a += k[0];
			break;

		case 9:
			c += k[2] & 0xff;
			b += k[1];
			a += k[0];
			break;

		case 8:
			b += k[1];
			a += k[0];
			break;

		case 7:
			b += k[1] & 0xffffff;
			a += k[0];
			break;

		case 6:
			b += k[1] & 0xffff;
			a += k[0];
			break;

		case 5:
			b += k[1] & 0xff;
			a += k[0];
			break;

		case 4:
			a += k[0];
			break;

		case 3:
			a += k[0] & 0xffffff;
			break;

		case 2:
			a += k[0] & 0xffff;
			break;

		case 1:
			a += k[0] & 0xff;
			break;

		case 0:
			/* zero length requires no mixing */
			return (c);
		}

		JHASH_FINAL(a, b, c);

		return (c);
	}
#if !defined(__i386__) && !defined(__x86_64__)
	else if (ALIGNED16(key)) {
#endif /* !defined(__i386__) && !defined(__x86_64__) */
		/* read 16-bit chunks */
		const u_int16_t *k = (const u_int16_t *)key;
		const u_int8_t *k8;

		/* all but last block: aligned reads and different mixing */
		while (len > 12) {
			a += k[0] + (((u_int32_t)k[1]) << 16);
			b += k[2] + (((u_int32_t)k[3]) << 16);
			c += k[4] + (((u_int32_t)k[5]) << 16);
			JHASH_MIX(a, b, c);
			len -= 12;
			k += 6;
		}

		/* handle the last (probably partial) block */
		k8 = (const u_int8_t *)k;
		switch (len) {
		case 12:
			c += k[4] + (((u_int32_t)k[5]) << 16);
			b += k[2] + (((u_int32_t)k[3]) << 16);
			a += k[0] + (((u_int32_t)k[1]) << 16);
			break;

		case 11:
			c += ((u_int32_t)k8[10]) << 16;
			/* FALLTHRU */
		case 10:
			c += k[4];
			b += k[2] + (((u_int32_t)k[3]) << 16);
			a += k[0] + (((u_int32_t)k[1]) << 16);
			break;

		case 9:
			c += k8[8];
			/* FALLTHRU */
		case 8:
			b += k[2] + (((u_int32_t)k[3]) << 16);
			a += k[0] + (((u_int32_t)k[1]) << 16);
			break;

		case 7:
			b += ((u_int32_t)k8[6]) << 16;
			/* FALLTHRU */
		case 6:
			b += k[2];
			a += k[0] + (((u_int32_t)k[1]) << 16);
			break;

		case 5:
			b += k8[4];
			/* FALLTHRU */
		case 4:
			a += k[0] + (((u_int32_t)k[1]) << 16);
			break;

		case 3:
			a += ((u_int32_t)k8[2]) << 16;
			/* FALLTHRU */
		case 2:
			a += k[0];
			break;

		case 1:
			a += k8[0];
			break;

		case 0:
			/* zero length requires no mixing */
			return (c);
		}

		JHASH_FINAL(a, b, c);

		return (c);
#if !defined(__i386__) && !defined(__x86_64__)
	}

	/* need to read the key one byte at a time */
	const u_int8_t *k = (const u_int8_t *)key;

	/* all but the last block: affect some 32 bits of (a,b,c) */
	while (len > 12) {
		a += k[0];
		a += ((u_int32_t)k[1]) << 8;
		a += ((u_int32_t)k[2]) << 16;
		a += ((u_int32_t)k[3]) << 24;
		b += k[4];
		b += ((u_int32_t)k[5]) << 8;
		b += ((u_int32_t)k[6]) << 16;
		b += ((u_int32_t)k[7]) << 24;
		c += k[8];
		c += ((u_int32_t)k[9]) << 8;
		c += ((u_int32_t)k[10]) << 16;
		c += ((u_int32_t)k[11]) << 24;
		JHASH_MIX(a, b, c);
		len -= 12;
		k += 12;
	}

	/* last block: affect all 32 bits of (c) */
	switch (len) {
	case 12:
		c += ((u_int32_t)k[11]) << 24;
		/* FALLTHRU */
	case 11:
		c += ((u_int32_t)k[10]) << 16;
		/* FALLTHRU */
	case 10:
		c += ((u_int32_t)k[9]) << 8;
		/* FALLTHRU */
	case 9:
		c += k[8];
		/* FALLTHRU */
	case 8:
		b += ((u_int32_t)k[7]) << 24;
		/* FALLTHRU */
	case 7:
		b += ((u_int32_t)k[6]) << 16;
		/* FALLTHRU */
	case 6:
		b += ((u_int32_t)k[5]) << 8;
		/* FALLTHRU */
	case 5:
		b += k[4];
		/* FALLTHRU */
	case 4:
		a += ((u_int32_t)k[3]) << 24;
		/* FALLTHRU */
	case 3:
		a += ((u_int32_t)k[2]) << 16;
		/* FALLTHRU */
	case 2:
		a += ((u_int32_t)k[1]) << 8;
		/* FALLTHRU */
	case 1:
		a += k[0];
		break;

	case 0:
		/* zero length requires no mixing */
		return (c);
	}

	JHASH_FINAL(a, b, c);

	return (c);
#endif /* !defined(__i386__) && !defined(__x86_64__) */
}
#endif /* LITTLE_ENDIAN */