/*
 * Copyright (c) 2012-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*-
 * Copyright (c) 2008 Joerg Sonnenberger <joerg@NetBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef KERNEL
#include <sys/param.h>
#include <machine/endian.h>
#include <sys/mcache.h>
#include <sys/mbuf.h>
#include <kern/debug.h>
#include <libkern/libkern.h>
#include <mach/boolean.h>
#include <pexpert/pexpert.h>
#define CKSUM_ERR(fmt, args...) kprintf(fmt, ## args)
#else /* !KERNEL */
#ifndef LIBSYSCALL_INTERFACE
#error "LIBSYSCALL_INTERFACE not defined"
#endif /* !LIBSYSCALL_INTERFACE */
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <strings.h>
#include <mach/boolean.h>
#endif /* !KERNEL */

/* compile time assert */
#ifndef _CASSERT
#define _CASSERT(x) _Static_assert(x, "compile-time assertion failed")
#endif /* !_CASSERT */

#ifndef VERIFY
#define VERIFY(EX) ((void)0)
#endif /* !VERIFY */

#ifndef CKSUM_ERR
#define CKSUM_ERR(fmt, args...) ((void)0)
#endif /* !CKSUM_ERR */

#define PREDICT_TRUE(x) __builtin_expect(!!((long)(x)), 1L)
#define PREDICT_FALSE(x) __builtin_expect(!!((long)(x)), 0L)

/* fake mbuf struct used only for calling os_cpu_in_cksum_mbuf() */
struct _mbuf {
	struct _mbuf *_m_next;
	void *_m_pad;
	uint8_t *_m_data;
	int32_t _m_len;
};

extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t);
extern uint32_t os_cpu_in_cksum_mbuf(struct _mbuf *, int, int, uint32_t);

uint32_t
os_cpu_in_cksum(const void *data, uint32_t len, uint32_t initial_sum)
{
	/*
	 * If data is 4-byte aligned, the length is a multiple of 4 bytes,
	 * and the amount to checksum is small, this path is quicker; it
	 * is suitable for the IPv4 header.
	 */
	if (IS_P2ALIGNED(data, sizeof(uint32_t)) &&
	    len <= 64 && (len & 3) == 0) {
		uint8_t *p = __DECONST(uint8_t *, data);
		uint64_t sum = initial_sum;

		if (PREDICT_TRUE(len == 20)) {  /* simple IPv4 header */
			sum += *(uint32_t *)(void *)p;
			sum += *(uint32_t *)(void *)(p + 4);
			sum += *(uint32_t *)(void *)(p + 8);
			sum += *(uint32_t *)(void *)(p + 12);
			sum += *(uint32_t *)(void *)(p + 16);
		} else {
			while (len) {
				sum += *(uint32_t *)(void *)p;
				p += 4;
				len -= 4;
			}
		}

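		/*
		 * Illustrative walk-through of the fold below (the value
		 * is hypothetical): with sum = 0x00000002_FFFF0001, the
		 * first step yields 0x2 + 0xFFFF0001 = 0xFFFF0003, the
		 * second 0xFFFF + 0x0003 = 0x10002, the third
		 * 0x1 + 0x0002 = 0x0003, and the fourth leaves 0x0003,
		 * the 16-bit one's-complement sum.
		 */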
		/* fold 64-bit to 16-bit (deferred carries) */
		sum = (sum >> 32) + (sum & 0xffffffff);	/* 33-bit */
		sum = (sum >> 16) + (sum & 0xffff);	/* 17-bit + carry */
		sum = (sum >> 16) + (sum & 0xffff);	/* 16-bit + carry */
		sum = (sum >> 16) + (sum & 0xffff);	/* final carry */

		return sum & 0xffff;
	}

	/*
	 * Otherwise, let os_cpu_in_cksum_mbuf() handle it; it only looks
	 * at 3 fields: {next,data,len}, and since it doesn't care about
	 * the authenticity of the mbuf, we use a fake one here.  Make
	 * sure the offsets are as expected.
	 */
#if defined(__LP64__)
	_CASSERT(offsetof(struct _mbuf, _m_next) == 0);
	_CASSERT(offsetof(struct _mbuf, _m_data) == 16);
	_CASSERT(offsetof(struct _mbuf, _m_len) == 24);
#else /* !__LP64__ */
	_CASSERT(offsetof(struct _mbuf, _m_next) == 0);
	_CASSERT(offsetof(struct _mbuf, _m_data) == 8);
	_CASSERT(offsetof(struct _mbuf, _m_len) == 12);
#endif /* !__LP64__ */
#ifdef KERNEL
	_CASSERT(offsetof(struct _mbuf, _m_next) ==
	    offsetof(struct mbuf, m_next));
	_CASSERT(offsetof(struct _mbuf, _m_data) ==
	    offsetof(struct mbuf, m_data));
	_CASSERT(offsetof(struct _mbuf, _m_len) ==
	    offsetof(struct mbuf, m_len));
#endif /* KERNEL */
	struct _mbuf m = {
		._m_next = NULL,
		._m_data = __DECONST(uint8_t *, data),
		._m_len = len,
	};

	return os_cpu_in_cksum_mbuf(&m, len, 0, initial_sum);
}

#if defined(__i386__) || defined(__x86_64__)

/*
 * Checksum routine for Internet Protocol family headers (Portable Version).
 *
 * This routine is very heavily used in the network
 * code and should be modified for each CPU to be as fast as possible.
 *
 * A discussion of different implementation techniques can be found in
 * RFC 1071.
 *
 * The default implementation for 32-bit architectures uses a 32-bit
 * accumulator and operates on 16-bit operands.
 *
 * The default implementation for 64-bit architectures uses a 64-bit
 * accumulator and operates on 32-bit operands.
 *
 * Both versions are unrolled to handle 32-byte / 64-byte fragments as the
 * core of the inner loop.  After each iteration of the inner loop, a
 * partial reduction is done to avoid carries in long packets.
 */
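
/*
 * A minimal sketch of why the partial reduction is safe (the value below
 * is hypothetical): the one's-complement sum is associative and
 * commutative, so a wide accumulator may be folded at any point without
 * changing the final 16-bit result.  For a 32-bit accumulator:
 *
 *	uint32_t acc = 0x0001FFFF;
 *	acc = (acc >> 16) + (acc & 0xffff);	// 0x1 + 0xFFFF = 0x10000
 *	acc = (acc >> 16) + (acc & 0xffff);	// 0x1 + 0x0000 = 0x1
 *
 * Folding inside the inner loop whenever the top bits become non-zero
 * keeps later byte swaps and additions from overflowing the accumulator.
 */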

#if !defined(__LP64__)
/* 32-bit version */
uint32_t
os_cpu_in_cksum_mbuf(struct _mbuf *m, int len, int off, uint32_t initial_sum)
{
	int mlen;
	uint32_t sum, partial;
	unsigned int final_acc;
	uint8_t *data;
	boolean_t needs_swap, started_on_odd;

	VERIFY(len >= 0);
	VERIFY(off >= 0);

	needs_swap = FALSE;
	started_on_odd = FALSE;
	sum = (initial_sum >> 16) + (initial_sum & 0xffff);

	for (;;) {
		if (PREDICT_FALSE(m == NULL)) {
			CKSUM_ERR("%s: out of data\n", __func__);
			return (uint32_t)-1;
		}
		mlen = m->_m_len;
		if (mlen > off) {
			mlen -= off;
			data = m->_m_data + off;
			goto post_initial_offset;
		}
		off -= mlen;
		if (len == 0) {
			break;
		}
		m = m->_m_next;
	}

	for (; len > 0; m = m->_m_next) {
		if (PREDICT_FALSE(m == NULL)) {
			CKSUM_ERR("%s: out of data\n", __func__);
			return (uint32_t)-1;
		}
		mlen = m->_m_len;
		data = m->_m_data;
post_initial_offset:
		if (mlen == 0) {
			continue;
		}
		if (mlen > len) {
			mlen = len;
		}
		len -= mlen;

		partial = 0;
		if ((uintptr_t)data & 1) {
			/* Align on word boundary */
			started_on_odd = !started_on_odd;
#if BYTE_ORDER == LITTLE_ENDIAN
			partial = *data << 8;
#else
			partial = *data;
#endif
			++data;
			--mlen;
		}
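		/*
		 * If the data started on an odd byte, every 16-bit word
		 * read below is offset by one byte, so its two bytes sit
		 * in swapped lanes; remember that so the partial sum can
		 * be byte-swapped before it is folded into sum.
		 */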
		needs_swap = started_on_odd;
		while (mlen >= 32) {
			__builtin_prefetch(data + 32);
			partial += *(uint16_t *)(void *)data;
			partial += *(uint16_t *)(void *)(data + 2);
			partial += *(uint16_t *)(void *)(data + 4);
			partial += *(uint16_t *)(void *)(data + 6);
			partial += *(uint16_t *)(void *)(data + 8);
			partial += *(uint16_t *)(void *)(data + 10);
			partial += *(uint16_t *)(void *)(data + 12);
			partial += *(uint16_t *)(void *)(data + 14);
			partial += *(uint16_t *)(void *)(data + 16);
			partial += *(uint16_t *)(void *)(data + 18);
			partial += *(uint16_t *)(void *)(data + 20);
			partial += *(uint16_t *)(void *)(data + 22);
			partial += *(uint16_t *)(void *)(data + 24);
			partial += *(uint16_t *)(void *)(data + 26);
			partial += *(uint16_t *)(void *)(data + 28);
			partial += *(uint16_t *)(void *)(data + 30);
			data += 32;
			mlen -= 32;
			if (PREDICT_FALSE(partial & 0xc0000000)) {
				if (needs_swap) {
					partial = (partial << 8) +
					    (partial >> 24);
				}
				sum += (partial >> 16);
				sum += (partial & 0xffff);
				partial = 0;
			}
		}
		if (mlen & 16) {
			partial += *(uint16_t *)(void *)data;
			partial += *(uint16_t *)(void *)(data + 2);
			partial += *(uint16_t *)(void *)(data + 4);
			partial += *(uint16_t *)(void *)(data + 6);
			partial += *(uint16_t *)(void *)(data + 8);
			partial += *(uint16_t *)(void *)(data + 10);
			partial += *(uint16_t *)(void *)(data + 12);
			partial += *(uint16_t *)(void *)(data + 14);
			data += 16;
			mlen -= 16;
		}
		/*
		 * mlen is not updated below as the remaining tests
		 * are using bit masks, which are not affected.
		 */
		if (mlen & 8) {
			partial += *(uint16_t *)(void *)data;
			partial += *(uint16_t *)(void *)(data + 2);
			partial += *(uint16_t *)(void *)(data + 4);
			partial += *(uint16_t *)(void *)(data + 6);
			data += 8;
		}
		if (mlen & 4) {
			partial += *(uint16_t *)(void *)data;
			partial += *(uint16_t *)(void *)(data + 2);
			data += 4;
		}
		if (mlen & 2) {
			partial += *(uint16_t *)(void *)data;
			data += 2;
		}
		if (mlen & 1) {
#if BYTE_ORDER == LITTLE_ENDIAN
			partial += *data;
#else
			partial += *data << 8;
#endif
			started_on_odd = !started_on_odd;
		}

		if (needs_swap) {
			partial = (partial << 8) + (partial >> 24);
		}
		sum += (partial >> 16) + (partial & 0xffff);
		/*
		 * Reduce sum to allow potential byte swap
		 * in the next iteration without carry.
		 */
		sum = (sum >> 16) + (sum & 0xffff);
	}
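	/* fold the 32-bit accumulator down to the final 16-bit checksum */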
	final_acc = ((sum >> 16) & 0xffff) + (sum & 0xffff);
	final_acc = (final_acc >> 16) + (final_acc & 0xffff);
	return final_acc & 0xffff;
}

#else /* __LP64__ */
/* 64-bit version */
uint32_t
os_cpu_in_cksum_mbuf(struct _mbuf *m, int len, int off, uint32_t initial_sum)
{
	int mlen;
	uint64_t sum, partial;
	unsigned int final_acc;
	uint8_t *data;
	boolean_t needs_swap, started_on_odd;

	VERIFY(len >= 0);
	VERIFY(off >= 0);

	needs_swap = FALSE;
	started_on_odd = FALSE;
	sum = initial_sum;

	for (;;) {
		if (PREDICT_FALSE(m == NULL)) {
			CKSUM_ERR("%s: out of data\n", __func__);
			return (uint32_t)-1;
		}
		mlen = m->_m_len;
		if (mlen > off) {
			mlen -= off;
			data = m->_m_data + off;
			goto post_initial_offset;
		}
		off -= mlen;
		if (len == 0) {
			break;
		}
		m = m->_m_next;
	}

	for (; len > 0; m = m->_m_next) {
		if (PREDICT_FALSE(m == NULL)) {
			CKSUM_ERR("%s: out of data\n", __func__);
			return (uint32_t)-1;
		}
		mlen = m->_m_len;
		data = m->_m_data;
post_initial_offset:
		if (mlen == 0) {
			continue;
		}
		if (mlen > len) {
			mlen = len;
		}
		len -= mlen;

		partial = 0;
		if ((uintptr_t)data & 1) {
			/* Align on word boundary */
			started_on_odd = !started_on_odd;
#if BYTE_ORDER == LITTLE_ENDIAN
			partial = *data << 8;
#else
			partial = *data;
#endif
			++data;
			--mlen;
		}
		needs_swap = started_on_odd;
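		/*
		 * Align on a 4-byte boundary where possible, since the
		 * unrolled loop below reads 32 bits at a time.
		 */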
		if ((uintptr_t)data & 2) {
			if (mlen < 2) {
				goto trailing_bytes;
			}
			partial += *(uint16_t *)(void *)data;
			data += 2;
			mlen -= 2;
		}
		while (mlen >= 64) {
			__builtin_prefetch(data + 32);
			__builtin_prefetch(data + 64);
			partial += *(uint32_t *)(void *)data;
			partial += *(uint32_t *)(void *)(data + 4);
			partial += *(uint32_t *)(void *)(data + 8);
			partial += *(uint32_t *)(void *)(data + 12);
			partial += *(uint32_t *)(void *)(data + 16);
			partial += *(uint32_t *)(void *)(data + 20);
			partial += *(uint32_t *)(void *)(data + 24);
			partial += *(uint32_t *)(void *)(data + 28);
			partial += *(uint32_t *)(void *)(data + 32);
			partial += *(uint32_t *)(void *)(data + 36);
			partial += *(uint32_t *)(void *)(data + 40);
			partial += *(uint32_t *)(void *)(data + 44);
			partial += *(uint32_t *)(void *)(data + 48);
			partial += *(uint32_t *)(void *)(data + 52);
			partial += *(uint32_t *)(void *)(data + 56);
			partial += *(uint32_t *)(void *)(data + 60);
			data += 64;
			mlen -= 64;
			if (PREDICT_FALSE(partial & (3ULL << 62))) {
				if (needs_swap) {
					partial = (partial << 8) +
					    (partial >> 56);
				}
				sum += (partial >> 32);
				sum += (partial & 0xffffffff);
				partial = 0;
			}
		}
		/*
		 * mlen is not updated below as the remaining tests
		 * are using bit masks, which are not affected.
		 */
		if (mlen & 32) {
			partial += *(uint32_t *)(void *)data;
			partial += *(uint32_t *)(void *)(data + 4);
			partial += *(uint32_t *)(void *)(data + 8);
			partial += *(uint32_t *)(void *)(data + 12);
			partial += *(uint32_t *)(void *)(data + 16);
			partial += *(uint32_t *)(void *)(data + 20);
			partial += *(uint32_t *)(void *)(data + 24);
			partial += *(uint32_t *)(void *)(data + 28);
			data += 32;
		}
		if (mlen & 16) {
			partial += *(uint32_t *)(void *)data;
			partial += *(uint32_t *)(void *)(data + 4);
			partial += *(uint32_t *)(void *)(data + 8);
			partial += *(uint32_t *)(void *)(data + 12);
			data += 16;
		}
		if (mlen & 8) {
			partial += *(uint32_t *)(void *)data;
			partial += *(uint32_t *)(void *)(data + 4);
			data += 8;
		}
		if (mlen & 4) {
			partial += *(uint32_t *)(void *)data;
			data += 4;
		}
		if (mlen & 2) {
			partial += *(uint16_t *)(void *)data;
			data += 2;
		}
trailing_bytes:
		if (mlen & 1) {
#if BYTE_ORDER == LITTLE_ENDIAN
			partial += *data;
#else
			partial += *data << 8;
#endif
			started_on_odd = !started_on_odd;
		}

		if (needs_swap) {
			partial = (partial << 8) + (partial >> 56);
		}
		sum += (partial >> 32) + (partial & 0xffffffff);
		/*
		 * Reduce sum to allow potential byte swap
		 * in the next iteration without carry.
		 */
		sum = (sum >> 32) + (sum & 0xffffffff);
	}
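	/* fold the 64-bit accumulator down to the final 16-bit checksum */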
	final_acc = (sum >> 48) + ((sum >> 32) & 0xffff) +
	    ((sum >> 16) & 0xffff) + (sum & 0xffff);
	final_acc = (final_acc >> 16) + (final_acc & 0xffff);
	final_acc = (final_acc >> 16) + (final_acc & 0xffff);
	return final_acc & 0xffff;
}
#endif /* __LP64__ */

#endif /* __i386__ || __x86_64__ */
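
#if 0
/*
 * Minimal usage sketch (illustrative only; the helper name below is
 * hypothetical, and in-kernel callers normally go through higher-level
 * wrappers such as in_cksum() rather than calling this directly):
 */
static uint16_t
example_ipv4_header_cksum(const uint8_t hdr[20])
{
	uint32_t partial;

	/* one's-complement sum of a 20-byte IPv4 header, no initial sum */
	partial = os_cpu_in_cksum(hdr, 20, 0);

	/* the header checksum field stores the complement of the sum */
	return (uint16_t)(~partial & 0xffff);
}
#endif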