]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kpi_mbuf.c
xnu-6153.11.26.tar.gz
[apple/xnu.git] / bsd / kern / kpi_mbuf.c
1 /*
2 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30
31 #include <sys/param.h>
32 #include <sys/mbuf.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <kern/kalloc.h>
38 #include <string.h>
39 #include <net/dlil.h>
40 #include <netinet/in.h>
41 #include <netinet/ip_var.h>
42
43 #include "net/net_str_id.h"
44
/* mbuf flags visible to KPI clients; do not add private flags here */
static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
    MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
    MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);

/* Unalterable mbuf flags: rejected by mbuf_setflags()/mbuf_setflags_mask() */
static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);

/* Table of registered transmit-completion callbacks, guarded by the rwlock */
#define MAX_MBUF_TX_COMPL_FUNC 32
mbuf_tx_compl_func
    mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
extern lck_rw_t *mbuf_tx_compl_tbl_lock;
u_int32_t mbuf_tx_compl_index = 0;
58
#if (DEVELOPMENT || DEBUG)
/* Debug counters for the tx-completion callback machinery */
int mbuf_tx_compl_debug = 0;
SInt64 mbuf_tx_compl_outstanding __attribute__((aligned(8))) = 0;
u_int64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
/*
 * NOTE(review): "oustanding" is a misspelling, but it is the externally
 * visible sysctl name; renaming it would break existing tooling.
 */
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, oustanding,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_outstanding, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
#endif /* (DEBUG || DEVELOPMENT) */
76
/* Return the pointer to the start of valid data in this mbuf. */
void *
mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}
82
83 void *
84 mbuf_datastart(mbuf_t mbuf)
85 {
86 if (mbuf->m_flags & M_EXT) {
87 return mbuf->m_ext.ext_buf;
88 }
89 if (mbuf->m_flags & M_PKTHDR) {
90 return mbuf->m_pktdat;
91 }
92 return mbuf->m_dat;
93 }
94
95 errno_t
96 mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
97 {
98 size_t start = (size_t)((char *)mbuf_datastart(mbuf));
99 size_t maxlen = mbuf_maxlen(mbuf);
100
101 if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
102 return EINVAL;
103 }
104 mbuf->m_data = data;
105 mbuf->m_len = len;
106
107 return 0;
108 }
109
/*
 * Re-point m_data so that `len' bytes placed at the end of the buffer begin
 * on a 32-bit boundary.  Not supported on shared (referenced) clusters.
 *
 * NOTE(review): assumes len <= mbuf_trailingspace(mbuf) after the reset of
 * m_data; the subtraction below would wrap otherwise — callers must ensure
 * this. TODO confirm against callers.
 */
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}
122
123 /*
124 * This function is used to provide mcl_to_paddr via symbol indirection,
125 * please avoid any change in behavior or remove the indirection in
126 * config/Unsupported*
127 */
/* Translate an mbuf data pointer to its physical address via mcl_to_paddr. */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	return (addr64_t)mcl_to_paddr(ptr);
}
133
134 errno_t
135 mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
136 {
137 /* Must set *mbuf to NULL in failure case */
138 *mbuf = m_get(how, type);
139
140 return *mbuf == NULL ? ENOMEM : 0;
141 }
142
143 errno_t
144 mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
145 {
146 /* Must set *mbuf to NULL in failure case */
147 *mbuf = m_gethdr(how, type);
148
149 return *mbuf == NULL ? ENOMEM : 0;
150 }
151
152 errno_t
153 mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
154 caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
155 size_t extsize, caddr_t extarg)
156 {
157 if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
158 return EINVAL;
159 }
160
161 if ((*mbuf = m_clattach(*mbuf, type, extbuf,
162 extfree, extsize, extarg, how, 0)) == NULL) {
163 return ENOMEM;
164 }
165
166 return 0;
167 }
168
169 errno_t
170 mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
171 void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
172 {
173 caddr_t extbuf = NULL;
174 errno_t err;
175
176 if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
177 return EINVAL;
178 }
179
180 if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
181 return err;
182 }
183
184 if ((*mbuf = m_clattach(*mbuf, type, extbuf,
185 extfree, *size, NULL, how, 1)) == NULL) {
186 mbuf_freecluster(extbuf, *size);
187 return ENOMEM;
188 }
189
190 return 0;
191 }
192
/* Report whether the mbuf's paired (ring) cluster is currently active. */
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}
198
199 errno_t
200 mbuf_ring_cluster_activate(mbuf_t mbuf)
201 {
202 if (mbuf_ring_cluster_is_active(mbuf)) {
203 return EBUSY;
204 }
205
206 m_ext_paired_activate(mbuf);
207 return 0;
208 }
209
210 errno_t
211 mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
212 {
213 if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
214 return EINVAL;
215 }
216
217 return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
218 }
219
220 errno_t
221 mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
222 {
223 if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
224 return EINVAL;
225 }
226
227 *prop = m_ext_get_prop(mbuf);
228 return 0;
229 }
230
/*
 * Allocate a bare cluster of at least *size bytes, rounding *size up to
 * the pool actually used (MCLBYTES, MBIGCLBYTES or M16KCLBYTES).  On
 * success *addr points at the cluster; on failure *addr is NULL, *size is
 * 0, and ENOMEM (or ENOTSUP when the jumbo pool does not exist) returns.
 */
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	/* Cascade through the pools smallest-first; first fit wins */
	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}
263
/*
 * Return a cluster obtained from mbuf_alloccluster() to its pool.  The
 * size must be exactly one of the three pool sizes; anything else (or a
 * jumbo cluster when the jumbo pool was never created) panics, since it
 * indicates a corrupted or mismatched free.
 */
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);
	}

	if (size == MCLBYTES) {
		m_mclfree(addr);
	} else if (size == MBIGCLBYTES) {
		m_bigfree(addr, MBIGCLBYTES, NULL);
	} else if (njcl > 0) {
		m_16kfree(addr, M16KCLBYTES, NULL);
	} else {
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
	}
}
282
/*
 * Attach a cluster of exactly `size' bytes (MCLBYTES, MBIGCLBYTES or
 * M16KCLBYTES) to *mbuf, allocating the mbuf itself when *mbuf is NULL.
 * If this function allocated the mbuf and the cluster attach fails, the
 * mbuf is freed again and *mbuf reset to NULL; a caller-supplied mbuf is
 * left untouched on failure.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	/* M_EXT still clear means the cluster could not be attached */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
330
/*
 * Attach a regular (MCLBYTES) cluster to *mbuf, allocating the mbuf when
 * *mbuf is NULL.  If this function created the mbuf and the attach failed,
 * the mbuf is freed and *mbuf reset to NULL (the subsequent NULL test then
 * yields ENOMEM); a caller-supplied mbuf survives failure.
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;
	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
363
364
365 errno_t
366 mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
367 {
368 /* Must set *mbuf to NULL in failure case */
369 errno_t error = 0;
370
371 *mbuf = m_getpacket_how(how);
372
373 if (*mbuf == NULL) {
374 if (how == MBUF_WAITOK) {
375 error = ENOMEM;
376 } else {
377 error = EWOULDBLOCK;
378 }
379 }
380
381 return error;
382 }
383
384 /*
385 * This function is used to provide m_free via symbol indirection, please avoid
386 * any change in behavior or remove the indirection in config/Unsupported*
387 */
/* Free a single mbuf; returns m_free()'s result (the next mbuf in the chain). */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}
393
394 /*
395 * This function is used to provide m_freem via symbol indirection, please avoid
396 * any change in behavior or remove the indirection in config/Unsupported*
397 */
/* Free an entire mbuf chain (follows m_next links). */
void
mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}
403
/* Free a list of packets (follows m_nextpkt); returns m_freem_list()'s count. */
int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}
409
/* Bytes available before the current data region (see M_LEADINGSPACE). */
size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}
415
416 /*
417 * This function is used to provide m_trailingspace via symbol indirection,
418 * please avoid any change in behavior or remove the indirection in
419 * config/Unsupported*
420 */
/* Bytes available after the current data region (see M_TRAILINGSPACE). */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	return M_TRAILINGSPACE(mbuf);
}
426
427 /* Manipulation */
428 errno_t
429 mbuf_copym(const mbuf_t src, size_t offset, size_t len,
430 mbuf_how_t how, mbuf_t *new_mbuf)
431 {
432 /* Must set *mbuf to NULL in failure case */
433 *new_mbuf = m_copym(src, offset, len, how);
434
435 return *new_mbuf == NULL ? ENOMEM : 0;
436 }
437
438 errno_t
439 mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
440 {
441 /* Must set *new_mbuf to NULL in failure case */
442 *new_mbuf = m_dup(src, how);
443
444 return *new_mbuf == NULL ? ENOMEM : 0;
445 }
446
447 errno_t
448 mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
449 {
450 /* Must set *orig to NULL in failure case */
451 *orig = m_prepend_2(*orig, len, how, 0);
452
453 return *orig == NULL ? ENOMEM : 0;
454 }
455
456 errno_t
457 mbuf_split(mbuf_t src, size_t offset,
458 mbuf_how_t how, mbuf_t *new_mbuf)
459 {
460 /* Must set *new_mbuf to NULL in failure case */
461 *new_mbuf = m_split(src, offset, how);
462
463 return *new_mbuf == NULL ? ENOMEM : 0;
464 }
465
466 errno_t
467 mbuf_pullup(mbuf_t *mbuf, size_t len)
468 {
469 /* Must set *mbuf to NULL in failure case */
470 *mbuf = m_pullup(*mbuf, len);
471
472 return *mbuf == NULL ? ENOMEM : 0;
473 }
474
/*
 * Ensure len bytes at *offset are contiguous; *location receives the mbuf
 * containing them and *offset is rewritten relative to that mbuf.  On
 * failure the chain has been freed and *location is NULL.
 */
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* m_pulldown reports the new offset through an int */
	int new_offset;
	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}
485
486 /*
487 * This function is used to provide m_adj via symbol indirection, please avoid
488 * any change in behavior or remove the indirection in config/Unsupported*
489 */
/* Trim len bytes from the chain head (len > 0) or tail (len < 0). */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}
495
/*
 * Grow or shrink m_len by `amount', validating that the result stays
 * within the mbuf's buffer (when growing) and non-negative (when
 * shrinking).  Only this mbuf's m_len changes; the pkthdr length, if any,
 * is not touched.
 */
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		/* bytes already consumed from the start of the buffer */
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		/* shrinking below zero length is invalid */
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}
514
515 mbuf_t
516 mbuf_concatenate(mbuf_t dst, mbuf_t src)
517 {
518 if (dst == NULL) {
519 return NULL;
520 }
521
522 m_cat(dst, src);
523
524 /* return dst as is in the current implementation */
525 return dst;
526 }
527 errno_t
528 mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
529 {
530 /* Copied m_copydata, added error handling (don't just panic) */
531 size_t count;
532 mbuf_t m = m0;
533
534 if (off >= INT_MAX || len >= INT_MAX) {
535 return EINVAL;
536 }
537
538 while (off > 0) {
539 if (m == 0) {
540 return EINVAL;
541 }
542 if (off < (size_t)m->m_len) {
543 break;
544 }
545 off -= m->m_len;
546 m = m->m_next;
547 }
548 while (len > 0) {
549 if (m == 0) {
550 return EINVAL;
551 }
552 count = m->m_len - off > len ? len : m->m_len - off;
553 bcopy(mtod(m, caddr_t) + off, out_data, count);
554 len -= count;
555 out_data = ((char *)out_data) + count;
556 off = 0;
557 m = m->m_next;
558 }
559
560 return 0;
561 }
562
563 int
564 mbuf_mclhasreference(mbuf_t mbuf)
565 {
566 if ((mbuf->m_flags & M_EXT)) {
567 return m_mclhasreference(mbuf);
568 } else {
569 return 0;
570 }
571 }
572
573
574 /* mbuf header */
/* Next mbuf in this packet's chain (m_next link). */
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}
580
581 errno_t
582 mbuf_setnext(mbuf_t mbuf, mbuf_t next)
583 {
584 if (next && ((next)->m_nextpkt != NULL ||
585 (next)->m_type == MT_FREE)) {
586 return EINVAL;
587 }
588 mbuf->m_next = next;
589
590 return 0;
591 }
592
/* Next packet in the packet list (m_nextpkt link). */
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}
598
/* Set the next-packet link; no validation is performed. */
void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}
604
/* Length of valid data in this single mbuf (not the whole chain). */
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}
610
/*
 * Set this mbuf's data length.  No bounds checking: the caller must keep
 * len within mbuf_maxlen() (use mbuf_adjustlen() for a checked update).
 */
void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}
616
/*
 * Capacity of this mbuf's buffer: the cluster size when external storage
 * is attached, otherwise the remaining internal data area from the data
 * start to the end of m_dat.
 */
size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}
625
/* Return the mbuf's allocation type (MT_DATA, MT_HEADER, ...). */
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}
631
632 errno_t
633 mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
634 {
635 if (new_type == MBUF_TYPE_FREE) {
636 return EINVAL;
637 }
638
639 m_mchtype(mbuf, new_type);
640
641 return 0;
642 }
643
/* Return the mbuf's flags, filtered down to the KPI-visible set. */
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
649
/*
 * Replace the KPI-visible flags wholesale, preserving the kernel-private
 * bits.  A change to M_PKTHDR is routed through m_reinit(), which performs
 * the header add/strip bookkeeping and may itself fail.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 * in flags argument.
	 * 2. Return error if bits other than public flags are set in passed
	 * flags argument.
	 * Please note that private flag bits must be passed as reset by
	 * kexts, as they must use mbuf_flags KPI to get current set of
	 * mbuf flags and mbuf_flags KPI does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
685
/*
 * Update only the flag bits selected by `mask'.  The mask may cover only
 * alterable KPI-visible flags.  As in mbuf_setflags(), a change to
 * M_PKTHDR is delegated to m_reinit() for proper header bookkeeping.
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
710
711 errno_t
712 mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
713 {
714 if (((src)->m_flags & M_PKTHDR) == 0) {
715 return EINVAL;
716 }
717
718 m_copy_pkthdr(dest, src);
719
720 return 0;
721 }
722
/*
 * Return the packet header length, or 0 when the mbuf has no header.
 * A negative stored length (corruption) asserts in development builds
 * and is clamped to 0 in release builds.
 */
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we Assert for development or debug builds,
	 * also make sure we never return negative length
	 * for release build.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}
740
741 __private_extern__ size_t
742 mbuf_pkthdr_maxlen(mbuf_t m)
743 {
744 size_t maxlen = 0;
745 mbuf_t n = m;
746
747 while (n) {
748 maxlen += mbuf_maxlen(n);
749 n = mbuf_next(n);
750 }
751 return maxlen;
752 }
753
/*
 * Set the packet header length, clamping to INT32_MAX since the stored
 * field is a signed 32-bit integer.
 */
void
mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	if (len > INT32_MAX) {
		len = INT32_MAX;
	}

	mbuf->m_pkthdr.len = len;
}
763
/*
 * Adjust the packet header length by `amount'.  Unlike
 * mbuf_pkthdr_setlen() there is no clamping or overflow check here;
 * the caller is responsible for keeping the result sane.
 */
void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
769
/* Interface this packet was received on (not reference-counted here). */
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning
	 */
	return mbuf->m_pkthdr.rcvif;
}
779
/* Record the receiving interface; the ifnet is not validated here. */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}
787
/* Pointer to the frame header recorded for this packet, if any. */
void*
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}
793
794 void
795 mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
796 {
797 mbuf->m_pkthdr.pkt_hdr = (void*)header;
798 }
799
/*
 * Mark an inbound packet as modified: any hardware-computed checksum
 * results are no longer trustworthy, so clear the checksum flags.
 */
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
806
/*
 * Finalize an outbound packet in software: compute any checksums that
 * were deferred to hardware offload, so the packet is complete before it
 * is handed to a consumer that cannot offload.  `pf' selects the protocol
 * family; `o' is the offset of the IP header within the packet.
 */
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
#if INET6
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
#endif /* INET6 */
		break;

	default:
		/* other families have nothing to finalize */
		break;
	}
}
831
/* Attach a VLAN tag to the packet and mark it valid. Always succeeds. */
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}
842
843 errno_t
844 mbuf_get_vlan_tag(
845 mbuf_t mbuf,
846 u_int16_t *vlan)
847 {
848 if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
849 return ENXIO; // No vlan tag set
850 }
851 *vlan = mbuf->m_pkthdr.vlan_tag;
852
853 return 0;
854 }
855
/* Remove the VLAN tag: clear both the valid bit and the stored tag. */
errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
865
866 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
867 MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
868 MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;
869
/*
 * Record which checksums the stack wants computed for this outbound
 * packet.  Request bits live in the low 16 bits of csum_flags; the high
 * 16 (performed/result bits) are preserved.  Unknown request bits are
 * silently dropped.
 */
errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
883
884 static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
885 MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
886
887 errno_t
888 mbuf_get_tso_requested(
889 mbuf_t mbuf,
890 mbuf_tso_request_flags_t *request,
891 u_int32_t *value)
892 {
893 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
894 request == NULL || value == NULL) {
895 return EINVAL;
896 }
897
898 *request = mbuf->m_pkthdr.csum_flags;
899 *request &= mbuf_valid_tso_request_flags;
900 if (*request && value != NULL) {
901 *value = mbuf->m_pkthdr.tso_segsz;
902 }
903
904 return 0;
905 }
906
/*
 * Report the checksum-request bits set on this packet; *value receives
 * csum_data when a non-NULL pointer is supplied.
 *
 * NOTE(review): `mbuf' and `request' are dereferenced unchecked here,
 * unlike the TSO variant above — callers must pass valid pointers.
 */
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}
921
/* Clear the request bits (low 16) of csum_flags and reset csum_data. */
errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
931
932 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
933 MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
934 MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;
935
/*
 * Record checksum work already performed (typically by hardware) on an
 * inbound packet.  Result bits occupy the low 16 bits of csum_flags; the
 * high 16 are preserved.  Unknown bits are silently dropped.
 */
errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
949
/* Report the checksum-performed bits and the accompanying csum_data. */
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}
962
/* Clear the performed bits (low 16) of csum_flags and reset csum_data. */
errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
972
973 errno_t
974 mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
975 u_int16_t *csum)
976 {
977 if (mbuf == NULL || length == 0 || csum == NULL ||
978 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
979 return EINVAL;
980 }
981
982 *csum = inet_cksum(mbuf, protocol, offset, length);
983 return 0;
984 }
985
986 #if INET6
987 errno_t
988 mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
989 u_int16_t *csum)
990 {
991 if (mbuf == NULL || length == 0 || csum == NULL ||
992 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
993 return EINVAL;
994 }
995
996 *csum = inet6_cksum(mbuf, protocol, offset, length);
997 return 0;
998 }
999 #else /* INET6 */
/* Stub for kernels built without INET6: calling it is a fatal error. */
errno_t
mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
    __unused u_int32_t offset, __unused u_int32_t length,
    __unused u_int16_t *csum)
{
	panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
	return 0;
}
1008
/* Stub for kernels built without INET6: calling it is a fatal error. */
u_int16_t
inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
    __unused unsigned int off, __unused unsigned int len)
{
	panic("inet6_cksum() doesn't exist on this platform\n");
	return 0;
}
1016
/* Stub for kernels built without INET6: calling it is a fatal error. */
void nd6_lookup_ipv6(void);
void
nd6_lookup_ipv6(void)
{
	panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
}
1023
/* Stub for kernels built without INET6: calling it is a fatal error. */
int
in6addr_local(__unused struct in6_addr *a)
{
	panic("in6addr_local() doesn't exist on this platform\n");
	return 0;
}
1030
/* Stub for kernels built without INET6: calling it is a fatal error. */
void nd6_storelladdr(void);
void
nd6_storelladdr(void)
{
	panic("nd6_storelladdr() doesn't exist on this platform\n");
}
1037 #endif /* INET6 */
1038
1039 /*
1040 * Mbuf tag KPIs
1041 */
1042
1043 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
1044
/*
 * Look up (or register, since create is 1) the numeric tag id for a
 * module identifier string in the mbuf-tag namespace.
 */
errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
1052
1053 errno_t
1054 mbuf_tag_allocate(
1055 mbuf_t mbuf,
1056 mbuf_tag_id_t id,
1057 mbuf_tag_type_t type,
1058 size_t length,
1059 mbuf_how_t how,
1060 void** data_p)
1061 {
1062 struct m_tag *tag;
1063 u_int32_t mtag_id_first, mtag_id_last;
1064
1065 if (data_p != NULL) {
1066 *data_p = NULL;
1067 }
1068
1069 /* Sanity check parameters */
1070 (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1071 NSI_MBUF_TAG);
1072 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1073 id < mtag_id_first || id > mtag_id_last || length < 1 ||
1074 (length & 0xffff0000) != 0 || data_p == NULL) {
1075 return EINVAL;
1076 }
1077
1078 /* Make sure this mtag hasn't already been allocated */
1079 tag = m_tag_locate(mbuf, id, type, NULL);
1080 if (tag != NULL) {
1081 return EEXIST;
1082 }
1083
1084 /* Allocate an mtag */
1085 tag = m_tag_create(id, type, length, how, mbuf);
1086 if (tag == NULL) {
1087 return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
1088 }
1089
1090 /* Attach the mtag and set *data_p */
1091 m_tag_prepend(mbuf, tag);
1092 *data_p = tag + 1;
1093
1094 return 0;
1095 }
1096
/*
 * Locate a previously attached tag by id/type, returning its length and
 * a pointer to its data area.  *length and *data_p are zeroed up front
 * so they are well-defined even on the EINVAL path.  ENOENT when no
 * matching tag is attached.
 */
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}
1136
1137 void
1138 mbuf_tag_free(
1139 mbuf_t mbuf,
1140 mbuf_tag_id_t id,
1141 mbuf_tag_type_t type)
1142 {
1143 struct m_tag *tag;
1144 u_int32_t mtag_id_first, mtag_id_last;
1145
1146 /* Sanity check parameters */
1147 (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1148 NSI_MBUF_TAG);
1149 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1150 id < mtag_id_first || id > mtag_id_last) {
1151 return;
1152 }
1153
1154 tag = m_tag_locate(mbuf, id, type, NULL);
1155 if (tag == NULL) {
1156 return;
1157 }
1158
1159 m_tag_delete(mbuf, tag);
1160 }
1161
1162 /*
1163 * Maximum length of driver auxiliary data; keep this small to
1164 * fit in a single mbuf to avoid wasting memory, rounded down to
1165 * the nearest 64-bit boundary. This takes into account mbuf
1166 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
1167 */
1168 #define MBUF_DRVAUX_MAXLEN \
1169 P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
1170 M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1171
/*
 * Attach driver auxiliary data to a packet as a kernel DRVAUX tag holding
 * an m_drvaux_tag header followed by `length' bytes of module data.  At
 * most one DRVAUX tag may be attached (EEXIST otherwise).  On success
 * *data_p, when supplied, points at the module data area.
 */
errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
		return EINVAL;
	}

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		return EEXIST;
	}

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof(*p) + length, how, mbuf)) == NULL) {
		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
	}

	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL) {
		/* module data follows the m_drvaux_tag header */
		*data_p = (p + 1);
	}

	return 0;
}
1214
/*
 * Locate the packet's DRVAUX tag, returning its family, subfamily,
 * payload length (each optional) and a required pointer to the module
 * data via *data_p.  ENOENT when no DRVAUX tag is attached.  A malformed
 * tag (bad size or length) trips a VERIFY panic rather than returning.
 */
errno_t
mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
    u_int32_t *length_p, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
		return EINVAL;
	}

	*data_p = NULL;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) {
		return ENOENT;
	}

	/* Must be at least size of m_drvaux_tag */
	VERIFY(tag->m_tag_len >= sizeof(*p));

	p = (struct m_drvaux_tag *)(tag + 1);
	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);

	if (family_p != NULL) {
		*family_p = p->da_family;
	}
	if (subfamily_p != NULL) {
		*subfamily_p = p->da_subfamily;
	}
	if (length_p != NULL) {
		*length_p = p->da_length;
	}

	/* module data follows the m_drvaux_tag header */
	*data_p = (p + 1);

	return 0;
}
1253
1254 void
1255 mbuf_del_drvaux(mbuf_t mbuf)
1256 {
1257 struct m_tag *tag;
1258
1259 if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
1260 return;
1261 }
1262
1263 if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1264 KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
1265 m_tag_delete(mbuf, tag);
1266 }
1267 }
1268
1269 /* mbuf stats */
/* Snapshot the global mbstat counters into the caller's mbuf_stat. */
void
mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
1291
/*
 * Allocate a single packet of `packetlen' bytes spread over at most
 * *maxchunks mbufs (unconstrained when maxchunks is NULL or 0).  On
 * success *maxchunks is updated to the number of chunks actually used.
 * ENOBUFS means the chunk constraint could not be met; ENOMEM means
 * allocation failed outright.
 *
 * NOTE(review): shares its body with mbuf_allocpacket_list() below
 * (numpkts fixed at 1); keep the two in sync when changing either.
 */
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		/* distinguish "too many chunks needed" from plain exhaustion */
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
1323
1324 errno_t
1325 mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
1326 unsigned int *maxchunks, mbuf_t *mbuf)
1327 {
1328 errno_t error;
1329 struct mbuf *m;
1330 unsigned int numchunks = maxchunks ? *maxchunks : 0;
1331
1332 if (numpkts == 0) {
1333 error = EINVAL;
1334 goto out;
1335 }
1336 if (packetlen == 0) {
1337 error = EINVAL;
1338 goto out;
1339 }
1340 m = m_allocpacket_internal(&numpkts, packetlen,
1341 maxchunks ? &numchunks : NULL, how, 1, 0);
1342 if (m == 0) {
1343 if (maxchunks && *maxchunks && numchunks > *maxchunks) {
1344 error = ENOBUFS;
1345 } else {
1346 error = ENOMEM;
1347 }
1348 } else {
1349 if (maxchunks) {
1350 *maxchunks = numchunks;
1351 }
1352 error = 0;
1353 *mbuf = m;
1354 }
1355 out:
1356 return error;
1357 }
1358
1359 __private_extern__ size_t
1360 mbuf_pkt_list_len(mbuf_t m)
1361 {
1362 size_t len = 0;
1363 mbuf_t n = m;
1364
1365 while (n) {
1366 len += mbuf_pkthdr_len(n);
1367 n = mbuf_nextpkt(n);
1368 }
1369 return len;
1370 }
1371
1372 __private_extern__ size_t
1373 mbuf_pkt_list_maxlen(mbuf_t m)
1374 {
1375 size_t maxlen = 0;
1376 mbuf_t n = m;
1377
1378 while (n) {
1379 maxlen += mbuf_pkthdr_maxlen(n);
1380 n = mbuf_nextpkt(n);
1381 }
1382 return maxlen;
1383 }
1384
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 *
 * Copies 'len' bytes from 'data' into the chain starting at byte offset
 * 'off', extending the chain as needed.  Returns 0 on success or ENOBUFS
 * when an mbuf allocation fails; the chain may have been partially grown
 * and written in the failure case.  If the head has a packet header whose
 * length is smaller than the region written, the header length is bumped.
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;	/* remember head for the pkthdr.len fixup */
	mbuf_t n;
	int totlen = 0;		/* bytes of chain traversed/covered so far */
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/*
	 * Walk forward to the mbuf containing offset 'off'.  If the offset
	 * lies past the end of the chain, append zero-filled mbufs
	 * (m_getclr) to bridge the gap.
	 */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			/* Cover as much of the remaining span as fits. */
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/* Copy the data, growing the last mbuf or appending new ones. */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		/*
		 * If this is the last mbuf and it has unused trailing
		 * space, grow it in place rather than allocating.
		 */
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		/* Account for the leading gap only on the first copy. */
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* Grow (never shrink) the packet header length to cover the write. */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
1469
/*
 * Return the number of payload bytes available in a regular
 * (non-packet-header) mbuf.
 */
u_int32_t
mbuf_get_mlen(void)
{
	return _MLEN;
}
1475
/*
 * Return the number of payload bytes available in a packet-header mbuf.
 */
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}
1481
/*
 * Return the smallest payload size for which a cluster would be allocated
 * rather than chaining plain mbufs (one pkthdr mbuf plus one regular mbuf).
 */
u_int32_t
mbuf_get_minclsize(void)
{
	return MHLEN + MLEN;
}
1487
/*
 * Return the number of defined mbuf traffic classes.
 */
u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return MBUF_TC_MAX;
}
1493
1494 errno_t
1495 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
1496 {
1497 if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
1498 return EINVAL;
1499 }
1500
1501 *index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
1502 return 0;
1503 }
1504
1505 mbuf_traffic_class_t
1506 mbuf_get_traffic_class(mbuf_t m)
1507 {
1508 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1509 return MBUF_TC_BE;
1510 }
1511
1512 return m_get_traffic_class(m);
1513 }
1514
1515 errno_t
1516 mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
1517 {
1518 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1519 ((u_int32_t)tc >= MBUF_TC_MAX)) {
1520 return EINVAL;
1521 }
1522
1523 return m_set_traffic_class(m, tc);
1524 }
1525
1526 int
1527 mbuf_is_traffic_class_privileged(mbuf_t m)
1528 {
1529 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1530 !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1531 return 0;
1532 }
1533
1534 return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
1535 }
1536
/*
 * Return the number of defined mbuf service classes.
 */
u_int32_t
mbuf_get_service_class_max_count(void)
{
	return MBUF_SC_MAX_CLASSES;
}
1542
1543 errno_t
1544 mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
1545 {
1546 if (index == NULL || !MBUF_VALID_SC(sc)) {
1547 return EINVAL;
1548 }
1549
1550 *index = MBUF_SCIDX(sc);
1551 return 0;
1552 }
1553
1554 mbuf_svc_class_t
1555 mbuf_get_service_class(mbuf_t m)
1556 {
1557 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1558 return MBUF_SC_BE;
1559 }
1560
1561 return m_get_service_class(m);
1562 }
1563
1564 errno_t
1565 mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
1566 {
1567 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1568 return EINVAL;
1569 }
1570
1571 return m_set_service_class(m, sc);
1572 }
1573
1574 errno_t
1575 mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
1576 {
1577 u_int32_t flags;
1578
1579 if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
1580 return EINVAL;
1581 }
1582
1583 *flagsp = 0;
1584 flags = m->m_pkthdr.pkt_flags;
1585 if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
1586 (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
1587 *flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
1588 }
1589 if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
1590 (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
1591 *flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
1592 }
1593
1594 /* These 2 flags are mutually exclusive */
1595 VERIFY((*flagsp &
1596 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
1597 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));
1598
1599 return 0;
1600 }
1601
1602 errno_t
1603 mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
1604 {
1605 if (m == NULL || area == NULL || area_len == NULL ||
1606 !(m->m_flags & M_PKTHDR)) {
1607 return EINVAL;
1608 }
1609
1610 *area_len = m_scratch_get(m, area);
1611 return 0;
1612 }
1613
1614 errno_t
1615 mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
1616 {
1617 if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
1618 return EINVAL;
1619 }
1620
1621 if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1622 return EINVAL;
1623 }
1624
1625 *unsent_data = m->m_pkthdr.bufstatus_if +
1626 m->m_pkthdr.bufstatus_sndbuf;
1627 return 0;
1628 }
1629
1630 errno_t
1631 mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
1632 {
1633 if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
1634 !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1635 return EINVAL;
1636 }
1637
1638 buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
1639 buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
1640 return 0;
1641 }
1642
1643 errno_t
1644 mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
1645 {
1646 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1647 return EINVAL;
1648 }
1649 if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
1650 *retval = 1;
1651 } else {
1652 *retval = 0;
1653 }
1654 return 0;
1655 }
1656
1657 errno_t
1658 mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
1659 {
1660 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1661 return EINVAL;
1662 }
1663 if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
1664 *retval = 1;
1665 } else {
1666 *retval = 0;
1667 }
1668 return 0;
1669 }
1670
1671 errno_t
1672 mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
1673 {
1674 if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
1675 return EINVAL;
1676 }
1677
1678 if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1679 if (valid != NULL) {
1680 *valid = FALSE;
1681 }
1682 *ts = 0;
1683 } else {
1684 if (valid != NULL) {
1685 *valid = TRUE;
1686 }
1687 *ts = m->m_pkthdr.pkt_timestamp;
1688 }
1689 return 0;
1690 }
1691
1692 errno_t
1693 mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
1694 {
1695 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1696 return EINVAL;
1697 }
1698
1699 if (valid == FALSE) {
1700 m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
1701 m->m_pkthdr.pkt_timestamp = 0;
1702 } else {
1703 m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
1704 m->m_pkthdr.pkt_timestamp = ts;
1705 }
1706 return 0;
1707 }
1708
1709 errno_t
1710 mbuf_get_status(mbuf_t m, kern_return_t *status)
1711 {
1712 if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
1713 return EINVAL;
1714 }
1715
1716 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1717 *status = 0;
1718 } else {
1719 *status = m->m_pkthdr.drv_tx_status;
1720 }
1721 return 0;
1722 }
1723
1724 static void
1725 driver_mtag_init(mbuf_t m)
1726 {
1727 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1728 m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
1729 bzero(&m->m_pkthdr.driver_mtag,
1730 sizeof(m->m_pkthdr.driver_mtag));
1731 }
1732 }
1733
1734 errno_t
1735 mbuf_set_status(mbuf_t m, kern_return_t status)
1736 {
1737 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1738 return EINVAL;
1739 }
1740
1741 driver_mtag_init(m);
1742
1743 m->m_pkthdr.drv_tx_status = status;
1744
1745 return 0;
1746 }
1747
1748 errno_t
1749 mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
1750 {
1751 if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
1752 return EINVAL;
1753 }
1754
1755 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1756 *flowid = 0;
1757 } else {
1758 *flowid = m->m_pkthdr.drv_flowid;
1759 }
1760 return 0;
1761 }
1762
1763 errno_t
1764 mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
1765 {
1766 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1767 return EINVAL;
1768 }
1769
1770 driver_mtag_init(m);
1771
1772 m->m_pkthdr.drv_flowid = flowid;
1773
1774 return 0;
1775 }
1776
1777 errno_t
1778 mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
1779 {
1780 if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
1781 data == NULL) {
1782 return EINVAL;
1783 }
1784
1785 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1786 *arg = 0;
1787 *data = 0;
1788 } else {
1789 *arg = m->m_pkthdr.drv_tx_compl_arg;
1790 *data = m->m_pkthdr.drv_tx_compl_data;
1791 }
1792 return 0;
1793 }
1794
1795 errno_t
1796 mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
1797 {
1798 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1799 return EINVAL;
1800 }
1801
1802 driver_mtag_init(m);
1803
1804 m->m_pkthdr.drv_tx_compl_arg = arg;
1805 m->m_pkthdr.drv_tx_compl_data = data;
1806
1807 return 0;
1808 }
1809
1810 static u_int32_t
1811 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
1812 {
1813 u_int32_t i;
1814
1815 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1816 if (mbuf_tx_compl_table[i] == callback) {
1817 return i;
1818 }
1819 }
1820 return UINT32_MAX;
1821 }
1822
1823 static u_int32_t
1824 get_tx_compl_callback_index(mbuf_tx_compl_func callback)
1825 {
1826 u_int32_t i;
1827
1828 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1829
1830 i = get_tx_compl_callback_index_locked(callback);
1831
1832 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1833
1834 return i;
1835 }
1836
1837 mbuf_tx_compl_func
1838 m_get_tx_compl_callback(u_int32_t idx)
1839 {
1840 mbuf_tx_compl_func cb;
1841
1842 if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
1843 ASSERT(0);
1844 return NULL;
1845 }
1846 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1847 cb = mbuf_tx_compl_table[idx];
1848 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1849 return cb;
1850 }
1851
1852 errno_t
1853 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
1854 {
1855 int i;
1856 errno_t error;
1857
1858 if (callback == NULL) {
1859 return EINVAL;
1860 }
1861
1862 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
1863
1864 i = get_tx_compl_callback_index_locked(callback);
1865 if (i != -1) {
1866 error = EEXIST;
1867 goto unlock;
1868 }
1869
1870 /* assume the worst */
1871 error = ENOSPC;
1872 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1873 if (mbuf_tx_compl_table[i] == NULL) {
1874 mbuf_tx_compl_table[i] = callback;
1875 error = 0;
1876 goto unlock;
1877 }
1878 }
1879 unlock:
1880 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);
1881
1882 return error;
1883 }
1884
1885 errno_t
1886 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
1887 {
1888 int i;
1889 errno_t error;
1890
1891 if (callback == NULL) {
1892 return EINVAL;
1893 }
1894
1895 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
1896
1897 /* assume the worst */
1898 error = ENOENT;
1899 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1900 if (mbuf_tx_compl_table[i] == callback) {
1901 mbuf_tx_compl_table[i] = NULL;
1902 error = 0;
1903 goto unlock;
1904 }
1905 }
1906 unlock:
1907 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);
1908
1909 return error;
1910 }
1911
1912 errno_t
1913 mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
1914 {
1915 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1916 return EINVAL;
1917 }
1918
1919 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1920 *requested = FALSE;
1921 } else {
1922 *requested = TRUE;
1923 }
1924 return 0;
1925 }
1926
1927 errno_t
1928 mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
1929 mbuf_tx_compl_func callback)
1930 {
1931 size_t i;
1932
1933 if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
1934 pktid == NULL) {
1935 return EINVAL;
1936 }
1937
1938 i = get_tx_compl_callback_index(callback);
1939 if (i == UINT32_MAX) {
1940 return ENOENT;
1941 }
1942
1943 #if (DEBUG || DEVELOPMENT)
1944 VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
1945 #endif /* (DEBUG || DEVELOPMENT) */
1946
1947 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1948 m->m_pkthdr.pkt_compl_callbacks = 0;
1949 m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
1950 m->m_pkthdr.pkt_compl_context =
1951 atomic_add_32_ov(&mbuf_tx_compl_index, 1);
1952
1953 #if (DEBUG || DEVELOPMENT)
1954 if (mbuf_tx_compl_debug != 0) {
1955 OSIncrementAtomic64(&mbuf_tx_compl_outstanding);
1956 }
1957 #endif /* (DEBUG || DEVELOPMENT) */
1958 }
1959 m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
1960 *pktid = m->m_pkthdr.pkt_compl_context;
1961
1962 return 0;
1963 }
1964
1965 void
1966 m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
1967 {
1968 int i;
1969
1970 if (m == NULL) {
1971 return;
1972 }
1973
1974 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1975 return;
1976 }
1977
1978 #if (DEBUG || DEVELOPMENT)
1979 if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
1980 (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
1981 (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1982 struct timespec now;
1983
1984 nanouptime(&now);
1985 net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
1986 }
1987 #endif /* (DEBUG || DEVELOPMENT) */
1988
1989 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1990 mbuf_tx_compl_func callback;
1991
1992 if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
1993 continue;
1994 }
1995
1996 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1997 callback = mbuf_tx_compl_table[i];
1998 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1999
2000 if (callback != NULL) {
2001 callback(m->m_pkthdr.pkt_compl_context,
2002 ifp,
2003 (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
2004 m->m_pkthdr.pkt_timestamp: 0,
2005 m->m_pkthdr.drv_tx_compl_arg,
2006 m->m_pkthdr.drv_tx_compl_data,
2007 m->m_pkthdr.drv_tx_status);
2008 }
2009 }
2010 m->m_pkthdr.pkt_compl_callbacks = 0;
2011
2012 #if (DEBUG || DEVELOPMENT)
2013 if (mbuf_tx_compl_debug != 0) {
2014 OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
2015 if (ifp == NULL) {
2016 atomic_add_64(&mbuf_tx_compl_aborted, 1);
2017 }
2018 }
2019 #endif /* (DEBUG || DEVELOPMENT) */
2020 }
2021
2022 errno_t
2023 mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
2024 {
2025 if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
2026 return EINVAL;
2027 }
2028
2029 *is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
2030
2031 return 0;
2032 }
2033
2034 errno_t
2035 mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
2036 {
2037 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
2038 return EINVAL;
2039 }
2040
2041 if (is_keepalive) {
2042 m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
2043 } else {
2044 m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
2045 }
2046
2047 return 0;
2048 }