]> git.saurik.com Git - apple/xnu.git/blob - bsd/kern/kpi_mbuf.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / bsd / kern / kpi_mbuf.c
1 /*
2 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30
31 #include <sys/param.h>
32 #include <sys/mbuf.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <kern/kalloc.h>
38 #include <string.h>
39 #include <net/dlil.h>
40 #include <netinet/in.h>
41 #include <netinet/ip_var.h>
42
43 #include "net/net_str_id.h"
44
45 /* mbuf flags visible to KPI clients; do not add private flags here */
46 static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
47 MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
48 MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);
49
50 /* Unalterable mbuf flags */
51 static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
52
53 #define MAX_MBUF_TX_COMPL_FUNC 32
54 mbuf_tx_compl_func
55 mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
56 extern lck_rw_t *mbuf_tx_compl_tbl_lock;
57 u_int32_t mbuf_tx_compl_index = 0;
58
59 #if (DEVELOPMENT || DEBUG)
60 int mbuf_tx_compl_debug = 0;
61 SInt64 mbuf_tx_compl_outstanding __attribute__((aligned(8))) = 0;
62 u_int64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;
63
64 SYSCTL_DECL(_kern_ipc);
65 SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
66 CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
67 SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
68 CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
69 SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
70 CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
71 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, oustanding,
72 CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_outstanding, "");
73 SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
74 CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
75 #endif /* (DEBUG || DEVELOPMENT) */
76
/* Return the pointer to the start of valid data in this mbuf (m_data). */
void *
mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}
82
/*
 * Return the start of the mbuf's backing storage: the external buffer
 * for clusters, the post-header area for packet headers, or the
 * internal data area otherwise.  M_EXT takes precedence over M_PKTHDR.
 */
void *
mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_buf;
	}
	if (mbuf->m_flags & M_PKTHDR) {
		return mbuf->m_pktdat;
	}
	return mbuf->m_dat;
}
94
95 errno_t
96 mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
97 {
98 size_t start = (size_t)((char *)mbuf_datastart(mbuf));
99 size_t maxlen = mbuf_maxlen(mbuf);
100
101 if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
102 return EINVAL;
103 }
104 mbuf->m_data = data;
105 mbuf->m_len = len;
106
107 return 0;
108 }
109
/*
 * Reset m_data to the start of storage, then advance it so that a
 * payload of 'len' bytes ends at (approximately) the end of the buffer,
 * rounded down to a 32-bit boundary.  Refused (ENOTSUP) for shared
 * clusters, since moving m_data would affect other referents.
 *
 * NOTE(review): if len > mbuf_trailingspace() the size_t subtraction
 * below wraps around; callers appear expected to pass len <= capacity —
 * confirm against KPI contract.
 */
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}
122
123 /*
124 * This function is used to provide mcl_to_paddr via symbol indirection,
125 * please avoid any change in behavior or remove the indirection in
126 * config/Unsupported*
127 */
/* Translate a cluster data pointer to its physical address (see note above). */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	return (addr64_t)mcl_to_paddr(ptr);
}
133
/*
 * Allocate a plain (no packet header) mbuf of the given type.
 * On failure *mbuf is NULL (m_get returns NULL) and ENOMEM is returned.
 */
errno_t
mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return *mbuf == NULL ? ENOMEM : 0;
}
142
/*
 * Allocate an mbuf with a packet header (M_PKTHDR set).
 * On failure *mbuf is NULL and ENOMEM is returned.
 */
errno_t
mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return *mbuf == NULL ? ENOMEM : 0;
}
151
/*
 * Attach a caller-supplied external buffer to an mbuf.  If *mbuf is
 * NULL, m_clattach allocates a new mbuf of 'type'; extfree is invoked
 * when the last reference to the buffer is dropped.
 */
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
		return EINVAL;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, extarg, how, 0)) == NULL) {
		return ENOMEM;
	}

	return 0;
}
168
/*
 * Allocate a cluster and attach it to an mbuf as a "paired" (ring)
 * cluster (last m_clattach argument == 1).  *size is rounded up to the
 * actual cluster size by mbuf_alloccluster.  On attach failure the
 * freshly allocated cluster is released before returning ENOMEM.
 */
errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
{
	caddr_t extbuf = NULL;
	errno_t err;

	if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
		return EINVAL;
	}

	if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
		return err;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, *size, NULL, how, 1)) == NULL) {
		mbuf_freecluster(extbuf, *size);
		return ENOMEM;
	}

	return 0;
}
192
/* Report whether the mbuf's paired (ring) cluster is currently active. */
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}
198
/*
 * Activate the mbuf's paired (ring) cluster.
 * Returns EBUSY if it is already active.
 */
errno_t
mbuf_ring_cluster_activate(mbuf_t mbuf)
{
	if (mbuf_ring_cluster_is_active(mbuf)) {
		return EBUSY;
	}

	m_ext_paired_activate(mbuf);
	return 0;
}
209
/*
 * Atomically update the cluster property word from oldprop to newprop
 * (compare-and-swap semantics via m_ext_set_prop).  EBUSY on CAS
 * failure, EINVAL if the mbuf has no external cluster.
 */
errno_t
mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
{
	if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
}
219
/* Read the cluster property word; EINVAL without an external cluster. */
errno_t
mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
{
	if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	*prop = m_ext_get_prop(mbuf);
	return 0;
}
230
/*
 * Allocate a raw cluster of at least *size bytes from the smallest
 * suitable pool (2K, 4K "big", or 16K jumbo).  On success *size is
 * updated to the actual cluster size and *addr points to the buffer;
 * on failure *size is 0, *addr is NULL, and ENOMEM is returned.
 * ENOTSUP if a jumbo-sized request is made but the jumbo pool is empty.
 */
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	/* Try pools smallest-first; each branch fixes *size on success. */
	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}
263
/*
 * Return a raw cluster (allocated via mbuf_alloccluster) to its pool.
 * 'size' must be exactly one of the three cluster sizes; anything else
 * indicates caller corruption and panics.
 */
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);
	}

	if (size == MCLBYTES) {
		m_mclfree(addr);
	} else if (size == MBIGCLBYTES) {
		m_bigfree(addr, MBIGCLBYTES, NULL);
	} else if (njcl > 0) {
		/* size == M16KCLBYTES here, by the check above */
		m_16kfree(addr, M16KCLBYTES, NULL);
	} else {
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
	}
}
282
/*
 * Attach a cluster of exactly 'size' bytes (2K/4K/16K) to *mbuf,
 * allocating the mbuf first if *mbuf is NULL.  On any failure after we
 * allocated the mbuf ourselves, it is freed and *mbuf reset to NULL;
 * a caller-supplied mbuf is never freed here.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	/* Attach failed if M_EXT never got set. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
330
/*
 * Attach a standard (MCLBYTES) cluster to *mbuf, allocating the mbuf
 * first if *mbuf is NULL.  A self-allocated mbuf is freed on attach
 * failure; a caller-supplied one is left intact (only ENOMEM returned).
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;
	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	/* NULL check first: *mbuf may have just been freed above. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
363
364
/*
 * Allocate a packet-header mbuf with a cluster attached.
 * Failure maps to ENOMEM for blocking callers and EWOULDBLOCK for
 * non-blocking ones; *mbuf is NULL on failure.
 */
errno_t
mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK) {
			error = ENOMEM;
		} else {
			error = EWOULDBLOCK;
		}
	}

	return error;
}
383
384 /*
385 * This function is used to provide m_free via symbol indirection, please avoid
386 * any change in behavior or remove the indirection in config/Unsupported*
387 */
/* Free one mbuf; returns its successor in the chain (see note above). */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}
393
394 /*
395 * This function is used to provide m_freem via symbol indirection, please avoid
396 * any change in behavior or remove the indirection in config/Unsupported*
397 */
/* Free an entire mbuf chain (see indirection note above). */
void
mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}
403
/* Free a list of packets (linked via m_nextpkt); returns m_freem_list's count. */
int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}
409
/* Bytes available before m_data in this mbuf's buffer. */
size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}
415
416 /*
417 * This function is used to provide m_trailingspace via symbol indirection,
418 * please avoid any change in behavior or remove the indirection in
419 * config/Unsupported*
420 */
/* Bytes available after m_data + m_len in this mbuf's buffer. */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	return M_TRAILINGSPACE(mbuf);
}
426
427 /* Manipulation */
/*
 * Create a (possibly copy-on-write) copy of 'len' bytes of the chain
 * starting at 'offset'.  *new_mbuf is NULL and ENOMEM returned on failure.
 */
errno_t
mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}
437
/* Deep-copy an entire mbuf chain; *new_mbuf is NULL on failure. */
errno_t
mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}
446
/*
 * Prepend 'len' bytes to the chain, allocating a new leading mbuf if
 * there is no leading space.  On failure the chain is freed by
 * m_prepend_2 and *orig is NULL.
 */
errno_t
mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how, 0);

	return *orig == NULL ? ENOMEM : 0;
}
455
/*
 * Split the chain at 'offset'; *new_mbuf receives the tail.
 * On failure *new_mbuf is NULL (src is left intact by m_split).
 */
errno_t
mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}
465
/*
 * Make the first 'len' bytes of the chain contiguous in the first mbuf.
 * On failure m_pullup frees the chain and *mbuf becomes NULL.
 */
errno_t
mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return *mbuf == NULL ? ENOMEM : 0;
}
474
/*
 * Ensure 'len' bytes at *offset are contiguous; *location receives the
 * mbuf containing them and *offset is rewritten relative to *location.
 * On failure *location is NULL.
 */
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;
	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}
485
486 /*
487 * This function is used to provide m_adj via symbol indirection, please avoid
488 * any change in behavior or remove the indirection in config/Unsupported*
489 */
/* Trim |len| bytes from the head (len > 0) or tail (len < 0) of the chain. */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}
495
/*
 * Adjust m_len by 'amount' (positive to grow, negative to shrink),
 * verifying the result stays within this mbuf's storage.
 * Returns EINVAL if the adjustment would over/underflow the buffer.
 */
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		/* bytes already consumed: data offset plus current length */
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}
514
/*
 * Append chain 'src' to chain 'dst' (m_cat may coalesce data into
 * dst's tail mbuf).  Returns dst, or NULL if dst is NULL.
 */
mbuf_t
mbuf_concatenate(mbuf_t dst, mbuf_t src)
{
	if (dst == NULL) {
		return NULL;
	}

	m_cat(dst, src);

	/* return dst as is in the current implementation */
	return dst;
}
527 errno_t
528 mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
529 {
530 /* Copied m_copydata, added error handling (don't just panic) */
531 int count;
532 mbuf_t m = m0;
533
534 while (off > 0) {
535 if (m == 0) {
536 return EINVAL;
537 }
538 if (off < (size_t)m->m_len) {
539 break;
540 }
541 off -= m->m_len;
542 m = m->m_next;
543 }
544 while (len > 0) {
545 if (m == 0) {
546 return EINVAL;
547 }
548 count = m->m_len - off > len ? len : m->m_len - off;
549 bcopy(mtod(m, caddr_t) + off, out_data, count);
550 len -= count;
551 out_data = ((char *)out_data) + count;
552 off = 0;
553 m = m->m_next;
554 }
555
556 return 0;
557 }
558
559 int
560 mbuf_mclhasreference(mbuf_t mbuf)
561 {
562 if ((mbuf->m_flags & M_EXT)) {
563 return m_mclhasreference(mbuf);
564 } else {
565 return 0;
566 }
567 }
568
569
570 /* mbuf header */
/* Next mbuf in this packet's chain (m_next), or NULL. */
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}
576
/*
 * Link 'next' after 'mbuf'.  Rejected (EINVAL) if next already heads a
 * packet (m_nextpkt set) or has been freed (MT_FREE).
 */
errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) {
		return EINVAL;
	}
	mbuf->m_next = next;

	return 0;
}
588
/* Next packet in the packet list (m_nextpkt), or NULL. */
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}
594
/* Set the next-packet link (m_nextpkt); no validation is performed. */
void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}
600
/* Length of valid data in this single mbuf (m_len). */
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}
606
/*
 * Set m_len directly.  No bounds checking is done here; the caller is
 * responsible for len fitting in the mbuf (use mbuf_adjustlen for a
 * checked adjustment).  NOTE(review): len is silently narrowed to
 * m_len's int storage — values above INT32_MAX would wrap; confirm
 * callers never pass such lengths.
 */
void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}
612
/*
 * Total storage capacity of this mbuf: the cluster size for M_EXT,
 * otherwise the distance from the current data start to the end of
 * the internal m_dat area.
 */
size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}
621
/* The mbuf's allocation type (m_type). */
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}
627
/*
 * Change the mbuf's type, keeping per-type accounting consistent via
 * m_mchtype.  Setting MBUF_TYPE_FREE is forbidden (EINVAL) — only the
 * allocator may mark an mbuf free.
 */
errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) {
		return EINVAL;
	}

	m_mchtype(mbuf, new_type);

	return 0;
}
639
/* The mbuf's flags, filtered to the KPI-visible set (private bits hidden). */
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
645
/*
 * Replace the mbuf's KPI-visible flags with 'flags', preserving the
 * private bits.  A change to M_PKTHDR is handed off to m_reinit(),
 * which performs the required header bookkeeping.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 *    in flags argument.
	 * 2. Return error if bits other than public flags are set in passed
	 *    flags argument.
	 *    Please note that private flag bits must be passed as reset by
	 *    kexts, as they must use mbuf_flags KPI to get current set of
	 *    mbuf flags and mbuf_flags KPI does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
681
/*
 * Update only the flag bits selected by 'mask' to the values in
 * 'flags'.  The mask may cover only public, alterable flags; like
 * mbuf_setflags, an M_PKTHDR transition is delegated to m_reinit().
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
706
/*
 * Copy the packet header from src to dest (m_copy_pkthdr also moves
 * tags).  EINVAL if src has no packet header.
 */
errno_t
mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0) {
		return EINVAL;
	}

	m_copy_pkthdr(dest, src);

	return 0;
}
718
/*
 * Total packet length recorded in the header (m_pkthdr.len).
 * Returns 0 for mbufs without a packet header, and clamps a negative
 * (corrupt) length to 0 on release builds.
 */
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we Assert for development or debug builds,
	 * also make sure we never return negative length
	 * for release build.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}
736
737 __private_extern__ size_t
738 mbuf_pkthdr_maxlen(mbuf_t m)
739 {
740 size_t maxlen = 0;
741 mbuf_t n = m;
742
743 while (n) {
744 maxlen += mbuf_maxlen(n);
745 n = mbuf_next(n);
746 }
747 return maxlen;
748 }
749
/*
 * Set the packet header length, clamping to INT32_MAX since
 * m_pkthdr.len is a signed 32-bit field.
 */
void
mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	if (len > INT32_MAX) {
		len = INT32_MAX;
	}

	mbuf->m_pkthdr.len = len;
}
759
/* Adjust the packet header length by 'amount'; no bounds checking. */
void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
765
/* Interface the packet was received on (m_pkthdr.rcvif); no reference taken. */
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning
	 */
	return mbuf->m_pkthdr.rcvif;
}
775
/* Record the receiving interface; the ifnet is not validated here. */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}
783
/* Opaque link-layer header pointer stashed in the packet header. */
void*
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}
789
/* Stash an opaque link-layer header pointer in the packet header. */
void
mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void*)header;
}
795
/*
 * Declare that inbound packet data was modified: any hardware-computed
 * checksum results are no longer trustworthy, so clear them.
 */
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
802
/*
 * Finalize an outbound packet in software: compute any checksums that
 * were deferred to hardware offload, for the given protocol family.
 * 'o' is the offset of the IP header within the packet.  Unknown
 * families are ignored.
 */
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
#if INET6
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
#endif /* INET6 */
		break;

	default:
		break;
	}
}
827
/* Attach an 802.1Q VLAN tag to the packet and mark it valid. */
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}
838
/* Read the packet's VLAN tag; ENXIO if no tag has been set. */
errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		return ENXIO; // No vlan tag set
	}
	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}
851
/* Remove the packet's VLAN tag and clear the valid marker. */
errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
861
862 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
863 MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
864 MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;
865
/*
 * Request checksum offload: store the requested flags in the low 16
 * bits of csum_flags (preserving the upper, performed-side bits) and
 * record the auxiliary value (e.g. partial-checksum offset) in
 * csum_data.  Unknown request bits are silently dropped.
 */
errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
879
880 static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
881 MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
882
883 errno_t
884 mbuf_get_tso_requested(
885 mbuf_t mbuf,
886 mbuf_tso_request_flags_t *request,
887 u_int32_t *value)
888 {
889 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
890 request == NULL || value == NULL) {
891 return EINVAL;
892 }
893
894 *request = mbuf->m_pkthdr.csum_flags;
895 *request &= mbuf_valid_tso_request_flags;
896 if (*request && value != NULL) {
897 *value = mbuf->m_pkthdr.tso_segsz;
898 }
899
900 return 0;
901 }
902
/*
 * Report the checksum-offload flags requested for this packet, and the
 * auxiliary csum_data value if the caller asked for it.
 *
 * NOTE(review): unlike mbuf_get_tso_requested, this performs no NULL /
 * M_PKTHDR validation on mbuf or request — presumably intentional for
 * this KPI's callers; confirm before adding guards.
 */
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}
917
/*
 * Clear the requested-checksum flags (low 16 bits of csum_flags) and
 * csum_data, leaving the performed-side bits intact.
 */
errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
927
928 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
929 MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
930 MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;
931
/*
 * Record checksum results computed by hardware on receive: stores the
 * performed flags in the low 16 bits of csum_flags (preserving the
 * upper bits) and the checksum value in csum_data.  Unknown bits are
 * silently dropped.
 */
errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
945
/* Read the hardware-performed checksum flags and value for this packet. */
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}
958
/*
 * Clear the performed-checksum flags (low 16 bits of csum_flags) and
 * csum_data, leaving the upper bits intact.
 */
errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
968
969 errno_t
970 mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
971 u_int16_t *csum)
972 {
973 if (mbuf == NULL || length == 0 || csum == NULL ||
974 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
975 return EINVAL;
976 }
977
978 *csum = inet_cksum(mbuf, protocol, offset, length);
979 return 0;
980 }
981
982 #if INET6
983 errno_t
984 mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
985 u_int16_t *csum)
986 {
987 if (mbuf == NULL || length == 0 || csum == NULL ||
988 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
989 return EINVAL;
990 }
991
992 *csum = inet6_cksum(mbuf, protocol, offset, length);
993 return 0;
994 }
995 #else /* INET6 */
/*
 * Stubs for kernels built without INET6: these symbols must still be
 * exported for binary compatibility, but any call is a programming
 * error, so each panics immediately.
 */
errno_t
mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
    __unused u_int32_t offset, __unused u_int32_t length,
    __unused u_int16_t *csum)
{
	panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
	return 0;
}

u_int16_t
inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
    __unused unsigned int off, __unused unsigned int len)
{
	panic("inet6_cksum() doesn't exist on this platform\n");
	return 0;
}

void nd6_lookup_ipv6(void);
void
nd6_lookup_ipv6(void)
{
	panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
}

int
in6addr_local(__unused struct in6_addr *a)
{
	panic("in6addr_local() doesn't exist on this platform\n");
	return 0;
}

void nd6_storelladdr(void);
void
nd6_storelladdr(void)
{
	panic("nd6_storelladdr() doesn't exist on this platform\n");
}
1033 #endif /* INET6 */
1034
1035 /*
1036 * Mbuf tag KPIs
1037 */
1038
1039 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
1040
/* Look up (or register) a string-keyed mbuf tag id in the NSI_MBUF_TAG space. */
errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
1048
1049 errno_t
1050 mbuf_tag_allocate(
1051 mbuf_t mbuf,
1052 mbuf_tag_id_t id,
1053 mbuf_tag_type_t type,
1054 size_t length,
1055 mbuf_how_t how,
1056 void** data_p)
1057 {
1058 struct m_tag *tag;
1059 u_int32_t mtag_id_first, mtag_id_last;
1060
1061 if (data_p != NULL) {
1062 *data_p = NULL;
1063 }
1064
1065 /* Sanity check parameters */
1066 (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1067 NSI_MBUF_TAG);
1068 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1069 id < mtag_id_first || id > mtag_id_last || length < 1 ||
1070 (length & 0xffff0000) != 0 || data_p == NULL) {
1071 return EINVAL;
1072 }
1073
1074 /* Make sure this mtag hasn't already been allocated */
1075 tag = m_tag_locate(mbuf, id, type, NULL);
1076 if (tag != NULL) {
1077 return EEXIST;
1078 }
1079
1080 /* Allocate an mtag */
1081 tag = m_tag_create(id, type, length, how, mbuf);
1082 if (tag == NULL) {
1083 return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
1084 }
1085
1086 /* Attach the mtag and set *data_p */
1087 m_tag_prepend(mbuf, tag);
1088 *data_p = tag + 1;
1089
1090 return 0;
1091 }
1092
/*
 * Locate a previously attached tag by (id, type).  On success *length
 * receives the tag's data length and *data_p points to the data area;
 * both are zeroed/NULLed up front so failure paths leave them clean.
 * ENOENT if no matching tag exists.
 */
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}
1132
/*
 * Remove and free the tag matching (id, type), if one is attached.
 * Silently does nothing on invalid arguments or when no tag matches.
 */
void
mbuf_tag_free(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last) {
		return;
	}

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
}
1157
1158 /*
1159 * Maximum length of driver auxiliary data; keep this small to
1160 * fit in a single mbuf to avoid wasting memory, rounded down to
1161 * the nearest 64-bit boundary. This takes into account mbuf
1162 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
1163 */
1164 #define MBUF_DRVAUX_MAXLEN \
1165 P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
1166 M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1167
/*
 * Attach driver auxiliary data to the packet as a DRVAUX kernel tag:
 * an m_drvaux_tag header (family/subfamily/length) followed by
 * 'length' bytes of module-specific data.  Only one DRVAUX tag may be
 * attached at a time (EEXIST otherwise).  *data_p, if supplied,
 * receives a pointer to the module-specific data area.
 */
errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
		return EINVAL;
	}

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		return EEXIST;
	}

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof(*p) + length, how, mbuf)) == NULL) {
		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
	}

	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL) {
		*data_p = (p + 1);
	}

	return 0;
}
1210
/*
 * Locate the packet's DRVAUX tag.  *data_p (required) receives the
 * module-specific data area; family/subfamily/length are returned only
 * through the optional out parameters.  ENOENT if no DRVAUX tag is
 * attached; VERIFY guards against a corrupt tag.
 */
errno_t
mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
    u_int32_t *length_p, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
		return EINVAL;
	}

	*data_p = NULL;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) {
		return ENOENT;
	}

	/* Must be at least size of m_drvaux_tag */
	VERIFY(tag->m_tag_len >= sizeof(*p));

	p = (struct m_drvaux_tag *)(tag + 1);
	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);

	if (family_p != NULL) {
		*family_p = p->da_family;
	}
	if (subfamily_p != NULL) {
		*subfamily_p = p->da_subfamily;
	}
	if (length_p != NULL) {
		*length_p = p->da_length;
	}

	*data_p = (p + 1);

	return 0;
}
1249
/* Remove the packet's DRVAUX tag, if present; no-op otherwise. */
void
mbuf_del_drvaux(mbuf_t mbuf)
{
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
		return;
	}

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		m_tag_delete(mbuf, tag);
	}
}
1264
1265 /* mbuf stats */
/* Snapshot the global mbuf allocator statistics (mbstat) into the KPI struct. */
void
mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
1287
/*
 * Allocate a single packet of 'packetlen' bytes as a cluster-backed
 * chain.  If maxchunks is supplied it caps the number of mbufs allowed
 * in the chain on input and reports the number used on output; ENOBUFS
 * distinguishes "couldn't fit within *maxchunks" from plain ENOMEM.
 * NOTE(review): *mbuf is left unmodified on failure — confirm callers
 * check the return code rather than *mbuf.
 */
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
1319
1320 errno_t
1321 mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
1322 unsigned int *maxchunks, mbuf_t *mbuf)
1323 {
1324 errno_t error;
1325 struct mbuf *m;
1326 unsigned int numchunks = maxchunks ? *maxchunks : 0;
1327
1328 if (numpkts == 0) {
1329 error = EINVAL;
1330 goto out;
1331 }
1332 if (packetlen == 0) {
1333 error = EINVAL;
1334 goto out;
1335 }
1336 m = m_allocpacket_internal(&numpkts, packetlen,
1337 maxchunks ? &numchunks : NULL, how, 1, 0);
1338 if (m == 0) {
1339 if (maxchunks && *maxchunks && numchunks > *maxchunks) {
1340 error = ENOBUFS;
1341 } else {
1342 error = ENOMEM;
1343 }
1344 } else {
1345 if (maxchunks) {
1346 *maxchunks = numchunks;
1347 }
1348 error = 0;
1349 *mbuf = m;
1350 }
1351 out:
1352 return error;
1353 }
1354
1355 __private_extern__ size_t
1356 mbuf_pkt_list_len(mbuf_t m)
1357 {
1358 size_t len = 0;
1359 mbuf_t n = m;
1360
1361 while (n) {
1362 len += mbuf_pkthdr_len(n);
1363 n = mbuf_nextpkt(n);
1364 }
1365 return len;
1366 }
1367
1368 __private_extern__ size_t
1369 mbuf_pkt_list_maxlen(mbuf_t m)
1370 {
1371 size_t maxlen = 0;
1372 mbuf_t n = m;
1373
1374 while (n) {
1375 maxlen += mbuf_pkthdr_maxlen(n);
1376 n = mbuf_nextpkt(n);
1377 }
1378 return maxlen;
1379 }
1380
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;	/* head kept to fix up pkthdr.len at the end */
	mbuf_t n;
	/*
	 * NOTE(review): totlen is a signed int accumulating size_t lengths;
	 * assumes callers keep off+len well below INT_MAX — confirm.
	 */
	int totlen = 0;
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/*
	 * Phase 1: walk the chain until `off' falls inside the current
	 * mbuf, appending zero-filled mbufs if the chain is too short.
	 */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			/* zero-filled so any gap reads back as zeroes */
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/*
	 * Phase 2: copy `len' bytes, growing the tail mbuf into its
	 * trailing space and appending new (cluster-backed when large)
	 * mbufs as needed.
	 */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			/* last mbuf: claim unused space before allocating */
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		/* first iteration copies at offset; later ones at 0 */
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* Grow (never shrink) the recorded packet length to cover the copy. */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
1465
/*
 * Return the data capacity (_MLEN) of a plain mbuf (no packet header,
 * no external cluster).
 */
u_int32_t
mbuf_get_mlen(void)
{
	return _MLEN;
}
1471
/*
 * Return the data capacity (_MHLEN) of an mbuf that carries a packet
 * header.
 */
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}
1477
/*
 * Return the smallest amount of data for which a cluster would be used.
 * NOTE(review): computed as MHLEN + MLEN rather than the MINCLSIZE
 * macro used elsewhere in this file — confirm the two agree in this
 * configuration.
 */
u_int32_t
mbuf_get_minclsize(void)
{
	return MHLEN + MLEN;
}
1483
/*
 * Return the number of defined mbuf traffic classes (MBUF_TC_MAX).
 */
u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return MBUF_TC_MAX;
}
1489
1490 errno_t
1491 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
1492 {
1493 if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
1494 return EINVAL;
1495 }
1496
1497 *index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
1498 return 0;
1499 }
1500
1501 mbuf_traffic_class_t
1502 mbuf_get_traffic_class(mbuf_t m)
1503 {
1504 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1505 return MBUF_TC_BE;
1506 }
1507
1508 return m_get_traffic_class(m);
1509 }
1510
1511 errno_t
1512 mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
1513 {
1514 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1515 ((u_int32_t)tc >= MBUF_TC_MAX)) {
1516 return EINVAL;
1517 }
1518
1519 return m_set_traffic_class(m, tc);
1520 }
1521
1522 int
1523 mbuf_is_traffic_class_privileged(mbuf_t m)
1524 {
1525 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1526 !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1527 return 0;
1528 }
1529
1530 return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
1531 }
1532
/*
 * Return the number of defined mbuf service classes
 * (MBUF_SC_MAX_CLASSES).
 */
u_int32_t
mbuf_get_service_class_max_count(void)
{
	return MBUF_SC_MAX_CLASSES;
}
1538
1539 errno_t
1540 mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
1541 {
1542 if (index == NULL || !MBUF_VALID_SC(sc)) {
1543 return EINVAL;
1544 }
1545
1546 *index = MBUF_SCIDX(sc);
1547 return 0;
1548 }
1549
1550 mbuf_svc_class_t
1551 mbuf_get_service_class(mbuf_t m)
1552 {
1553 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1554 return MBUF_SC_BE;
1555 }
1556
1557 return m_get_service_class(m);
1558 }
1559
1560 errno_t
1561 mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
1562 {
1563 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1564 return EINVAL;
1565 }
1566
1567 return m_set_service_class(m, sc);
1568 }
1569
1570 errno_t
1571 mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
1572 {
1573 u_int32_t flags;
1574
1575 if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
1576 return EINVAL;
1577 }
1578
1579 *flagsp = 0;
1580 flags = m->m_pkthdr.pkt_flags;
1581 if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
1582 (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
1583 *flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
1584 }
1585 if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
1586 (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
1587 *flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
1588 }
1589
1590 /* These 2 flags are mutually exclusive */
1591 VERIFY((*flagsp &
1592 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
1593 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));
1594
1595 return 0;
1596 }
1597
1598 errno_t
1599 mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
1600 {
1601 if (m == NULL || area == NULL || area_len == NULL ||
1602 !(m->m_flags & M_PKTHDR)) {
1603 return EINVAL;
1604 }
1605
1606 *area_len = m_scratch_get(m, area);
1607 return 0;
1608 }
1609
1610 errno_t
1611 mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
1612 {
1613 if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
1614 return EINVAL;
1615 }
1616
1617 if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1618 return EINVAL;
1619 }
1620
1621 *unsent_data = m->m_pkthdr.bufstatus_if +
1622 m->m_pkthdr.bufstatus_sndbuf;
1623 return 0;
1624 }
1625
1626 errno_t
1627 mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
1628 {
1629 if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
1630 !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1631 return EINVAL;
1632 }
1633
1634 buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
1635 buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
1636 return 0;
1637 }
1638
1639 errno_t
1640 mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
1641 {
1642 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1643 return EINVAL;
1644 }
1645 if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
1646 *retval = 1;
1647 } else {
1648 *retval = 0;
1649 }
1650 return 0;
1651 }
1652
1653 errno_t
1654 mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
1655 {
1656 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1657 return EINVAL;
1658 }
1659 if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
1660 *retval = 1;
1661 } else {
1662 *retval = 0;
1663 }
1664 return 0;
1665 }
1666
1667 errno_t
1668 mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
1669 {
1670 if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
1671 return EINVAL;
1672 }
1673
1674 if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1675 if (valid != NULL) {
1676 *valid = FALSE;
1677 }
1678 *ts = 0;
1679 } else {
1680 if (valid != NULL) {
1681 *valid = TRUE;
1682 }
1683 *ts = m->m_pkthdr.pkt_timestamp;
1684 }
1685 return 0;
1686 }
1687
1688 errno_t
1689 mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
1690 {
1691 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1692 return EINVAL;
1693 }
1694
1695 if (valid == FALSE) {
1696 m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
1697 m->m_pkthdr.pkt_timestamp = 0;
1698 } else {
1699 m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
1700 m->m_pkthdr.pkt_timestamp = ts;
1701 }
1702 return 0;
1703 }
1704
1705 errno_t
1706 mbuf_get_status(mbuf_t m, kern_return_t *status)
1707 {
1708 if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
1709 return EINVAL;
1710 }
1711
1712 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1713 *status = 0;
1714 } else {
1715 *status = m->m_pkthdr.drv_tx_status;
1716 }
1717 return 0;
1718 }
1719
1720 static void
1721 driver_mtag_init(mbuf_t m)
1722 {
1723 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1724 m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
1725 bzero(&m->m_pkthdr.driver_mtag,
1726 sizeof(m->m_pkthdr.driver_mtag));
1727 }
1728 }
1729
1730 errno_t
1731 mbuf_set_status(mbuf_t m, kern_return_t status)
1732 {
1733 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1734 return EINVAL;
1735 }
1736
1737 driver_mtag_init(m);
1738
1739 m->m_pkthdr.drv_tx_status = status;
1740
1741 return 0;
1742 }
1743
1744 errno_t
1745 mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
1746 {
1747 if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
1748 return EINVAL;
1749 }
1750
1751 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1752 *flowid = 0;
1753 } else {
1754 *flowid = m->m_pkthdr.drv_flowid;
1755 }
1756 return 0;
1757 }
1758
1759 errno_t
1760 mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
1761 {
1762 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1763 return EINVAL;
1764 }
1765
1766 driver_mtag_init(m);
1767
1768 m->m_pkthdr.drv_flowid = flowid;
1769
1770 return 0;
1771 }
1772
1773 errno_t
1774 mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
1775 {
1776 if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
1777 data == NULL) {
1778 return EINVAL;
1779 }
1780
1781 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1782 *arg = 0;
1783 *data = 0;
1784 } else {
1785 *arg = m->m_pkthdr.drv_tx_compl_arg;
1786 *data = m->m_pkthdr.drv_tx_compl_data;
1787 }
1788 return 0;
1789 }
1790
1791 errno_t
1792 mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
1793 {
1794 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1795 return EINVAL;
1796 }
1797
1798 driver_mtag_init(m);
1799
1800 m->m_pkthdr.drv_tx_compl_arg = arg;
1801 m->m_pkthdr.drv_tx_compl_data = data;
1802
1803 return 0;
1804 }
1805
1806 static u_int32_t
1807 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
1808 {
1809 u_int32_t i;
1810
1811 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1812 if (mbuf_tx_compl_table[i] == callback) {
1813 return i;
1814 }
1815 }
1816 return UINT32_MAX;
1817 }
1818
1819 static u_int32_t
1820 get_tx_compl_callback_index(mbuf_tx_compl_func callback)
1821 {
1822 u_int32_t i;
1823
1824 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1825
1826 i = get_tx_compl_callback_index_locked(callback);
1827
1828 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1829
1830 return i;
1831 }
1832
1833 mbuf_tx_compl_func
1834 m_get_tx_compl_callback(u_int32_t idx)
1835 {
1836 mbuf_tx_compl_func cb;
1837
1838 if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
1839 ASSERT(0);
1840 return NULL;
1841 }
1842 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1843 cb = mbuf_tx_compl_table[idx];
1844 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1845 return cb;
1846 }
1847
1848 errno_t
1849 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
1850 {
1851 int i;
1852 errno_t error;
1853
1854 if (callback == NULL) {
1855 return EINVAL;
1856 }
1857
1858 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
1859
1860 i = get_tx_compl_callback_index_locked(callback);
1861 if (i != -1) {
1862 error = EEXIST;
1863 goto unlock;
1864 }
1865
1866 /* assume the worst */
1867 error = ENOSPC;
1868 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1869 if (mbuf_tx_compl_table[i] == NULL) {
1870 mbuf_tx_compl_table[i] = callback;
1871 error = 0;
1872 goto unlock;
1873 }
1874 }
1875 unlock:
1876 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);
1877
1878 return error;
1879 }
1880
1881 errno_t
1882 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
1883 {
1884 int i;
1885 errno_t error;
1886
1887 if (callback == NULL) {
1888 return EINVAL;
1889 }
1890
1891 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
1892
1893 /* assume the worst */
1894 error = ENOENT;
1895 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1896 if (mbuf_tx_compl_table[i] == callback) {
1897 mbuf_tx_compl_table[i] = NULL;
1898 error = 0;
1899 goto unlock;
1900 }
1901 }
1902 unlock:
1903 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);
1904
1905 return error;
1906 }
1907
1908 errno_t
1909 mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
1910 {
1911 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1912 return EINVAL;
1913 }
1914
1915 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1916 *requested = FALSE;
1917 } else {
1918 *requested = TRUE;
1919 }
1920 return 0;
1921 }
1922
1923 errno_t
1924 mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
1925 mbuf_tx_compl_func callback)
1926 {
1927 size_t i;
1928
1929 if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
1930 pktid == NULL) {
1931 return EINVAL;
1932 }
1933
1934 i = get_tx_compl_callback_index(callback);
1935 if (i == UINT32_MAX) {
1936 return ENOENT;
1937 }
1938
1939 #if (DEBUG || DEVELOPMENT)
1940 VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
1941 #endif /* (DEBUG || DEVELOPMENT) */
1942
1943 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1944 m->m_pkthdr.pkt_compl_callbacks = 0;
1945 m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
1946 m->m_pkthdr.pkt_compl_context =
1947 atomic_add_32_ov(&mbuf_tx_compl_index, 1);
1948
1949 #if (DEBUG || DEVELOPMENT)
1950 if (mbuf_tx_compl_debug != 0) {
1951 OSIncrementAtomic64(&mbuf_tx_compl_outstanding);
1952 }
1953 #endif /* (DEBUG || DEVELOPMENT) */
1954 }
1955 m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
1956 *pktid = m->m_pkthdr.pkt_compl_context;
1957
1958 return 0;
1959 }
1960
1961 void
1962 m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
1963 {
1964 int i;
1965
1966 if (m == NULL) {
1967 return;
1968 }
1969
1970 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1971 return;
1972 }
1973
1974 #if (DEBUG || DEVELOPMENT)
1975 if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
1976 (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
1977 (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1978 struct timespec now;
1979
1980 nanouptime(&now);
1981 net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
1982 }
1983 #endif /* (DEBUG || DEVELOPMENT) */
1984
1985 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1986 mbuf_tx_compl_func callback;
1987
1988 if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
1989 continue;
1990 }
1991
1992 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1993 callback = mbuf_tx_compl_table[i];
1994 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1995
1996 if (callback != NULL) {
1997 callback(m->m_pkthdr.pkt_compl_context,
1998 ifp,
1999 (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
2000 m->m_pkthdr.pkt_timestamp: 0,
2001 m->m_pkthdr.drv_tx_compl_arg,
2002 m->m_pkthdr.drv_tx_compl_data,
2003 m->m_pkthdr.drv_tx_status);
2004 }
2005 }
2006 m->m_pkthdr.pkt_compl_callbacks = 0;
2007
2008 #if (DEBUG || DEVELOPMENT)
2009 if (mbuf_tx_compl_debug != 0) {
2010 OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
2011 if (ifp == NULL) {
2012 atomic_add_64(&mbuf_tx_compl_aborted, 1);
2013 }
2014 }
2015 #endif /* (DEBUG || DEVELOPMENT) */
2016 }