/*
 * bsd/kern/kpi_mbuf.c — from Apple xnu-7195.60.75
 */
1 /*
2 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #define __KPI__
30
31 #include <sys/param.h>
32 #include <sys/mbuf.h>
33 #include <sys/mcache.h>
34 #include <sys/socket.h>
35 #include <kern/debug.h>
36 #include <libkern/OSAtomic.h>
37 #include <string.h>
38 #include <net/dlil.h>
39 #include <netinet/in.h>
40 #include <netinet/ip_var.h>
41
42 #include "net/net_str_id.h"
43
44 /* mbuf flags visible to KPI clients; do not add private flags here */
45 static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
46 MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
47 MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);
48
49 /* Unalterable mbuf flags */
50 static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
51
/* Registry of transmit-completion callbacks, indexed by completion id. */
#define MAX_MBUF_TX_COMPL_FUNC 32
mbuf_tx_compl_func
mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
/* Guards mbuf_tx_compl_table/mbuf_tx_compl_index; allocated elsewhere. */
extern lck_rw_t *mbuf_tx_compl_tbl_lock;
/* Next unused slot in mbuf_tx_compl_table. */
u_int32_t mbuf_tx_compl_index = 0;
57
#if (DEVELOPMENT || DEBUG)
/* Debug counters for tx-completion callbacks, exported via sysctl below. */
int mbuf_tx_compl_debug = 0;
SInt64 mbuf_tx_compl_outstanding __attribute__((aligned(8))) = 0;
u_int64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
/*
 * NOTE(review): "oustanding" is a typo for "outstanding", but this string
 * is the user-visible sysctl node name; renaming it would break any
 * existing tooling that reads it, so it is intentionally left as-is.
 */
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, oustanding,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_outstanding, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
#endif /* (DEBUG || DEVELOPMENT) */
75
/* Return the current start-of-data pointer (m_data) of the mbuf. */
void *
mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}
81
82 void *
83 mbuf_datastart(mbuf_t mbuf)
84 {
85 if (mbuf->m_flags & M_EXT) {
86 return mbuf->m_ext.ext_buf;
87 }
88 if (mbuf->m_flags & M_PKTHDR) {
89 return mbuf->m_pktdat;
90 }
91 return mbuf->m_dat;
92 }
93
94 errno_t
95 mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
96 {
97 size_t start = (size_t)((char *)mbuf_datastart(mbuf));
98 size_t maxlen = mbuf_maxlen(mbuf);
99
100 if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
101 return EINVAL;
102 }
103 mbuf->m_data = data;
104 mbuf->m_len = len;
105
106 return 0;
107 }
108
109 errno_t
110 mbuf_align_32(mbuf_t mbuf, size_t len)
111 {
112 if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
113 return ENOTSUP;
114 }
115 mbuf->m_data = mbuf_datastart(mbuf);
116 mbuf->m_data +=
117 ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));
118
119 return 0;
120 }
121
/*
 * This function is used to provide mcl_to_paddr via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
/* Translate an mbuf data pointer to its physical address. */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	return (addr64_t)mcl_to_paddr(ptr);
}
132
133 errno_t
134 mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
135 {
136 /* Must set *mbuf to NULL in failure case */
137 *mbuf = m_get(how, type);
138
139 return *mbuf == NULL ? ENOMEM : 0;
140 }
141
142 errno_t
143 mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
144 {
145 /* Must set *mbuf to NULL in failure case */
146 *mbuf = m_gethdr(how, type);
147
148 return *mbuf == NULL ? ENOMEM : 0;
149 }
150
151 errno_t
152 mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
153 caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
154 size_t extsize, caddr_t extarg)
155 {
156 if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
157 return EINVAL;
158 }
159
160 if ((*mbuf = m_clattach(*mbuf, type, extbuf,
161 extfree, extsize, extarg, how, 0)) == NULL) {
162 return ENOMEM;
163 }
164
165 return 0;
166 }
167
/*
 * Allocate a cluster of at least *size bytes and attach it to *mbuf as a
 * paired ("ring") buffer.  On success *size reflects the cluster class
 * actually chosen by mbuf_alloccluster().
 */
errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
{
	caddr_t extbuf = NULL;
	errno_t err;

	if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
		return EINVAL;
	}

	if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
		return err;
	}

	/* Final argument 1 requests a paired (ring) attachment. */
	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, *size, NULL, how, 1)) == NULL) {
		/* Attachment failed: return the fresh cluster to its pool. */
		mbuf_freecluster(extbuf, *size);
		return ENOMEM;
	}

	return 0;
}
191
/* Return non-zero if the mbuf's paired (ring) cluster is active. */
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}
197
198 errno_t
199 mbuf_ring_cluster_activate(mbuf_t mbuf)
200 {
201 if (mbuf_ring_cluster_is_active(mbuf)) {
202 return EBUSY;
203 }
204
205 m_ext_paired_activate(mbuf);
206 return 0;
207 }
208
209 errno_t
210 mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
211 {
212 if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
213 return EINVAL;
214 }
215
216 return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
217 }
218
219 errno_t
220 mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
221 {
222 if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
223 return EINVAL;
224 }
225
226 *prop = m_ext_get_prop(mbuf);
227 return 0;
228 }
229
/*
 * Allocate a bare cluster of at least *size bytes, rounding *size up to
 * the cluster class actually used (MCLBYTES, MBIGCLBYTES or M16KCLBYTES).
 * On failure *addr is NULL and *size is 0.
 */
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}

	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	/* Try the smallest class that fits; fall through on pool exhaustion. */
	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		/* Request too large, or the allocator came up empty. */
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}
262
263 void
264 mbuf_freecluster(caddr_t addr, size_t size)
265 {
266 if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
267 panic("%s: invalid size (%ld) for cluster %p", __func__,
268 size, (void *)addr);
269 }
270
271 if (size == MCLBYTES) {
272 m_mclfree(addr);
273 } else if (size == MBIGCLBYTES) {
274 m_bigfree(addr, MBIGCLBYTES, NULL);
275 } else if (njcl > 0) {
276 m_16kfree(addr, M16KCLBYTES, NULL);
277 } else {
278 panic("%s: freeing jumbo cluster to an empty pool", __func__);
279 }
280 }
281
/*
 * Attach a cluster of exactly 'size' bytes to *mbuf, allocating a plain
 * mbuf first when the caller passed *mbuf == NULL.  Only the three
 * cluster classes (MCLBYTES, MBIGCLBYTES, M16KCLBYTES) are accepted.
 */
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	/* The attach failed if M_EXT never got set. */
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	/*
	 * Only free the mbuf when we allocated it above; a caller-supplied
	 * mbuf is left intact on failure.
	 */
	if (created && error != 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
329
/*
 * Attach a regular (MCLBYTES) cluster to *mbuf, allocating the mbuf
 * first when *mbuf is NULL.  On failure a locally-allocated mbuf is
 * freed and *mbuf is set to NULL; a caller-supplied mbuf is kept.
 */
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;
	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	/* Undo our own allocation if the cluster attach failed. */
	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
362
363
364 errno_t
365 mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
366 {
367 /* Must set *mbuf to NULL in failure case */
368 errno_t error = 0;
369
370 *mbuf = m_getpacket_how(how);
371
372 if (*mbuf == NULL) {
373 if (how == MBUF_WAITOK) {
374 error = ENOMEM;
375 } else {
376 error = EWOULDBLOCK;
377 }
378 }
379
380 return error;
381 }
382
/*
 * This function is used to provide m_free via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
/* Free a single mbuf; returns the next mbuf in the chain. */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}
392
/*
 * This function is used to provide m_freem via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
/* Free an entire mbuf chain. */
void
mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}
402
/* Free a list of packets (chained via m_nextpkt); returns m_freem_list's count. */
int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}
408
/* Bytes of unused space before m_data in the mbuf's buffer. */
size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}
414
/*
 * This function is used to provide m_trailingspace via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
/* Bytes of unused space after m_data + m_len in the mbuf's buffer. */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	return M_TRAILINGSPACE(mbuf);
}
425
426 /* Manipulation */
427 errno_t
428 mbuf_copym(const mbuf_t src, size_t offset, size_t len,
429 mbuf_how_t how, mbuf_t *new_mbuf)
430 {
431 /* Must set *mbuf to NULL in failure case */
432 *new_mbuf = m_copym(src, offset, len, how);
433
434 return *new_mbuf == NULL ? ENOMEM : 0;
435 }
436
437 errno_t
438 mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
439 {
440 /* Must set *new_mbuf to NULL in failure case */
441 *new_mbuf = m_dup(src, how);
442
443 return *new_mbuf == NULL ? ENOMEM : 0;
444 }
445
446 errno_t
447 mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
448 {
449 /* Must set *orig to NULL in failure case */
450 *orig = m_prepend_2(*orig, len, how, 0);
451
452 return *orig == NULL ? ENOMEM : 0;
453 }
454
455 errno_t
456 mbuf_split(mbuf_t src, size_t offset,
457 mbuf_how_t how, mbuf_t *new_mbuf)
458 {
459 /* Must set *new_mbuf to NULL in failure case */
460 *new_mbuf = m_split(src, offset, how);
461
462 return *new_mbuf == NULL ? ENOMEM : 0;
463 }
464
465 errno_t
466 mbuf_pullup(mbuf_t *mbuf, size_t len)
467 {
468 /* Must set *mbuf to NULL in failure case */
469 *mbuf = m_pullup(*mbuf, len);
470
471 return *mbuf == NULL ? ENOMEM : 0;
472 }
473
474 errno_t
475 mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
476 {
477 /* Must set *location to NULL in failure case */
478 int new_offset;
479 *location = m_pulldown(src, *offset, len, &new_offset);
480 *offset = new_offset;
481
482 return *location == NULL ? ENOMEM : 0;
483 }
484
/*
 * This function is used to provide m_adj via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
/* Trim 'len' bytes from the head (len > 0) or tail (len < 0) of the chain. */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}
494
/*
 * Adjust m_len by 'amount' (positive or negative), verifying the result
 * stays within the mbuf's buffer bounds.
 */
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		/* Bytes already in use: leading offset plus current length. */
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		/* Cannot shrink below zero length. */
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}
513
514 mbuf_t
515 mbuf_concatenate(mbuf_t dst, mbuf_t src)
516 {
517 if (dst == NULL) {
518 return NULL;
519 }
520
521 m_cat(dst, src);
522
523 /* return dst as is in the current implementation */
524 return dst;
525 }
/*
 * Copy 'len' bytes starting at 'off' out of the chain into out_data.
 * Unlike m_copydata, short chains yield EINVAL instead of panicking.
 */
errno_t
mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	size_t count;
	mbuf_t m = m0;

	/* Oversized requests would overflow the int-based mbuf lengths. */
	if (off >= INT_MAX || len >= INT_MAX) {
		return EINVAL;
	}

	/* Skip whole mbufs until 'off' lands inside 'm'. */
	while (off > 0) {
		if (m == 0) {
			return EINVAL;
		}
		if (off < (size_t)m->m_len) {
			break;
		}
		off -= m->m_len;
		m = m->m_next;
	}
	/* Copy out of each mbuf in turn; 'off' applies only to the first. */
	while (len > 0) {
		if (m == 0) {
			return EINVAL;
		}
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char *)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
561
562 int
563 mbuf_mclhasreference(mbuf_t mbuf)
564 {
565 if ((mbuf->m_flags & M_EXT)) {
566 return m_mclhasreference(mbuf);
567 } else {
568 return 0;
569 }
570 }
571
572
573 /* mbuf header */
/* Next mbuf in this packet's chain (m_next). */
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}
579
580 errno_t
581 mbuf_setnext(mbuf_t mbuf, mbuf_t next)
582 {
583 if (next && ((next)->m_nextpkt != NULL ||
584 (next)->m_type == MT_FREE)) {
585 return EINVAL;
586 }
587 mbuf->m_next = next;
588
589 return 0;
590 }
591
/* Next packet in the packet list (m_nextpkt). */
mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}
597
/* Set the next packet in the packet list (no validation performed). */
void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}
603
/* Number of valid data bytes in this mbuf (m_len). */
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}
609
/* Set m_len directly; callers must keep it within mbuf_maxlen(). */
void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}
615
616 size_t
617 mbuf_maxlen(const mbuf_t mbuf)
618 {
619 if (mbuf->m_flags & M_EXT) {
620 return mbuf->m_ext.ext_size;
621 }
622 return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
623 }
624
/* The mbuf's allocation type (m_type). */
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}
630
/*
 * Change the mbuf's type, keeping the allocator's per-type accounting
 * consistent.  MBUF_TYPE_FREE is reserved for the allocator itself.
 */
errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) {
		return EINVAL;
	}

	m_mchtype(mbuf, new_type);

	return 0;
}
642
/* Return only the KPI-visible flags; private bits are masked out. */
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}
648
/*
 * Replace the mbuf's public flags with 'flags', preserving all private
 * bits.  A change of M_PKTHDR triggers m_reinit() to convert the mbuf
 * between header and non-header layouts.
 */
errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 * in flags argument.
	 * 2. Return error if bits other than public flags are set in passed
	 * flags argument.
	 * Please note that private flag bits must be passed as reset by
	 * kexts, as they must use mbuf_flags KPI to get current set of
	 * mbuf flags and mbuf_flags KPI does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		/* Merge the caller's public bits with preserved private bits. */
		mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
684
/*
 * Update only the flag bits selected by 'mask'.  The mask may cover
 * public, alterable flags only; an M_PKTHDR change is handled through
 * m_reinit() as in mbuf_setflags().
 */
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	/* Reject masks touching private or unalterable flag bits. */
	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR; /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
709
/* Copy the packet header from src to dest; src must have M_PKTHDR set. */
errno_t
mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0) {
		return EINVAL;
	}

	m_copy_pkthdr(dest, src);

	return 0;
}
721
/*
 * Total packet length from the packet header, or 0 when the mbuf has no
 * header or the stored length is (erroneously) negative.
 */
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we Assert for development or debug builds,
	 * also make sure we never return negative length
	 * for release build.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}
739
740 __private_extern__ size_t
741 mbuf_pkthdr_maxlen(mbuf_t m)
742 {
743 size_t maxlen = 0;
744 mbuf_t n = m;
745
746 while (n) {
747 maxlen += mbuf_maxlen(n);
748 n = mbuf_next(n);
749 }
750 return maxlen;
751 }
752
753 void
754 mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
755 {
756 if (len > INT32_MAX) {
757 len = INT32_MAX;
758 }
759
760 mbuf->m_pkthdr.len = len;
761 }
762
/* Adjust the packet-header length by 'amount' (no bounds checking). */
void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
768
/* Interface the packet was received on. */
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 * before returning
	 */
	return mbuf->m_pkthdr.rcvif;
}
778
/* Record the receive interface; the pointer is not validated. */
errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}
786
/* The frame header pointer stashed in the packet header (pkt_hdr). */
void*
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}
792
793 void
794 mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
795 {
796 mbuf->m_pkthdr.pkt_hdr = (void*)header;
797 }
798
/*
 * Declare that an inbound packet's contents were modified, so any
 * hardware-computed checksum results are no longer trustworthy.
 */
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
805
/*
 * Finalize an outbound packet for a client that needs the final bytes:
 * compute in software any checksums that were deferred to hardware.
 * 'pf' selects the protocol family; 'o' is the offset of the IP header.
 */
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
		break;

	default:
		/* Other families have nothing to finalize. */
		break;
	}
}
828
/* Attach an 802.1Q VLAN tag to the packet and mark it valid. */
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}
839
840 errno_t
841 mbuf_get_vlan_tag(
842 mbuf_t mbuf,
843 u_int16_t *vlan)
844 {
845 if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
846 return ENXIO; // No vlan tag set
847 }
848 *vlan = mbuf->m_pkthdr.vlan_tag;
849
850 return 0;
851 }
852
/* Remove the packet's VLAN tag and clear its valid bit. */
errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
862
/* Checksum-offload request bits a KPI client may legitimately set. */
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;
866
/*
 * Record which checksums a driver is asked to compute.  The low 16 bits
 * of csum_flags carry request bits; the high 16 bits are preserved.
 */
errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
880
/* TCP segmentation-offload request bits visible through this KPI. */
static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
    MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
883
884 errno_t
885 mbuf_get_tso_requested(
886 mbuf_t mbuf,
887 mbuf_tso_request_flags_t *request,
888 u_int32_t *value)
889 {
890 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
891 request == NULL || value == NULL) {
892 return EINVAL;
893 }
894
895 *request = mbuf->m_pkthdr.csum_flags;
896 *request &= mbuf_valid_tso_request_flags;
897 if (*request && value != NULL) {
898 *value = mbuf->m_pkthdr.tso_segsz;
899 }
900
901 return 0;
902 }
903
/*
 * Report the checksum work requested of the driver; 'value' is optional.
 * NOTE(review): unlike the TSO variant, mbuf/request are not NULL-checked
 * here — presumably callers guarantee them; confirm before changing.
 */
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}
918
/* Clear the low 16 request bits of csum_flags and the checksum data. */
errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
928
/* Checksum-result bits a driver may legitimately report on receive. */
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;
932
/*
 * Record the checksum work a driver performed on a received packet.
 * The high 16 bits of csum_flags are preserved.
 */
errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
946
/* Report the checksum work performed on a received packet. */
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}
959
/* Clear the low 16 result bits of csum_flags and the checksum data. */
errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
969
970 errno_t
971 mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
972 u_int16_t *csum)
973 {
974 if (mbuf == NULL || length == 0 || csum == NULL ||
975 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
976 return EINVAL;
977 }
978
979 *csum = inet_cksum(mbuf, protocol, offset, length);
980 return 0;
981 }
982
983 errno_t
984 mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
985 u_int16_t *csum)
986 {
987 if (mbuf == NULL || length == 0 || csum == NULL ||
988 (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
989 return EINVAL;
990 }
991
992 *csum = inet6_cksum(mbuf, protocol, offset, length);
993 return 0;
994 }
995
996 /*
997 * Mbuf tag KPIs
998 */
999
1000 #define MTAG_FIRST_ID FIRST_KPI_STR_ID
1001
/* Translate a tag-name string into a numeric tag id, registering it if new. */
errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
1009
/*
 * Allocate and attach a new tag of (id, type) carrying 'length' bytes of
 * client data; *data_p receives a pointer to that data.  Fails with
 * EEXIST when a tag with the same id/type is already attached.
 */
errno_t
mbuf_tag_allocate(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t length,
	mbuf_how_t how,
	void** data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	/* Tag data is limited to 16 bits of length (the 0xffff0000 test). */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length < 1 ||
	    (length & 0xffff0000) != 0 || data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_create(id, type, length, how, mbuf);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	/* Client data lives immediately after the tag header. */
	*data_p = tag + 1;

	return 0;
}
1053
/*
 * Look up a previously attached tag by (id, type) and return pointers to
 * its data and length.  ENOENT when no such tag is attached.
 */
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}
1093
1094 void
1095 mbuf_tag_free(
1096 mbuf_t mbuf,
1097 mbuf_tag_id_t id,
1098 mbuf_tag_type_t type)
1099 {
1100 struct m_tag *tag;
1101 u_int32_t mtag_id_first, mtag_id_last;
1102
1103 /* Sanity check parameters */
1104 (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
1105 NSI_MBUF_TAG);
1106 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
1107 id < mtag_id_first || id > mtag_id_last) {
1108 return;
1109 }
1110
1111 tag = m_tag_locate(mbuf, id, type, NULL);
1112 if (tag == NULL) {
1113 return;
1114 }
1115
1116 m_tag_delete(mbuf, tag);
1117 }
1118
1119 /*
1120 * Maximum length of driver auxiliary data; keep this small to
1121 * fit in a single mbuf to avoid wasting memory, rounded down to
1122 * the nearest 64-bit boundary. This takes into account mbuf
1123 * tag-related (m_taghdr + m_tag) as well m_drvaux_tag structs.
1124 */
1125 #define MBUF_DRVAUX_MAXLEN \
1126 P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
1127 M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
1128
/*
 * Attach driver auxiliary data (family/subfamily tagged, up to
 * MBUF_DRVAUX_MAXLEN bytes) to the packet as a kernel DRVAUX tag.
 * Only one DRVAUX tag may be attached at a time (EEXIST otherwise).
 */
errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
		return EINVAL;
	}

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		return EEXIST;
	}

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof(*p) + length, how, mbuf)) == NULL) {
		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
	}

	/* The m_drvaux_tag header sits right after the m_tag. */
	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	/* Client data follows the m_drvaux_tag header. */
	if (data_p != NULL) {
		*data_p = (p + 1);
	}

	return 0;
}
1171
/*
 * Locate the packet's DRVAUX tag; on success *data_p points at the
 * module-specific data and the optional out-parameters receive the
 * family, subfamily and data length.  ENOENT when no tag is attached.
 */
errno_t
mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
    u_int32_t *length_p, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
		return EINVAL;
	}

	*data_p = NULL;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) {
		return ENOENT;
	}

	/* Must be at least size of m_drvaux_tag */
	VERIFY(tag->m_tag_len >= sizeof(*p));

	p = (struct m_drvaux_tag *)(tag + 1);
	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);

	if (family_p != NULL) {
		*family_p = p->da_family;
	}
	if (subfamily_p != NULL) {
		*subfamily_p = p->da_subfamily;
	}
	if (length_p != NULL) {
		*length_p = p->da_length;
	}

	/* Client data follows the m_drvaux_tag header. */
	*data_p = (p + 1);

	return 0;
}
1210
1211 void
1212 mbuf_del_drvaux(mbuf_t mbuf)
1213 {
1214 struct m_tag *tag;
1215
1216 if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
1217 return;
1218 }
1219
1220 if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
1221 KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
1222 m_tag_delete(mbuf, tag);
1223 }
1224 }
1225
/* mbuf stats */
/* Snapshot the global mbuf allocator statistics into the KPI structure. */
void
mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
1248
/*
 * Allocate a single packet of 'packetlen' bytes spread over at most
 * *maxchunks mbufs (NULL or 0 means no limit).  On success *maxchunks
 * is updated with the number of chunks actually used; ENOBUFS is
 * returned when the chunk limit was too small.
 */
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		/* Distinguish "limit too small" from plain exhaustion. */
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
1280
/*
 * Like mbuf_allocpacket(), but allocates a list of 'numpkts' packets of
 * 'packetlen' bytes each, chained via m_nextpkt.  The per-packet chunk
 * limit semantics match mbuf_allocpacket().
 */
errno_t
mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
    unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (numpkts == 0) {
		error = EINVAL;
		goto out;
	}
	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		/* Distinguish "limit too small" from plain exhaustion. */
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
1315
1316 __private_extern__ size_t
1317 mbuf_pkt_list_len(mbuf_t m)
1318 {
1319 size_t len = 0;
1320 mbuf_t n = m;
1321
1322 while (n) {
1323 len += mbuf_pkthdr_len(n);
1324 n = mbuf_nextpkt(n);
1325 }
1326 return len;
1327 }
1328
1329 __private_extern__ size_t
1330 mbuf_pkt_list_maxlen(mbuf_t m)
1331 {
1332 size_t maxlen = 0;
1333 mbuf_t n = m;
1334
1335 while (n) {
1336 maxlen += mbuf_pkthdr_maxlen(n);
1337 n = mbuf_nextpkt(n);
1338 }
1339 return maxlen;
1340 }
1341
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 *
 * Copies 'len' bytes from 'data' into the chain starting at byte
 * offset 'off', extending the chain as needed.  Returns 0 on success
 * or ENOBUFS if an mbuf allocation fails (the chain may have been
 * partially extended/written at that point).  If the chain has a
 * packet header and the resulting total exceeds pkthdr.len, the
 * header length is bumped to match.
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;	/* head retained for the pkthdr fixup below */
	mbuf_t n;
	int totlen = 0;		/* running count of bytes spanned by the copy */
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	/*
	 * Walk forward to the mbuf containing offset 'off', appending
	 * zero-filled mbufs if the chain ends before the offset is
	 * reached.  On exit, 'off' is the residual offset within 'm'.
	 */
	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	/* Copy data into the chain, growing/extending it as we go. */
	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		/*
		 * If this is the last mbuf and it has trailing space,
		 * grow it in place rather than appending a new mbuf.
		 */
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;	/* account for the skipped leading bytes */
		off = 0;	/* only the first copy starts mid-mbuf */
		totlen += mlen;
		if (len == 0) {
			break;
		}
		/* Out of chain: append a new mbuf (cluster-backed if large). */
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	/* Keep the packet header length consistent with what we spanned. */
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
1426
1427 u_int32_t
1428 mbuf_get_mlen(void)
1429 {
1430 return _MLEN;
1431 }
1432
1433 u_int32_t
1434 mbuf_get_mhlen(void)
1435 {
1436 return _MHLEN;
1437 }
1438
1439 u_int32_t
1440 mbuf_get_minclsize(void)
1441 {
1442 return MHLEN + MLEN;
1443 }
1444
1445 u_int32_t
1446 mbuf_get_traffic_class_max_count(void)
1447 {
1448 return MBUF_TC_MAX;
1449 }
1450
1451 errno_t
1452 mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
1453 {
1454 if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
1455 return EINVAL;
1456 }
1457
1458 *index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
1459 return 0;
1460 }
1461
1462 mbuf_traffic_class_t
1463 mbuf_get_traffic_class(mbuf_t m)
1464 {
1465 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1466 return MBUF_TC_BE;
1467 }
1468
1469 return m_get_traffic_class(m);
1470 }
1471
1472 errno_t
1473 mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
1474 {
1475 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1476 ((u_int32_t)tc >= MBUF_TC_MAX)) {
1477 return EINVAL;
1478 }
1479
1480 return m_set_traffic_class(m, tc);
1481 }
1482
1483 int
1484 mbuf_is_traffic_class_privileged(mbuf_t m)
1485 {
1486 if (m == NULL || !(m->m_flags & M_PKTHDR) ||
1487 !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
1488 return 0;
1489 }
1490
1491 return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
1492 }
1493
1494 u_int32_t
1495 mbuf_get_service_class_max_count(void)
1496 {
1497 return MBUF_SC_MAX_CLASSES;
1498 }
1499
1500 errno_t
1501 mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
1502 {
1503 if (index == NULL || !MBUF_VALID_SC(sc)) {
1504 return EINVAL;
1505 }
1506
1507 *index = MBUF_SCIDX(sc);
1508 return 0;
1509 }
1510
1511 mbuf_svc_class_t
1512 mbuf_get_service_class(mbuf_t m)
1513 {
1514 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1515 return MBUF_SC_BE;
1516 }
1517
1518 return m_get_service_class(m);
1519 }
1520
1521 errno_t
1522 mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
1523 {
1524 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1525 return EINVAL;
1526 }
1527
1528 return m_set_service_class(m, sc);
1529 }
1530
1531 errno_t
1532 mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
1533 {
1534 u_int32_t flags;
1535
1536 if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
1537 return EINVAL;
1538 }
1539
1540 *flagsp = 0;
1541 flags = m->m_pkthdr.pkt_flags;
1542 if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
1543 (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
1544 *flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
1545 }
1546 if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
1547 (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
1548 *flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
1549 }
1550
1551 /* These 2 flags are mutually exclusive */
1552 VERIFY((*flagsp &
1553 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
1554 (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));
1555
1556 return 0;
1557 }
1558
1559 errno_t
1560 mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
1561 {
1562 if (m == NULL || area == NULL || area_len == NULL ||
1563 !(m->m_flags & M_PKTHDR)) {
1564 return EINVAL;
1565 }
1566
1567 *area_len = m_scratch_get(m, area);
1568 return 0;
1569 }
1570
1571 errno_t
1572 mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
1573 {
1574 if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
1575 return EINVAL;
1576 }
1577
1578 if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1579 return EINVAL;
1580 }
1581
1582 *unsent_data = m->m_pkthdr.bufstatus_if +
1583 m->m_pkthdr.bufstatus_sndbuf;
1584 return 0;
1585 }
1586
1587 errno_t
1588 mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
1589 {
1590 if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
1591 !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
1592 return EINVAL;
1593 }
1594
1595 buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
1596 buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
1597 return 0;
1598 }
1599
1600 errno_t
1601 mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
1602 {
1603 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1604 return EINVAL;
1605 }
1606 if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
1607 *retval = 1;
1608 } else {
1609 *retval = 0;
1610 }
1611 return 0;
1612 }
1613
1614 errno_t
1615 mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
1616 {
1617 if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
1618 return EINVAL;
1619 }
1620 if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
1621 *retval = 1;
1622 } else {
1623 *retval = 0;
1624 }
1625 return 0;
1626 }
1627
1628 errno_t
1629 mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
1630 {
1631 if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
1632 return EINVAL;
1633 }
1634
1635 if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
1636 if (valid != NULL) {
1637 *valid = FALSE;
1638 }
1639 *ts = 0;
1640 } else {
1641 if (valid != NULL) {
1642 *valid = TRUE;
1643 }
1644 *ts = m->m_pkthdr.pkt_timestamp;
1645 }
1646 return 0;
1647 }
1648
1649 errno_t
1650 mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
1651 {
1652 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1653 return EINVAL;
1654 }
1655
1656 if (valid == FALSE) {
1657 m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
1658 m->m_pkthdr.pkt_timestamp = 0;
1659 } else {
1660 m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
1661 m->m_pkthdr.pkt_timestamp = ts;
1662 }
1663 return 0;
1664 }
1665
1666 errno_t
1667 mbuf_get_status(mbuf_t m, kern_return_t *status)
1668 {
1669 if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
1670 return EINVAL;
1671 }
1672
1673 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1674 *status = 0;
1675 } else {
1676 *status = m->m_pkthdr.drv_tx_status;
1677 }
1678 return 0;
1679 }
1680
1681 static void
1682 driver_mtag_init(mbuf_t m)
1683 {
1684 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1685 m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
1686 bzero(&m->m_pkthdr.driver_mtag,
1687 sizeof(m->m_pkthdr.driver_mtag));
1688 }
1689 }
1690
1691 errno_t
1692 mbuf_set_status(mbuf_t m, kern_return_t status)
1693 {
1694 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1695 return EINVAL;
1696 }
1697
1698 driver_mtag_init(m);
1699
1700 m->m_pkthdr.drv_tx_status = status;
1701
1702 return 0;
1703 }
1704
1705 errno_t
1706 mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
1707 {
1708 if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
1709 return EINVAL;
1710 }
1711
1712 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1713 *flowid = 0;
1714 } else {
1715 *flowid = m->m_pkthdr.drv_flowid;
1716 }
1717 return 0;
1718 }
1719
1720 errno_t
1721 mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
1722 {
1723 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1724 return EINVAL;
1725 }
1726
1727 driver_mtag_init(m);
1728
1729 m->m_pkthdr.drv_flowid = flowid;
1730
1731 return 0;
1732 }
1733
1734 errno_t
1735 mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
1736 {
1737 if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
1738 data == NULL) {
1739 return EINVAL;
1740 }
1741
1742 if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
1743 *arg = 0;
1744 *data = 0;
1745 } else {
1746 *arg = m->m_pkthdr.drv_tx_compl_arg;
1747 *data = m->m_pkthdr.drv_tx_compl_data;
1748 }
1749 return 0;
1750 }
1751
1752 errno_t
1753 mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
1754 {
1755 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1756 return EINVAL;
1757 }
1758
1759 driver_mtag_init(m);
1760
1761 m->m_pkthdr.drv_tx_compl_arg = arg;
1762 m->m_pkthdr.drv_tx_compl_data = data;
1763
1764 return 0;
1765 }
1766
1767 static u_int32_t
1768 get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
1769 {
1770 u_int32_t i;
1771
1772 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1773 if (mbuf_tx_compl_table[i] == callback) {
1774 return i;
1775 }
1776 }
1777 return UINT32_MAX;
1778 }
1779
1780 static u_int32_t
1781 get_tx_compl_callback_index(mbuf_tx_compl_func callback)
1782 {
1783 u_int32_t i;
1784
1785 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1786
1787 i = get_tx_compl_callback_index_locked(callback);
1788
1789 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1790
1791 return i;
1792 }
1793
1794 mbuf_tx_compl_func
1795 m_get_tx_compl_callback(u_int32_t idx)
1796 {
1797 mbuf_tx_compl_func cb;
1798
1799 if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
1800 ASSERT(0);
1801 return NULL;
1802 }
1803 lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
1804 cb = mbuf_tx_compl_table[idx];
1805 lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
1806 return cb;
1807 }
1808
1809 errno_t
1810 mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
1811 {
1812 int i;
1813 errno_t error;
1814
1815 if (callback == NULL) {
1816 return EINVAL;
1817 }
1818
1819 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
1820
1821 i = get_tx_compl_callback_index_locked(callback);
1822 if (i != -1) {
1823 error = EEXIST;
1824 goto unlock;
1825 }
1826
1827 /* assume the worst */
1828 error = ENOSPC;
1829 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1830 if (mbuf_tx_compl_table[i] == NULL) {
1831 mbuf_tx_compl_table[i] = callback;
1832 error = 0;
1833 goto unlock;
1834 }
1835 }
1836 unlock:
1837 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);
1838
1839 return error;
1840 }
1841
1842 errno_t
1843 mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
1844 {
1845 int i;
1846 errno_t error;
1847
1848 if (callback == NULL) {
1849 return EINVAL;
1850 }
1851
1852 lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);
1853
1854 /* assume the worst */
1855 error = ENOENT;
1856 for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
1857 if (mbuf_tx_compl_table[i] == callback) {
1858 mbuf_tx_compl_table[i] = NULL;
1859 error = 0;
1860 goto unlock;
1861 }
1862 }
1863 unlock:
1864 lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);
1865
1866 return error;
1867 }
1868
1869 errno_t
1870 mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
1871 {
1872 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1873 return EINVAL;
1874 }
1875
1876 if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
1877 *requested = FALSE;
1878 } else {
1879 *requested = TRUE;
1880 }
1881 return 0;
1882 }
1883
/*
 * Request a transmit-completion timestamp for this packet, to be
 * delivered to 'callback' (which must already be registered via
 * mbuf_register_tx_compl_callback).  On success, '*pktid' receives the
 * per-packet completion context used to correlate the callback.
 * Returns EINVAL on bad parameters, ENOENT if the callback is not
 * registered.
 */
errno_t
mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
    mbuf_tx_compl_func callback)
{
	size_t i;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
	    pktid == NULL) {
		return EINVAL;
	}

	/* UINT32_MAX sentinel means the callback is not in the table */
	i = get_tx_compl_callback_index(callback);
	if (i == UINT32_MAX) {
		return ENOENT;
	}

#if (DEBUG || DEVELOPMENT)
	/*
	 * NOTE(review): this bounds 'i' by sizeof(pkt_compl_callbacks),
	 * not by the bit width of that field — presumably intentional
	 * given MAX_MBUF_TX_COMPL_FUNC; confirm against the pkthdr
	 * definition.
	 */
	VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
#endif /* (DEBUG || DEVELOPMENT) */

	/* First request on this packet: reset mask, assign a context id */
	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		m->m_pkthdr.pkt_compl_callbacks = 0;
		m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
		m->m_pkthdr.pkt_compl_context =
		    atomic_add_32_ov(&mbuf_tx_compl_index, 1);

#if (DEBUG || DEVELOPMENT)
		if (mbuf_tx_compl_debug != 0) {
			OSIncrementAtomic64(&mbuf_tx_compl_outstanding);
		}
#endif /* (DEBUG || DEVELOPMENT) */
	}
	/* Mark this callback's slot in the per-packet mask */
	m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
	*pktid = m->m_pkthdr.pkt_compl_context;

	return 0;
}
1921
/*
 * Deliver transmit-completion notifications for 'm': invoke every
 * registered callback whose bit is set in the packet's completion
 * mask, then clear the mask.  'ifp' may be NULL (treated as an aborted
 * completion in debug accounting).  No-op unless a completion
 * timestamp was requested on the packet.
 */
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL) {
		return;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		return;
	}

#if (DEBUG || DEVELOPMENT)
	/*
	 * Debug aid: if the driver never stamped the packet, synthesize
	 * a timestamp here so the callback sees a non-zero value.
	 */
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
			continue;
		}

		/*
		 * Snapshot the table entry under the shared lock; the
		 * callback itself is invoked without the lock held.  The
		 * entry may have become NULL if it was unregistered.
		 */
		lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp,
			    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
			    m->m_pkthdr.pkt_timestamp: 0,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
	/* All notifications delivered; clear the per-packet mask. */
	m->m_pkthdr.pkt_compl_callbacks = 0;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0) {
		OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
		if (ifp == NULL) {
			atomic_add_64(&mbuf_tx_compl_aborted, 1);
		}
	}
#endif /* (DEBUG || DEVELOPMENT) */
}
1978
1979 errno_t
1980 mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
1981 {
1982 if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
1983 return EINVAL;
1984 }
1985
1986 *is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);
1987
1988 return 0;
1989 }
1990
1991 errno_t
1992 mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
1993 {
1994 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
1995 return EINVAL;
1996 }
1997
1998 if (is_keepalive) {
1999 m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
2000 } else {
2001 m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
2002 }
2003
2004 return 0;
2005 }