/*
 * Copyright (c) 2004-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/mcache.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>

#include <netinet/in.h>
#include <netinet/ip_var.h>

#include "net/net_str_id.h"
/* mbuf flags visible to KPI clients; do not add private flags here */
static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
    MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
    MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);
/* Unalterable mbuf flags */
static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
#define MAX_MBUF_TX_COMPL_FUNC 32
mbuf_tx_compl_func mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC];
extern lck_rw_t *mbuf_tx_compl_tbl_lock;
u_int32_t mbuf_tx_compl_index = 0;
#if (DEVELOPMENT || DEBUG)
int mbuf_tx_compl_debug = 0;
SInt64 mbuf_tx_compl_outstanding __attribute__((aligned(8))) = 0;
u_int64_t mbuf_tx_compl_aborted __attribute__((aligned(8))) = 0;

SYSCTL_DECL(_kern_ipc);
SYSCTL_NODE(_kern_ipc, OID_AUTO, mbtxcf,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &mbuf_tx_compl_debug, 0, "");
SYSCTL_INT(_kern_ipc_mbtxcf, OID_AUTO, index,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_index, 0, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, outstanding,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_outstanding, "");
SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted,
    CTLFLAG_RD | CTLFLAG_LOCKED, &mbuf_tx_compl_aborted, "");
#endif /* (DEBUG || DEVELOPMENT) */
void *
mbuf_data(mbuf_t mbuf)
{
	return mbuf->m_data;
}

void *
mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_buf;
	}
	if (mbuf->m_flags & M_PKTHDR) {
		return mbuf->m_pktdat;
	}
	return mbuf->m_dat;
}
errno_t
mbuf_setdata(mbuf_t mbuf, void *data, size_t len)
{
	size_t start = (size_t)((char *)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen) {
		return EINVAL;
	}
	mbuf->m_data = data;
	mbuf->m_len = len;

	return 0;
}
errno_t
mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) {
		return ENOTSUP;
	}
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data +=
	    ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1));

	return 0;
}
/*
 * This function is used to provide mcl_to_paddr via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
addr64_t
mbuf_data_to_physical(void *ptr)
{
	return (addr64_t)mcl_to_paddr(ptr);
}
errno_t
mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return *mbuf == NULL ? ENOMEM : 0;
}

errno_t
mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return *mbuf == NULL ? ENOMEM : 0;
}
errno_t
mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t),
    size_t extsize, caddr_t extarg)
{
	if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) {
		return EINVAL;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, extsize, extarg, how, 0)) == NULL) {
		return ENOMEM;
	}

	return 0;
}
errno_t
mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
    void (*extfree)(caddr_t, u_int, caddr_t), size_t *size)
{
	caddr_t extbuf = NULL;
	errno_t err;

	if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) {
		return EINVAL;
	}

	if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) {
		return err;
	}

	if ((*mbuf = m_clattach(*mbuf, type, extbuf,
	    extfree, *size, NULL, how, 1)) == NULL) {
		mbuf_freecluster(extbuf, *size);
		return ENOMEM;
	}

	return 0;
}
int
mbuf_ring_cluster_is_active(mbuf_t mbuf)
{
	return m_ext_paired_is_active(mbuf);
}

errno_t
mbuf_ring_cluster_activate(mbuf_t mbuf)
{
	if (mbuf_ring_cluster_is_active(mbuf)) {
		return EBUSY;
	}

	m_ext_paired_activate(mbuf);
	return 0;
}
errno_t
mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop)
{
	if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY;
}

errno_t
mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop)
{
	if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) {
		return EINVAL;
	}

	*prop = m_ext_get_prop(mbuf);
	return 0;
}
errno_t
mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
{
	if (size == NULL || *size == 0 || addr == NULL) {
		return EINVAL;
	}
	*addr = NULL;

	/* Jumbo cluster pool not available? */
	if (*size > MBIGCLBYTES && njcl == 0) {
		return ENOTSUP;
	}

	if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) {
		*size = MCLBYTES;
	} else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
	    (*addr = m_bigalloc(how)) != NULL) {
		*size = MBIGCLBYTES;
	} else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
	    (*addr = m_16kalloc(how)) != NULL) {
		*size = M16KCLBYTES;
	} else {
		*size = 0;
	}

	if (*addr == NULL) {
		return ENOMEM;
	}

	return 0;
}
void
mbuf_freecluster(caddr_t addr, size_t size)
{
	if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) {
		panic("%s: invalid size (%ld) for cluster %p", __func__,
		    size, (void *)addr);
	}

	if (size == MCLBYTES) {
		m_mclfree(addr);
	} else if (size == MBIGCLBYTES) {
		m_bigfree(addr, MBIGCLBYTES, NULL);
	} else if (njcl > 0) {
		m_16kfree(addr, M16KCLBYTES, NULL);
	} else {
		panic("%s: freeing jumbo cluster to an empty pool", __func__);
	}
}
errno_t
mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL) {
			return ENOMEM;
		}
		created = 1;
	}
	/*
	 * At the time this code was written, m_{mclget,mbigget,m16kget}
	 * would always return the same value that was passed in to it.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == MBIGCLBYTES) {
		*mbuf = m_mbigget(*mbuf, how);
	} else if (size == M16KCLBYTES) {
		if (njcl > 0) {
			*mbuf = m_m16kget(*mbuf, how);
		} else {
			/* Jumbo cluster pool not available? */
			error = ENOTSUP;
			goto out;
		}
	} else {
		error = EINVAL;
		goto out;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
out:
	if (created && error != 0) {
		mbuf_freem(*mbuf);
		*mbuf = NULL;
	}
	return error;
}
errno_t
mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL) {
		return EINVAL;
	}
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error != 0) {
			return error;
		}
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_freem(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) {
		error = ENOMEM;
	}
	return error;
}
errno_t
mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK) {
			error = ENOMEM;
		} else {
			error = EWOULDBLOCK;
		}
	}

	return error;
}
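/*
 * Illustrative sketch (not part of the original file): unlike
 * mbuf_gethdr(), mbuf_getpacket() returns a packet header mbuf with a
 * cluster already attached, so a caller needing contiguous space can do:
 *
 *	mbuf_t pkt = NULL;
 *	if (mbuf_getpacket(MBUF_WAITOK, &pkt) == 0) {
 *		void *buf = mbuf_data(pkt);
 *		... fill up to mbuf_maxlen(pkt) bytes at buf ...
 *		mbuf_freem(pkt);
 *	}
 */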
/*
 * This function is used to provide m_free via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
mbuf_t
mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}

/*
 * This function is used to provide m_freem via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}

int
mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}

size_t
mbuf_leadingspace(const mbuf_t mbuf)
{
	return M_LEADINGSPACE(mbuf);
}
/*
 * This function is used to provide m_trailingspace via symbol indirection,
 * please avoid any change in behavior or remove the indirection in
 * config/Unsupported*
 */
size_t
mbuf_trailingspace(const mbuf_t mbuf)
{
	return M_TRAILINGSPACE(mbuf);
}
errno_t
mbuf_copym(const mbuf_t src, size_t offset, size_t len,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}
errno_t
mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}
errno_t
mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how, 0);

	return *orig == NULL ? ENOMEM : 0;
}
errno_t
mbuf_split(mbuf_t src, size_t offset,
    mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return *new_mbuf == NULL ? ENOMEM : 0;
}
errno_t
mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return *mbuf == NULL ? ENOMEM : 0;
}
errno_t
mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;

	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return *location == NULL ? ENOMEM : 0;
}
/*
 * This function is used to provide m_adj via symbol indirection, please avoid
 * any change in behavior or remove the indirection in config/Unsupported*
 */
void
mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}
errno_t
mbuf_adjustlen(mbuf_t m, int amount)
{
	/* Verify m_len will be valid after adding amount */
	if (amount > 0) {
		int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
		    m->m_len;

		if ((size_t)(amount + used) > mbuf_maxlen(m)) {
			return EINVAL;
		}
	} else if (-amount > m->m_len) {
		return EINVAL;
	}

	m->m_len += amount;
	return 0;
}
mbuf_t
mbuf_concatenate(mbuf_t dst, mbuf_t src)
{
	if (dst == NULL) {
		return NULL;
	}

	m_cat(dst, src);

	/* return dst as is in the current implementation */
	return dst;
}
errno_t
mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	size_t count;
	mbuf_t m = m0;

	if (off >= INT_MAX || len >= INT_MAX) {
		return EINVAL;
	}

	while (off > 0) {
		if (m == NULL) {
			return EINVAL;
		}
		if (off < (size_t)m->m_len) {
			break;
		}
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == NULL) {
			return EINVAL;
		}
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char *)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
int
mbuf_mclhasreference(mbuf_t mbuf)
{
	if ((mbuf->m_flags & M_EXT)) {
		return m_mclhasreference(mbuf);
	} else {
		return 0;
	}
}
mbuf_t
mbuf_next(const mbuf_t mbuf)
{
	return mbuf->m_next;
}

errno_t
mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
	    (next)->m_type == MT_FREE)) {
		return EINVAL;
	}
	mbuf->m_next = next;

	return 0;
}

mbuf_t
mbuf_nextpkt(const mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

void
mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}
size_t
mbuf_len(const mbuf_t mbuf)
{
	return mbuf->m_len;
}

void
mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}

size_t
mbuf_maxlen(const mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT) {
		return mbuf->m_ext.ext_size;
	}
	return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf));
}
mbuf_type_t
mbuf_type(const mbuf_t mbuf)
{
	return mbuf->m_type;
}

errno_t
mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) {
		return EINVAL;
	}

	m_mchtype(mbuf, new_type);

	return 0;
}
mbuf_flags_t
mbuf_flags(const mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}

errno_t
mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	errno_t ret = 0;
	mbuf_flags_t oflags = mbuf->m_flags;

	/*
	 * 1. Return error if public but un-alterable flags are changed
	 *    in the passed flags argument.
	 * 2. Return error if bits other than public flags are set in the
	 *    passed flags argument.
	 *    Please note that private flag bits must be passed as reset by
	 *    kexts, as they must use the mbuf_flags KPI to get the current
	 *    set of mbuf flags and mbuf_flags does not expose private flags.
	 */
	if ((flags ^ oflags) & mbuf_cflags_mask) {
		ret = EINVAL;
	} else if (flags & ~mbuf_flags_mask) {
		ret = EINVAL;
	} else {
		mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
errno_t
mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	errno_t ret = 0;

	if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
		ret = EINVAL;
	} else {
		mbuf_flags_t oflags = mbuf->m_flags;
		mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
		/*
		 * If M_PKTHDR bit has changed, we have work to do;
		 * m_reinit() will take care of setting/clearing the
		 * bit, as well as the rest of bookkeeping.
		 */
		if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
			mbuf->m_flags ^= M_PKTHDR;      /* restore */
			ret = m_reinit(mbuf,
			    (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
		}
	}

	return ret;
}
errno_t
mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0) {
		return EINVAL;
	}

	m_copy_pkthdr(dest, src);

	return 0;
}
size_t
mbuf_pkthdr_len(const mbuf_t mbuf)
{
	if (((mbuf)->m_flags & M_PKTHDR) == 0) {
		return 0;
	}
	/*
	 * While we assert for development or debug builds,
	 * also make sure we never return a negative length
	 * for release builds.
	 */
	ASSERT(mbuf->m_pkthdr.len >= 0);
	if (mbuf->m_pkthdr.len < 0) {
		return 0;
	}
	return mbuf->m_pkthdr.len;
}
__private_extern__ size_t
mbuf_pkthdr_maxlen(mbuf_t m)
{
	size_t maxlen = 0;
	mbuf_t n = m;

	while (n) {
		maxlen += mbuf_maxlen(n);
		n = mbuf_next(n);
	}
	return maxlen;
}
errno_t
mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	if (len > INT32_MAX) {
		return EINVAL;
	}

	mbuf->m_pkthdr.len = len;

	return 0;
}

void
mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
{
	mbuf->m_pkthdr.len += amount;
}
ifnet_t
mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
	/*
	 * If we reference count ifnets, we should take a reference here
	 */
	return mbuf->m_pkthdr.rcvif;
}

errno_t
mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet;
	return 0;
}
void *
mbuf_pkthdr_header(const mbuf_t mbuf)
{
	return mbuf->m_pkthdr.pkt_hdr;
}

void
mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.pkt_hdr = (void *)header;
}
void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}
void
mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
	/* Generate the packet in software, client needs it */
	switch (pf) {
	case PF_INET:
		(void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
		break;

	case PF_INET6:
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; indicate that the callee
		 * should skip such case by setting optlen to -1.
		 */
		(void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
		break;

	default:
		break;
	}
}
errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) {
		return ENXIO;   /* No vlan tag set */
	}
	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
    MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
    MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;

errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
    MBUF_TSO_IPV4 | MBUF_TSO_IPV6;

errno_t
mbuf_get_tso_requested(
	mbuf_t mbuf,
	mbuf_tso_request_flags_t *request,
	u_int32_t *value)
{
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    request == NULL || value == NULL) {
		return EINVAL;
	}

	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_tso_request_flags;
	if (*request && value != NULL) {
		*value = mbuf->m_pkthdr.tso_segsz;
	}

	return 0;
}
errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
    MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;

errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags =
	    (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}
errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed =
	    mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}
errno_t
mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
		return EINVAL;
	}

	*csum = inet_cksum(mbuf, protocol, offset, length);
	return 0;
}

errno_t
mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
    u_int16_t *csum)
{
	if (mbuf == NULL || length == 0 || csum == NULL ||
	    (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) {
		return EINVAL;
	}

	*csum = inet6_cksum(mbuf, protocol, offset, length);
	return 0;
}
#define MTAG_FIRST_ID FIRST_KPI_STR_ID

errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
errno_t
mbuf_tag_allocate(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t length,
	mbuf_how_t how,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length < 1 ||
	    (length & 0xffff0000) != 0 || data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_create(id, type, length, how, mbuf);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;

	return 0;
}
errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t *length,
	void **data_p)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	if (length != NULL) {
		*length = 0;
	}
	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last || length == NULL ||
	    data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}
void
mbuf_tag_free(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type)
{
	struct m_tag *tag;
	u_int32_t mtag_id_first, mtag_id_last;

	/* Sanity check parameters */
	(void) net_str_id_first_last(&mtag_id_first, &mtag_id_last,
	    NSI_MBUF_TAG);
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
	    id < mtag_id_first || id > mtag_id_last) {
		return;
	}

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
}
/*
 * Maximum length of driver auxiliary data; keep this small to
 * fit in a single mbuf to avoid wasting memory, rounded down to
 * the nearest 64-bit boundary.  This takes into account mbuf
 * tag-related (m_taghdr + m_tag) as well as m_drvaux_tag structs.
 */
#define MBUF_DRVAUX_MAXLEN                                      \
	P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) -           \
	M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
errno_t
mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
    u_int32_t subfamily, size_t length, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
	    length == 0 || length > MBUF_DRVAUX_MAXLEN) {
		return EINVAL;
	}

	if (data_p != NULL) {
		*data_p = NULL;
	}

	/* Check if one is already associated */
	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		return EEXIST;
	}

	/* Tag is (m_drvaux_tag + module specific data) */
	if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
	    sizeof(*p) + length, how, mbuf)) == NULL) {
		return (how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK;
	}

	p = (struct m_drvaux_tag *)(tag + 1);
	p->da_family = family;
	p->da_subfamily = subfamily;
	p->da_length = length;

	/* Associate the tag */
	m_tag_prepend(mbuf, tag);

	if (data_p != NULL) {
		*data_p = (p + 1);
	}

	return 0;
}
errno_t
mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
    u_int32_t *length_p, void **data_p)
{
	struct m_drvaux_tag *p;
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) {
		return EINVAL;
	}

	*data_p = NULL;

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) {
		return ENOENT;
	}

	/* Must be at least size of m_drvaux_tag */
	VERIFY(tag->m_tag_len >= sizeof(*p));

	p = (struct m_drvaux_tag *)(tag + 1);
	VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);

	if (family_p != NULL) {
		*family_p = p->da_family;
	}
	if (subfamily_p != NULL) {
		*subfamily_p = p->da_subfamily;
	}
	if (length_p != NULL) {
		*length_p = p->da_length;
	}

	*data_p = (p + 1);

	return 0;
}
void
mbuf_del_drvaux(mbuf_t mbuf)
{
	struct m_tag *tag;

	if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) {
		return;
	}

	if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) {
		m_tag_delete(mbuf, tag);
	}
}
void
mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}
errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks,
    mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == NULL) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
errno_t
mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen,
    unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (numpkts == 0) {
		error = EINVAL;
		goto out;
	}
	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen,
	    maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == NULL) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks) {
			error = ENOBUFS;
		} else {
			error = ENOMEM;
		}
	} else {
		if (maxchunks) {
			*maxchunks = numchunks;
		}
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}
__private_extern__ size_t
mbuf_pkt_list_len(mbuf_t m)
{
	size_t len = 0;
	mbuf_t n = m;

	while (n) {
		len += mbuf_pkthdr_len(n);
		n = mbuf_nextpkt(n);
	}
	return len;
}

__private_extern__ size_t
mbuf_pkt_list_maxlen(mbuf_t m)
{
	size_t maxlen = 0;
	mbuf_t n = m;

	while (n) {
		maxlen += mbuf_pkthdr_maxlen(n);
		n = mbuf_nextpkt(n);
	}
	return maxlen;
}
/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;
	mbuf_t n;
	int totlen = 0;
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL) {
		return EINVAL;
	}

	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL &&
		    mbuf_trailingspace(m) > 0) {
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char *)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0) {
			break;
		}
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/*
				 * cluster allocation failure is okay,
				 * we can grow the chain
				 */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) {
		m_start->m_pkthdr.len = totlen;
	}

	return result;
}
u_int32_t
mbuf_get_mhlen(void)
{
	return _MHLEN;
}

u_int32_t
mbuf_get_minclsize(void)
{
	return MHLEN + MLEN;
}
u_int32_t
mbuf_get_traffic_class_max_count(void)
{
	return MBUF_TC_MAX;
}

errno_t
mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
{
	if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) {
		return EINVAL;
	}

	*index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
	return 0;
}
mbuf_traffic_class_t
mbuf_get_traffic_class(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return MBUF_TC_BE;
	}

	return m_get_traffic_class(m);
}
errno_t
mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
	    ((u_int32_t)tc >= MBUF_TC_MAX)) {
		return EINVAL;
	}

	return m_set_traffic_class(m, tc);
}
int
mbuf_is_traffic_class_privileged(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) ||
	    !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) {
		return 0;
	}

	return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0;
}
u_int32_t
mbuf_get_service_class_max_count(void)
{
	return MBUF_SC_MAX_CLASSES;
}

errno_t
mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
{
	if (index == NULL || !MBUF_VALID_SC(sc)) {
		return EINVAL;
	}

	*index = MBUF_SCIDX(sc);
	return 0;
}
mbuf_svc_class_t
mbuf_get_service_class(mbuf_t m)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return MBUF_SC_BE;
	}

	return m_get_service_class(m);
}

errno_t
mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	return m_set_service_class(m, sc);
}
errno_t
mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
{
	u_int32_t flags;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) {
		return EINVAL;
	}

	*flagsp = 0;
	flags = m->m_pkthdr.pkt_flags;
	if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) {
		*flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
	}
	if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) ==
	    (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) {
		*flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
	}

	/* These 2 flags are mutually exclusive */
	VERIFY((*flagsp &
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
	    (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));

	return 0;
}
errno_t
mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
{
	if (m == NULL || area == NULL || area_len == NULL ||
	    !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	*area_len = m_scratch_get(m, area);
	return 0;
}
errno_t
mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data)
{
	if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
		return EINVAL;
	}

	*unsent_data = m->m_pkthdr.bufstatus_if +
	    m->m_pkthdr.bufstatus_sndbuf;
	return 0;
}
errno_t
mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status)
{
	if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) ||
	    !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) {
		return EINVAL;
	}

	buf_status->buf_interface = m->m_pkthdr.bufstatus_if;
	buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf;
	return 0;
}
errno_t
mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval)
{
	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}
	if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) {
		*retval = 1;
	} else {
		*retval = 0;
	}
	return 0;
}

errno_t
mbuf_last_pkt(const mbuf_t m, u_int32_t *retval)
{
	if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}
	if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
		*retval = 1;
	} else {
		*retval = 0;
	}
	return 0;
}
errno_t
mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
		if (valid != NULL) {
			*valid = FALSE;
		}
		*ts = 0;
	} else {
		if (valid != NULL) {
			*valid = TRUE;
		}
		*ts = m->m_pkthdr.pkt_timestamp;
	}
	return 0;
}
errno_t
mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (valid == FALSE) {
		m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
		m->m_pkthdr.pkt_timestamp = 0;
	} else {
		m->m_pkthdr.pkt_flags |= PKTF_TS_VALID;
		m->m_pkthdr.pkt_timestamp = ts;
	}
	return 0;
}
errno_t
mbuf_get_status(mbuf_t m, kern_return_t *status)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		*status = 0;
	} else {
		*status = m->m_pkthdr.drv_tx_status;
	}
	return 0;
}
static void
driver_mtag_init(mbuf_t m)
{
	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		m->m_pkthdr.pkt_flags |= PKTF_DRIVER_MTAG;
		bzero(&m->m_pkthdr.driver_mtag,
		    sizeof(m->m_pkthdr.driver_mtag));
	}
}
errno_t
mbuf_set_status(mbuf_t m, kern_return_t status)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	driver_mtag_init(m);

	m->m_pkthdr.drv_tx_status = status;
	return 0;
}
errno_t
mbuf_get_flowid(mbuf_t m, u_int16_t *flowid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		*flowid = 0;
	} else {
		*flowid = m->m_pkthdr.drv_flowid;
	}
	return 0;
}

errno_t
mbuf_set_flowid(mbuf_t m, u_int16_t flowid)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	driver_mtag_init(m);

	m->m_pkthdr.drv_flowid = flowid;
	return 0;
}
errno_t
mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL ||
	    data == NULL) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) {
		*arg = 0;
		*data = 0;
	} else {
		*arg = m->m_pkthdr.drv_tx_compl_arg;
		*data = m->m_pkthdr.drv_tx_compl_data;
	}
	return 0;
}

errno_t
mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	driver_mtag_init(m);

	m->m_pkthdr.drv_tx_compl_arg = arg;
	m->m_pkthdr.drv_tx_compl_data = data;
	return 0;
}
static u_int32_t
get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback)
{
	u_int32_t i;

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == callback) {
			return i;
		}
	}
	return UINT32_MAX;
}
static u_int32_t
get_tx_compl_callback_index(mbuf_tx_compl_func callback)
{
	u_int32_t i;

	lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);

	i = get_tx_compl_callback_index_locked(callback);

	lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

	return i;
}
mbuf_tx_compl_func
m_get_tx_compl_callback(u_int32_t idx)
{
	mbuf_tx_compl_func cb;

	if (idx >= MAX_MBUF_TX_COMPL_FUNC) {
		return NULL;
	}
	lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
	cb = mbuf_tx_compl_table[idx];
	lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);
	return cb;
}
errno_t
mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback)
{
	errno_t error;
	u_int32_t i;

	if (callback == NULL) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

	i = get_tx_compl_callback_index_locked(callback);
	if (i != UINT32_MAX) {
		error = EEXIST;
		goto unlock;
	}

	/* assume the worst */
	error = ENOSPC;
	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == NULL) {
			mbuf_tx_compl_table[i] = callback;
			error = 0;
			goto unlock;
		}
	}
unlock:
	lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

	return error;
}
errno_t
mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback)
{
	errno_t error;
	u_int32_t i;

	if (callback == NULL) {
		return EINVAL;
	}

	lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock);

	/* assume the worst */
	error = ENOENT;
	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		if (mbuf_tx_compl_table[i] == callback) {
			mbuf_tx_compl_table[i] = NULL;
			error = 0;
			goto unlock;
		}
	}
unlock:
	lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock);

	return error;
}
errno_t
mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		*requested = FALSE;
	} else {
		*requested = TRUE;
	}
	return 0;
}
errno_t
mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid,
    mbuf_tx_compl_func callback)
{
	size_t i;

	if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL ||
	    pktid == NULL) {
		return EINVAL;
	}

	i = get_tx_compl_callback_index(callback);
	if (i == UINT32_MAX) {
		return ENOENT;
	}

#if (DEBUG || DEVELOPMENT)
	VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks));
#endif /* (DEBUG || DEVELOPMENT) */

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		m->m_pkthdr.pkt_compl_callbacks = 0;
		m->m_pkthdr.pkt_flags |= PKTF_TX_COMPL_TS_REQ;
		m->m_pkthdr.pkt_compl_context =
		    atomic_add_32_ov(&mbuf_tx_compl_index, 1);

#if (DEBUG || DEVELOPMENT)
		if (mbuf_tx_compl_debug != 0) {
			OSIncrementAtomic64(&mbuf_tx_compl_outstanding);
		}
#endif /* (DEBUG || DEVELOPMENT) */
	}
	m->m_pkthdr.pkt_compl_callbacks |= (1 << i);
	*pktid = m->m_pkthdr.pkt_compl_context;

	return 0;
}
void
m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp)
{
	int i;

	if (m == NULL) {
		return;
	}

	if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) {
		return;
	}

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0 && ifp != NULL &&
	    (ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0 &&
	    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) {
		struct timespec now;

		nanouptime(&now);
		net_timernsec(&now, &m->m_pkthdr.pkt_timestamp);
	}
#endif /* (DEBUG || DEVELOPMENT) */

	for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) {
		mbuf_tx_compl_func callback;

		if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) {
			continue;
		}

		lck_rw_lock_shared(mbuf_tx_compl_tbl_lock);
		callback = mbuf_tx_compl_table[i];
		lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock);

		if (callback != NULL) {
			callback(m->m_pkthdr.pkt_compl_context,
			    ifp,
			    (m->m_pkthdr.pkt_flags & PKTF_TS_VALID) ?
			    m->m_pkthdr.pkt_timestamp : 0,
			    m->m_pkthdr.drv_tx_compl_arg,
			    m->m_pkthdr.drv_tx_compl_data,
			    m->m_pkthdr.drv_tx_status);
		}
	}
	m->m_pkthdr.pkt_compl_callbacks = 0;

#if (DEBUG || DEVELOPMENT)
	if (mbuf_tx_compl_debug != 0) {
		OSDecrementAtomic64(&mbuf_tx_compl_outstanding);
		if (ifp == NULL) {
			atomic_add_64(&mbuf_tx_compl_aborted, 1);
		}
	}
#endif /* (DEBUG || DEVELOPMENT) */
}
errno_t
mbuf_get_keepalive_flag(mbuf_t m, boolean_t *is_keepalive)
{
	if (m == NULL || is_keepalive == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	*is_keepalive = (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE);

	return 0;
}

errno_t
mbuf_set_keepalive_flag(mbuf_t m, boolean_t is_keepalive)
{
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		return EINVAL;
	}

	if (is_keepalive) {
		m->m_pkthdr.pkt_flags |= PKTF_KEEPALIVE;
	} else {
		m->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE;
	}

	return 0;
}